Dataset columns (name, type, value/length range):
  hunk              dict
  file              string (lengths 0 – 11.8M)
  file_path         string (lengths 2 – 234)
  label             int64 (values 0 / 1)
  commit_url        string (lengths 74 – 103)
  dependency_score  sequence (length 5)
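Each row below pairs an edit hunk with one candidate file from the same commit. A minimal Go sketch of the row layout, with struct and field names inferred from the JSON in the rows (they are not given by the dump itself):

package loader

import "encoding/json"

// Hunk mirrors the "hunk" column: a window of source lines, a per-line label,
// the replacement lines, and where the edit starts in the target file.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// Record mirrors one full row: the hunk, the candidate file's contents and
// path, the 0/1 label, the commit URL, and the five dependency scores.
type Record struct {
	Hunk            Hunk      `json:"hunk"`
	File            string    `json:"file"`
	FilePath        string    `json:"file_path"`
	Label           int       `json:"label"`
	CommitURL       string    `json:"commit_url"`
	DependencyScore []float64 `json:"dependency_score"`
}

// DecodeRecord parses a single JSON-encoded row; it assumes rows are
// serialized as one JSON object per row with the field names above.
func DecodeRecord(raw []byte) (Record, error) {
	var r Record
	err := json.Unmarshal(raw, &r)
	return r, err
}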
{ "id": 6, "code_window": [ "\trequire.Contains(t, out, \"will be updated\", \"no replica updates were recorded\")\n", "\trequire.Contains(t, out, fmt.Sprintf(\"Updated store(s): s%d\", node1ID),\n", "\t\t\"apply plan was not executed on requested node\")\n", "\n", "\ttcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n", "\t\t},\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 244 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package cli import ( "context" "fmt" "os" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/listenerutil" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) // TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection. // This is done by running three node cluster with disk backed storage, // stopping it and verifying content of collected replica info file. // This check verifies that: // // we successfully iterate requested stores, // data is written in expected location, // data contains info only about stores requested. func TestCollectInfoFromMultipleStores(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, 1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}}, 2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}}, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) // Wait up-replication. require.NoError(t, tc.WaitForFullReplication()) // Shutdown. tc.Stopper().Stop(ctx) replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1", "--store=" + dir + "/store-2", replicaInfoFileName}) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} for _, r := range replicas.LocalInfo[0].Replicas { stores[r.StoreID] = struct{}{} } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestCollectInfoFromOnlineCluster verifies that given a test cluster with // one stopped node, we can collect replica info and metadata from remaining // nodes using an admin recovery call. 
func TestCollectInfoFromOnlineCluster(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ StoreSpecs: []base.StoreSpec{{InMemory: true}}, Insecure: true, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) require.NoError(t, tc.WaitForFullReplication()) tc.ToggleReplicateQueues(false) r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases") var totalRanges int require.NoError(t, r.Scan(&totalRanges), "failed to query range count") tc.StopServer(0) replicaInfoFileName := dir + "/all-nodes.json" c.RunWithArgs([]string{ "debug", "recover", "collect-info", "--insecure", "--host", tc.Server(2).AdvRPCAddr(), replicaInfoFileName, }) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} totalReplicas := 0 for _, li := range replicas.LocalInfo { for _, r := range li.Replicas { stores[r.StoreID] = struct{}{} } totalReplicas += len(li.Replicas) } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node") require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas") require.Equal(t, totalRanges, len(replicas.Descriptors), "number of collected descriptors from metadata") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestLossOfQuorumRecovery performs a sanity check on end to end recovery workflow. // This test doesn't try to validate all possible test cases, but instead check that // artifacts are correctly produced and overall cluster recovery could be performed // where it would be completely broken otherwise. func TestLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. After it is stopped, single node // would not be able to progress, but we will apply recovery procedure and // mark on replicas on node 1 as designated survivors. After that, starting // single node should succeed. tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) tcBefore.Start(t) s := sqlutils.MakeSQLRunner(tcBefore.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tcBefore.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. 
sk := tcBefore.ScratchRange(t) require.NoError(t, tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tcBefore, sk) node1ID := tcBefore.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tcBefore.Stopper().Stop(ctx) server1StoreDir := dir + "/store-1" replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs( []string{"debug", "recover", "collect-info", "--store=" + server1StoreDir, replicaInfoFileName}) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile, replicaInfoFileName}) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir, planFile}) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "will be updated", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID), "apply plan was not executed on requested node") tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) // NB: If recovery is not performed, new cluster will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. tcAfter.Start(t) defer tcAfter.Stopper().Stop(ctx) // In the new cluster, we will still have nodes 2 and 3 remaining from the first // attempt. That would increase number of replicas on system ranges to 5 and we // would not be able to upreplicate properly. So we need to decommission old nodes // first before proceeding. 
adminClient := tcAfter.Server(0).GetAdminClient(t) require.NoError(t, runDecommissionNodeImpl( ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false, []roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()), "Failed to decommission removed nodes") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(true) return nil }), "Failed to activate replication queue") } require.NoError(t, tcAfter.WaitForZoneConfigPropagation(), "Failed to ensure zone configs are propagated") require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceConsistencyQueueProcess() }), "Failed to force replicas to consistency queue") } // As a validation step we will just pick one range and get its replicas to see // if they were up-replicated to the new nodes. s = sqlutils.MakeSQLRunner(tcAfter.Conns[0]) r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1") var replicas string r.Scan(&replicas) require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery") // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") } // TestStageVersionCheck verifies that we can force plan with different internal // version onto cluster. To do this, we create a plan with internal version // above current but matching major and minor. Then we check that staging fails // and that force flag will update plan version to match local node. func TestStageVersionCheck(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() _, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() storeReg := server.NewStickyVFSRegistry() tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: { Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: storeReg, }, }, StoreSpecs: []base.StoreSpec{ {InMemory: true, StickyVFSID: "1"}, }, }, }, ReusableListenerReg: listenerReg, }) tc.Start(t) defer tc.Stopper().Stop(ctx) tc.StopServer(3) adminClient := tc.Server(0).GetAdminClient(t) v := clusterversion.ByKey(clusterversion.BinaryVersionKey) v.Internal++ // To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force // node to stage plan for verification. 
p := loqrecoverypb.ReplicaUpdatePlan{ PlanID: uuid.FastMakeV4(), Version: v, ClusterID: tc.Server(0).StorageClusterID().String(), DecommissionedNodeIDs: []roachpb.NodeID{4}, StaleLeaseholderNodeIDs: []roachpb.NodeID{1}, } // Attempts to stage plan with different internal version must fail. _, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: false, }) require.ErrorContains(t, err, "doesn't match cluster active version") // Enable "stuck upgrade bypass" to stage plan on the cluster. _, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: true, }) require.NoError(t, err, "force local must fix incorrect version") // Check that stored plan has version matching cluster version. ps := loqrecovery.NewPlanStore("", storeReg.Get("1")) p, ok, err := ps.LoadPlan() require.NoError(t, err, "failed to read node 0 plan") require.True(t, ok, "plan was not staged") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version, "plan version was not updated") } func createIntentOnRangeDescriptor( ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key, ) { txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1) var desc roachpb.RangeDescriptor // Pick one of the predefined split points. rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk)) if err := txn.GetProto(ctx, rdKey, &desc); err != nil { t.Fatal(err) } desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } // At this point the intent has been written to Pebble but this // write was not synced (only the raft log append was synced). We // need to force another sync, but we're far from the storage // layer here so the easiest thing to do is simply perform a // second write. This will force the first write to be persisted // to disk (the second write may or may not make it to disk due to // timing). desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } } func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. To do that, we will terminate // two nodes and run recovery on remaining one. Restarting node should // bring it back to healthy (but underreplicated) state. // Note that we inject reusable listeners into all nodes to prevent tests // running in parallel from taking over ports of stopped nodes and responding // to gateway node with errors. // TODO(oleg): Make test run with 7 nodes to exercise cases where multiple // replicas survive. Current startup and allocator behaviour would make // this test flaky. 
sa := make(map[int]base.TestServerArgs) for i := 0; i < 3; i++ { sa[i] = base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: server.NewStickyVFSRegistry(), }, }, StoreSpecs: []base.StoreSpec{ { InMemory: true, }, }, } } tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, }, ReusableListenerReg: listenerReg, ServerArgsPerNode: sa, }) tc.Start(t) s := sqlutils.MakeSQLRunner(tc.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tc.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. sk := tc.ScratchRange(t) require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tc, sk) node1ID := tc.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tc.StopServer(1) tc.StopServer(2) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{ "debug", "recover", "make-plan", "--confirm=y", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--plan=" + planFile, }) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "apply-plan", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--confirm=y", planFile, }) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "updating replica", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID), "apply plan failed to stage on expected nodes") // Verify plan is staged on nodes out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet") tc.StopServer(0) // NB: If recovery is not performed, server will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. 
require.NoError(t, tc.RestartServer(0), "restart failed") s = sqlutils.MakeSQLRunner(tc.Conns[0]) // Verifying that post start cleanup performed node decommissioning that // prevents old nodes from rejoining. ac := tc.GetAdminClient(t, 0) testutils.SucceedsSoon(t, func() error { dr, err := ac.DecommissionStatus(ctx, &serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}}) if err != nil { return err } for _, s := range dr.Status { if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED { return errors.Newf("expecting n%d to be decommissioned", s.NodeID) } } return nil }) // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // Verify recovery complete. out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "Loss of quorum recovery is complete.") // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") // Finally split scratch range to ensure metadata ranges are recovered. _, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42})) require.NoError(t, err, "failed to split range after recovery") } func TestUpdatePlanVsClusterDiff(t *testing.T) { defer leaktest.AfterTest(t)() var empty uuid.UUID planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000") otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001") applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z") status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus { s := loqrecoverypb.NodeRecoveryStatus{ NodeID: id, } if !pending.Equal(empty) { s.PendingPlanID = &pending } if !applied.Equal(empty) { s.AppliedPlanID = &applied s.ApplyTimestamp = &applyTime } s.Error = err return s } for _, d := range []struct { name string updatedNodes []int staleLeases []int status []loqrecoverypb.NodeRecoveryStatus pending int errors int report []string }{ { name: "after staging", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, planID, empty, ""), }, pending: 3, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " plan application pending on node n3", }, }, { name: "partially applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, ""), status(3, planID, empty, ""), }, pending: 2, report: []string{ " plan application pending on node n1", " plan applied successfully on node n2", " plan application pending on node n3", }, }, { name: "fully applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, empty, planID, ""), status(2, empty, planID, ""), status(3, empty, 
planID, ""), }, report: []string{ " plan applied successfully on node n1", " plan applied successfully on node n2", " plan applied successfully on node n3", }, }, { name: "staging lost no node", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n3", " failed to find node n2 where plan must be staged", }, }, { name: "staging lost no plan", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, empty, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " failed to find staged plan on node n3", }, }, { name: "partial failure", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application failed on node n2: found stale replica", " plan application pending on node n3", }, }, { name: "no plan", status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, empty, otherPlanID, ""), }, report: []string{ " node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000", " node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica", " node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC", }, }, { name: "wrong plan", updatedNodes: []int{1, 2}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, otherPlanID, empty, ""), status(3, otherPlanID, empty, ""), }, pending: 1, errors: 2, report: []string{ " plan application pending on node n1", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3", }, }, } { t.Run(d.name, func(t *testing.T) { plan := loqrecoverypb.ReplicaUpdatePlan{ PlanID: planID, } // Plan will contain single replica update for each requested node. 
rangeSeq := 1 for _, id := range d.updatedNodes { plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{ RangeID: roachpb.RangeID(rangeSeq), StartKey: nil, OldReplicaID: roachpb.ReplicaID(1), NewReplica: roachpb.ReplicaDescriptor{ NodeID: roachpb.NodeID(id), StoreID: roachpb.StoreID(id), ReplicaID: roachpb.ReplicaID(rangeSeq + 17), }, NextReplicaID: roachpb.ReplicaID(rangeSeq + 18), }) } for _, id := range d.staleLeases { plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id)) } diff := diffPlanWithNodeStatus(plan, d.status) require.Equal(t, d.pending, diff.pending, "number of pending changes") require.Equal(t, d.errors, diff.errors, "number of node errors") if d.report != nil { require.Equal(t, len(d.report), len(diff.report), "number of lines in diff") for i := range d.report { require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i) } } }) } } func TestTruncateKeyOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 13, result: "/System/No...", }, { len: 30, result: "/System/NodeLiveness", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix)) }) } } func TestTruncateSpanOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 30, result: "/System/{NodeLiveness-Syste...", }, { len: 90, result: "/System/{NodeLiveness-SystemSpanConfigKeys}", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatSpan(roachpb.Span{ Key: keys.NodeLivenessPrefix, EndKey: keys.SystemSpanConfigPrefix, })) }) } }
pkg/cli/debug_recover_loss_of_quorum_test.go
1
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.9984685778617859, 0.0697118416428566, 0.00016107082774396986, 0.00023339182371273637, 0.25255876779556274 ]
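In the row above, the labels list lines up one-to-one with code_window, and the single "add" label appears to mark the line after which the after_edit block is spliced in (putting the ServerArgs field inside the base.TestClusterArgs literal, ahead of ReplicationMode). A sketch of that reading, reusing the Hunk type from the earlier sketch; the splice rule is an assumption drawn from this row, not a documented contract, and hunks of other types may behave differently:

// ApplyWindow rebuilds the edited code window: every window line is kept, and
// the after_edit lines are inserted immediately after each line labeled "add".
func ApplyWindow(h Hunk) []string {
	out := make([]string, 0, len(h.CodeWindow)+len(h.AfterEdit))
	for i, line := range h.CodeWindow {
		out = append(out, line)
		if i < len(h.Labels) && h.Labels[i] == "add" {
			out = append(out, h.AfterEdit...)
		}
	}
	return out
}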
{ "id": 6, "code_window": [ "\trequire.Contains(t, out, \"will be updated\", \"no replica updates were recorded\")\n", "\trequire.Contains(t, out, fmt.Sprintf(\"Updated store(s): s%d\", node1ID),\n", "\t\t\"apply plan was not executed on requested node\")\n", "\n", "\ttcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n", "\t\t},\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 244 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package roachpb import ( "fmt" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" "go.etcd.io/raft/v3/raftpb" ) // ReplicaSet is a set of replicas, usually the nodes/stores on which // replicas of a range are stored. type ReplicaSet struct { wrapped []ReplicaDescriptor } // TODO(aayush): Add a `Size` or `NumReplicas` method to ReplicaSet and amend // usages that call `len(replicaSet.Descriptors())` // MakeReplicaSet creates a ReplicaSet wrapper from a raw slice of individual // descriptors. func MakeReplicaSet(replicas []ReplicaDescriptor) ReplicaSet { return ReplicaSet{wrapped: replicas} } // SafeFormat implements redact.SafeFormatter. func (d ReplicaSet) SafeFormat(w redact.SafePrinter, _ rune) { for i, desc := range d.wrapped { if i > 0 { w.SafeRune(',') } w.Print(desc) } } func (d ReplicaSet) String() string { return redact.StringWithoutMarkers(d) } // Descriptors returns every replica descriptor in the set, including both voter // replicas and learner replicas. Voter replicas are ordered first in the // returned slice. func (d ReplicaSet) Descriptors() []ReplicaDescriptor { return d.wrapped } func predVoterFull(rDesc ReplicaDescriptor) bool { switch rDesc.Type { case VOTER_FULL: return true default: } return false } func predVoterFullOrIncoming(rDesc ReplicaDescriptor) bool { switch rDesc.Type { case VOTER_FULL, VOTER_INCOMING: return true default: } return false } func predVoterIncoming(rDesc ReplicaDescriptor) bool { switch rDesc.Type { case VOTER_INCOMING: return true default: } return false } func predLearner(rDesc ReplicaDescriptor) bool { return rDesc.Type == LEARNER } func predNonVoter(rDesc ReplicaDescriptor) bool { return rDesc.Type == NON_VOTER } func predVoterOrNonVoter(rDesc ReplicaDescriptor) bool { return predVoterFullOrIncoming(rDesc) || predNonVoter(rDesc) } func predVoterFullOrNonVoter(rDesc ReplicaDescriptor) bool { return predVoterFull(rDesc) || predNonVoter(rDesc) } // Voters returns a ReplicaSet of current and future voter replicas in `d`. This // means that during an atomic replication change, only the replicas that will // be voters once the change completes will be returned; "outgoing" voters will // not be returned even though they do in the current state retain their voting // rights. // // This may allocate, but it also may return the underlying slice as a // performance optimization, so it's not safe to modify the returned value. // // TODO(tbg): go through the callers and figure out the few which want a // different subset of voters. Consider renaming this method so that it's // more descriptive. func (d ReplicaSet) Voters() ReplicaSet { return d.Filter(predVoterFullOrIncoming) } // VoterDescriptors returns the descriptors of current and future voter replicas // in the set. func (d ReplicaSet) VoterDescriptors() []ReplicaDescriptor { return d.FilterToDescriptors(predVoterFullOrIncoming) } func (d ReplicaSet) containsVoterIncoming() bool { return len(d.FilterToDescriptors(predVoterIncoming)) > 0 } // LearnerDescriptors returns a slice of ReplicaDescriptors corresponding to // learner replicas in `d`. 
This may allocate, but it also may return the // underlying slice as a performance optimization, so it's not safe to modify // the returned value. // // A learner is a participant in a raft group that accepts messages but doesn't // vote. This means it doesn't affect raft quorum and thus doesn't affect the // fragility of the range, even if it's very far behind or many learners are // down. // // At the time of writing, learners are used in CockroachDB as an interim state // while adding a replica. A learner replica is added to the range via raft // ConfChange, a raft snapshot (of type INITIAL) is sent to catch it up, and // then a second ConfChange promotes it to a full replica. // // This means that learners are currently always expected to have a short // lifetime, approximately the time it takes to send a snapshot. // // For simplicity, CockroachDB treats learner replicas the same as voter // replicas as much as possible, but there are a few exceptions: // // - Learner replicas are not considered when calculating quorum size, and thus // do not affect the computation of which ranges are under-replicated for // upreplication/alerting/debug/etc purposes. Ditto for over-replicated. // - Learner replicas cannot become raft leaders, so we also don't allow them to // become leaseholders. As a result, DistSender and the various oracles don't // try to send them traffic. // - The raft snapshot queue tries to avoid sending snapshots to ephemeral // learners (but not to non-voting replicas, which are also etcd learners) for // reasons described below. // - Merges won't run while a learner replica is present. // // Replicas are now added in two ConfChange transactions. The first creates the // learner and the second promotes it to a voter. If the node that is // coordinating this dies in the middle, we're left with an orphaned learner. // For this reason, the replicate queue always first removes any learners it // sees before doing anything else. We could instead try to finish off the // learner snapshot and promotion, but this is more complicated and it's not yet // clear the efficiency win is worth it. // // This introduces some rare races between the replicate queue and // AdminChangeReplicas or if a range's lease is moved to a new owner while the // old leaseholder is still processing it in the replicate queue. These races // are handled by retrying if a learner disappears during the // snapshot/promotion. // // If the coordinator otherwise encounters an error while sending the learner // snapshot or promoting it (which can happen for a number of reasons, including // the node getting the learner going away), it tries to clean up after itself // by rolling back the addition of the learner. // // [*] There is another race between the learner snapshot being sent and the // raft snapshot queue happening to check the replica at the same time, also // sending it a snapshot. This is safe but wasteful, so the raft snapshot queue // won't try to send snapshots to learners if there is already a snapshot to // that range in flight. // // *However*, raft is currently pickier than the needs to be about the snapshots // it requests and it can get stuck in StateSnapshot if it doesn't receive // exactly the index it wants. As a result, for now, the raft snapshot queue // will send one if it's still needed after the learner snapshot finishes (or // times out). To make this work in a timely manner (i.e. 
without relying on the // replica scanner) but without blocking the raft snapshot queue, when a // snapshot is skipped, this is reported to raft as an error sending the // snapshot. This causes raft to eventually re-enqueue it in the raft snapshot // queue. All of this is quite hard to reason about, so it'd be nice to make // this go away at some point. // // Merges are blocked if either side has a learner (to avoid working out the // edge cases) but it's historically turned out to be a bad idea to get in the // way of splits, so we allow them even when some of the replicas are learners. // This orphans a learner on each side of the split (the original coordinator // will not be able to finish either of them), but the replication queue will // eventually clean them up. // // Learner replicas don't affect quorum but they do affect the system in other // ways. The most obvious way is that the leader sends them the raft traffic it // would send to any follower, consuming resources. More surprising is that once // the learner has received a snapshot, it's considered by the quota pool that // prevents the raft leader from getting too far ahead of the followers. // However, it means a slow learner can slow down regular traffic. // // For some related mega-comments, see Replica.sendSnapshot. func (d ReplicaSet) LearnerDescriptors() []ReplicaDescriptor { return d.FilterToDescriptors(predLearner) } // NonVoters returns a ReplicaSet containing only the non-voters in `d`. // Non-voting replicas are treated differently from learner replicas. Learners // are a temporary internal state used to make atomic replication changes less // disruptive to the system. Even though learners and non-voting replicas are // both etcd/raft LearnerNodes under the hood, non-voting replicas are meant to // be a user-visible state and are explicitly chosen to be placed inside certain // localities via zone configs. func (d ReplicaSet) NonVoters() ReplicaSet { return d.Filter(predNonVoter) } // NonVoterDescriptors returns the non-voting replica descriptors in the set. func (d ReplicaSet) NonVoterDescriptors() []ReplicaDescriptor { return d.FilterToDescriptors(predNonVoter) } // VoterFullAndNonVoterDescriptors returns the descriptors of // VOTER_FULL/NON_VOTER replicas in the set. This set will not contain learners // or, during an atomic replication change, incoming or outgoing voters. // Notably, this set must encapsulate all replicas of a range for a range merge // to proceed. func (d ReplicaSet) VoterFullAndNonVoterDescriptors() []ReplicaDescriptor { return d.FilterToDescriptors(predVoterFullOrNonVoter) } // VoterAndNonVoterDescriptors returns the descriptors of VOTER_FULL, // VOTER_INCOMING and NON_VOTER replicas in the set. Notably, this is the set of // replicas the DistSender will consider routing follower read requests to. func (d ReplicaSet) VoterAndNonVoterDescriptors() []ReplicaDescriptor { return d.FilterToDescriptors(predVoterOrNonVoter) } // Filter returns a ReplicaSet corresponding to the replicas for which the // supplied predicate returns true. func (d ReplicaSet) Filter(pred func(rDesc ReplicaDescriptor) bool) ReplicaSet { return MakeReplicaSet(d.FilterToDescriptors(pred)) } // FilterToDescriptors returns only the replica descriptors for which the // supplied method returns true. The memory returned may be shared with the // receiver. func (d ReplicaSet) FilterToDescriptors( pred func(rDesc ReplicaDescriptor) bool, ) []ReplicaDescriptor { // Fast path when all or none match to avoid allocations. 
fastpath := true out := d.wrapped for i := range d.wrapped { if pred(d.wrapped[i]) { if !fastpath { out = append(out, d.wrapped[i]) } } else { if fastpath { out = nil out = append(out, d.wrapped[:i]...) fastpath = false } } } return out } // AsProto returns the protobuf representation of these replicas, suitable for // setting the InternalReplicas field of a RangeDescriptor. When possible the // SetReplicas method of RangeDescriptor should be used instead, this is only // here for the convenience of tests. func (d ReplicaSet) AsProto() []ReplicaDescriptor { return d.wrapped } // DeepCopy returns a copy of this set of replicas. Modifications to the // returned set will not affect this one and vice-versa. func (d ReplicaSet) DeepCopy() ReplicaSet { return ReplicaSet{ wrapped: append([]ReplicaDescriptor(nil), d.wrapped...), } } // AddReplica adds the given replica to this set. func (d *ReplicaSet) AddReplica(r ReplicaDescriptor) { d.wrapped = append(d.wrapped, r) } // RemoveReplica removes the matching replica from this set. If it wasn't found // to remove, false is returned. func (d *ReplicaSet) RemoveReplica(nodeID NodeID, storeID StoreID) (ReplicaDescriptor, bool) { idx := -1 for i := range d.wrapped { if d.wrapped[i].NodeID == nodeID && d.wrapped[i].StoreID == storeID { idx = i break } } if idx == -1 { return ReplicaDescriptor{}, false } // Swap with the last element so we can simply truncate the slice. d.wrapped[idx], d.wrapped[len(d.wrapped)-1] = d.wrapped[len(d.wrapped)-1], d.wrapped[idx] removed := d.wrapped[len(d.wrapped)-1] d.wrapped = d.wrapped[:len(d.wrapped)-1] return removed, true } // InAtomicReplicationChange returns true if the descriptor is in the middle of // an atomic replication change. func (d ReplicaSet) InAtomicReplicationChange() bool { for _, rDesc := range d.wrapped { switch rDesc.Type { case VOTER_INCOMING, VOTER_OUTGOING, VOTER_DEMOTING_LEARNER, VOTER_DEMOTING_NON_VOTER: return true case VOTER_FULL, LEARNER, NON_VOTER: default: panic(fmt.Sprintf("unknown replica type %d", rDesc.Type)) } } return false } // ConfState returns the Raft configuration described by the set of replicas. func (d ReplicaSet) ConfState() raftpb.ConfState { var cs raftpb.ConfState joint := d.InAtomicReplicationChange() // The incoming config is taken verbatim from the full voters when the // config is not joint. If it is joint, slot the voters into the right // category. for _, rep := range d.wrapped { id := uint64(rep.ReplicaID) switch rep.Type { case VOTER_FULL: cs.Voters = append(cs.Voters, id) if joint { cs.VotersOutgoing = append(cs.VotersOutgoing, id) } case VOTER_INCOMING: cs.Voters = append(cs.Voters, id) case VOTER_OUTGOING: cs.VotersOutgoing = append(cs.VotersOutgoing, id) case VOTER_DEMOTING_LEARNER, VOTER_DEMOTING_NON_VOTER: cs.VotersOutgoing = append(cs.VotersOutgoing, id) cs.LearnersNext = append(cs.LearnersNext, id) case LEARNER: cs.Learners = append(cs.Learners, id) case NON_VOTER: cs.Learners = append(cs.Learners, id) default: panic(fmt.Sprintf("unknown ReplicaType %d", rep.Type)) } } return cs } // HasReplicaOnNode returns true iff the given nodeID is present in the // ReplicaSet. func (d ReplicaSet) HasReplicaOnNode(nodeID NodeID) bool { for _, rep := range d.wrapped { if rep.NodeID == nodeID { return true } } return false } // CanMakeProgress reports whether the given descriptors can make progress at // the replication layer. This is more complicated than just counting the number // of replicas due to the existence of joint quorums. 
func (d ReplicaSet) CanMakeProgress(liveFunc func(descriptor ReplicaDescriptor) bool) bool { return d.ReplicationStatus(liveFunc, 0 /* neededVoters */, -1 /* neededNonVoters*/).Available } // RangeStatusReport contains info about a range's replication status. Returned // by ReplicaSet.ReplicationStatus. type RangeStatusReport struct { // Available is set if the range can make progress, based on replica liveness // info passed to ReplicationStatus(). Available bool // UnderReplicated is set if the range is considered under-replicated // according to the desired replication factor and the replica liveness info // passed to ReplicationStatus. Only voting replicas are counted here. Dead // replicas are considered to be missing. UnderReplicated bool // OverReplicated is set if the range is considered over-replicated // according to the desired replication factor passed to ReplicationStatus. // Only voting replicas are counted here. Replica liveness is not // considered. // // Note that a range can be under-replicated and over-replicated at the same // time if it has many replicas, but sufficiently many of them are on dead // nodes. OverReplicated bool // {Under,Over}ReplicatedNonVoters are like their {Under,Over}Replicated // counterparts but applying only to non-voters. UnderReplicatedNonVoters, OverReplicatedNonVoters bool } // ReplicationStatus returns availability and over/under-replication // determinations for the range. // // neededVoters is the replica's desired replication for purposes of determining // over/under-replication of voters. If the caller is only interested in // availability of voting replicas, 0 can be passed in. neededNonVoters is the // counterpart for non-voting replicas but with -1 as the sentinel value (unlike // voters, it's possible to expect 0 non-voters). func (d ReplicaSet) ReplicationStatus( liveFunc func(descriptor ReplicaDescriptor) bool, neededVoters int, neededNonVoters int, ) RangeStatusReport { var res RangeStatusReport // isBoth takes two replica predicates and returns their conjunction. isBoth := func( pred1 func(rDesc ReplicaDescriptor) bool, pred2 func(rDesc ReplicaDescriptor) bool) func(ReplicaDescriptor) bool { return func(rDesc ReplicaDescriptor) bool { return pred1(rDesc) && pred2(rDesc) } } // This functions handles regular, or joint-consensus replica groups. In the // joint-consensus case, we'll independently consider the health of the // outgoing group ("old") and the incoming group ("new"). In the regular case, // the two groups will be identical. votersOldGroup := d.FilterToDescriptors(ReplicaDescriptor.IsVoterOldConfig) liveVotersOldGroup := d.FilterToDescriptors(isBoth(ReplicaDescriptor.IsVoterOldConfig, liveFunc)) n := len(votersOldGroup) // Empty groups succeed by default, to match the Raft implementation. availableOutgoingGroup := (n == 0) || (len(liveVotersOldGroup) >= n/2+1) votersNewGroup := d.FilterToDescriptors(ReplicaDescriptor.IsVoterNewConfig) liveVotersNewGroup := d.FilterToDescriptors(isBoth(ReplicaDescriptor.IsVoterNewConfig, liveFunc)) n = len(votersNewGroup) availableIncomingGroup := len(liveVotersNewGroup) >= n/2+1 res.Available = availableIncomingGroup && availableOutgoingGroup // Determine over/under-replication of voting replicas. Note that learners // don't matter. 
underReplicatedOldGroup := len(liveVotersOldGroup) < neededVoters underReplicatedNewGroup := len(liveVotersNewGroup) < neededVoters overReplicatedOldGroup := len(votersOldGroup) > neededVoters overReplicatedNewGroup := len(votersNewGroup) > neededVoters res.UnderReplicated = underReplicatedOldGroup || underReplicatedNewGroup res.OverReplicated = overReplicatedOldGroup || overReplicatedNewGroup if neededNonVoters == -1 { return res } nonVoters := d.FilterToDescriptors(ReplicaDescriptor.IsNonVoter) liveNonVoters := d.FilterToDescriptors(isBoth(ReplicaDescriptor.IsNonVoter, liveFunc)) res.UnderReplicatedNonVoters = len(liveNonVoters) < neededNonVoters res.OverReplicatedNonVoters = len(nonVoters) > neededNonVoters return res } // Empty returns true if `target` is an empty replication target. func Empty(target ReplicationTarget) bool { return target == ReplicationTarget{} } // ReplicationTargets returns a slice of ReplicationTargets corresponding to // each of the replicas in the set. func (d ReplicaSet) ReplicationTargets() (out []ReplicationTarget) { descs := d.Descriptors() out = make([]ReplicationTarget, len(descs)) for i := range descs { repl := &descs[i] out[i].NodeID, out[i].StoreID = repl.NodeID, repl.StoreID } return out } // Difference compares two sets of replicas, returning the replica descriptors // that were added and removed when going from one to the other. 'd' is the before // state, 'o' is the one after. func (d ReplicaSet) Difference(o ReplicaSet) (added, removed []ReplicaDescriptor) { return o.Subtract(d), d.Subtract(o) } // Subtract one sets of replicas from another. This returning the replica // descriptors that were present in the original and not the other. 'd' is the // original set of descriptors, 'o' is the other. func (d ReplicaSet) Subtract(o ReplicaSet) []ReplicaDescriptor { var repls []ReplicaDescriptor for _, repl := range d.Descriptors() { if _, found := o.GetReplicaDescriptorByID(repl.ReplicaID); !found { repls = append(repls, repl) } } return repls } // IsAddition returns true if `c` refers to a replica addition operation. func (c ReplicaChangeType) IsAddition() bool { switch c { case ADD_NON_VOTER, ADD_VOTER: return true case REMOVE_NON_VOTER, REMOVE_VOTER: return false default: panic(fmt.Sprintf("unexpected ReplicaChangeType %s", c)) } } // IsRemoval returns true if `c` refers a replica removal operation. func (c ReplicaChangeType) IsRemoval() bool { switch c { case ADD_NON_VOTER, ADD_VOTER: return false case REMOVE_NON_VOTER, REMOVE_VOTER: return true default: panic(fmt.Sprintf("unexpected ReplicaChangeType %s", c)) } } // ErrReplicaNotFound can be returned from CheckCanReceiveLease. // // See: https://github.com/cockroachdb/cockroach/issues/93163. var ErrReplicaNotFound = errors.New(`lease target replica not found in RangeDescriptor`) // ErrReplicaCannotHoldLease can be returned from CheckCanReceiveLease. // // See: https://github.com/cockroachdb/cockroach/issues/93163. var ErrReplicaCannotHoldLease = errors.New(`lease target replica cannot hold lease`) // CheckCanReceiveLease checks whether `wouldbeLeaseholder` can receive a lease. // Returns an error if the respective replica is not eligible. // // Previously, we were not allowed to enter a joint config where the // leaseholder is being removed (i.e., not a full voter). In the new version // we're allowed to enter such a joint config (if it has a VOTER_INCOMING), // but not to exit it in this state, i.e., the leaseholder must be some // kind of voter in the next new config (potentially VOTER_DEMOTING). 
// // It is possible (and sometimes needed) that while in the joint configuration, // the replica being removed will receive lease. This is allowed only if // a) there is a VOTER_INCOMING replica to which the lease will be trasferred // when transitioning out of the joint config, and b) the replica being removed // was the last leaseholder (as indictated by wasLastLeaseholder). The // information we use for (b) is potentially stale, but if it incorrect // the removed node either does not need to get the lease or will not be able // to get it. In particular, when we think we are the last leaseholder but we // aren't, the CAS call for extending the lease will fail (see // wasLastLeaseholder := isExtension in cmd_lease_request.go). // // An error is also returned is the replica is not part of `replDescs`. // NB: This logic should be in sync with constraint_stats_report as report // will check voter constraint violations. When changing this method, you need // to update replica filter in report to keep it correct. func CheckCanReceiveLease( wouldbeLeaseholder ReplicaDescriptor, replDescs ReplicaSet, wasLastLeaseholder bool, ) error { repDesc, ok := replDescs.GetReplicaDescriptorByID(wouldbeLeaseholder.ReplicaID) if !ok { return ErrReplicaNotFound } if !(repDesc.IsVoterNewConfig() || (repDesc.IsVoterOldConfig() && replDescs.containsVoterIncoming() && wasLastLeaseholder)) { // We allow a demoting / incoming voter to receive the lease if there's an incoming voter. // In this case, when exiting the joint config, we will transfer the lease to the incoming // voter. return ErrReplicaCannotHoldLease } return nil }
pkg/roachpb/metadata_replicas.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.005455330014228821, 0.0004247201723046601, 0.0001629632752155885, 0.00017402971570845693, 0.0007895907619968057 ]
{ "id": 6, "code_window": [ "\trequire.Contains(t, out, \"will be updated\", \"no replica updates were recorded\")\n", "\trequire.Contains(t, out, fmt.Sprintf(\"Updated store(s): s%d\", node1ID),\n", "\t\t\"apply plan was not executed on requested node\")\n", "\n", "\ttcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n", "\t\t},\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 244 }
@startuml participant "client.Txn" as Txn participant TxnCoordSender participant interceptors Txn -> TxnCoordSender : Send(BatchRequest) TxnCoordSender -> interceptors : Send(BatchRequest) ... interceptors -[#red]-> TxnCoordSender : unrecoverable error! TxnCoordSender -> TxnCoordSender : "txnState = txnError" activate TxnCoordSender #red TxnCoordSender -> Txn : error ... Txn -> TxnCoordSender : Send(BatchRequest) TxnCoordSender -> Txn : error (txn trashed) @enduml
docs/tech-notes/txn_coord_sender/eunrecoverable.puml
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00017118426330853254, 0.0001711043150862679, 0.00017102436686400324, 0.0001711043150862679, 7.994822226464748e-8 ]
{ "id": 6, "code_window": [ "\trequire.Contains(t, out, \"will be updated\", \"no replica updates were recorded\")\n", "\trequire.Contains(t, out, fmt.Sprintf(\"Updated store(s): s%d\", node1ID),\n", "\t\t\"apply plan was not executed on requested node\")\n", "\n", "\ttcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {StoreSpecs: []base.StoreSpec{{Path: dir + \"/store-1\"}}},\n", "\t\t},\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 244 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package backfiller import ( "context" "time" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/backfill" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "golang.org/x/sync/errgroup" ) // NewPeriodicProgressFlusher returns a PeriodicProgressFlusher that // will flush at the given intervals. func NewPeriodicProgressFlusher( checkpointIntervalFn func() time.Duration, fractionIntervalFn func() time.Duration, ) scexec.PeriodicProgressFlusher { return &periodicProgressFlusher{ clock: timeutil.DefaultTimeSource{}, checkpointInterval: checkpointIntervalFn, fractionInterval: fractionIntervalFn, } } // NewPeriodicProgressFlusherForIndexBackfill returns a PeriodicProgressFlusher // that will flush according to the intervals defined in the cluster settings. func NewPeriodicProgressFlusherForIndexBackfill( settings *cluster.Settings, ) scexec.PeriodicProgressFlusher { return NewPeriodicProgressFlusher( func() time.Duration { return backfill.IndexBackfillCheckpointInterval.Get(&settings.SV) }, func() time.Duration { // fractionInterval is copied from the logic in existing backfill code. // TODO(ajwerner): Add a cluster setting to control this. const fractionInterval = 10 * time.Second return fractionInterval }, ) } type periodicProgressFlusher struct { clock timeutil.TimeSource checkpointInterval, fractionInterval func() time.Duration } func (p *periodicProgressFlusher) StartPeriodicUpdates( ctx context.Context, tracker scexec.BackfillerProgressFlusher, ) (stop func() error) { stopCh := make(chan struct{}) runPeriodicWrite := func( ctx context.Context, write func(context.Context) error, interval func() time.Duration, ) error { timer := p.clock.NewTimer() defer timer.Stop() for { timer.Reset(interval()) select { case <-stopCh: return nil case <-ctx.Done(): return ctx.Err() case <-timer.Ch(): timer.MarkRead() if err := write(ctx); err != nil { return err } } } } var g errgroup.Group g.Go(func() error { return runPeriodicWrite( ctx, tracker.FlushFractionCompleted, p.fractionInterval) }) g.Go(func() error { return runPeriodicWrite( ctx, tracker.FlushCheckpoint, p.checkpointInterval) }) toClose := stopCh // make the returned function idempotent return func() error { if toClose != nil { close(toClose) toClose = nil } return g.Wait() } }
pkg/sql/schemachanger/scexec/backfiller/periodic_progress_flusher.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00024105736520141363, 0.00018409664335194975, 0.00016322203737217933, 0.00017535340157337487, 0.000023761176635161974 ]
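For orientation, this is roughly what the id-6 hunk above resolves to once its after_edit lines are applied at the add-labelled line — a sketch only, assuming the added ServerArgs block lands directly after the TestClusterArgs opening brace (the id-8 hunk further down suggests this placement); dir comes from the enclosing TestLossOfQuorumRecovery test:

	// Sketch of the merged hunk; not part of the dataset record itself.
	tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			// This logic is specific to the storage layer.
			DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
		},
		ReplicationMode: base.ReplicationManual,
		ServerArgsPerNode: map[int]base.TestServerArgs{
			0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}},
		},
	})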
{ "id": 7, "code_window": [ "\tlistenerReg := listenerutil.NewListenerRegistry()\n", "\tdefer listenerReg.Close()\n", "\n", "\tstoreReg := server.NewStickyVFSRegistry()\n", "\ttc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 335 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package cli import ( "context" "fmt" "os" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/listenerutil" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) // TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection. // This is done by running three node cluster with disk backed storage, // stopping it and verifying content of collected replica info file. // This check verifies that: // // we successfully iterate requested stores, // data is written in expected location, // data contains info only about stores requested. func TestCollectInfoFromMultipleStores(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, 1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}}, 2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}}, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) // Wait up-replication. require.NoError(t, tc.WaitForFullReplication()) // Shutdown. tc.Stopper().Stop(ctx) replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1", "--store=" + dir + "/store-2", replicaInfoFileName}) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} for _, r := range replicas.LocalInfo[0].Replicas { stores[r.StoreID] = struct{}{} } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestCollectInfoFromOnlineCluster verifies that given a test cluster with // one stopped node, we can collect replica info and metadata from remaining // nodes using an admin recovery call. 
func TestCollectInfoFromOnlineCluster(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ StoreSpecs: []base.StoreSpec{{InMemory: true}}, Insecure: true, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) require.NoError(t, tc.WaitForFullReplication()) tc.ToggleReplicateQueues(false) r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases") var totalRanges int require.NoError(t, r.Scan(&totalRanges), "failed to query range count") tc.StopServer(0) replicaInfoFileName := dir + "/all-nodes.json" c.RunWithArgs([]string{ "debug", "recover", "collect-info", "--insecure", "--host", tc.Server(2).AdvRPCAddr(), replicaInfoFileName, }) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} totalReplicas := 0 for _, li := range replicas.LocalInfo { for _, r := range li.Replicas { stores[r.StoreID] = struct{}{} } totalReplicas += len(li.Replicas) } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node") require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas") require.Equal(t, totalRanges, len(replicas.Descriptors), "number of collected descriptors from metadata") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestLossOfQuorumRecovery performs a sanity check on end to end recovery workflow. // This test doesn't try to validate all possible test cases, but instead check that // artifacts are correctly produced and overall cluster recovery could be performed // where it would be completely broken otherwise. func TestLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. After it is stopped, single node // would not be able to progress, but we will apply recovery procedure and // mark on replicas on node 1 as designated survivors. After that, starting // single node should succeed. tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) tcBefore.Start(t) s := sqlutils.MakeSQLRunner(tcBefore.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tcBefore.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. 
sk := tcBefore.ScratchRange(t) require.NoError(t, tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tcBefore, sk) node1ID := tcBefore.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tcBefore.Stopper().Stop(ctx) server1StoreDir := dir + "/store-1" replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs( []string{"debug", "recover", "collect-info", "--store=" + server1StoreDir, replicaInfoFileName}) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile, replicaInfoFileName}) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir, planFile}) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "will be updated", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID), "apply plan was not executed on requested node") tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) // NB: If recovery is not performed, new cluster will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. tcAfter.Start(t) defer tcAfter.Stopper().Stop(ctx) // In the new cluster, we will still have nodes 2 and 3 remaining from the first // attempt. That would increase number of replicas on system ranges to 5 and we // would not be able to upreplicate properly. So we need to decommission old nodes // first before proceeding. 
adminClient := tcAfter.Server(0).GetAdminClient(t) require.NoError(t, runDecommissionNodeImpl( ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false, []roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()), "Failed to decommission removed nodes") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(true) return nil }), "Failed to activate replication queue") } require.NoError(t, tcAfter.WaitForZoneConfigPropagation(), "Failed to ensure zone configs are propagated") require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceConsistencyQueueProcess() }), "Failed to force replicas to consistency queue") } // As a validation step we will just pick one range and get its replicas to see // if they were up-replicated to the new nodes. s = sqlutils.MakeSQLRunner(tcAfter.Conns[0]) r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1") var replicas string r.Scan(&replicas) require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery") // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") } // TestStageVersionCheck verifies that we can force plan with different internal // version onto cluster. To do this, we create a plan with internal version // above current but matching major and minor. Then we check that staging fails // and that force flag will update plan version to match local node. func TestStageVersionCheck(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() _, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() storeReg := server.NewStickyVFSRegistry() tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: { Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: storeReg, }, }, StoreSpecs: []base.StoreSpec{ {InMemory: true, StickyVFSID: "1"}, }, }, }, ReusableListenerReg: listenerReg, }) tc.Start(t) defer tc.Stopper().Stop(ctx) tc.StopServer(3) adminClient := tc.Server(0).GetAdminClient(t) v := clusterversion.ByKey(clusterversion.BinaryVersionKey) v.Internal++ // To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force // node to stage plan for verification. 
p := loqrecoverypb.ReplicaUpdatePlan{ PlanID: uuid.FastMakeV4(), Version: v, ClusterID: tc.Server(0).StorageClusterID().String(), DecommissionedNodeIDs: []roachpb.NodeID{4}, StaleLeaseholderNodeIDs: []roachpb.NodeID{1}, } // Attempts to stage plan with different internal version must fail. _, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: false, }) require.ErrorContains(t, err, "doesn't match cluster active version") // Enable "stuck upgrade bypass" to stage plan on the cluster. _, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: true, }) require.NoError(t, err, "force local must fix incorrect version") // Check that stored plan has version matching cluster version. ps := loqrecovery.NewPlanStore("", storeReg.Get("1")) p, ok, err := ps.LoadPlan() require.NoError(t, err, "failed to read node 0 plan") require.True(t, ok, "plan was not staged") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version, "plan version was not updated") } func createIntentOnRangeDescriptor( ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key, ) { txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1) var desc roachpb.RangeDescriptor // Pick one of the predefined split points. rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk)) if err := txn.GetProto(ctx, rdKey, &desc); err != nil { t.Fatal(err) } desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } // At this point the intent has been written to Pebble but this // write was not synced (only the raft log append was synced). We // need to force another sync, but we're far from the storage // layer here so the easiest thing to do is simply perform a // second write. This will force the first write to be persisted // to disk (the second write may or may not make it to disk due to // timing). desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } } func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. To do that, we will terminate // two nodes and run recovery on remaining one. Restarting node should // bring it back to healthy (but underreplicated) state. // Note that we inject reusable listeners into all nodes to prevent tests // running in parallel from taking over ports of stopped nodes and responding // to gateway node with errors. // TODO(oleg): Make test run with 7 nodes to exercise cases where multiple // replicas survive. Current startup and allocator behaviour would make // this test flaky. 
sa := make(map[int]base.TestServerArgs) for i := 0; i < 3; i++ { sa[i] = base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: server.NewStickyVFSRegistry(), }, }, StoreSpecs: []base.StoreSpec{ { InMemory: true, }, }, } } tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, }, ReusableListenerReg: listenerReg, ServerArgsPerNode: sa, }) tc.Start(t) s := sqlutils.MakeSQLRunner(tc.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tc.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. sk := tc.ScratchRange(t) require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tc, sk) node1ID := tc.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tc.StopServer(1) tc.StopServer(2) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{ "debug", "recover", "make-plan", "--confirm=y", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--plan=" + planFile, }) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "apply-plan", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--confirm=y", planFile, }) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "updating replica", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID), "apply plan failed to stage on expected nodes") // Verify plan is staged on nodes out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet") tc.StopServer(0) // NB: If recovery is not performed, server will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. 
require.NoError(t, tc.RestartServer(0), "restart failed") s = sqlutils.MakeSQLRunner(tc.Conns[0]) // Verifying that post start cleanup performed node decommissioning that // prevents old nodes from rejoining. ac := tc.GetAdminClient(t, 0) testutils.SucceedsSoon(t, func() error { dr, err := ac.DecommissionStatus(ctx, &serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}}) if err != nil { return err } for _, s := range dr.Status { if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED { return errors.Newf("expecting n%d to be decommissioned", s.NodeID) } } return nil }) // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // Verify recovery complete. out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "Loss of quorum recovery is complete.") // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") // Finally split scratch range to ensure metadata ranges are recovered. _, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42})) require.NoError(t, err, "failed to split range after recovery") } func TestUpdatePlanVsClusterDiff(t *testing.T) { defer leaktest.AfterTest(t)() var empty uuid.UUID planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000") otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001") applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z") status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus { s := loqrecoverypb.NodeRecoveryStatus{ NodeID: id, } if !pending.Equal(empty) { s.PendingPlanID = &pending } if !applied.Equal(empty) { s.AppliedPlanID = &applied s.ApplyTimestamp = &applyTime } s.Error = err return s } for _, d := range []struct { name string updatedNodes []int staleLeases []int status []loqrecoverypb.NodeRecoveryStatus pending int errors int report []string }{ { name: "after staging", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, planID, empty, ""), }, pending: 3, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " plan application pending on node n3", }, }, { name: "partially applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, ""), status(3, planID, empty, ""), }, pending: 2, report: []string{ " plan application pending on node n1", " plan applied successfully on node n2", " plan application pending on node n3", }, }, { name: "fully applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, empty, planID, ""), status(2, empty, planID, ""), status(3, empty, 
planID, ""), }, report: []string{ " plan applied successfully on node n1", " plan applied successfully on node n2", " plan applied successfully on node n3", }, }, { name: "staging lost no node", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n3", " failed to find node n2 where plan must be staged", }, }, { name: "staging lost no plan", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, empty, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " failed to find staged plan on node n3", }, }, { name: "partial failure", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application failed on node n2: found stale replica", " plan application pending on node n3", }, }, { name: "no plan", status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, empty, otherPlanID, ""), }, report: []string{ " node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000", " node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica", " node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC", }, }, { name: "wrong plan", updatedNodes: []int{1, 2}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, otherPlanID, empty, ""), status(3, otherPlanID, empty, ""), }, pending: 1, errors: 2, report: []string{ " plan application pending on node n1", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3", }, }, } { t.Run(d.name, func(t *testing.T) { plan := loqrecoverypb.ReplicaUpdatePlan{ PlanID: planID, } // Plan will contain single replica update for each requested node. 
rangeSeq := 1 for _, id := range d.updatedNodes { plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{ RangeID: roachpb.RangeID(rangeSeq), StartKey: nil, OldReplicaID: roachpb.ReplicaID(1), NewReplica: roachpb.ReplicaDescriptor{ NodeID: roachpb.NodeID(id), StoreID: roachpb.StoreID(id), ReplicaID: roachpb.ReplicaID(rangeSeq + 17), }, NextReplicaID: roachpb.ReplicaID(rangeSeq + 18), }) } for _, id := range d.staleLeases { plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id)) } diff := diffPlanWithNodeStatus(plan, d.status) require.Equal(t, d.pending, diff.pending, "number of pending changes") require.Equal(t, d.errors, diff.errors, "number of node errors") if d.report != nil { require.Equal(t, len(d.report), len(diff.report), "number of lines in diff") for i := range d.report { require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i) } } }) } } func TestTruncateKeyOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 13, result: "/System/No...", }, { len: 30, result: "/System/NodeLiveness", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix)) }) } } func TestTruncateSpanOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 30, result: "/System/{NodeLiveness-Syste...", }, { len: 90, result: "/System/{NodeLiveness-SystemSpanConfigKeys}", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatSpan(roachpb.Span{ Key: keys.NodeLivenessPrefix, EndKey: keys.SystemSpanConfigPrefix, })) }) } }
pkg/cli/debug_recover_loss_of_quorum_test.go
1
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.9984839558601379, 0.2515121102333069, 0.00016421191685367376, 0.0001773799303919077, 0.41914039850234985 ]
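Similarly, a sketch of the id-7 hunk merged into TestStageVersionCheck, under the same assumption that the after_edit block is inserted right after the TestClusterArgs opening line; storeReg and listenerReg are defined a few lines earlier in the same test:

	// Sketch of the merged hunk; identifiers come from the surrounding test body.
	tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			// This logic is specific to the storage layer.
			DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
		},
		ReplicationMode: base.ReplicationManual,
		ServerArgsPerNode: map[int]base.TestServerArgs{
			0: {
				Knobs: base.TestingKnobs{
					Server: &server.TestingKnobs{
						StickyVFSRegistry: storeReg,
					},
				},
				StoreSpecs: []base.StoreSpec{
					{InMemory: true, StickyVFSID: "1"},
				},
			},
		},
		ReusableListenerReg: listenerReg,
	})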
{ "id": 7, "code_window": [ "\tlistenerReg := listenerutil.NewListenerRegistry()\n", "\tdefer listenerReg.Close()\n", "\n", "\tstoreReg := server.NewStickyVFSRegistry()\n", "\ttc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 335 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package rowflow import ( "context" "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/errors" ) func TestOrderedSync(t *testing.T) { defer leaktest.AfterTest(t)() v := [6]rowenc.EncDatum{} for i := range v { v[i] = rowenc.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(i))) } asc := encoding.Ascending desc := encoding.Descending testCases := []struct { sources []rowenc.EncDatumRows ordering colinfo.ColumnOrdering expected rowenc.EncDatumRows }{ { sources: []rowenc.EncDatumRows{ { {v[0], v[1], v[4]}, {v[0], v[1], v[2]}, {v[0], v[2], v[3]}, {v[1], v[1], v[3]}, }, { {v[1], v[0], v[4]}, }, { {v[0], v[0], v[0]}, {v[4], v[4], v[4]}, }, }, ordering: colinfo.ColumnOrdering{ {ColIdx: 0, Direction: asc}, {ColIdx: 1, Direction: asc}, }, expected: rowenc.EncDatumRows{ {v[0], v[0], v[0]}, {v[0], v[1], v[4]}, {v[0], v[1], v[2]}, {v[0], v[2], v[3]}, {v[1], v[0], v[4]}, {v[1], v[1], v[3]}, {v[4], v[4], v[4]}, }, }, { sources: []rowenc.EncDatumRows{ {}, { {v[1], v[0], v[4]}, }, { {v[3], v[4], v[1]}, {v[4], v[4], v[4]}, {v[3], v[2], v[0]}, }, { {v[4], v[4], v[5]}, {v[3], v[3], v[0]}, {v[0], v[0], v[0]}, }, }, ordering: colinfo.ColumnOrdering{ {ColIdx: 1, Direction: desc}, {ColIdx: 0, Direction: asc}, {ColIdx: 2, Direction: asc}, }, expected: rowenc.EncDatumRows{ {v[3], v[4], v[1]}, {v[4], v[4], v[4]}, {v[4], v[4], v[5]}, {v[3], v[3], v[0]}, {v[3], v[2], v[0]}, {v[0], v[0], v[0]}, {v[1], v[0], v[4]}, }, }, } for testIdx, c := range testCases { var sources []execinfra.RowSource for _, srcRows := range c.sources { rowBuf := distsqlutils.NewRowBuffer(types.ThreeIntCols, srcRows, distsqlutils.RowBufferArgs{}) sources = append(sources, rowBuf) } evalCtx := eval.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) src, err := makeSerialSync(c.ordering, evalCtx, sources, 0, /* serialSrcIndexExclusiveUpperBound */ nil, /* exceedsSrcIndexExclusiveUpperBoundErrorFunc */ ) if err != nil { t.Fatal(err) } src.Start(context.Background()) var retRows rowenc.EncDatumRows for { row, meta := src.Next() if meta != nil { t.Fatalf("unexpected metadata: %v", meta) } if row == nil { break } retRows = append(retRows, row) } expStr := c.expected.String(types.ThreeIntCols) retStr := retRows.String(types.ThreeIntCols) if expStr != retStr { t.Errorf("invalid results for case %d; expected:\n %s\ngot:\n %s", testIdx, expStr, retStr) } } } func TestOrderedSyncDrainBeforeNext(t *testing.T) { defer leaktest.AfterTest(t)() expectedMeta := &execinfrapb.ProducerMetadata{Err: errors.New("expected metadata")} var sources []execinfra.RowSource for i := 0; i 
< 4; i++ { rowBuf := distsqlutils.NewRowBuffer(types.OneIntCol, nil /* rows */, distsqlutils.RowBufferArgs{}) sources = append(sources, rowBuf) rowBuf.Push(nil, expectedMeta) } ctx := context.Background() evalCtx := eval.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(ctx) o, err := makeSerialSync(colinfo.ColumnOrdering{}, evalCtx, sources, 0, /* serialSrcIndexExclusiveUpperBound */ nil, /* exceedsSrcIndexExclusiveUpperBoundErrorFunc */ ) if err != nil { t.Fatal(err) } o.Start(ctx) // Call ConsumerDone before Next has been called. o.ConsumerDone() metasFound := 0 for { _, meta := o.Next() if meta == nil { break } if meta != expectedMeta { t.Fatalf("unexpected meta %v, expected %v", meta, expectedMeta) } metasFound++ } if metasFound != len(sources) { t.Fatalf("unexpected number of metadata items %d, expected %d", metasFound, len(sources)) } } func TestUnorderedSync(t *testing.T) { defer leaktest.AfterTest(t)() mrc := &execinfra.RowChannel{} mrc.InitWithNumSenders([]*types.T{types.Int}, 5) producerErr := make(chan error, 100) for i := 1; i <= 5; i++ { go func(i int) { for j := 1; j <= 100; j++ { a := rowenc.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(i))) b := rowenc.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(j))) row := rowenc.EncDatumRow{a, b} if status := mrc.Push(row, nil /* meta */); status != execinfra.NeedMoreRows { producerErr <- errors.Errorf("producer error: unexpected response: %d", status) } } mrc.ProducerDone() }(i) } var retRows rowenc.EncDatumRows for { row, meta := mrc.Next() if meta != nil { t.Fatalf("unexpected metadata: %v", meta) } if row == nil { break } retRows = append(retRows, row) } // Verify all elements. for i := 1; i <= 5; i++ { j := 1 for _, row := range retRows { if int(tree.MustBeDInt(row[0].Datum)) == i { if int(tree.MustBeDInt(row[1].Datum)) != j { t.Errorf("Expected [%d %d], got %s", i, j, row.String(types.TwoIntCols)) } j++ } } if j != 101 { t.Errorf("Missing [%d %d]", i, j) } } select { case err := <-producerErr: t.Fatal(err) default: } // Test case when one source closes with an error. mrc = &execinfra.RowChannel{} mrc.InitWithNumSenders([]*types.T{types.Int}, 5) for i := 1; i <= 5; i++ { go func(i int) { for j := 1; j <= 100; j++ { a := rowenc.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(i))) b := rowenc.DatumToEncDatum(types.Int, tree.NewDInt(tree.DInt(j))) row := rowenc.EncDatumRow{a, b} if status := mrc.Push(row, nil /* meta */); status != execinfra.NeedMoreRows { producerErr <- errors.Errorf("producer error: unexpected response: %d", status) } } if i == 3 { err := fmt.Errorf("Test error") mrc.Push(nil /* row */, &execinfrapb.ProducerMetadata{Err: err}) } mrc.ProducerDone() }(i) } foundErr := false for { row, meta := mrc.Next() if meta != nil && meta.Err != nil { if meta.Err.Error() != "Test error" { t.Error(meta.Err) } else { foundErr = true } } if row == nil && meta == nil { break } } select { case err := <-producerErr: t.Fatal(err) default: } if !foundErr { t.Error("Did not receive expected error") } }
pkg/sql/rowflow/input_sync_test.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00019482204515952617, 0.0001748493523336947, 0.00016680793487466872, 0.0001755301927914843, 0.000005030214651924325 ]
{ "id": 7, "code_window": [ "\tlistenerReg := listenerutil.NewListenerRegistry()\n", "\tdefer listenerReg.Close()\n", "\n", "\tstoreReg := server.NewStickyVFSRegistry()\n", "\ttc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 335 }
/* setup */ CREATE TABLE t (i INT PRIMARY KEY, j INT); /* test */ EXPLAIN (DDL, SHAPE) ALTER TABLE t ALTER COLUMN j SET NOT NULL; ---- Schema change plan for ALTER TABLE ‹defaultdb›.‹public›.‹t› ALTER COLUMN ‹j› SET NOT NULL; ├── execute 1 system table mutations transaction ├── validate NOT NULL constraint on column j in index t@[0] in relation t └── execute 1 system table mutations transaction
pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_column_set_not_null/alter_table_alter_column_set_not_null.explain_shape
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.0003001581353601068, 0.00023853618768043816, 0.0001769142400007695, 0.00023853618768043816, 0.00006162194767966866 ]
{ "id": 7, "code_window": [ "\tlistenerReg := listenerutil.NewListenerRegistry()\n", "\tdefer listenerReg.Close()\n", "\n", "\tstoreReg := server.NewStickyVFSRegistry()\n", "\ttc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{\n", "\t\tReplicationMode: base.ReplicationManual,\n", "\t\tServerArgsPerNode: map[int]base.TestServerArgs{\n", "\t\t\t0: {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// This logic is specific to the storage layer.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 335 }
// Copyright 2023 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. import { all, call, put, takeEvery } from "redux-saga/effects"; import { actions } from "./databaseDetails.reducer"; import { DatabaseDetailsReqParams, ErrorWithKey, getDatabaseDetails, } from "src/api"; import moment from "moment"; import { PayloadAction } from "@reduxjs/toolkit"; export function* refreshDatabaseDetailsSaga( action: PayloadAction<DatabaseDetailsReqParams>, ) { yield put(actions.request(action.payload)); } export function* requestDatabaseDetailsSaga( action: PayloadAction<DatabaseDetailsReqParams>, ): any { try { const result = yield call( getDatabaseDetails, action.payload, moment.duration(10, "m"), ); yield put( actions.received({ key: action.payload.database, databaseDetailsResponse: result, }), ); } catch (e) { const err: ErrorWithKey = { err: e, key: action.payload.database, }; yield put(actions.failed(err)); } } export function* databaseDetailsSaga() { yield all([ takeEvery(actions.refresh, refreshDatabaseDetailsSaga), takeEvery(actions.request, requestDatabaseDetailsSaga), ]); }
pkg/ui/workspaces/cluster-ui/src/store/databaseDetails/databaseDetails.saga.ts
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.0001805881183827296, 0.00017604204185772687, 0.0001728963543428108, 0.0001755116245476529, 0.0000024295204639201984 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t}\n", "\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n", "\t\tReusableListenerReg: listenerReg,\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t// This logic is specific to the storage layer.\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 464 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package cli import ( "context" "fmt" "path/filepath" "strings" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" ) func TestDebugCheckStore(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() baseDir, dirCleanupFn := testutils.TempDir(t) defer dirCleanupFn() // Number of nodes. Increasing this will make the test flaky as written // because it relies on finding r1 on n1. const n = 3 clusterArgs := base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{}, } var storePaths []string for i := 0; i < n; i++ { args := base.TestServerArgs{} args.ScanMaxIdleTime = time.Millisecond args.ScanMaxIdleTime = time.Millisecond storeID := roachpb.StoreID(i + 1) path := filepath.Join(baseDir, fmt.Sprintf("s%d", storeID)) storePaths = append(storePaths, path) args.StoreSpecs = []base.StoreSpec{{Path: path}} clusterArgs.ServerArgsPerNode[i] = args } // Start the cluster, wait for full replication, stop the cluster. func() { tc := testcluster.StartTestCluster(t, n, clusterArgs) defer tc.Stopper().Stop(ctx) require.NoError(t, tc.WaitForFullReplication()) }() check := func(dir string) (string, error) { var buf strings.Builder err := checkStoreRangeStats(ctx, dir, func(args ...interface{}) { fmt.Fprintln(&buf, args...) }) return buf.String(), err } // Should not error out randomly. for _, dir := range storePaths { out, err := check(dir) require.NoError(t, err, "dir=%s\nout=%s\n", dir, out) require.Contains(t, out, "total stats", dir) } // Introduce a stats divergence on s1. func() { eng, err := storage.Open(ctx, storage.Filesystem(storePaths[0]), cluster.MakeClusterSettings(), storage.CacheSize(10<<20 /* 10 MiB */), storage.MustExist) require.NoError(t, err) defer eng.Close() sl := stateloader.Make(1) ms, err := sl.LoadMVCCStats(ctx, eng) require.NoError(t, err) ms.ContainsEstimates = 0 ms.LiveBytes++ require.NoError(t, sl.SetMVCCStats(ctx, eng, &ms)) }() // The check should now fail on s1. { const s = "stats inconsistency" out, err := check(storePaths[0]) require.Error(t, err) require.Contains(t, out, s) require.Contains(t, out, "total stats") } }
pkg/cli/debug_check_store_test.go
1
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.9686025977134705, 0.08849622309207916, 0.00016197767399717122, 0.00017430151638109237, 0.2783147990703583 ]
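The id-8 hunk only adds a comment above a DefaultTestTenant field that already exists in TestHalfOnlineLossOfQuorumRecovery; a sketch of the merged result (sa and listenerReg come from the enclosing test):

	// Sketch of the merged hunk; sa is the per-node args map built just above this call.
	tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			// This logic is specific to the storage layer.
			DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,
		},
		ReusableListenerReg: listenerReg,
		ServerArgsPerNode:   sa,
	})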
{ "id": 8, "code_window": [ "\t\t}\n", "\t}\n", "\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n", "\t\tReusableListenerReg: listenerReg,\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t// This logic is specific to the storage layer.\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 464 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package ts import ( "context" "fmt" "math" "unsafe" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/ts/tspb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/mon" ) // Compute the size of various structures to use when tracking memory usage. var ( sizeOfTimeSeriesData = int64(unsafe.Sizeof(roachpb.InternalTimeSeriesData{})) sizeOfSample = int64(unsafe.Sizeof(roachpb.InternalTimeSeriesSample{})) sizeOfDataPoint = int64(unsafe.Sizeof(tspb.TimeSeriesDatapoint{})) sizeOfInt32 = int64(unsafe.Sizeof(int32(0))) sizeOfUint32 = int64(unsafe.Sizeof(uint32(0))) sizeOfFloat64 = int64(unsafe.Sizeof(float64(0))) sizeOfTimestamp = int64(unsafe.Sizeof(hlc.Timestamp{})) ) // QueryMemoryOptions represents the adjustable options of a QueryMemoryContext. type QueryMemoryOptions struct { // BudgetBytes is the maximum number of bytes that should be reserved by this // query at any one time. BudgetBytes int64 // EstimatedSources is an estimate of the number of distinct sources that this // query will encounter on disk. This is needed to better estimate how much // memory a query will actually consume. EstimatedSources int64 // InterpolationLimitNanos determines the maximum gap size for which missing // values will be interpolated. By making this limit explicit, we can put a // hard limit on the timespan that needs to be read from disk to satisfy // a query. InterpolationLimitNanos int64 // If true, memory will be computed assuming the columnar layout. Columnar bool } // QueryMemoryContext encapsulates the memory-related parameters of a time // series query. These same parameters are often repeated across numerous // queries. type QueryMemoryContext struct { workerMonitor *mon.BytesMonitor resultAccount *mon.BoundAccount QueryMemoryOptions } // MakeQueryMemoryContext constructs a new query memory context from the // given parameters. func MakeQueryMemoryContext( workerMonitor, resultMonitor *mon.BytesMonitor, opts QueryMemoryOptions, ) QueryMemoryContext { resultAccount := resultMonitor.MakeBoundAccount() return QueryMemoryContext{ workerMonitor: workerMonitor, resultAccount: &resultAccount, QueryMemoryOptions: opts, } } // Close closes any resources held by the queryMemoryContext. func (qmc QueryMemoryContext) Close(ctx context.Context) { if qmc.resultAccount != nil { qmc.resultAccount.Close(ctx) } } // overflowSafeMultiply64 is a check for signed integer multiplication taken // from https://github.com/JohnCGriffin/overflow/blob/master/overflow_impl.go func overflowSafeMultiply64(a, b int64) (int64, bool) { if a == 0 || b == 0 { return 0, true } c := a * b if (c < 0) == ((a < 0) != (b < 0)) { if c/b == a { return c, true } } return c, false } // GetMaxTimespan computes the longest timespan that can be safely queried while // remaining within the given memory budget. Inputs are the resolution of data // being queried, the budget, the estimated number of sources, and the // interpolation limit being used for the query. func (qmc QueryMemoryContext) GetMaxTimespan(r Resolution) (int64, error) { slabDuration := r.SlabDuration() // Compute the size of a slab. 
sizeOfSlab := qmc.computeSizeOfSlab(r) // InterpolationBuffer is the number of slabs outside of the query range // needed to satisfy the interpolation limit. Extra slabs may be queried // on both sides of the target range. interpolationBufferOneSide := int64(math.Ceil(float64(qmc.InterpolationLimitNanos) / float64(slabDuration))) interpolationBuffer := interpolationBufferOneSide * 2 // If the (interpolation buffer timespan - interpolation limit) is less than // half of a slab, then it is possible for one additional slab to be queried // that would not have otherwise been queried. This can occur when the queried // timespan does not start on an even slab boundary. if (interpolationBufferOneSide*slabDuration)-qmc.InterpolationLimitNanos < slabDuration/2 { interpolationBuffer++ } // The number of slabs that can be queried safely is perSeriesMem/sizeOfSlab, // less the interpolation buffer. perSourceMem := qmc.BudgetBytes / qmc.EstimatedSources numSlabs := perSourceMem/sizeOfSlab - interpolationBuffer if numSlabs <= 0 { return 0, fmt.Errorf("insufficient memory budget to attempt query") } maxDuration, valid := overflowSafeMultiply64(numSlabs, slabDuration) if valid { return maxDuration, nil } return math.MaxInt64, nil } // GetMaxRollupSlabs returns the maximum number of rows that should be processed // at one time when rolling up the given resolution. func (qmc QueryMemoryContext) GetMaxRollupSlabs(r Resolution) int64 { // Rollup computations only occur when columnar is true. return qmc.BudgetBytes / qmc.computeSizeOfSlab(r) } // computeSizeOfSlab returns the size of a completely full data slab for the supplied // data resolution. func (qmc QueryMemoryContext) computeSizeOfSlab(r Resolution) int64 { slabDuration := r.SlabDuration() var sizeOfSlab int64 if qmc.Columnar { // Contains an Offset (int32) and Last (float64) for each sample. sizeOfColumns := (sizeOfInt32 + sizeOfFloat64) if r.IsRollup() { // Five additional float64 (First, Min, Max, Sum, Variance) and one uint32 // (count) per sample sizeOfColumns += 5*sizeOfFloat64 + sizeOfUint32 } sizeOfSlab = sizeOfTimeSeriesData + (slabDuration/r.SampleDuration())*sizeOfColumns } else { // Contains a sample structure for each sample. sizeOfSlab = sizeOfTimeSeriesData + (slabDuration/r.SampleDuration())*sizeOfSample } return sizeOfSlab }
pkg/ts/memory.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.0001945165276993066, 0.00017382125952281058, 0.00016420206520706415, 0.0001734789548208937, 0.000006694552666886011 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t}\n", "\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n", "\t\tReusableListenerReg: listenerReg,\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t// This logic is specific to the storage layer.\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 464 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "context" "math" "time" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/stats/bounds" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" ) type requestedStat struct { columns []descpb.ColumnID histogram bool histogramMaxBuckets uint32 name string inverted bool } // histogramSamples is the number of sample rows to be collected for histogram // construction. For larger tables, it may be beneficial to increase this number // to get a more accurate distribution. var histogramSamples = settings.RegisterIntSetting( settings.TenantWritable, "sql.stats.histogram_samples.count", "number of rows sampled for histogram construction during table statistics collection", 10000, settings.NonNegativeIntWithMaximum(math.MaxUint32), settings.WithPublic) // maxTimestampAge is the maximum allowed age of a scan timestamp during table // stats collection, used when creating statistics AS OF SYSTEM TIME. The // timestamp is advanced during long operations as needed. See TableReaderSpec. // // The lowest TTL we recommend is 10 minutes. This value must be lower than // that. var maxTimestampAge = settings.RegisterDurationSetting( settings.TenantWritable, "sql.stats.max_timestamp_age", "maximum age of timestamp during table statistics collection", 5*time.Minute, ) func (dsp *DistSQLPlanner) createAndAttachSamplers( ctx context.Context, p *PhysicalPlan, desc catalog.TableDescriptor, tableStats []*stats.TableStatistic, details jobspb.CreateStatsDetails, sampledColumnIDs []descpb.ColumnID, jobID jobspb.JobID, reqStats []requestedStat, sketchSpec, invSketchSpec []execinfrapb.SketchSpec, ) *PhysicalPlan { // Set up the samplers. sampler := &execinfrapb.SamplerSpec{ Sketches: sketchSpec, InvertedSketches: invSketchSpec, } sampler.MaxFractionIdle = details.MaxFractionIdle // For partial statistics this loop should only iterate once // since we only support one reqStat at a time. for _, s := range reqStats { if s.histogram { if count, ok := desc.HistogramSamplesCount(); ok { sampler.SampleSize = count } else { sampler.SampleSize = uint32(histogramSamples.Get(&dsp.st.SV)) } // This could be anything >= 2 to produce a histogram, but the max number // of buckets is probably also a reasonable minimum number of samples. 
(If // there are fewer rows than this in the table, there will be fewer // samples of course, which is fine.) sampler.MinSampleSize = s.histogramMaxBuckets } } // The sampler outputs the original columns plus a rank column, five // sketch columns, and two inverted histogram columns. outTypes := make([]*types.T, 0, len(p.GetResultTypes())+5) outTypes = append(outTypes, p.GetResultTypes()...) // An INT column for the rank of each row. outTypes = append(outTypes, types.Int) // An INT column indicating the sketch index. outTypes = append(outTypes, types.Int) // An INT column indicating the number of rows processed. outTypes = append(outTypes, types.Int) // An INT column indicating the number of rows that have a NULL in any sketch // column. outTypes = append(outTypes, types.Int) // An INT column indicating the size of the columns in this sketch. outTypes = append(outTypes, types.Int) // A BYTES column with the sketch data. outTypes = append(outTypes, types.Bytes) // An INT column indicating the inverted sketch index. outTypes = append(outTypes, types.Int) // A BYTES column with the inverted index key datum. outTypes = append(outTypes, types.Bytes) p.AddNoGroupingStage( execinfrapb.ProcessorCoreUnion{Sampler: sampler}, execinfrapb.PostProcessSpec{}, outTypes, execinfrapb.Ordering{}, ) // Estimate the expected number of rows based on existing stats in the cache. var rowsExpected uint64 if len(tableStats) > 0 { overhead := stats.AutomaticStatisticsFractionStaleRows.Get(&dsp.st.SV) if autoStatsFractionStaleRowsForTable, ok := desc.AutoStatsFractionStaleRows(); ok { overhead = autoStatsFractionStaleRowsForTable } // Convert to a signed integer first to make the linter happy. rowsExpected = uint64(int64( // The total expected number of rows is the same number that was measured // most recently, plus some overhead for possible insertions. float64(tableStats[0].RowCount) * (1 + overhead), )) } // Set up the final SampleAggregator stage. agg := &execinfrapb.SampleAggregatorSpec{ Sketches: sketchSpec, InvertedSketches: invSketchSpec, SampleSize: sampler.SampleSize, MinSampleSize: sampler.MinSampleSize, SampledColumnIDs: sampledColumnIDs, TableID: desc.GetID(), JobID: jobID, RowsExpected: rowsExpected, DeleteOtherStats: details.DeleteOtherStats, } // Plan the SampleAggregator on the gateway, unless we have a single Sampler. node := dsp.gatewaySQLInstanceID if len(p.ResultRouters) == 1 { node = p.Processors[p.ResultRouters[0]].SQLInstanceID } p.AddSingleGroupStage( ctx, node, execinfrapb.ProcessorCoreUnion{SampleAggregator: agg}, execinfrapb.PostProcessSpec{}, []*types.T{}, ) p.PlanToStreamColMap = []int{} return p } func (dsp *DistSQLPlanner) createPartialStatsPlan( ctx context.Context, planCtx *PlanningCtx, desc catalog.TableDescriptor, reqStats []requestedStat, jobID jobspb.JobID, details jobspb.CreateStatsDetails, ) (*PhysicalPlan, error) { // Currently, we limit the number of requests for partial statistics // stats at a given point in time to 1. // TODO (faizaanmadhani): Add support for multiple distinct requested // partial stats in one job. if len(reqStats) > 1 { return nil, pgerror.Newf(pgcode.FeatureNotSupported, "cannot process multiple partial statistics at once") } reqStat := reqStats[0] if len(reqStat.columns) > 1 { // TODO (faizaanmadhani): Add support for creating multi-column stats return nil, pgerror.Newf(pgcode.FeatureNotSupported, "multi-column partial statistics are not currently supported") } // Fetch all stats for the table that matches the given table descriptor. 
tableStats, err := planCtx.ExtendedEvalCtx.ExecCfg.TableStatsCache.GetTableStats(ctx, desc) if err != nil { return nil, err } column, err := catalog.MustFindColumnByID(desc, reqStat.columns[0]) if err != nil { return nil, err } // Calculate the column we need to scan // TODO (faizaanmadhani): Iterate through all columns in a requested stat when // when we add support for multi-column statistics. var colCfg scanColumnsConfig colCfg.wantedColumns = append(colCfg.wantedColumns, column.GetID()) // Initialize a dummy scanNode for the requested statistic. scan := scanNode{desc: desc} err = scan.initDescSpecificCol(colCfg, column) if err != nil { return nil, err } // Map the ColumnIDs to their ordinals in scan.cols // This loop should only iterate once, since we only // handle single column partial statistics. // TODO(faizaanmadhani): Add support for multi-column partial stats next var colIdxMap catalog.TableColMap for i, c := range scan.cols { colIdxMap.Set(c.GetID(), i) } var sb span.Builder sb.Init(planCtx.EvalContext(), planCtx.ExtendedEvalCtx.Codec, desc, scan.index) var stat *stats.TableStatistic var histogram []cat.HistogramBucket // Find the statistic and histogram from the newest table statistic for our // column that is not partial and not forecasted. The first one we find will // be the latest due to the newest to oldest ordering property of the cache. for _, t := range tableStats { if len(t.ColumnIDs) == 1 && column.GetID() == t.ColumnIDs[0] && !t.IsPartial() && !t.IsMerged() && !t.IsForecast() { if t.HistogramData == nil || t.HistogramData.ColumnType == nil || len(t.Histogram) == 0 { return nil, pgerror.Newf( pgcode.ObjectNotInPrerequisiteState, "the latest full statistic for column %s has no histogram", column.GetName(), ) } if colinfo.ColumnTypeIsInvertedIndexable(column.GetType()) && t.HistogramData.ColumnType.Family() == types.BytesFamily { return nil, pgerror.Newf( pgcode.ObjectNotInPrerequisiteState, "the latest full statistic histogram for column %s is an inverted index histogram", column.GetName(), ) } stat = t histogram = t.Histogram break } } if stat == nil { return nil, pgerror.Newf( pgcode.ObjectNotInPrerequisiteState, "column %s does not have a prior statistic", column.GetName()) } lowerBound, upperBound, err := bounds.GetUsingExtremesBounds(planCtx.EvalContext(), histogram) if err != nil { return nil, err } extremesSpans, err := bounds.ConstructUsingExtremesSpans(lowerBound, upperBound, scan.index) if err != nil { return nil, err } extremesPredicate := bounds.ConstructUsingExtremesPredicate(lowerBound, upperBound, column.GetName()) // Get roachpb.Spans from constraint.Spans scan.spans, err = sb.SpansFromConstraintSpan(&extremesSpans, span.NoopSplitter()) if err != nil { return nil, err } p, err := dsp.createTableReaders(ctx, planCtx, &scan) if err != nil { return nil, err } if details.AsOf != nil { val := maxTimestampAge.Get(&dsp.st.SV) for i := range p.Processors { spec := p.Processors[i].Spec.Core.TableReader spec.MaxTimestampAgeNanos = uint64(val) } } sampledColumnIDs := make([]descpb.ColumnID, len(scan.cols)) spec := execinfrapb.SketchSpec{ SketchType: execinfrapb.SketchType_HLL_PLUS_PLUS_V1, GenerateHistogram: reqStat.histogram, HistogramMaxBuckets: reqStat.histogramMaxBuckets, Columns: make([]uint32, len(reqStat.columns)), StatName: reqStat.name, PartialPredicate: extremesPredicate, FullStatisticID: stat.StatisticID, PrevLowerBound: tree.Serialize(lowerBound), } // For now, this loop should iterate only once, as we only // handle single-column partial statistics. 
// TODO(faizaanmadhani): Add support for multi-column partial stats next for i, colID := range reqStat.columns { colIdx, ok := colIdxMap.Get(colID) if !ok { panic("necessary column not scanned") } streamColIdx := uint32(p.PlanToStreamColMap[colIdx]) spec.Columns[i] = streamColIdx sampledColumnIDs[streamColIdx] = colID } var sketchSpec, invSketchSpec []execinfrapb.SketchSpec if reqStat.inverted { // Find the first inverted index on the first column for collecting // histograms. Although there may be more than one index, we don't // currently have a way of using more than one or deciding which one // is better. // // We do not generate multi-column stats with histograms, so there // is no need to find an index for multi-column stats here. // // TODO(mjibson): allow multiple inverted indexes on the same column // (i.e., with different configurations). See #50655. if len(reqStat.columns) == 1 { for _, index := range desc.PublicNonPrimaryIndexes() { if index.GetType() == descpb.IndexDescriptor_INVERTED && index.InvertedColumnID() == column.GetID() { spec.Index = index.IndexDesc() break } } } // Even if spec.Index is nil because there isn't an inverted index // on the requested stats column, we can still proceed. We aren't // generating histograms in that case so we don't need an index // descriptor to generate the inverted index entries. invSketchSpec = append(invSketchSpec, spec) } else { sketchSpec = append(sketchSpec, spec) } return dsp.createAndAttachSamplers( ctx, p, desc, tableStats, details, sampledColumnIDs, jobID, reqStats, sketchSpec, invSketchSpec), nil } func (dsp *DistSQLPlanner) createStatsPlan( ctx context.Context, planCtx *PlanningCtx, desc catalog.TableDescriptor, reqStats []requestedStat, jobID jobspb.JobID, details jobspb.CreateStatsDetails, ) (*PhysicalPlan, error) { if len(reqStats) == 0 { return nil, errors.New("no stats requested") } // Calculate the set of columns we need to scan. var colCfg scanColumnsConfig var tableColSet catalog.TableColSet for _, s := range reqStats { for _, c := range s.columns { if !tableColSet.Contains(c) { tableColSet.Add(c) colCfg.wantedColumns = append(colCfg.wantedColumns, c) } } } // Create the table readers; for this we initialize a dummy scanNode. scan := scanNode{desc: desc} err := scan.initDescDefaults(colCfg) if err != nil { return nil, err } var colIdxMap catalog.TableColMap for i, c := range scan.cols { colIdxMap.Set(c.GetID(), i) } var sb span.Builder sb.Init(planCtx.EvalContext(), planCtx.ExtendedEvalCtx.Codec, desc, scan.index) scan.spans, err = sb.UnconstrainedSpans() if err != nil { return nil, err } scan.isFull = true p, err := dsp.createTableReaders(ctx, planCtx, &scan) if err != nil { return nil, err } if details.AsOf != nil { // If the read is historical, set the max timestamp age. 
val := maxTimestampAge.Get(&dsp.st.SV) for i := range p.Processors { spec := p.Processors[i].Spec.Core.TableReader spec.MaxTimestampAgeNanos = uint64(val) } } var sketchSpecs, invSketchSpecs []execinfrapb.SketchSpec sampledColumnIDs := make([]descpb.ColumnID, len(scan.cols)) for _, s := range reqStats { spec := execinfrapb.SketchSpec{ SketchType: execinfrapb.SketchType_HLL_PLUS_PLUS_V1, GenerateHistogram: s.histogram, HistogramMaxBuckets: s.histogramMaxBuckets, Columns: make([]uint32, len(s.columns)), StatName: s.name, } for i, colID := range s.columns { colIdx, ok := colIdxMap.Get(colID) if !ok { panic("necessary column not scanned") } streamColIdx := uint32(p.PlanToStreamColMap[colIdx]) spec.Columns[i] = streamColIdx sampledColumnIDs[streamColIdx] = colID } if s.inverted { // Find the first inverted index on the first column for collecting // histograms. Although there may be more than one index, we don't // currently have a way of using more than one or deciding which one // is better. // // We do not generate multi-column stats with histograms, so there // is no need to find an index for multi-column stats here. // // TODO(mjibson): allow multiple inverted indexes on the same column // (i.e., with different configurations). See #50655. if len(s.columns) == 1 { col := s.columns[0] for _, index := range desc.PublicNonPrimaryIndexes() { if index.GetType() == descpb.IndexDescriptor_INVERTED && index.InvertedColumnID() == col { spec.Index = index.IndexDesc() break } } } // Even if spec.Index is nil because there isn't an inverted index // on the requested stats column, we can still proceed. We aren't // generating histograms in that case so we don't need an index // descriptor to generate the inverted index entries. invSketchSpecs = append(invSketchSpecs, spec) } else { sketchSpecs = append(sketchSpecs, spec) } } tableStats, err := planCtx.ExtendedEvalCtx.ExecCfg.TableStatsCache.GetTableStats(ctx, desc) if err != nil { return nil, err } return dsp.createAndAttachSamplers( ctx, p, desc, tableStats, details, sampledColumnIDs, jobID, reqStats, sketchSpecs, invSketchSpecs), nil } func (dsp *DistSQLPlanner) createPlanForCreateStats( ctx context.Context, planCtx *PlanningCtx, jobID jobspb.JobID, details jobspb.CreateStatsDetails, ) (*PhysicalPlan, error) { reqStats := make([]requestedStat, len(details.ColumnStats)) histogramCollectionEnabled := stats.HistogramClusterMode.Get(&dsp.st.SV) tableDesc := tabledesc.NewBuilder(&details.Table).BuildImmutableTable() defaultHistogramBuckets := stats.GetDefaultHistogramBuckets(&dsp.st.SV, tableDesc) for i := 0; i < len(reqStats); i++ { histogram := details.ColumnStats[i].HasHistogram && histogramCollectionEnabled var histogramMaxBuckets = defaultHistogramBuckets if details.ColumnStats[i].HistogramMaxBuckets > 0 { histogramMaxBuckets = details.ColumnStats[i].HistogramMaxBuckets } if details.ColumnStats[i].Inverted && details.UsingExtremes { return nil, pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "cannot create partial statistics on an inverted index column") } reqStats[i] = requestedStat{ columns: details.ColumnStats[i].ColumnIDs, histogram: histogram, histogramMaxBuckets: histogramMaxBuckets, name: details.Name, inverted: details.ColumnStats[i].Inverted, } } if len(reqStats) == 0 { return nil, errors.New("no stats requested") } if details.UsingExtremes { return dsp.createPartialStatsPlan(ctx, planCtx, tableDesc, reqStats, jobID, details) } return dsp.createStatsPlan(ctx, planCtx, tableDesc, reqStats, jobID, details) } func (dsp *DistSQLPlanner) 
planAndRunCreateStats( ctx context.Context, evalCtx *extendedEvalContext, planCtx *PlanningCtx, txn *kv.Txn, job *jobs.Job, resultWriter *RowResultWriter, ) error { ctx = logtags.AddTag(ctx, "create-stats-distsql", nil) details := job.Details().(jobspb.CreateStatsDetails) physPlan, err := dsp.createPlanForCreateStats(ctx, planCtx, job.ID(), details) if err != nil { return err } FinalizePlan(ctx, planCtx, physPlan) recv := MakeDistSQLReceiver( ctx, resultWriter, tree.DDL, evalCtx.ExecCfg.RangeDescriptorCache, txn, evalCtx.ExecCfg.Clock, evalCtx.Tracing, ) defer recv.Release() dsp.Run(ctx, planCtx, txn, physPlan, recv, evalCtx, nil /* finishedSetupFn */) return resultWriter.Err() }
pkg/sql/distsql_plan_stats.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.0004069443966727704, 0.00017849044525064528, 0.00016498519107699394, 0.00017524223949294537, 0.000031255276553565636 ]
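The rowsExpected computation inside createAndAttachSamplers above is easy to misread because of the float64 -> int64 -> uint64 conversion chain. Below is a minimal standalone sketch of just that arithmetic, using hypothetical numbers (a latest statistic of 10,000 rows and a stale-rows fraction of 0.2, standing in for stats.AutomaticStatisticsFractionStaleRows or the per-table override); only the formula itself is taken from the code in the record above.

package main

import "fmt"

func main() {
	// Hypothetical inputs: the most recent statistic measured 10,000 rows and
	// the stale-rows overhead fraction is 0.2.
	rowCount := float64(10000)
	overhead := 0.2

	// Same conversion chain as in createAndAttachSamplers: the float result is
	// first converted to a signed integer, then to uint64.
	rowsExpected := uint64(int64(rowCount * (1 + overhead)))

	fmt.Println(rowsExpected) // 12000
}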
{ "id": 8, "code_window": [ "\t\t}\n", "\t}\n", "\ttc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n", "\t\tReusableListenerReg: listenerReg,\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t// This logic is specific to the storage layer.\n" ], "file_path": "pkg/cli/debug_recover_loss_of_quorum_test.go", "type": "add", "edit_start_line_idx": 464 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package randgen import ( "bytes" "encoding/json" "math/rand" "regexp" "sort" "strings" "github.com/cockroachdb/cockroach/pkg/geo/geoindex" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" ) var ( // StatisticsMutator adds ALTER TABLE INJECT STATISTICS statements. StatisticsMutator MultiStatementMutation = statisticsMutator // ForeignKeyMutator adds ALTER TABLE ADD FOREIGN KEY statements. ForeignKeyMutator MultiStatementMutation = foreignKeyMutator // ColumnFamilyMutator modifies a CREATE TABLE statement without any FAMILY // definitions to have random FAMILY definitions. ColumnFamilyMutator StatementMutator = columnFamilyMutator // IndexStoringMutator modifies the STORING clause of CREATE INDEX and // indexes in CREATE TABLE. IndexStoringMutator MultiStatementMutation = indexStoringMutator // PartialIndexMutator adds random partial index predicate expressions to // indexes. PartialIndexMutator MultiStatementMutation = partialIndexMutator // PostgresMutator modifies strings such that they execute identically // in both Postgres and Cockroach (however this mutator does not remove // features not supported by Postgres; use PostgresCreateTableMutator // for those). PostgresMutator StatementStringMutator = postgresMutator // PostgresCreateTableMutator modifies CREATE TABLE statements to // remove any features not supported by Postgres that would change // results (like descending primary keys). This should be used on the // output of sqlbase.RandCreateTable. PostgresCreateTableMutator MultiStatementMutation = postgresCreateTableMutator ) var ( // These are used in pkg/compose/compare/compare/compare_test.go, but // it has a build tag so it's not detected by the linter. _ = IndexStoringMutator _ = PostgresCreateTableMutator ) // StatementMutator defines a func that can change a statement. type StatementMutator func(rng *rand.Rand, stmt tree.Statement) (changed bool) // MultiStatementMutation defines a func that can return a list of new and/or mutated statements. type MultiStatementMutation func(rng *rand.Rand, stmts []tree.Statement) (mutated []tree.Statement, changed bool) // Mutator defines a method that can mutate or add SQL statements. type Mutator interface { Mutate(rng *rand.Rand, stmts []tree.Statement) (mutated []tree.Statement, changed bool) } // Mutate implements the Mutator interface. func (sm StatementMutator) Mutate( rng *rand.Rand, stmts []tree.Statement, ) (mutated []tree.Statement, changed bool) { for _, stmt := range stmts { sc := sm(rng, stmt) changed = changed || sc } return stmts, changed } // Mutate implements the Mutator interface. 
func (msm MultiStatementMutation) Mutate( rng *rand.Rand, stmts []tree.Statement, ) (mutated []tree.Statement, changed bool) { return msm(rng, stmts) } // Apply executes all mutators on stmts. It returns the (possibly mutated and // changed in place) statements and a boolean indicating whether any changes // were made. func Apply( rng *rand.Rand, stmts []tree.Statement, mutators ...Mutator, ) (mutated []tree.Statement, changed bool) { var mc bool for _, m := range mutators { stmts, mc = m.Mutate(rng, stmts) changed = changed || mc } return stmts, changed } // StringMutator defines a mutator that works on strings. type StringMutator interface { MutateString(*rand.Rand, string) (mutated string, changed bool) } // StatementStringMutator defines a func that mutates a string. type StatementStringMutator func(*rand.Rand, string) string // Mutate implements the Mutator interface. func (sm StatementStringMutator) Mutate( rng *rand.Rand, stmts []tree.Statement, ) (mutated []tree.Statement, changed bool) { panic("can only be used with MutateString") } // MutateString implements the StringMutator interface. func (sm StatementStringMutator) MutateString( rng *rand.Rand, q string, ) (mutated string, changed bool) { newq := sm(rng, q) return newq, newq != q } // ApplyString executes all mutators on input. A mutator can also be a // StringMutator which will operate after all other mutators. func ApplyString(rng *rand.Rand, input string, mutators ...Mutator) (output string, changed bool) { parsed, err := parser.Parse(input) if err != nil { return input, false } stmts := make([]tree.Statement, len(parsed)) for i, p := range parsed { stmts[i] = p.AST } var normalMutators []Mutator var stringMutators []StringMutator for _, m := range mutators { if sm, ok := m.(StringMutator); ok { stringMutators = append(stringMutators, sm) } else { normalMutators = append(normalMutators, m) } } stmts, changed = Apply(rng, stmts, normalMutators...) if changed { var sb strings.Builder for _, s := range stmts { sb.WriteString(tree.Serialize(s)) sb.WriteString(";\n") } input = sb.String() } for _, m := range stringMutators { s, ch := m.MutateString(rng, input) if ch { input = s changed = true } } return input, changed } // randNonNegInt returns a random non-negative integer. It attempts to // distribute it over powers of 10. func randNonNegInt(rng *rand.Rand) int64 { var v int64 if n := rng.Intn(20); n == 0 { // v == 0 } else if n <= 10 { v = rng.Int63n(10) + 1 for i := 0; i < n; i++ { v *= 10 } } else { v = rng.Int63() } return v } func statisticsMutator( rng *rand.Rand, stmts []tree.Statement, ) (mutated []tree.Statement, changed bool) { for _, stmt := range stmts { create, ok := stmt.(*tree.CreateTable) if !ok { continue } alter := &tree.AlterTable{ Table: create.Table.ToUnresolvedObjectName(), } rowCount := randNonNegInt(rng) cols := map[tree.Name]*tree.ColumnTableDef{} colStats := map[tree.Name]*stats.JSONStatistic{} makeHistogram := func(col *tree.ColumnTableDef) { // If an index appeared before a column definition, col // can be nil. if col == nil { return } // Do not create a histogram 20% of the time. if rng.Intn(5) == 0 { return } colType := tree.MustBeStaticallyKnownType(col.Type) if colType.Family() == types.CollatedStringFamily { // Collated strings are not roundtrippable during // encoding/decoding, so we cannot always make a valid // histogram. 
return } h := randHistogram(rng, colType) stat := colStats[col.Name] if err := stat.SetHistogram(&h); err != nil { panic(err) } } for _, def := range create.Defs { switch def := def.(type) { case *tree.ColumnTableDef: var nullCount, distinctCount, avgSize uint64 if rowCount > 0 { if def.Nullable.Nullability != tree.NotNull { nullCount = uint64(rng.Int63n(rowCount)) } distinctCount = uint64(rng.Int63n(rowCount)) avgSize = uint64(rng.Int63n(32)) } cols[def.Name] = def colStats[def.Name] = &stats.JSONStatistic{ Name: "__auto__", CreatedAt: "2000-01-01 00:00:00+00:00", RowCount: uint64(rowCount), Columns: []string{def.Name.String()}, DistinctCount: distinctCount, NullCount: nullCount, AvgSize: avgSize, } if (def.Unique.IsUnique && !def.Unique.WithoutIndex) || def.PrimaryKey.IsPrimaryKey { makeHistogram(def) } case *tree.IndexTableDef: // TODO(mgartner): We should make a histogram for each indexed // column. makeHistogram(cols[def.Columns[0].Column]) case *tree.UniqueConstraintTableDef: if !def.WithoutIndex { // TODO(mgartner): We should make a histogram for each // column in the unique constraint. makeHistogram(cols[def.Columns[0].Column]) } } } if len(colStats) > 0 { var allStats []*stats.JSONStatistic for _, cs := range colStats { allStats = append(allStats, cs) } b, err := json.Marshal(allStats) if err != nil { // Should not happen. panic(err) } j, err := tree.ParseDJSON(string(b)) if err != nil { panic(err) } alter.Cmds = append(alter.Cmds, &tree.AlterTableInjectStats{ Stats: j, }) stmts = append(stmts, alter) changed = true } } return stmts, changed } // randHistogram generates a histogram for the given type with random histogram // buckets. If colType is inverted indexable then the histogram bucket upper // bounds are byte-encoded inverted index keys. func randHistogram(rng *rand.Rand, colType *types.T) stats.HistogramData { histogramColType := colType if colinfo.ColumnTypeIsOnlyInvertedIndexable(colType) { histogramColType = types.Bytes } h := stats.HistogramData{ ColumnType: histogramColType, } // Generate random values for histogram bucket upper bounds. var encodedUpperBounds [][]byte for i, numDatums := 0, rng.Intn(10); i < numDatums; i++ { upper := RandDatum(rng, colType, false /* nullOk */) if colinfo.ColumnTypeIsOnlyInvertedIndexable(colType) { encs := encodeInvertedIndexHistogramUpperBounds(colType, upper) encodedUpperBounds = append(encodedUpperBounds, encs...) } else { enc, err := keyside.Encode(nil, upper, encoding.Ascending) if err != nil { panic(err) } encodedUpperBounds = append(encodedUpperBounds, enc) } } // Return early if there are no upper-bounds. if len(encodedUpperBounds) == 0 { return h } // Sort the encoded upper-bounds. sort.Slice(encodedUpperBounds, func(i, j int) bool { return bytes.Compare(encodedUpperBounds[i], encodedUpperBounds[j]) < 0 }) // Remove duplicates. dedupIdx := 1 for i := 1; i < len(encodedUpperBounds); i++ { if !bytes.Equal(encodedUpperBounds[i], encodedUpperBounds[i-1]) { encodedUpperBounds[dedupIdx] = encodedUpperBounds[i] dedupIdx++ } } encodedUpperBounds = encodedUpperBounds[:dedupIdx] // Create a histogram bucket for each encoded upper-bound. for i := range encodedUpperBounds { // The first bucket must have NumRange = 0, and thus DistinctRange = 0 // as well. 
var numRange int64 var distinctRange float64 if i > 0 { numRange, distinctRange = randNumRangeAndDistinctRange(rng) } h.Buckets = append(h.Buckets, stats.HistogramData_Bucket{ NumEq: randNonNegInt(rng), NumRange: numRange, DistinctRange: distinctRange, UpperBound: encodedUpperBounds[i], }) } return h } // encodeInvertedIndexHistogramUpperBounds returns a slice of byte-encoded // inverted index keys that are created from val. func encodeInvertedIndexHistogramUpperBounds(colType *types.T, val tree.Datum) (encs [][]byte) { var keys [][]byte var err error switch colType.Family() { case types.GeometryFamily: keys, err = rowenc.EncodeGeoInvertedIndexTableKeys(val, nil, *geoindex.DefaultGeometryIndexConfig()) case types.GeographyFamily: keys, err = rowenc.EncodeGeoInvertedIndexTableKeys(val, nil, *geoindex.DefaultGeographyIndexConfig()) default: keys, err = rowenc.EncodeInvertedIndexTableKeys(val, nil, descpb.LatestIndexDescriptorVersion) } if err != nil { panic(err) } var da tree.DatumAlloc for i := range keys { // Each key much be a byte-encoded datum so that it can be // decoded in JSONStatistic.SetHistogram. enc, err := keyside.Encode(nil, da.NewDBytes(tree.DBytes(keys[i])), encoding.Ascending) if err != nil { panic(err) } encs = append(encs, enc) } return encs } // randNumRangeAndDistinctRange returns two random numbers to be used for // NumRange and DistinctRange fields of a histogram bucket. func randNumRangeAndDistinctRange(rng *rand.Rand) (numRange int64, distinctRange float64) { numRange = randNonNegInt(rng) // distinctRange should be <= numRange. switch rng.Intn(3) { case 0: distinctRange = 0 case 1: distinctRange = float64(numRange) default: distinctRange = rng.Float64() * float64(numRange) } return numRange, distinctRange } // foreignKeyMutator is a MultiStatementMutation implementation which adds // foreign key references between existing columns. func foreignKeyMutator( rng *rand.Rand, stmts []tree.Statement, ) (mutated []tree.Statement, changed bool) { // Find columns in the tables. cols := map[tree.TableName][]*tree.ColumnTableDef{} byName := map[tree.TableName]*tree.CreateTable{} // Keep track of referencing columns since we have a limitation that a // column can only be used by one FK. usedCols := map[tree.TableName]map[tree.Name]bool{} // Keep track of table dependencies to prevent circular dependencies. dependsOn := map[tree.TableName]map[tree.TableName]bool{} var tables []*tree.CreateTable for _, stmt := range stmts { table, ok := stmt.(*tree.CreateTable) if !ok { continue } // Skip partitioned tables, since using foreign keys results in in-between // filters not yielding a constraint. // TODO(harding): Allow foreign keys on partitioned tables. 
var skip bool for _, def := range table.Defs { switch def := def.(type) { case *tree.IndexTableDef: if def.PartitionByIndex != nil { skip = true break } case *tree.UniqueConstraintTableDef: if def.IndexTableDef.PartitionByIndex != nil { skip = true break } } } if skip { continue } tables = append(tables, table) byName[table.Table] = table usedCols[table.Table] = map[tree.Name]bool{} dependsOn[table.Table] = map[tree.TableName]bool{} for _, def := range table.Defs { switch def := def.(type) { case *tree.ColumnTableDef: cols[table.Table] = append(cols[table.Table], def) } } } if len(tables) == 0 { return stmts, false } toNames := func(cols []*tree.ColumnTableDef) tree.NameList { names := make(tree.NameList, len(cols)) for i, c := range cols { names[i] = c.Name } return names } // We cannot mutate the table definitions themselves because 1) we // don't know the order of dependencies (i.e., table 1 could reference // table 4 which doesn't exist yet) and relatedly 2) we don't prevent // circular dependencies. Instead, add new ALTER TABLE commands to the // end of a list of statements. // Create some FKs. for rng.Intn(2) == 0 { // Choose a random table. table := tables[rng.Intn(len(tables))] // Choose a random column subset. var fkCols []*tree.ColumnTableDef for _, c := range cols[table.Table] { if c.Computed.Computed { // We don't support FK references from computed columns (#46672). continue } if usedCols[table.Table][c.Name] { continue } fkCols = append(fkCols, c) } if len(fkCols) == 0 { continue } rng.Shuffle(len(fkCols), func(i, j int) { fkCols[i], fkCols[j] = fkCols[j], fkCols[i] }) // Pick some randomly short prefix. I'm sure there's a closed // form solution to this with a single call to rng.Intn but I'm // not sure what to search for. i := 1 for len(fkCols) > i && rng.Intn(2) == 0 { i++ } fkCols = fkCols[:i] // Check if a table has the needed column types. LoopTable: for refTable, refCols := range cols { // Prevent circular and self references because // generating valid INSERTs could become impossible or // difficult algorithmically. if refTable == table.Table || len(refCols) < len(fkCols) { continue } { // To prevent circular references, find all transitive // dependencies of refTable and make sure none of them // are table. stack := []tree.TableName{refTable} for i := 0; i < len(stack); i++ { curTable := stack[i] if curTable == table.Table { // table was trying to add a dependency // to refTable, but refTable already // depends on table (directly or // indirectly). continue LoopTable } for t := range dependsOn[curTable] { stack = append(stack, t) } } } // We found a table with enough columns. Check if it // has some columns that are needed types. In order // to not use columns multiple times, keep track of // available columns. availCols := append([]*tree.ColumnTableDef(nil), refCols...) var usingCols []*tree.ColumnTableDef for len(availCols) > 0 && len(usingCols) < len(fkCols) { fkCol := fkCols[len(usingCols)] found := false for refI, refCol := range availCols { if refCol.Computed.Virtual { // We don't support FK references to virtual columns (#51296). continue } fkColType := tree.MustBeStaticallyKnownType(fkCol.Type) refColType := tree.MustBeStaticallyKnownType(refCol.Type) if fkColType.Equivalent(refColType) && colinfo.ColumnTypeIsIndexable(refColType) { usingCols = append(usingCols, refCol) availCols = append(availCols[:refI], availCols[refI+1:]...) found = true break } } if !found { continue LoopTable } } // If we didn't find enough columns, try another table. 
if len(usingCols) != len(fkCols) { continue } // Found a suitable table. // TODO(mjibson): prevent the creation of unneeded // unique indexes. One may already exist with the // correct prefix. ref := byName[refTable] refColumns := make(tree.IndexElemList, len(usingCols)) for i, c := range usingCols { refColumns[i].Column = c.Name } for _, c := range fkCols { usedCols[table.Table][c.Name] = true } dependsOn[table.Table][ref.Table] = true ref.Defs = append(ref.Defs, &tree.UniqueConstraintTableDef{ IndexTableDef: tree.IndexTableDef{ Columns: refColumns, }, }) match := tree.MatchSimple // TODO(mjibson): Set match once #42498 is fixed. var actions tree.ReferenceActions if rng.Intn(2) == 0 { actions.Delete = randAction(rng, table) } if rng.Intn(2) == 0 { actions.Update = randAction(rng, table) } stmts = append(stmts, &tree.AlterTable{ Table: table.Table.ToUnresolvedObjectName(), Cmds: tree.AlterTableCmds{&tree.AlterTableAddConstraint{ ConstraintDef: &tree.ForeignKeyConstraintTableDef{ Table: ref.Table, FromCols: toNames(fkCols), ToCols: toNames(usingCols), Actions: actions, Match: match, }, }}, }) changed = true break } } return stmts, changed } func randAction(rng *rand.Rand, table *tree.CreateTable) tree.ReferenceAction { const highestAction = tree.Cascade // Find a valid action. Depending on the random action chosen, we have // to verify some validity conditions. Loop: for { action := tree.ReferenceAction(rng.Intn(int(highestAction + 1))) for _, def := range table.Defs { col, ok := def.(*tree.ColumnTableDef) if !ok { continue } switch action { case tree.SetNull: if col.Nullable.Nullability == tree.NotNull { continue Loop } case tree.SetDefault: if col.DefaultExpr.Expr == nil && col.Nullable.Nullability == tree.NotNull { continue Loop } } } return action } } var postgresMutatorAtIndex = regexp.MustCompile(`@[\[\]\w]+`) func postgresMutator(rng *rand.Rand, q string) string { q, _ = ApplyString(rng, q, postgresStatementMutator) for from, to := range map[string]string{ ":::": "::", "STRING": "TEXT", "BYTES": "BYTEA", "STORING": "INCLUDE", " AS (": " GENERATED ALWAYS AS (", ",)": ")", } { q = strings.Replace(q, from, to, -1) } q = postgresMutatorAtIndex.ReplaceAllString(q, "") return q } // postgresStatementMutator removes cockroach-only things from CREATE TABLE and // ALTER TABLE. var postgresStatementMutator MultiStatementMutation = func(rng *rand.Rand, stmts []tree.Statement) (mutated []tree.Statement, changed bool) { for _, stmt := range stmts { switch stmt := stmt.(type) { case *tree.SetClusterSetting, *tree.SetVar, *tree.AlterTenantSetClusterSetting: changed = true continue case *tree.CreateTable: if stmt.PartitionByTable != nil { stmt.PartitionByTable = nil changed = true } for i := 0; i < len(stmt.Defs); i++ { switch def := stmt.Defs[i].(type) { case *tree.FamilyTableDef: // Remove. stmt.Defs = append(stmt.Defs[:i], stmt.Defs[i+1:]...) i-- changed = true case *tree.ColumnTableDef: if def.HasColumnFamily() { def.Family.Name = "" def.Family.Create = false changed = true } if def.Unique.WithoutIndex { def.Unique.WithoutIndex = false changed = true } if def.IsVirtual() { def.Computed.Virtual = false def.Computed.Computed = true changed = true } case *tree.UniqueConstraintTableDef: if def.PartitionByIndex != nil { def.PartitionByIndex = nil changed = true } if def.WithoutIndex { def.WithoutIndex = false changed = true } } } case *tree.AlterTable: for i := 0; i < len(stmt.Cmds); i++ { // Postgres doesn't have alter stats. 
if _, ok := stmt.Cmds[i].(*tree.AlterTableInjectStats); ok { stmt.Cmds = append(stmt.Cmds[:i], stmt.Cmds[i+1:]...) i-- changed = true } } // If there are no commands, don't add this statement. if len(stmt.Cmds) == 0 { continue } } mutated = append(mutated, stmt) } return mutated, changed } func postgresCreateTableMutator( rng *rand.Rand, stmts []tree.Statement, ) (mutated []tree.Statement, changed bool) { for _, stmt := range stmts { mutated = append(mutated, stmt) mutatedStmt, ok := stmt.(*tree.CreateTable) if !ok { continue } // Get all the column types first. colTypes := make(map[string]*types.T) for _, def := range mutatedStmt.Defs { def, ok := def.(*tree.ColumnTableDef) if !ok { continue } colDefType := tree.MustBeStaticallyKnownType(def.Type) colTypes[string(def.Name)] = colDefType } // - Exclude `INDEX` and `UNIQUE` table defs and hoist them into separate // `CREATE INDEX` and `CREATE UNIQUE INDEX` statements because Postgres does // not support them in `CREATE TABLE` stmt. // - Erase `COLLATE locale` from column defs because Postgres only support // double-quoted locale. var newdefs tree.TableDefs for _, def := range mutatedStmt.Defs { switch def := def.(type) { case *tree.IndexTableDef: var newCols tree.IndexElemList for _, col := range def.Columns { isBox2d := false // NB: col.Column is empty for expression-based indexes. if col.Expr == nil { // Postgres doesn't support box2d as a btree index key. colTypeFamily := colTypes[string(col.Column)].Family() if colTypeFamily == types.Box2DFamily { isBox2d = true } } if isBox2d { changed = true } else { newCols = append(newCols, col) } } if len(newCols) == 0 { // Break without adding this index at all. break } def.Columns = newCols // Hoist this IndexTableDef into a separate CreateIndex. changed = true // TODO(rafi): Postgres supports inverted indexes with a different // syntax than Cockroach. Maybe we could add it later. // The syntax is `CREATE INDEX name ON table USING gin(column)`. if !def.Inverted { mutated = append(mutated, &tree.CreateIndex{ Name: def.Name, Table: mutatedStmt.Table, Inverted: def.Inverted, Columns: newCols, Storing: def.Storing, // Postgres doesn't support NotVisible Index, so NotVisible is not populated here. }) } case *tree.UniqueConstraintTableDef: var newCols tree.IndexElemList for _, col := range def.Columns { isBox2d := false // NB: col.Column is empty for expression-based indexes. if col.Expr == nil { // Postgres doesn't support box2d as a btree index key. colTypeFamily := colTypes[string(col.Column)].Family() if colTypeFamily == types.Box2DFamily { isBox2d = true } } if isBox2d { changed = true } else { newCols = append(newCols, col) } } if len(newCols) == 0 { // Break without adding this index at all. break } def.Columns = newCols if def.PrimaryKey { for i, col := range def.Columns { // Postgres doesn't support descending PKs. if col.Direction != tree.DefaultDirection { def.Columns[i].Direction = tree.DefaultDirection changed = true } } if def.Name != "" { // Unset Name here because constraint names cannot be shared among // tables, so multiple PK constraints named "primary" is an error. def.Name = "" changed = true } newdefs = append(newdefs, def) break } mutated = append(mutated, &tree.CreateIndex{ Name: def.Name, Table: mutatedStmt.Table, Unique: true, Inverted: def.Inverted, Columns: newCols, Storing: def.Storing, // Postgres doesn't support NotVisible Index, so NotVisible is not populated here. 
}) changed = true case *tree.ColumnTableDef: if def.Type.(*types.T).Family() == types.CollatedStringFamily { def.Type = types.String changed = true } newdefs = append(newdefs, def) default: newdefs = append(newdefs, def) } } mutatedStmt.Defs = newdefs } return mutated, changed } // columnFamilyMutator is mutations.StatementMutator, but lives here to prevent // dependency cycles with RandCreateTable. func columnFamilyMutator(rng *rand.Rand, stmt tree.Statement) (changed bool) { ast, ok := stmt.(*tree.CreateTable) if !ok { return false } var columns []tree.Name for _, def := range ast.Defs { switch def := def.(type) { case *tree.FamilyTableDef: return false case *tree.ColumnTableDef: if def.HasColumnFamily() { return false } if !def.Computed.Virtual { columns = append(columns, def.Name) } } } if len(columns) <= 1 { return false } // Any columns not specified in column families // are auto assigned to the first family, so // there's no requirement to exhaust columns here. rng.Shuffle(len(columns), func(i, j int) { columns[i], columns[j] = columns[j], columns[i] }) fd := &tree.FamilyTableDef{} for { if len(columns) == 0 { if len(fd.Columns) > 0 { ast.Defs = append(ast.Defs, fd) } break } fd.Columns = append(fd.Columns, columns[0]) columns = columns[1:] // 50% chance to make a new column family. if rng.Intn(2) != 0 { ast.Defs = append(ast.Defs, fd) fd = &tree.FamilyTableDef{} } } return true } // tableInfo is a helper struct that contains information necessary for mutating // indexes. It is used by IndexStoringMutator and PartialIndexMutator. type tableInfo struct { columnNames []tree.Name columnsTableDefs []*tree.ColumnTableDef pkCols []tree.Name refColsLists [][]tree.Name } // getTableInfoFromDDLStatements collects tableInfo from every CreateTable // and AlterTable statement in the given list of statements. func getTableInfoFromDDLStatements(stmts []tree.Statement) map[tree.Name]tableInfo { tables := make(map[tree.Name]tableInfo) for _, stmt := range stmts { switch ast := stmt.(type) { case *tree.CreateTable: info := tableInfo{} for _, def := range ast.Defs { switch ast := def.(type) { case *tree.ColumnTableDef: info.columnNames = append(info.columnNames, ast.Name) info.columnsTableDefs = append(info.columnsTableDefs, ast) if ast.PrimaryKey.IsPrimaryKey { info.pkCols = []tree.Name{ast.Name} } case *tree.UniqueConstraintTableDef: if ast.PrimaryKey { for _, elem := range ast.Columns { info.pkCols = append(info.pkCols, elem.Column) } } case *tree.ForeignKeyConstraintTableDef: // The tableInfo must have already been created, since FK constraints // can only reference tables that already exist. if refTableInfo, ok := tables[ast.Table.ObjectName]; ok { refTableInfo.refColsLists = append(refTableInfo.refColsLists, ast.ToCols) tables[ast.Table.ObjectName] = refTableInfo } } } tables[ast.Table.ObjectName] = info case *tree.AlterTable: for _, cmd := range ast.Cmds { switch alterCmd := cmd.(type) { case *tree.AlterTableAddConstraint: switch constraintDef := alterCmd.ConstraintDef.(type) { case *tree.ForeignKeyConstraintTableDef: // The tableInfo must have already been created, since ALTER // statements come after CREATE statements. if info, ok := tables[constraintDef.Table.ObjectName]; ok { info.refColsLists = append(info.refColsLists, constraintDef.ToCols) tables[constraintDef.Table.ObjectName] = info } } } } } } return tables } // indexStoringMutator is a mutations.MultiStatementMutator, but lives here to // prevent dependency cycles with RandCreateTable. 
func indexStoringMutator(rng *rand.Rand, stmts []tree.Statement) ([]tree.Statement, bool) { changed := false tables := getTableInfoFromDDLStatements(stmts) mapFromIndexCols := func(cols []tree.Name) map[tree.Name]struct{} { colMap := map[tree.Name]struct{}{} for _, col := range cols { colMap[col] = struct{}{} } return colMap } generateStoringCols := func(rng *rand.Rand, tableInfo tableInfo, indexCols map[tree.Name]struct{}) []tree.Name { var storingCols []tree.Name for colOrdinal, col := range tableInfo.columnNames { if _, ok := indexCols[col]; ok { // Skip PK columns and columns already in the index. continue } // Virtual columns can't be stored. if tableInfo.columnsTableDefs[colOrdinal].Computed.Virtual || // Neither can TableOID. Neither can MVCCTimestamp, but the logic to // read the columns filters that one out. tableInfo.columnsTableDefs[colOrdinal].Name == colinfo.TableOIDColumnName { continue } if rng.Intn(2) == 0 { storingCols = append(storingCols, col) } } return storingCols } for _, stmt := range stmts { switch ast := stmt.(type) { case *tree.CreateIndex: if ast.Inverted { continue } info, ok := tables[ast.Table.ObjectName] if !ok { continue } // If we don't have a storing list, make one with 50% chance. if ast.Storing == nil && rng.Intn(2) == 0 { indexCols := mapFromIndexCols(info.pkCols) for _, elem := range ast.Columns { indexCols[elem.Column] = struct{}{} } ast.Storing = generateStoringCols(rng, info, indexCols) changed = true } case *tree.CreateTable: info, ok := tables[ast.Table.ObjectName] if !ok { panic("table info could not be found") } for _, def := range ast.Defs { var idx *tree.IndexTableDef switch defType := def.(type) { case *tree.IndexTableDef: idx = defType case *tree.UniqueConstraintTableDef: if !defType.PrimaryKey && !defType.WithoutIndex { idx = &defType.IndexTableDef } } if idx == nil || idx.Inverted { continue } // If we don't have a storing list, make one with 50% chance. if idx.Storing == nil && rng.Intn(2) == 0 { indexCols := mapFromIndexCols(info.pkCols) for _, elem := range idx.Columns { indexCols[elem.Column] = struct{}{} } idx.Storing = generateStoringCols(rng, info, indexCols) changed = true } } } } return stmts, changed } // partialIndexMutator is a mutations.MultiStatementMutator, but lives here to // prevent dependency cycles with RandCreateTable. This mutator adds random // partial index predicate expressions to indexes. func partialIndexMutator(rng *rand.Rand, stmts []tree.Statement) ([]tree.Statement, bool) { changed := false tables := getTableInfoFromDDLStatements(stmts) for _, stmt := range stmts { switch ast := stmt.(type) { case *tree.CreateIndex: info, ok := tables[ast.Table.ObjectName] if !ok { continue } // If the index is not already a partial index, make it a partial index // with a 50% chance. Do not mutate an index that was created to satisfy a // FK constraint. 
if ast.Predicate == nil && !hasReferencingConstraint(info, ast.Columns) && rng.Intn(2) == 0 { tn := tree.MakeUnqualifiedTableName(ast.Table.ObjectName) ast.Predicate = randPartialIndexPredicateFromCols(rng, info.columnsTableDefs, &tn) changed = true } case *tree.CreateTable: info, ok := tables[ast.Table.ObjectName] if !ok { panic("table info could not be found") } for _, def := range ast.Defs { var idx *tree.IndexTableDef switch defType := def.(type) { case *tree.IndexTableDef: idx = defType case *tree.UniqueConstraintTableDef: if !defType.PrimaryKey && !defType.WithoutIndex { idx = &defType.IndexTableDef } } if idx == nil { continue } // If the index is not already a partial index, make it a partial // index with a 50% chance. if idx.Predicate == nil && !hasReferencingConstraint(info, idx.Columns) && rng.Intn(2) == 0 { tn := tree.MakeUnqualifiedTableName(ast.Table.ObjectName) idx.Predicate = randPartialIndexPredicateFromCols(rng, info.columnsTableDefs, &tn) changed = true } } } } return stmts, changed } // hasReferencingConstraint returns true if the tableInfo has any referencing // columns that match idxColumns. func hasReferencingConstraint(info tableInfo, idxColumns tree.IndexElemList) bool { RefColsLoop: for _, refCols := range info.refColsLists { if len(refCols) != len(idxColumns) { continue RefColsLoop } for i := range refCols { if refCols[i] != idxColumns[i].Column { continue RefColsLoop } } return true } return false }
pkg/sql/randgen/mutator.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.0005342921940609813, 0.00018013290537055582, 0.00016507015971001238, 0.00017467528232373297, 0.000040514285501558334 ]
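The mutator.go record above exposes its entry points (Apply, ApplyString, and the exported mutator variables), so a short usage sketch may help when skimming the dump. The DDL string and RNG seed below are made up for illustration; the package path, the ApplyString signature, and the mutator names are the ones visible in the file, and the sketch assumes it is built inside the cockroachdb/cockroach module.

package main

import (
	"fmt"
	"math/rand"

	"github.com/cockroachdb/cockroach/pkg/sql/randgen"
)

func main() {
	rng := rand.New(rand.NewSource(1))

	// Hypothetical input DDL; any parseable CREATE TABLE works.
	ddl := "CREATE TABLE t (a INT PRIMARY KEY, b STRING, c STRING)"

	out, changed := randgen.ApplyString(rng, ddl,
		randgen.ColumnFamilyMutator, // may add random FAMILY definitions
		randgen.StatisticsMutator,   // may append ALTER TABLE ... INJECT STATISTICS
	)
	fmt.Println(changed)
	fmt.Println(out)
}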
{ "id": 9, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 165 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package cli import ( "context" "fmt" "os" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/listenerutil" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) // TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection. // This is done by running three node cluster with disk backed storage, // stopping it and verifying content of collected replica info file. // This check verifies that: // // we successfully iterate requested stores, // data is written in expected location, // data contains info only about stores requested. func TestCollectInfoFromMultipleStores(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, 1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}}, 2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}}, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) // Wait up-replication. require.NoError(t, tc.WaitForFullReplication()) // Shutdown. tc.Stopper().Stop(ctx) replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1", "--store=" + dir + "/store-2", replicaInfoFileName}) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} for _, r := range replicas.LocalInfo[0].Replicas { stores[r.StoreID] = struct{}{} } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestCollectInfoFromOnlineCluster verifies that given a test cluster with // one stopped node, we can collect replica info and metadata from remaining // nodes using an admin recovery call. 
func TestCollectInfoFromOnlineCluster(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ StoreSpecs: []base.StoreSpec{{InMemory: true}}, Insecure: true, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) require.NoError(t, tc.WaitForFullReplication()) tc.ToggleReplicateQueues(false) r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases") var totalRanges int require.NoError(t, r.Scan(&totalRanges), "failed to query range count") tc.StopServer(0) replicaInfoFileName := dir + "/all-nodes.json" c.RunWithArgs([]string{ "debug", "recover", "collect-info", "--insecure", "--host", tc.Server(2).AdvRPCAddr(), replicaInfoFileName, }) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} totalReplicas := 0 for _, li := range replicas.LocalInfo { for _, r := range li.Replicas { stores[r.StoreID] = struct{}{} } totalReplicas += len(li.Replicas) } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node") require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas") require.Equal(t, totalRanges, len(replicas.Descriptors), "number of collected descriptors from metadata") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestLossOfQuorumRecovery performs a sanity check on end to end recovery workflow. // This test doesn't try to validate all possible test cases, but instead check that // artifacts are correctly produced and overall cluster recovery could be performed // where it would be completely broken otherwise. func TestLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. After it is stopped, single node // would not be able to progress, but we will apply recovery procedure and // mark on replicas on node 1 as designated survivors. After that, starting // single node should succeed. tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) tcBefore.Start(t) s := sqlutils.MakeSQLRunner(tcBefore.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tcBefore.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. 
sk := tcBefore.ScratchRange(t) require.NoError(t, tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tcBefore, sk) node1ID := tcBefore.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tcBefore.Stopper().Stop(ctx) server1StoreDir := dir + "/store-1" replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs( []string{"debug", "recover", "collect-info", "--store=" + server1StoreDir, replicaInfoFileName}) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile, replicaInfoFileName}) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir, planFile}) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "will be updated", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID), "apply plan was not executed on requested node") tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) // NB: If recovery is not performed, new cluster will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. tcAfter.Start(t) defer tcAfter.Stopper().Stop(ctx) // In the new cluster, we will still have nodes 2 and 3 remaining from the first // attempt. That would increase number of replicas on system ranges to 5 and we // would not be able to upreplicate properly. So we need to decommission old nodes // first before proceeding. 
adminClient := tcAfter.Server(0).GetAdminClient(t) require.NoError(t, runDecommissionNodeImpl( ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false, []roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()), "Failed to decommission removed nodes") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(true) return nil }), "Failed to activate replication queue") } require.NoError(t, tcAfter.WaitForZoneConfigPropagation(), "Failed to ensure zone configs are propagated") require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceConsistencyQueueProcess() }), "Failed to force replicas to consistency queue") } // As a validation step we will just pick one range and get its replicas to see // if they were up-replicated to the new nodes. s = sqlutils.MakeSQLRunner(tcAfter.Conns[0]) r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1") var replicas string r.Scan(&replicas) require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery") // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") } // TestStageVersionCheck verifies that we can force plan with different internal // version onto cluster. To do this, we create a plan with internal version // above current but matching major and minor. Then we check that staging fails // and that force flag will update plan version to match local node. func TestStageVersionCheck(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() _, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() storeReg := server.NewStickyVFSRegistry() tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: { Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: storeReg, }, }, StoreSpecs: []base.StoreSpec{ {InMemory: true, StickyVFSID: "1"}, }, }, }, ReusableListenerReg: listenerReg, }) tc.Start(t) defer tc.Stopper().Stop(ctx) tc.StopServer(3) adminClient := tc.Server(0).GetAdminClient(t) v := clusterversion.ByKey(clusterversion.BinaryVersionKey) v.Internal++ // To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force // node to stage plan for verification. 
p := loqrecoverypb.ReplicaUpdatePlan{ PlanID: uuid.FastMakeV4(), Version: v, ClusterID: tc.Server(0).StorageClusterID().String(), DecommissionedNodeIDs: []roachpb.NodeID{4}, StaleLeaseholderNodeIDs: []roachpb.NodeID{1}, } // Attempts to stage plan with different internal version must fail. _, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: false, }) require.ErrorContains(t, err, "doesn't match cluster active version") // Enable "stuck upgrade bypass" to stage plan on the cluster. _, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: true, }) require.NoError(t, err, "force local must fix incorrect version") // Check that stored plan has version matching cluster version. ps := loqrecovery.NewPlanStore("", storeReg.Get("1")) p, ok, err := ps.LoadPlan() require.NoError(t, err, "failed to read node 0 plan") require.True(t, ok, "plan was not staged") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version, "plan version was not updated") } func createIntentOnRangeDescriptor( ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key, ) { txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1) var desc roachpb.RangeDescriptor // Pick one of the predefined split points. rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk)) if err := txn.GetProto(ctx, rdKey, &desc); err != nil { t.Fatal(err) } desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } // At this point the intent has been written to Pebble but this // write was not synced (only the raft log append was synced). We // need to force another sync, but we're far from the storage // layer here so the easiest thing to do is simply perform a // second write. This will force the first write to be persisted // to disk (the second write may or may not make it to disk due to // timing). desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } } func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. To do that, we will terminate // two nodes and run recovery on remaining one. Restarting node should // bring it back to healthy (but underreplicated) state. // Note that we inject reusable listeners into all nodes to prevent tests // running in parallel from taking over ports of stopped nodes and responding // to gateway node with errors. // TODO(oleg): Make test run with 7 nodes to exercise cases where multiple // replicas survive. Current startup and allocator behaviour would make // this test flaky. 
sa := make(map[int]base.TestServerArgs) for i := 0; i < 3; i++ { sa[i] = base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: server.NewStickyVFSRegistry(), }, }, StoreSpecs: []base.StoreSpec{ { InMemory: true, }, }, } } tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, }, ReusableListenerReg: listenerReg, ServerArgsPerNode: sa, }) tc.Start(t) s := sqlutils.MakeSQLRunner(tc.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tc.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. sk := tc.ScratchRange(t) require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tc, sk) node1ID := tc.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tc.StopServer(1) tc.StopServer(2) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{ "debug", "recover", "make-plan", "--confirm=y", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--plan=" + planFile, }) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "apply-plan", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--confirm=y", planFile, }) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "updating replica", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID), "apply plan failed to stage on expected nodes") // Verify plan is staged on nodes out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet") tc.StopServer(0) // NB: If recovery is not performed, server will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. 
require.NoError(t, tc.RestartServer(0), "restart failed") s = sqlutils.MakeSQLRunner(tc.Conns[0]) // Verifying that post start cleanup performed node decommissioning that // prevents old nodes from rejoining. ac := tc.GetAdminClient(t, 0) testutils.SucceedsSoon(t, func() error { dr, err := ac.DecommissionStatus(ctx, &serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}}) if err != nil { return err } for _, s := range dr.Status { if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED { return errors.Newf("expecting n%d to be decommissioned", s.NodeID) } } return nil }) // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // Verify recovery complete. out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "Loss of quorum recovery is complete.") // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") // Finally split scratch range to ensure metadata ranges are recovered. _, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42})) require.NoError(t, err, "failed to split range after recovery") } func TestUpdatePlanVsClusterDiff(t *testing.T) { defer leaktest.AfterTest(t)() var empty uuid.UUID planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000") otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001") applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z") status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus { s := loqrecoverypb.NodeRecoveryStatus{ NodeID: id, } if !pending.Equal(empty) { s.PendingPlanID = &pending } if !applied.Equal(empty) { s.AppliedPlanID = &applied s.ApplyTimestamp = &applyTime } s.Error = err return s } for _, d := range []struct { name string updatedNodes []int staleLeases []int status []loqrecoverypb.NodeRecoveryStatus pending int errors int report []string }{ { name: "after staging", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, planID, empty, ""), }, pending: 3, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " plan application pending on node n3", }, }, { name: "partially applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, ""), status(3, planID, empty, ""), }, pending: 2, report: []string{ " plan application pending on node n1", " plan applied successfully on node n2", " plan application pending on node n3", }, }, { name: "fully applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, empty, planID, ""), status(2, empty, planID, ""), status(3, empty, 
planID, ""), }, report: []string{ " plan applied successfully on node n1", " plan applied successfully on node n2", " plan applied successfully on node n3", }, }, { name: "staging lost no node", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n3", " failed to find node n2 where plan must be staged", }, }, { name: "staging lost no plan", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, empty, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " failed to find staged plan on node n3", }, }, { name: "partial failure", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application failed on node n2: found stale replica", " plan application pending on node n3", }, }, { name: "no plan", status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, empty, otherPlanID, ""), }, report: []string{ " node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000", " node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica", " node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC", }, }, { name: "wrong plan", updatedNodes: []int{1, 2}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, otherPlanID, empty, ""), status(3, otherPlanID, empty, ""), }, pending: 1, errors: 2, report: []string{ " plan application pending on node n1", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3", }, }, } { t.Run(d.name, func(t *testing.T) { plan := loqrecoverypb.ReplicaUpdatePlan{ PlanID: planID, } // Plan will contain single replica update for each requested node. 
rangeSeq := 1 for _, id := range d.updatedNodes { plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{ RangeID: roachpb.RangeID(rangeSeq), StartKey: nil, OldReplicaID: roachpb.ReplicaID(1), NewReplica: roachpb.ReplicaDescriptor{ NodeID: roachpb.NodeID(id), StoreID: roachpb.StoreID(id), ReplicaID: roachpb.ReplicaID(rangeSeq + 17), }, NextReplicaID: roachpb.ReplicaID(rangeSeq + 18), }) } for _, id := range d.staleLeases { plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id)) } diff := diffPlanWithNodeStatus(plan, d.status) require.Equal(t, d.pending, diff.pending, "number of pending changes") require.Equal(t, d.errors, diff.errors, "number of node errors") if d.report != nil { require.Equal(t, len(d.report), len(diff.report), "number of lines in diff") for i := range d.report { require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i) } } }) } } func TestTruncateKeyOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 13, result: "/System/No...", }, { len: 30, result: "/System/NodeLiveness", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix)) }) } } func TestTruncateSpanOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 30, result: "/System/{NodeLiveness-Syste...", }, { len: 90, result: "/System/{NodeLiveness-SystemSpanConfigKeys}", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatSpan(roachpb.Span{ Key: keys.NodeLivenessPrefix, EndKey: keys.SystemSpanConfigPrefix, })) }) } }
pkg/cli/debug_recover_loss_of_quorum_test.go
1
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.9947983026504517, 0.04403398185968399, 0.00016065362433437258, 0.00017633996321819723, 0.1958015263080597 ]
{ "id": 9, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 165 }
# CockroachDB Style guide

The CockroachDB Go style guide can be found here: https://wiki.crdb.io/wiki/spaces/CRDB/pages/181371303/Go+style+guide
docs/style.md
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00017105910228565335, 0.00017105910228565335, 0.00017105910228565335, 0.00017105910228565335, 0 ]
{ "id": 9, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 165 }
SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, ct.relname AS TABLE_NAME, a.attname AS COLUMN_NAME, (i.keys).n AS KEY_SEQ, ci.relname AS PK_NAME FROM pg_catalog.pg_class ct JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) JOIN (SELECT i.indexrelid, i.indrelid, i.indisprimary, information_schema._pg_expandarray(i.indkey) AS keys FROM pg_catalog.pg_index i) i ON (a.attnum = (i.keys).x AND a.attrelid = i.indrelid) JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) WHERE true AND ct.relname = 'j' AND i.indisprimary ORDER BY table_name, pk_name, key_seq
pkg/sql/sem/tree/testdata/pretty/join2.sql
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00017478848167229444, 0.00017260899767279625, 0.0001704295282252133, 0.00017260899767279625, 0.0000021794767235405743 ]
{ "id": 9, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 165 }
// Copyright 2023 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package admission import ( "fmt" "testing" "time" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/datadriven" "github.com/stretchr/testify/require" ) func TestSequencer(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) var seq *sequencer var lastSeqNum int64 datadriven.RunTest(t, datapathutils.TestDataPath(t, "sequencer"), func(t *testing.T, d *datadriven.TestData) string { switch d.Cmd { case "init": seq = &sequencer{} return "" case "sequence": var arg, movement string // Parse create-time=<duration>. d.ScanArgs(t, "create-time", &arg) dur, err := time.ParseDuration(arg) require.NoError(t, err) // Parse log-position=<int>/<int>. logPosition := parseLogPosition(t, d) _ = logPosition // unused sequenceNum := seq.sequence(tzero.Add(dur).UnixNano()) if lastSeqNum < sequenceNum { movement = " (advanced)" } lastSeqNum = sequenceNum return fmt.Sprintf("seq=%d ≈%s%s", sequenceNum, timeutil.FromUnixNanos(sequenceNum).Sub(tzero), movement, ) default: return fmt.Sprintf("unknown command: %s", d.Cmd) } }, ) }
pkg/util/admission/sequencer_test.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.0002446583821438253, 0.0001841219054767862, 0.00017119207768701017, 0.00017412642773706466, 0.000024890514396247454 ]
{ "id": 10, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 194 }
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package cli import ( "context" "fmt" "os" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/listenerutil" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) // TestCollectInfoFromMultipleStores performs basic sanity checks on replica info collection. // This is done by running three node cluster with disk backed storage, // stopping it and verifying content of collected replica info file. // This check verifies that: // // we successfully iterate requested stores, // data is written in expected location, // data contains info only about stores requested. func TestCollectInfoFromMultipleStores(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, 1: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-2"}}}, 2: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-3"}}}, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) // Wait up-replication. require.NoError(t, tc.WaitForFullReplication()) // Shutdown. tc.Stopper().Stop(ctx) replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs([]string{"debug", "recover", "collect-info", "--store=" + dir + "/store-1", "--store=" + dir + "/store-2", replicaInfoFileName}) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} for _, r := range replicas.LocalInfo[0].Replicas { stores[r.StoreID] = struct{}{} } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestCollectInfoFromOnlineCluster verifies that given a test cluster with // one stopped node, we can collect replica info and metadata from remaining // nodes using an admin recovery call. 
func TestCollectInfoFromOnlineCluster(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ StoreSpecs: []base.StoreSpec{{InMemory: true}}, Insecure: true, }, }) tc.Start(t) defer tc.Stopper().Stop(ctx) require.NoError(t, tc.WaitForFullReplication()) tc.ToggleReplicateQueues(false) r := tc.ServerConn(0).QueryRow("select count(*) from crdb_internal.ranges_no_leases") var totalRanges int require.NoError(t, r.Scan(&totalRanges), "failed to query range count") tc.StopServer(0) replicaInfoFileName := dir + "/all-nodes.json" c.RunWithArgs([]string{ "debug", "recover", "collect-info", "--insecure", "--host", tc.Server(2).AdvRPCAddr(), replicaInfoFileName, }) replicas, err := readReplicaInfoData([]string{replicaInfoFileName}) require.NoError(t, err, "failed to read generated replica info") stores := map[roachpb.StoreID]interface{}{} totalReplicas := 0 for _, li := range replicas.LocalInfo { for _, r := range li.Replicas { stores[r.StoreID] = struct{}{} } totalReplicas += len(li.Replicas) } require.Equal(t, 2, len(stores), "collected replicas from stores") require.Equal(t, 2, len(replicas.LocalInfo), "collected info is not split by node") require.Equal(t, totalRanges*2, totalReplicas, "number of collected replicas") require.Equal(t, totalRanges, len(replicas.Descriptors), "number of collected descriptors from metadata") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), replicas.Version, "collected version info from stores") } // TestLossOfQuorumRecovery performs a sanity check on end to end recovery workflow. // This test doesn't try to validate all possible test cases, but instead check that // artifacts are correctly produced and overall cluster recovery could be performed // where it would be completely broken otherwise. func TestLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. After it is stopped, single node // would not be able to progress, but we will apply recovery procedure and // mark on replicas on node 1 as designated survivors. After that, starting // single node should succeed. tcBefore := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) tcBefore.Start(t) s := sqlutils.MakeSQLRunner(tcBefore.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tcBefore.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. 
sk := tcBefore.ScratchRange(t) require.NoError(t, tcBefore.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tcBefore, sk) node1ID := tcBefore.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tcBefore.Stopper().Stop(ctx) server1StoreDir := dir + "/store-1" replicaInfoFileName := dir + "/node-1.json" c.RunWithArgs( []string{"debug", "recover", "collect-info", "--store=" + server1StoreDir, replicaInfoFileName}) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{"debug", "recover", "make-plan", "--confirm=y", "--plan=" + planFile, replicaInfoFileName}) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{"debug", "recover", "apply-plan", "--confirm=y", "--store=" + server1StoreDir, planFile}) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "will be updated", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Updated store(s): s%d", node1ID), "apply plan was not executed on requested node") tcAfter := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: {StoreSpecs: []base.StoreSpec{{Path: dir + "/store-1"}}}, }, }) // NB: If recovery is not performed, new cluster will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. tcAfter.Start(t) defer tcAfter.Stopper().Stop(ctx) // In the new cluster, we will still have nodes 2 and 3 remaining from the first // attempt. That would increase number of replicas on system ranges to 5 and we // would not be able to upreplicate properly. So we need to decommission old nodes // first before proceeding. 
adminClient := tcAfter.Server(0).GetAdminClient(t) require.NoError(t, runDecommissionNodeImpl( ctx, adminClient, nodeDecommissionWaitNone, nodeDecommissionChecksSkip, false, []roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()), "Failed to decommission removed nodes") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(true) return nil }), "Failed to activate replication queue") } require.NoError(t, tcAfter.WaitForZoneConfigPropagation(), "Failed to ensure zone configs are propagated") require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication") for i := 0; i < len(tcAfter.Servers); i++ { require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceConsistencyQueueProcess() }), "Failed to force replicas to consistency queue") } // As a validation step we will just pick one range and get its replicas to see // if they were up-replicated to the new nodes. s = sqlutils.MakeSQLRunner(tcAfter.Conns[0]) r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1") var replicas string r.Scan(&replicas) require.Equal(t, "{1,4,5}", replicas, "Replicas after loss of quorum recovery") // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tcAfter.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") } // TestStageVersionCheck verifies that we can force plan with different internal // version onto cluster. To do this, we create a plan with internal version // above current but matching major and minor. Then we check that staging fails // and that force flag will update plan version to match local node. func TestStageVersionCheck(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() _, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() storeReg := server.NewStickyVFSRegistry() tc := testcluster.NewTestCluster(t, 4, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgsPerNode: map[int]base.TestServerArgs{ 0: { Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: storeReg, }, }, StoreSpecs: []base.StoreSpec{ {InMemory: true, StickyVFSID: "1"}, }, }, }, ReusableListenerReg: listenerReg, }) tc.Start(t) defer tc.Stopper().Stop(ctx) tc.StopServer(3) adminClient := tc.Server(0).GetAdminClient(t) v := clusterversion.ByKey(clusterversion.BinaryVersionKey) v.Internal++ // To avoid crafting real replicas we use StaleLeaseholderNodeIDs to force // node to stage plan for verification. 
p := loqrecoverypb.ReplicaUpdatePlan{ PlanID: uuid.FastMakeV4(), Version: v, ClusterID: tc.Server(0).StorageClusterID().String(), DecommissionedNodeIDs: []roachpb.NodeID{4}, StaleLeaseholderNodeIDs: []roachpb.NodeID{1}, } // Attempts to stage plan with different internal version must fail. _, err := adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: false, }) require.ErrorContains(t, err, "doesn't match cluster active version") // Enable "stuck upgrade bypass" to stage plan on the cluster. _, err = adminClient.RecoveryStagePlan(ctx, &serverpb.RecoveryStagePlanRequest{ Plan: &p, AllNodes: true, ForcePlan: false, ForceLocalInternalVersion: true, }) require.NoError(t, err, "force local must fix incorrect version") // Check that stored plan has version matching cluster version. ps := loqrecovery.NewPlanStore("", storeReg.Get("1")) p, ok, err := ps.LoadPlan() require.NoError(t, err, "failed to read node 0 plan") require.True(t, ok, "plan was not staged") require.Equal(t, clusterversion.ByKey(clusterversion.BinaryVersionKey), p.Version, "plan version was not updated") } func createIntentOnRangeDescriptor( ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key, ) { txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1) var desc roachpb.RangeDescriptor // Pick one of the predefined split points. rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk)) if err := txn.GetProto(ctx, rdKey, &desc); err != nil { t.Fatal(err) } desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } // At this point the intent has been written to Pebble but this // write was not synced (only the raft log append was synced). We // need to force another sync, but we're far from the storage // layer here so the easiest thing to do is simply perform a // second write. This will force the first write to be persisted // to disk (the second write may or may not make it to disk due to // timing). desc.NextReplicaID++ if err := txn.Put(ctx, rdKey, &desc); err != nil { t.Fatal(err) } } func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) skip.UnderDeadlock(t, "slow under deadlock") ctx := context.Background() dir, cleanupFn := testutils.TempDir(t) defer cleanupFn() c := NewCLITest(TestCLIParams{ NoServer: true, }) defer c.Cleanup() listenerReg := listenerutil.NewListenerRegistry() defer listenerReg.Close() // Test cluster contains 3 nodes that we would turn into a single node // cluster using loss of quorum recovery. To do that, we will terminate // two nodes and run recovery on remaining one. Restarting node should // bring it back to healthy (but underreplicated) state. // Note that we inject reusable listeners into all nodes to prevent tests // running in parallel from taking over ports of stopped nodes and responding // to gateway node with errors. // TODO(oleg): Make test run with 7 nodes to exercise cases where multiple // replicas survive. Current startup and allocator behaviour would make // this test flaky. 
sa := make(map[int]base.TestServerArgs) for i := 0; i < 3; i++ { sa[i] = base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ StickyVFSRegistry: server.NewStickyVFSRegistry(), }, }, StoreSpecs: []base.StoreSpec{ { InMemory: true, }, }, } } tc := testcluster.NewTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, }, ReusableListenerReg: listenerReg, ServerArgsPerNode: sa, }) tc.Start(t) s := sqlutils.MakeSQLRunner(tc.Conns[0]) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tc.Stopper().Stop(ctx) // We use scratch range to test special case for pending update on the // descriptor which has to be cleaned up before recovery could proceed. // For that we'll ensure it is not empty and then put an intent. After // recovery, we'll check that the range is still accessible for writes as // normal. sk := tc.ScratchRange(t) require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value"), "failed to write value to scratch range") createIntentOnRangeDescriptor(ctx, t, tc, sk) node1ID := tc.Servers[0].NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. tc.StopServer(1) tc.StopServer(2) // Generate recovery plan and try to verify that plan file was generated and contains // meaningful data. This is not strictly necessary for verifying end-to-end flow, but // having assertions on generated data helps to identify which stage of pipeline broke // if test fails. planFile := dir + "/recovery-plan.json" out, err := c.RunWithCaptureArgs( []string{ "debug", "recover", "make-plan", "--confirm=y", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--plan=" + planFile, }) require.NoError(t, err, "failed to run make-plan") require.Contains(t, out, fmt.Sprintf("- node n%d", node1ID), "planner didn't provide correct apply instructions") require.FileExists(t, planFile, "generated plan file") planFileContent, err := os.ReadFile(planFile) require.NoError(t, err, "test infra failed, can't open created plan file") plan := loqrecoverypb.ReplicaUpdatePlan{} jsonpb := protoutil.JSONPb{} require.NoError(t, jsonpb.Unmarshal(planFileContent, &plan), "failed to deserialize replica recovery plan") require.NotEmpty(t, plan.Updates, "resulting plan contains no updates") out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "apply-plan", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), "--confirm=y", planFile, }) require.NoError(t, err, "failed to run apply plan") // Check that there were at least one mention of replica being promoted. require.Contains(t, out, "updating replica", "no replica updates were recorded") require.Contains(t, out, fmt.Sprintf("Plan staged. To complete recovery restart nodes n%d.", node1ID), "apply plan failed to stage on expected nodes") // Verify plan is staged on nodes out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "ERROR: loss of quorum recovery is not finished yet") tc.StopServer(0) // NB: If recovery is not performed, server will just hang on startup. // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. 
require.NoError(t, tc.RestartServer(0), "restart failed") s = sqlutils.MakeSQLRunner(tc.Conns[0]) // Verifying that post start cleanup performed node decommissioning that // prevents old nodes from rejoining. ac := tc.GetAdminClient(t, 0) testutils.SucceedsSoon(t, func() error { dr, err := ac.DecommissionStatus(ctx, &serverpb.DecommissionStatusRequest{NodeIDs: []roachpb.NodeID{2, 3}}) if err != nil { return err } for _, s := range dr.Status { if s.Membership != livenesspb.MembershipStatus_DECOMMISSIONED { return errors.Newf("expecting n%d to be decommissioned", s.NodeID) } } return nil }) // Validate that rangelog is updated by recovery records after cluster restarts. testutils.SucceedsSoon(t, func() error { r := s.QueryRow(t, `select count(*) from system.rangelog where "eventType" = 'unsafe_quorum_recovery'`) var recoveries int r.Scan(&recoveries) if recoveries != len(plan.Updates) { return errors.Errorf("found %d recovery events while expecting %d", recoveries, len(plan.Updates)) } return nil }) // Verify recovery complete. out, err = c.RunWithCaptureArgs( []string{ "debug", "recover", "verify", "--certs-dir=test_certs", "--host=" + tc.Server(0).AdvRPCAddr(), planFile, }) require.NoError(t, err, "failed to run verify plan") require.Contains(t, out, "Loss of quorum recovery is complete.") // We were using scratch range to test cleanup of pending transaction on // rangedescriptor key. We want to verify that after recovery, range is still // writable e.g. recovery succeeded. require.NoError(t, tc.Server(0).DB().Put(ctx, testutils.MakeKey(sk, []byte{1}), "value2"), "failed to write value to scratch range after recovery") // Finally split scratch range to ensure metadata ranges are recovered. _, _, err = tc.Server(0).SplitRange(testutils.MakeKey(sk, []byte{42})) require.NoError(t, err, "failed to split range after recovery") } func TestUpdatePlanVsClusterDiff(t *testing.T) { defer leaktest.AfterTest(t)() var empty uuid.UUID planID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174000") otherPlanID, _ := uuid.FromString("123e4567-e89b-12d3-a456-426614174001") applyTime, _ := time.Parse(time.RFC3339, "2023-01-24T10:30:00Z") status := func(id roachpb.NodeID, pending, applied uuid.UUID, err string) loqrecoverypb.NodeRecoveryStatus { s := loqrecoverypb.NodeRecoveryStatus{ NodeID: id, } if !pending.Equal(empty) { s.PendingPlanID = &pending } if !applied.Equal(empty) { s.AppliedPlanID = &applied s.ApplyTimestamp = &applyTime } s.Error = err return s } for _, d := range []struct { name string updatedNodes []int staleLeases []int status []loqrecoverypb.NodeRecoveryStatus pending int errors int report []string }{ { name: "after staging", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, planID, empty, ""), }, pending: 3, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " plan application pending on node n3", }, }, { name: "partially applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, ""), status(3, planID, empty, ""), }, pending: 2, report: []string{ " plan application pending on node n1", " plan applied successfully on node n2", " plan application pending on node n3", }, }, { name: "fully applied", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, empty, planID, ""), status(2, empty, planID, ""), status(3, empty, 
planID, ""), }, report: []string{ " plan applied successfully on node n1", " plan applied successfully on node n2", " plan applied successfully on node n3", }, }, { name: "staging lost no node", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n3", " failed to find node n2 where plan must be staged", }, }, { name: "staging lost no plan", updatedNodes: []int{1, 2}, staleLeases: []int{3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, planID, empty, ""), status(3, empty, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application pending on node n2", " failed to find staged plan on node n3", }, }, { name: "partial failure", updatedNodes: []int{1, 2, 3}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, planID, empty, ""), }, pending: 2, errors: 1, report: []string{ " plan application pending on node n1", " plan application failed on node n2: found stale replica", " plan application pending on node n3", }, }, { name: "no plan", status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, empty, planID, "found stale replica"), status(3, empty, otherPlanID, ""), }, report: []string{ " node n1 staged plan: 123e4567-e89b-12d3-a456-426614174000", " node n2 failed to apply plan 123e4567-e89b-12d3-a456-426614174000: found stale replica", " node n3 applied plan: 123e4567-e89b-12d3-a456-426614174001 at 2023-01-24 10:30:00 +0000 UTC", }, }, { name: "wrong plan", updatedNodes: []int{1, 2}, status: []loqrecoverypb.NodeRecoveryStatus{ status(1, planID, empty, ""), status(2, otherPlanID, empty, ""), status(3, otherPlanID, empty, ""), }, pending: 1, errors: 2, report: []string{ " plan application pending on node n1", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n2", " unexpected staged plan 123e4567-e89b-12d3-a456-426614174001 on node n3", }, }, } { t.Run(d.name, func(t *testing.T) { plan := loqrecoverypb.ReplicaUpdatePlan{ PlanID: planID, } // Plan will contain single replica update for each requested node. 
rangeSeq := 1 for _, id := range d.updatedNodes { plan.Updates = append(plan.Updates, loqrecoverypb.ReplicaUpdate{ RangeID: roachpb.RangeID(rangeSeq), StartKey: nil, OldReplicaID: roachpb.ReplicaID(1), NewReplica: roachpb.ReplicaDescriptor{ NodeID: roachpb.NodeID(id), StoreID: roachpb.StoreID(id), ReplicaID: roachpb.ReplicaID(rangeSeq + 17), }, NextReplicaID: roachpb.ReplicaID(rangeSeq + 18), }) } for _, id := range d.staleLeases { plan.StaleLeaseholderNodeIDs = append(plan.StaleLeaseholderNodeIDs, roachpb.NodeID(id)) } diff := diffPlanWithNodeStatus(plan, d.status) require.Equal(t, d.pending, diff.pending, "number of pending changes") require.Equal(t, d.errors, diff.errors, "number of node errors") if d.report != nil { require.Equal(t, len(d.report), len(diff.report), "number of lines in diff") for i := range d.report { require.Equal(t, d.report[i], diff.report[i], "wrong line %d of report", i) } } }) } } func TestTruncateKeyOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 13, result: "/System/No...", }, { len: 30, result: "/System/NodeLiveness", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatKey(keys.NodeLivenessPrefix)) }) } } func TestTruncateSpanOutput(t *testing.T) { defer leaktest.AfterTest(t)() for _, d := range []struct { len uint result string }{ { len: 30, result: "/System/{NodeLiveness-Syste...", }, { len: 90, result: "/System/{NodeLiveness-SystemSpanConfigKeys}", }, { len: 3, result: "/Sy", }, { len: 4, result: "/...", }, } { t.Run("", func(t *testing.T) { helper := outputFormatHelper{ maxPrintedKeyLength: d.len, } require.Equal(t, d.result, helper.formatSpan(roachpb.Span{ Key: keys.NodeLivenessPrefix, EndKey: keys.SystemSpanConfigPrefix, })) }) } }
pkg/cli/debug_recover_loss_of_quorum_test.go
1
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.9962871074676514, 0.04548858851194382, 0.00016007607337087393, 0.00017702343757264316, 0.20123456418514252 ]
{ "id": 10, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 194 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. export function pluralize(value: number, singular: string, plural: string) { if (value === 1) { return singular; } return plural; }
pkg/ui/workspaces/db-console/src/util/pluralize.tsx
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00018107745563611388, 0.0001803157210815698, 0.0001795539865270257, 0.0001803157210815698, 7.617345545440912e-7 ]
{ "id": 10, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 194 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package optbuilder import ( "fmt" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) // buildUpdate builds a memo group for an UpdateOp expression. First, an input // expression is constructed that outputs the existing values for all rows from // the target table that match the WHERE clause. Additional column(s) that // provide updated values are projected for each of the SET expressions, as well // as for any computed columns. For example: // // CREATE TABLE abc (a INT PRIMARY KEY, b INT, c INT) // UPDATE abc SET b=1 WHERE a=2 // // This would create an input expression similar to this SQL: // // SELECT a AS oa, b AS ob, c AS oc, 1 AS nb FROM abc WHERE a=2 // // The execution engine evaluates this relational expression and uses the // resulting values to form the KV keys and values. // // Tuple SET expressions are decomposed into individual columns: // // UPDATE abc SET (b, c)=(1, 2) WHERE a=3 // => // SELECT a AS oa, b AS ob, c AS oc, 1 AS nb, 2 AS nc FROM abc WHERE a=3 // // Subqueries become correlated left outer joins: // // UPDATE abc SET b=(SELECT y FROM xyz WHERE x=a) // => // SELECT a AS oa, b AS ob, c AS oc, y AS nb // FROM abc // LEFT JOIN LATERAL (SELECT y FROM xyz WHERE x=a) // ON True // // Computed columns result in an additional wrapper projection that can depend // on input columns. // // Note that the ORDER BY clause can only be used if the LIMIT clause is also // present. In that case, the ordering determines which rows are included by the // limit. The ORDER BY makes no additional guarantees about the order in which // mutations are applied, or the order of any returned rows (i.e. it won't // become a physical property required of the Update operator). func (b *Builder) buildUpdate(upd *tree.Update, inScope *scope) (outScope *scope) { if upd.OrderBy != nil && upd.Limit == nil { panic(pgerror.Newf(pgcode.Syntax, "UPDATE statement requires LIMIT when ORDER BY is used")) } // UX friendliness safeguard. if upd.Where == nil && b.evalCtx.SessionData().SafeUpdates { panic(pgerror.DangerousStatementf("UPDATE without WHERE clause")) } // Find which table we're working on, check the permissions. tab, depName, alias, refColumns := b.resolveTableForMutation(upd.Table, privilege.UPDATE) if refColumns != nil { panic(pgerror.Newf(pgcode.Syntax, "cannot specify a list of column IDs with UPDATE")) } // Check Select permission as well, since existing values must be read. b.checkPrivilege(depName, tab, privilege.SELECT) // Check if this table has already been mutated in another subquery. 
	b.checkMultipleMutations(tab, generalMutation)

	var mb mutationBuilder
	mb.init(b, "update", tab, alias)

	// Build the input expression that selects the rows that will be updated:
	//
	//   WITH <with>
	//   SELECT <cols> FROM <table> WHERE <where>
	//   ORDER BY <order-by> LIMIT <limit>
	//
	// All columns from the update table will be projected.
	mb.buildInputForUpdate(inScope, upd.Table, upd.From, upd.Where, upd.Limit, upd.OrderBy)

	// Derive the columns that will be updated from the SET expressions.
	mb.addTargetColsForUpdate(upd.Exprs)

	// Build each of the SET expressions.
	mb.addUpdateCols(upd.Exprs)

	// Build the final update statement, including any returned expressions.
	if resultsNeeded(upd.Returning) {
		mb.buildUpdate(upd.Returning.(*tree.ReturningExprs))
	} else {
		mb.buildUpdate(nil /* returning */)
	}

	return mb.outScope
}

// addTargetColsForUpdate compiles the given SET expressions and adds the user-
// specified column names to the list of table columns that will be updated by
// the Update operation. Verify that the RHS of the SET expression provides
// exactly as many columns as are expected by the named SET columns.
func (mb *mutationBuilder) addTargetColsForUpdate(exprs tree.UpdateExprs) {
	if len(mb.targetColList) != 0 {
		panic(errors.AssertionFailedf("addTargetColsForUpdate cannot be called more than once"))
	}

	for _, expr := range exprs {
		mb.addTargetColsByName(expr.Names)

		if expr.Tuple {
			n := -1
			switch t := expr.Expr.(type) {
			case *tree.Subquery:
				// Build the subquery in order to determine how many columns it
				// projects, and store it for later use in the addUpdateCols method.
				// Use the data types of the target columns to resolve expressions
				// with ambiguous types (e.g. should 1 be interpreted as an INT or
				// as a FLOAT).
				desiredTypes := make([]*types.T, len(expr.Names))
				targetIdx := len(mb.targetColList) - len(expr.Names)
				for i := range desiredTypes {
					desiredTypes[i] = mb.md.ColumnMeta(mb.targetColList[targetIdx+i]).Type
				}
				outScope := mb.b.buildSelectStmt(t.Select, noRowLocking, desiredTypes, mb.outScope)
				mb.subqueries = append(mb.subqueries, outScope)
				n = len(outScope.cols)

			case *tree.Tuple:
				n = len(t.Exprs)
			}
			if n < 0 {
				panic(unimplementedWithIssueDetailf(35713, fmt.Sprintf("%T", expr.Expr),
					"source for a multiple-column UPDATE item must be a sub-SELECT or ROW() expression; not supported: %T", expr.Expr))
			}
			if len(expr.Names) != n {
				panic(pgerror.Newf(pgcode.Syntax,
					"number of columns (%d) does not match number of values (%d)", len(expr.Names), n))
			}
		}
	}
}

// addUpdateCols builds nested Project and LeftOuterJoin expressions that
// correspond to the given SET expressions:
//
//   SET a=1 (single-column SET)
//     Add as synthesized Project column:
//       SELECT <fetch-cols>, 1 FROM <input>
//
//   SET (a, b)=(1, 2) (tuple SET)
//     Add as multiple Project columns:
//       SELECT <fetch-cols>, 1, 2 FROM <input>
//
//   SET (a, b)=(SELECT 1, 2) (subquery)
//     Wrap input in Max1Row + LeftJoinApply expressions:
//       SELECT * FROM <fetch-cols> LEFT JOIN LATERAL (SELECT 1, 2) ON True
//
// Multiple subqueries result in multiple left joins successively wrapping the
// input. A final Project operator is built if any single-column or tuple SET
// expressions are present.
func (mb *mutationBuilder) addUpdateCols(exprs tree.UpdateExprs) {
	// SET expressions should reject aggregates, generators, etc.
	scalarProps := &mb.b.semaCtx.Properties
	defer scalarProps.Restore(*scalarProps)
	mb.b.semaCtx.Properties.Require("UPDATE SET", tree.RejectSpecial)

	// UPDATE input columns are accessible to SET expressions.
	inScope := mb.outScope

	// Project additional column(s) for each update expression (can be multiple
	// columns in case of tuple assignment).
	projectionsScope := mb.outScope.replace()
	projectionsScope.appendColumnsFromScope(mb.outScope)

	addCol := func(expr tree.Expr, targetColID opt.ColumnID) {
		ord := mb.tabID.ColumnOrdinal(targetColID)
		targetCol := mb.tab.Column(ord)

		// Allow right side of SET to be DEFAULT.
		if _, ok := expr.(tree.DefaultVal); ok {
			expr = mb.parseDefaultExpr(targetColID)
		} else {
			// GENERATED ALWAYS AS IDENTITY columns are not allowed to be
			// explicitly written to.
			//
			// TODO(janexing): Implement the OVERRIDING SYSTEM VALUE syntax for
			// INSERT which allows a GENERATED ALWAYS AS IDENTITY column to be
			// overwritten.
			// See https://github.com/cockroachdb/cockroach/issues/68201.
			if targetCol.IsGeneratedAlwaysAsIdentity() {
				panic(sqlerrors.NewGeneratedAlwaysAsIdentityColumnUpdateError(string(targetCol.ColName())))
			}
		}

		// Add new column to the projections scope.
		texpr := inScope.resolveType(expr, targetCol.DatumType())
		targetColName := targetCol.ColName()
		colName := scopeColName(targetColName).WithMetadataName(string(targetColName) + "_new")
		scopeCol := projectionsScope.addColumn(colName, texpr)
		mb.b.buildScalar(texpr, inScope, projectionsScope, scopeCol, nil)

		// Add the column ID to the list of columns to update.
		mb.updateColIDs[ord] = scopeCol.id
	}

	n := 0
	subquery := 0
	for _, set := range exprs {
		if set.Tuple {
			switch t := set.Expr.(type) {
			case *tree.Subquery:
				// Get the subquery scope that was built by addTargetColsForUpdate.
				subqueryScope := mb.subqueries[subquery]
				subquery++

				// Type check and rename columns.
				for i := range subqueryScope.cols {
					ord := mb.tabID.ColumnOrdinal(mb.targetColList[n])
					targetCol := mb.tab.Column(ord)
					subqueryScope.cols[i].name = scopeColName(targetCol.ColName())

					// Add the column ID to the list of columns to update.
					mb.updateColIDs[ord] = subqueryScope.cols[i].id
					n++
				}

				// Lazily create new scope to hold results of join.
				if mb.outScope == inScope {
					mb.outScope = inScope.replace()
					mb.outScope.appendColumnsFromScope(inScope)
					mb.outScope.expr = inScope.expr
				}

				// Wrap input with Max1Row + LOJ.
				mb.outScope.appendColumnsFromScope(subqueryScope)
				mb.outScope.expr = mb.b.factory.ConstructLeftJoinApply(
					mb.outScope.expr,
					mb.b.factory.ConstructMax1Row(subqueryScope.expr, multiRowSubqueryErrText),
					memo.TrueFilter,
					memo.EmptyJoinPrivate,
				)

				// Project all subquery output columns.
				projectionsScope.appendColumnsFromScope(subqueryScope)

			case *tree.Tuple:
				for _, expr := range t.Exprs {
					addCol(expr, mb.targetColList[n])
					n++
				}
			}
		} else {
			addCol(set.Expr, mb.targetColList[n])
			n++
		}
	}

	mb.b.constructProjectForScope(mb.outScope, projectionsScope)
	mb.outScope = projectionsScope

	// Add assignment casts for update columns.
	mb.addAssignmentCasts(mb.updateColIDs)

	// Add additional columns for computed expressions that may depend on the
	// updated columns.
	mb.addSynthesizedColsForUpdate()
}

// addSynthesizedColsForUpdate wraps an Update input expression with a Project
// operator containing any computed columns that need to be updated. This
// includes write-only mutation columns that are computed.
func (mb *mutationBuilder) addSynthesizedColsForUpdate() {
	// Allow mutation columns to be referenced by other computed mutation
	// columns (otherwise the scope will raise an error if a mutation column
	// is referenced). These do not need to be set back to true again because
	// mutation columns are not projected by the Update operator.
	for i := range mb.outScope.cols {
		mb.outScope.cols[i].mutation = false
	}

	// Add non-computed columns that are being dropped or added (mutated) to the
	// table. These are not visible to queries, and will always be updated to
	// their default values. This is necessary because they may not yet have been
	// set by the backfiller.
	mb.addSynthesizedDefaultCols(
		mb.updateColIDs,
		false, /* includeOrdinary */
		true,  /* applyOnUpdate */
	)

	// Add assignment casts for default column values.
	mb.addAssignmentCasts(mb.updateColIDs)

	// Disambiguate names so that references in the computed expression refer to
	// the correct columns.
	mb.disambiguateColumns()

	// Add all computed columns in case their values have changed.
	mb.addSynthesizedComputedCols(mb.updateColIDs, true /* restrict */)

	// Add assignment casts for computed column values.
	mb.addAssignmentCasts(mb.updateColIDs)
}

// buildUpdate constructs an Update operator, possibly wrapped by a Project
// operator that corresponds to the given RETURNING clause.
func (mb *mutationBuilder) buildUpdate(returning *tree.ReturningExprs) {
	// Disambiguate names so that references in any expressions, such as a
	// check constraint, refer to the correct columns.
	mb.disambiguateColumns()

	// Add any check constraint boolean columns to the input.
	mb.addCheckConstraintCols(true /* isUpdate */)

	// Add the partial index predicate expressions to the table metadata.
	// These expressions are used to prune fetch columns during
	// normalization.
	mb.b.addPartialIndexPredicatesForTable(mb.md.TableMeta(mb.tabID), nil /* scan */)

	// Project partial index PUT and DEL boolean columns.
	mb.projectPartialIndexPutAndDelCols()

	mb.buildUniqueChecksForUpdate()

	mb.buildFKChecksForUpdate()

	private := mb.makeMutationPrivate(returning != nil)
	for _, col := range mb.extraAccessibleCols {
		if col.id != 0 {
			private.PassthroughCols = append(private.PassthroughCols, col.id)
		}
	}
	mb.outScope.expr = mb.b.factory.ConstructUpdate(
		mb.outScope.expr, mb.uniqueChecks, mb.fkChecks, private,
	)

	mb.buildReturning(returning)
}
pkg/sql/opt/optbuilder/update.go
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00034511418198235333, 0.0001804573112167418, 0.00016372535901609808, 0.0001737460697768256, 0.000033341068046865985 ]
{ "id": 10, "code_window": [ "\tdefer leaktest.AfterTest(t)()\n", "\tdefer log.Scope(t).Close(t)\n", "\n", "\tcluster := serverutils.StartCluster(t, 1 /* numNodes */, base.TestClusterArgs{\n", "\t\tServerArgs: base.TestServerArgs{},\n", "\t})\n", "\tdefer cluster.Stopper().Stop(context.Background())\n", "\ttestConn := cluster.ServerConn(0 /* idx */)\n", "\tsqlDB := sqlutils.MakeSQLRunner(testConn)\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tServerArgs: base.TestServerArgs{\n", "\t\t\t// The zip queries include queries that are only meant to work\n", "\t\t\t// in a system tenant. These would fail if pointed to a\n", "\t\t\t// secondary tenant.\n", "\t\t\tDefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant,\n", "\t\t},\n" ], "file_path": "pkg/cli/zip_table_registry_test.go", "type": "replace", "edit_start_line_idx": 194 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "ptreconcile", srcs = [ "metrics.go", "reconciler.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptreconcile", visibility = ["//visibility:public"], deps = [ "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/settings", "//pkg/settings/cluster", "//pkg/sql/isql", "//pkg/util/log", "//pkg/util/metric", "//pkg/util/stop", "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", "@com_github_prometheus_client_model//go", ], ) go_test( name = "ptreconcile_test", size = "small", srcs = [ "main_test.go", "reconciler_test.go", ], args = ["-test.timeout=55s"], deps = [ ":ptreconcile", "//pkg/base", "//pkg/keys", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/kv/kvserver/protectedts/ptstorage", "//pkg/roachpb", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/server", "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/testcluster", "//pkg/util/leaktest", "//pkg/util/randutil", "//pkg/util/syncutil", "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], )
pkg/kv/kvserver/protectedts/ptreconcile/BUILD.bazel
0
https://github.com/cockroachdb/cockroach/commit/0d77d949392d0daabeb77a82d468b15d4774e1de
[ 0.00017889127775561064, 0.0001744016190059483, 0.00017001629748847336, 0.0001741040323395282, 0.0000028695089895336423 ]
{ "id": 0, "code_window": [ " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//staging/src/k8s.io/apiserver/pkg/features:go_default_library\",\n" ], "file_path": "cmd/kube-apiserver/app/BUILD", "type": "add", "edit_start_line_idx": 61 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package app does all of the work necessary to create a Kubernetes // APIServer by binding together the API, master and APIServer infrastructure. // It can be configured and called directly or via the hyperkube framework. package app import ( "fmt" "io/ioutil" "net/http" "strings" "sync" "github.com/golang/glog" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" genericoptions "k8s.io/apiserver/pkg/server/options" kubeexternalinformers "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" "k8s.io/kube-aggregator/pkg/apis/apiregistration" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme" apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion" informers "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion" "k8s.io/kube-aggregator/pkg/controllers/autoregister" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/master/controller/crdregistration" ) func createAggregatorConfig( kubeAPIServerConfig genericapiserver.Config, commandOptions *options.ServerRunOptions, externalInformers kubeexternalinformers.SharedInformerFactory, serviceResolver aggregatorapiserver.ServiceResolver, proxyTransport *http.Transport, pluginInitializers []admission.PluginInitializer, ) (*aggregatorapiserver.Config, error) { // make a shallow copy to let us twiddle a few things // most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the aggregator genericConfig := kubeAPIServerConfig // override genericConfig.AdmissionControl with kube-aggregator's scheme, // because aggregator apiserver should use its own scheme to convert its own resources. commandOptions.Admission.ApplyTo( &genericConfig, externalInformers, genericConfig.LoopbackClientConfig, aggregatorscheme.Scheme, pluginInitializers...) // the aggregator doesn't wire these up. It just delegates them to the kubeapiserver genericConfig.EnableSwaggerUI = false genericConfig.SwaggerConfig = nil // copy the etcd options so we don't mutate originals. 
etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion) genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} // override MergedResourceConfig with aggregator defaults and registry if err := commandOptions.APIEnablement.ApplyTo( &genericConfig, aggregatorapiserver.DefaultAPIResourceConfigSource(), aggregatorscheme.Scheme); err != nil { return nil, err } var err error var certBytes, keyBytes []byte if len(commandOptions.ProxyClientCertFile) > 0 && len(commandOptions.ProxyClientKeyFile) > 0 { certBytes, err = ioutil.ReadFile(commandOptions.ProxyClientCertFile) if err != nil { return nil, err } keyBytes, err = ioutil.ReadFile(commandOptions.ProxyClientKeyFile) if err != nil { return nil, err } } aggregatorConfig := &aggregatorapiserver.Config{ GenericConfig: &genericapiserver.RecommendedConfig{ Config: genericConfig, SharedInformerFactory: externalInformers, }, ExtraConfig: aggregatorapiserver.ExtraConfig{ ProxyClientCert: certBytes, ProxyClientKey: keyBytes, ServiceResolver: serviceResolver, ProxyTransport: proxyTransport, }, } return aggregatorConfig, nil } func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) { aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer) if err != nil { return nil, err } // create controllers for auto-registration apiRegistrationClient, err := apiregistrationclient.NewForConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) if err != nil { return nil, err } autoRegistrationController := autoregister.NewAutoRegisterController(aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), apiRegistrationClient) apiServices := apiServicesToRegister(delegateAPIServer, autoRegistrationController) crdRegistrationController := crdregistration.NewAutoRegistrationController( apiExtensionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(), autoRegistrationController) aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error { go crdRegistrationController.Run(5, context.StopCh) go func() { // let the CRD controller process the initial set of CRDs before starting the autoregistration controller. // this prevents the autoregistration controller's initial sync from deleting APIServices for CRDs that still exist. // we only need to do this if CRDs are enabled on this server. We can't use discovery because we are the source for discovery. 
if aggregatorConfig.GenericConfig.MergedResourceConfig.AnyVersionForGroupEnabled("apiextensions.k8s.io") { crdRegistrationController.WaitForInitialSync() } autoRegistrationController.Run(5, context.StopCh) }() return nil }) aggregatorServer.GenericAPIServer.AddHealthzChecks( makeAPIServiceAvailableHealthzCheck( "autoregister-completion", apiServices, aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), ), ) return aggregatorServer, nil } func makeAPIService(gv schema.GroupVersion) *apiregistration.APIService { apiServicePriority, ok := apiVersionPriorities[gv] if !ok { // if we aren't found, then we shouldn't register ourselves because it could result in a CRD group version // being permanently stuck in the APIServices list. glog.Infof("Skipping APIService creation for %v", gv) return nil } return &apiregistration.APIService{ ObjectMeta: metav1.ObjectMeta{Name: gv.Version + "." + gv.Group}, Spec: apiregistration.APIServiceSpec{ Group: gv.Group, Version: gv.Version, GroupPriorityMinimum: apiServicePriority.group, VersionPriority: apiServicePriority.version, }, } } // makeAPIServiceAvailableHealthzCheck returns a healthz check that returns healthy // once all of the specified services have been observed to be available at least once. func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistration.APIService, apiServiceInformer informers.APIServiceInformer) healthz.HealthzChecker { // Track the auto-registered API services that have not been observed to be available yet pendingServiceNamesLock := &sync.RWMutex{} pendingServiceNames := sets.NewString() for _, service := range apiServices { pendingServiceNames.Insert(service.Name) } // When an APIService in the list is seen as available, remove it from the pending list handleAPIServiceChange := func(service *apiregistration.APIService) { pendingServiceNamesLock.Lock() defer pendingServiceNamesLock.Unlock() if !pendingServiceNames.Has(service.Name) { return } if apiregistration.IsAPIServiceConditionTrue(service, apiregistration.Available) { pendingServiceNames.Delete(service.Name) } } // Watch add/update events for APIServices apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { handleAPIServiceChange(obj.(*apiregistration.APIService)) }, UpdateFunc: func(old, new interface{}) { handleAPIServiceChange(new.(*apiregistration.APIService)) }, }) // Don't return healthy until the pending list is empty return healthz.NamedCheck(name, func(r *http.Request) error { pendingServiceNamesLock.RLock() defer pendingServiceNamesLock.RUnlock() if pendingServiceNames.Len() > 0 { return fmt.Errorf("missing APIService: %v", pendingServiceNames.List()) } return nil }) } // priority defines group priority that is used in discovery. This controls // group position in the kubectl output. type priority struct { // group indicates the order of the group relative to other groups. group int32 // version indicates the relative order of the version inside of its group. version int32 } // The proper way to resolve this letting the aggregator know the desired group and version-within-group order of the underlying servers // is to refactor the genericapiserver.DelegationTarget to include a list of priorities based on which APIs were installed. // This requires the APIGroupInfo struct to evolve and include the concept of priorities and to avoid mistakes, the core storage map there needs to be updated. 
// That ripples out every bit as far as you'd expect, so for 1.7 we'll include the list here instead of being built up during storage. var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "", Version: "v1"}: {group: 18000, version: 1}, // extensions is above the rest for CLI compatibility, though the level of unqualified resource compatibility we // can reasonably expect seems questionable. {Group: "extensions", Version: "v1beta1"}: {group: 17900, version: 1}, // to my knowledge, nothing below here collides {Group: "apps", Version: "v1beta1"}: {group: 17800, version: 1}, {Group: "apps", Version: "v1beta2"}: {group: 17800, version: 9}, {Group: "apps", Version: "v1"}: {group: 17800, version: 15}, {Group: "events.k8s.io", Version: "v1beta1"}: {group: 17750, version: 5}, {Group: "authentication.k8s.io", Version: "v1"}: {group: 17700, version: 15}, {Group: "authentication.k8s.io", Version: "v1beta1"}: {group: 17700, version: 9}, {Group: "authorization.k8s.io", Version: "v1"}: {group: 17600, version: 15}, {Group: "authorization.k8s.io", Version: "v1beta1"}: {group: 17600, version: 9}, {Group: "autoscaling", Version: "v1"}: {group: 17500, version: 15}, {Group: "autoscaling", Version: "v2beta1"}: {group: 17500, version: 9}, {Group: "batch", Version: "v1"}: {group: 17400, version: 15}, {Group: "batch", Version: "v1beta1"}: {group: 17400, version: 9}, {Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9}, {Group: "certificates.k8s.io", Version: "v1beta1"}: {group: 17300, version: 9}, {Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15}, {Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9}, {Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1"}: {group: 17000, version: 12}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1"}: {group: 17000, version: 9}, {Group: "settings.k8s.io", Version: "v1alpha1"}: {group: 16900, version: 9}, {Group: "storage.k8s.io", Version: "v1"}: {group: 16800, version: 15}, {Group: "storage.k8s.io", Version: "v1beta1"}: {group: 16800, version: 9}, {Group: "storage.k8s.io", Version: "v1alpha1"}: {group: 16800, version: 1}, {Group: "apiextensions.k8s.io", Version: "v1beta1"}: {group: 16700, version: 9}, {Group: "admissionregistration.k8s.io", Version: "v1"}: {group: 16700, version: 15}, {Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9}, {Group: "scheduling.k8s.io", Version: "v1beta1"}: {group: 16600, version: 12}, {Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9}, {Group: "coordination.k8s.io", Version: "v1beta1"}: {group: 16500, version: 9}, // Append a new group to the end of the list if unsure. // You can use min(existing group)-100 as the initial value for a group. // Version can be set to 9 (to have space around) for a new group. 
} func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService { apiServices := []*apiregistration.APIService{} for _, curr := range delegateAPIServer.ListedPaths() { if curr == "/api/v1" { apiService := makeAPIService(schema.GroupVersion{Group: "", Version: "v1"}) registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) continue } if !strings.HasPrefix(curr, "/apis/") { continue } // this comes back in a list that looks like /apis/rbac.authorization.k8s.io/v1alpha1 tokens := strings.Split(curr, "/") if len(tokens) != 4 { continue } apiService := makeAPIService(schema.GroupVersion{Group: tokens[2], Version: tokens[3]}) if apiService == nil { continue } registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) } return apiServices }
cmd/kube-apiserver/app/aggregator.go
1
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.008824644610285759, 0.0006558863678947091, 0.00016039250476751477, 0.00016816843708511442, 0.0017736601876094937 ]
{ "id": 0, "code_window": [ " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//staging/src/k8s.io/apiserver/pkg/features:go_default_library\",\n" ], "file_path": "cmd/kube-apiserver/app/BUILD", "type": "add", "edit_start_line_idx": 61 }
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1_test

import (
	"reflect"
	"testing"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
	. "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
	_ "k8s.io/kubernetes/pkg/apis/core/install"
	utilpointer "k8s.io/utils/pointer"
)

func TestSetDefaultHPA(t *testing.T) {
	tests := []struct {
		hpa            autoscalingv1.HorizontalPodAutoscaler
		expectReplicas int32
		test           string
	}{
		{
			hpa:            autoscalingv1.HorizontalPodAutoscaler{},
			expectReplicas: 1,
			test:           "unspecified min replicas, use the default value",
		},
		{
			hpa: autoscalingv1.HorizontalPodAutoscaler{
				Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
					MinReplicas: utilpointer.Int32Ptr(3),
				},
			},
			expectReplicas: 3,
			test:           "set min replicas to 3",
		},
	}

	for _, test := range tests {
		hpa := &test.hpa
		obj2 := roundTrip(t, runtime.Object(hpa))
		hpa2, ok := obj2.(*autoscalingv1.HorizontalPodAutoscaler)
		if !ok {
			t.Fatalf("unexpected object: %v", obj2)
		}
		if hpa2.Spec.MinReplicas == nil {
			t.Errorf("unexpected nil MinReplicas")
		} else if test.expectReplicas != *hpa2.Spec.MinReplicas {
			t.Errorf("expected: %d MinReplicas, got: %d", test.expectReplicas, *hpa2.Spec.MinReplicas)
		}
	}
}

func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
	data, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(SchemeGroupVersion), obj)
	if err != nil {
		t.Errorf("%v\n %#v", err, obj)
		return nil
	}
	obj2, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data)
	if err != nil {
		t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj)
		return nil
	}
	obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
	err = legacyscheme.Scheme.Convert(obj2, obj3, nil)
	if err != nil {
		t.Errorf("%v\nSource: %#v", err, obj2)
		return nil
	}
	return obj3
}
pkg/apis/autoscaling/v1/defaults_test.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.000174365341081284, 0.00017042251420207322, 0.00016810688248369843, 0.0001694592065177858, 0.0000020618861071852734 ]
{ "id": 0, "code_window": [ " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//staging/src/k8s.io/apiserver/pkg/features:go_default_library\",\n" ], "file_path": "cmd/kube-apiserver/app/BUILD", "type": "add", "edit_start_line_idx": 61 }
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package remote contains gRPC implementation of internalapi.RuntimeService
// and internalapi.ImageManagerService.
package remote
pkg/kubelet/remote/doc.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.000174365341081284, 0.0001741399464663118, 0.0001739145372994244, 0.0001741399464663118, 2.2540189092978835e-7 ]
{ "id": 0, "code_window": [ " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library\",\n", " \"//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//staging/src/k8s.io/apiserver/pkg/features:go_default_library\",\n" ], "file_path": "cmd/kube-apiserver/app/BUILD", "type": "add", "edit_start_line_idx": 61 }
package client

import (
	"net/url"

	"github.com/docker/docker/api/types"
	"golang.org/x/net/context"
)

// ContainerStats returns near realtime stats for a given container.
// It's up to the caller to close the io.ReadCloser returned.
func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
	query := url.Values{}
	query.Set("stream", "0")
	if stream {
		query.Set("stream", "1")
	}

	resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
	if err != nil {
		return types.ContainerStats{}, err
	}

	osType := getDockerOS(resp.header.Get("Server"))
	return types.ContainerStats{Body: resp.body, OSType: osType}, err
}
vendor/github.com/docker/docker/client/container_stats.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00016834250709507614, 0.00016581961244810373, 0.00016360534937120974, 0.0001655109808780253, 0.0000019462111140455818 ]
{ "id": 1, "code_window": [ "\tapiextensionsinformers \"k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion\"\n", "\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n", "\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n", "\t\"k8s.io/apimachinery/pkg/util/sets\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 35 }
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
package app

import (
	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	apiextensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver"
	apiextensionsoptions "k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
	"k8s.io/apiserver/pkg/admission"
	genericapiserver "k8s.io/apiserver/pkg/server"
	genericoptions "k8s.io/apiserver/pkg/server/options"
	kubeexternalinformers "k8s.io/client-go/informers"
	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
)

func createAPIExtensionsConfig(
	kubeAPIServerConfig genericapiserver.Config,
	externalInformers kubeexternalinformers.SharedInformerFactory,
	pluginInitializers []admission.PluginInitializer,
	commandOptions *options.ServerRunOptions,
	masterCount int,
) (*apiextensionsapiserver.Config, error) {
	// make a shallow copy to let us twiddle a few things
	// most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the apiextensions
	genericConfig := kubeAPIServerConfig

	// override genericConfig.AdmissionControl with apiextensions' scheme,
	// because apiextentions apiserver should use its own scheme to convert resources.
	commandOptions.Admission.ApplyTo(
		&genericConfig,
		externalInformers,
		genericConfig.LoopbackClientConfig,
		apiextensionsapiserver.Scheme,
		pluginInitializers...)

	// copy the etcd options so we don't mutate originals.
	etcdOptions := *commandOptions.Etcd
	etcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)
	genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}

	// override MergedResourceConfig with apiextensions defaults and registry
	if err := commandOptions.APIEnablement.ApplyTo(
		&genericConfig,
		apiextensionsapiserver.DefaultAPIResourceConfigSource(),
		apiextensionsapiserver.Scheme); err != nil {
		return nil, err
	}

	apiextensionsConfig := &apiextensionsapiserver.Config{
		GenericConfig: &genericapiserver.RecommendedConfig{
			Config:                genericConfig,
			SharedInformerFactory: externalInformers,
		},
		ExtraConfig: apiextensionsapiserver.ExtraConfig{
			CRDRESTOptionsGetter: apiextensionsoptions.NewCRDRESTOptionsGetter(etcdOptions),
			MasterCount:          masterCount,
		},
	}

	return apiextensionsConfig, nil
}

func createAPIExtensionsServer(apiextensionsConfig *apiextensionsapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget) (*apiextensionsapiserver.CustomResourceDefinitions, error) {
	return apiextensionsConfig.Complete().New(delegateAPIServer)
}
cmd/kube-apiserver/app/apiextensions.go
1
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.028677906841039658, 0.012121177278459072, 0.00017094137729145586, 0.014122319407761097, 0.009714199230074883 ]
{ "id": 1, "code_window": [ "\tapiextensionsinformers \"k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion\"\n", "\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n", "\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n", "\t\"k8s.io/apimachinery/pkg/util/sets\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 35 }
pkg/kubectl/cmd/testdata/edit/testcase-edit-error-reedit/0.request
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00016845876234583557, 0.00016845876234583557, 0.00016845876234583557, 0.00016845876234583557, 0 ]
{ "id": 1, "code_window": [ "\tapiextensionsinformers \"k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion\"\n", "\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n", "\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n", "\t\"k8s.io/apimachinery/pkg/util/sets\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 35 }
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["mock_cni.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing",
    deps = [
        "//vendor/github.com/containernetworking/cni/libcni:go_default_library",
        "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library",
        "//vendor/github.com/stretchr/testify/mock:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
pkg/kubelet/dockershim/network/cni/testing/BUILD
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00017343793297186494, 0.000170032843016088, 0.00016661638801451772, 0.00017003854736685753, 0.000002657881850609556 ]
{ "id": 1, "code_window": [ "\tapiextensionsinformers \"k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion\"\n", "\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n", "\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n", "\t\"k8s.io/apimachinery/pkg/util/sets\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 35 }
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["chaosclient.go"],
    importpath = "k8s.io/kubernetes/pkg/client/chaosclient",
    deps = ["//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library"],
)

go_test(
    name = "go_default_test",
    srcs = ["chaosclient_test.go"],
    embed = [":go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
pkg/client/chaosclient/BUILD
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.0002943441504612565, 0.00020223019237164408, 0.0001710860087769106, 0.00017174531240016222, 0.000053182746341917664 ]
{ "id": 2, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/client-go/tools/cache\"\n", "\t\"k8s.io/kube-aggregator/pkg/apis/apiregistration\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 38 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package app does all of the work necessary to create a Kubernetes // APIServer by binding together the API, master and APIServer infrastructure. // It can be configured and called directly or via the hyperkube framework. package app import ( "fmt" "io/ioutil" "net/http" "strings" "sync" "github.com/golang/glog" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" genericoptions "k8s.io/apiserver/pkg/server/options" kubeexternalinformers "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" "k8s.io/kube-aggregator/pkg/apis/apiregistration" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme" apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion" informers "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion" "k8s.io/kube-aggregator/pkg/controllers/autoregister" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/master/controller/crdregistration" ) func createAggregatorConfig( kubeAPIServerConfig genericapiserver.Config, commandOptions *options.ServerRunOptions, externalInformers kubeexternalinformers.SharedInformerFactory, serviceResolver aggregatorapiserver.ServiceResolver, proxyTransport *http.Transport, pluginInitializers []admission.PluginInitializer, ) (*aggregatorapiserver.Config, error) { // make a shallow copy to let us twiddle a few things // most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the aggregator genericConfig := kubeAPIServerConfig // override genericConfig.AdmissionControl with kube-aggregator's scheme, // because aggregator apiserver should use its own scheme to convert its own resources. commandOptions.Admission.ApplyTo( &genericConfig, externalInformers, genericConfig.LoopbackClientConfig, aggregatorscheme.Scheme, pluginInitializers...) // the aggregator doesn't wire these up. It just delegates them to the kubeapiserver genericConfig.EnableSwaggerUI = false genericConfig.SwaggerConfig = nil // copy the etcd options so we don't mutate originals. 
etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion) genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} // override MergedResourceConfig with aggregator defaults and registry if err := commandOptions.APIEnablement.ApplyTo( &genericConfig, aggregatorapiserver.DefaultAPIResourceConfigSource(), aggregatorscheme.Scheme); err != nil { return nil, err } var err error var certBytes, keyBytes []byte if len(commandOptions.ProxyClientCertFile) > 0 && len(commandOptions.ProxyClientKeyFile) > 0 { certBytes, err = ioutil.ReadFile(commandOptions.ProxyClientCertFile) if err != nil { return nil, err } keyBytes, err = ioutil.ReadFile(commandOptions.ProxyClientKeyFile) if err != nil { return nil, err } } aggregatorConfig := &aggregatorapiserver.Config{ GenericConfig: &genericapiserver.RecommendedConfig{ Config: genericConfig, SharedInformerFactory: externalInformers, }, ExtraConfig: aggregatorapiserver.ExtraConfig{ ProxyClientCert: certBytes, ProxyClientKey: keyBytes, ServiceResolver: serviceResolver, ProxyTransport: proxyTransport, }, } return aggregatorConfig, nil } func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) { aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer) if err != nil { return nil, err } // create controllers for auto-registration apiRegistrationClient, err := apiregistrationclient.NewForConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) if err != nil { return nil, err } autoRegistrationController := autoregister.NewAutoRegisterController(aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), apiRegistrationClient) apiServices := apiServicesToRegister(delegateAPIServer, autoRegistrationController) crdRegistrationController := crdregistration.NewAutoRegistrationController( apiExtensionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(), autoRegistrationController) aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error { go crdRegistrationController.Run(5, context.StopCh) go func() { // let the CRD controller process the initial set of CRDs before starting the autoregistration controller. // this prevents the autoregistration controller's initial sync from deleting APIServices for CRDs that still exist. // we only need to do this if CRDs are enabled on this server. We can't use discovery because we are the source for discovery. 
if aggregatorConfig.GenericConfig.MergedResourceConfig.AnyVersionForGroupEnabled("apiextensions.k8s.io") { crdRegistrationController.WaitForInitialSync() } autoRegistrationController.Run(5, context.StopCh) }() return nil }) aggregatorServer.GenericAPIServer.AddHealthzChecks( makeAPIServiceAvailableHealthzCheck( "autoregister-completion", apiServices, aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), ), ) return aggregatorServer, nil } func makeAPIService(gv schema.GroupVersion) *apiregistration.APIService { apiServicePriority, ok := apiVersionPriorities[gv] if !ok { // if we aren't found, then we shouldn't register ourselves because it could result in a CRD group version // being permanently stuck in the APIServices list. glog.Infof("Skipping APIService creation for %v", gv) return nil } return &apiregistration.APIService{ ObjectMeta: metav1.ObjectMeta{Name: gv.Version + "." + gv.Group}, Spec: apiregistration.APIServiceSpec{ Group: gv.Group, Version: gv.Version, GroupPriorityMinimum: apiServicePriority.group, VersionPriority: apiServicePriority.version, }, } } // makeAPIServiceAvailableHealthzCheck returns a healthz check that returns healthy // once all of the specified services have been observed to be available at least once. func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistration.APIService, apiServiceInformer informers.APIServiceInformer) healthz.HealthzChecker { // Track the auto-registered API services that have not been observed to be available yet pendingServiceNamesLock := &sync.RWMutex{} pendingServiceNames := sets.NewString() for _, service := range apiServices { pendingServiceNames.Insert(service.Name) } // When an APIService in the list is seen as available, remove it from the pending list handleAPIServiceChange := func(service *apiregistration.APIService) { pendingServiceNamesLock.Lock() defer pendingServiceNamesLock.Unlock() if !pendingServiceNames.Has(service.Name) { return } if apiregistration.IsAPIServiceConditionTrue(service, apiregistration.Available) { pendingServiceNames.Delete(service.Name) } } // Watch add/update events for APIServices apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { handleAPIServiceChange(obj.(*apiregistration.APIService)) }, UpdateFunc: func(old, new interface{}) { handleAPIServiceChange(new.(*apiregistration.APIService)) }, }) // Don't return healthy until the pending list is empty return healthz.NamedCheck(name, func(r *http.Request) error { pendingServiceNamesLock.RLock() defer pendingServiceNamesLock.RUnlock() if pendingServiceNames.Len() > 0 { return fmt.Errorf("missing APIService: %v", pendingServiceNames.List()) } return nil }) } // priority defines group priority that is used in discovery. This controls // group position in the kubectl output. type priority struct { // group indicates the order of the group relative to other groups. group int32 // version indicates the relative order of the version inside of its group. version int32 } // The proper way to resolve this letting the aggregator know the desired group and version-within-group order of the underlying servers // is to refactor the genericapiserver.DelegationTarget to include a list of priorities based on which APIs were installed. // This requires the APIGroupInfo struct to evolve and include the concept of priorities and to avoid mistakes, the core storage map there needs to be updated. 
// That ripples out every bit as far as you'd expect, so for 1.7 we'll include the list here instead of being built up during storage. var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "", Version: "v1"}: {group: 18000, version: 1}, // extensions is above the rest for CLI compatibility, though the level of unqualified resource compatibility we // can reasonably expect seems questionable. {Group: "extensions", Version: "v1beta1"}: {group: 17900, version: 1}, // to my knowledge, nothing below here collides {Group: "apps", Version: "v1beta1"}: {group: 17800, version: 1}, {Group: "apps", Version: "v1beta2"}: {group: 17800, version: 9}, {Group: "apps", Version: "v1"}: {group: 17800, version: 15}, {Group: "events.k8s.io", Version: "v1beta1"}: {group: 17750, version: 5}, {Group: "authentication.k8s.io", Version: "v1"}: {group: 17700, version: 15}, {Group: "authentication.k8s.io", Version: "v1beta1"}: {group: 17700, version: 9}, {Group: "authorization.k8s.io", Version: "v1"}: {group: 17600, version: 15}, {Group: "authorization.k8s.io", Version: "v1beta1"}: {group: 17600, version: 9}, {Group: "autoscaling", Version: "v1"}: {group: 17500, version: 15}, {Group: "autoscaling", Version: "v2beta1"}: {group: 17500, version: 9}, {Group: "batch", Version: "v1"}: {group: 17400, version: 15}, {Group: "batch", Version: "v1beta1"}: {group: 17400, version: 9}, {Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9}, {Group: "certificates.k8s.io", Version: "v1beta1"}: {group: 17300, version: 9}, {Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15}, {Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9}, {Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1"}: {group: 17000, version: 12}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1"}: {group: 17000, version: 9}, {Group: "settings.k8s.io", Version: "v1alpha1"}: {group: 16900, version: 9}, {Group: "storage.k8s.io", Version: "v1"}: {group: 16800, version: 15}, {Group: "storage.k8s.io", Version: "v1beta1"}: {group: 16800, version: 9}, {Group: "storage.k8s.io", Version: "v1alpha1"}: {group: 16800, version: 1}, {Group: "apiextensions.k8s.io", Version: "v1beta1"}: {group: 16700, version: 9}, {Group: "admissionregistration.k8s.io", Version: "v1"}: {group: 16700, version: 15}, {Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9}, {Group: "scheduling.k8s.io", Version: "v1beta1"}: {group: 16600, version: 12}, {Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9}, {Group: "coordination.k8s.io", Version: "v1beta1"}: {group: 16500, version: 9}, // Append a new group to the end of the list if unsure. // You can use min(existing group)-100 as the initial value for a group. // Version can be set to 9 (to have space around) for a new group. 
} func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService { apiServices := []*apiregistration.APIService{} for _, curr := range delegateAPIServer.ListedPaths() { if curr == "/api/v1" { apiService := makeAPIService(schema.GroupVersion{Group: "", Version: "v1"}) registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) continue } if !strings.HasPrefix(curr, "/apis/") { continue } // this comes back in a list that looks like /apis/rbac.authorization.k8s.io/v1alpha1 tokens := strings.Split(curr, "/") if len(tokens) != 4 { continue } apiService := makeAPIService(schema.GroupVersion{Group: tokens[2], Version: tokens[3]}) if apiService == nil { continue } registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) } return apiServices }
cmd/kube-apiserver/app/aggregator.go
1
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.20593616366386414, 0.008397995494306087, 0.00016383733600378036, 0.0002145495091099292, 0.036249224096536636 ]
{ "id": 2, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/client-go/tools/cache\"\n", "\t\"k8s.io/kube-aggregator/pkg/apis/apiregistration\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 38 }
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// core contains modules that interface with the core api group
package core // import "k8s.io/kubernetes/pkg/quota/evaluator/core"
pkg/quota/evaluator/core/doc.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00017573234799783677, 0.00017110799672082067, 0.00016648363089188933, 0.00017110799672082067, 0.0000046243585529737175 ]
{ "id": 2, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/client-go/tools/cache\"\n", "\t\"k8s.io/kube-aggregator/pkg/apis/apiregistration\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 38 }
package tokens

import "github.com/gophercloud/gophercloud"

// CreateURL generates the URL used to create new Tokens.
func CreateURL(client *gophercloud.ServiceClient) string {
	return client.ServiceURL("tokens")
}

// GetURL generates the URL used to Validate Tokens.
func GetURL(client *gophercloud.ServiceClient, token string) string {
	return client.ServiceURL("tokens", token)
}
vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00017005682457238436, 0.00016839636373333633, 0.0001667359028942883, 0.00016839636373333633, 0.000001660460839048028 ]
{ "id": 2, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\t\"k8s.io/apiserver/pkg/server/healthz\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/client-go/tools/cache\"\n", "\t\"k8s.io/kube-aggregator/pkg/apis/apiregistration\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 38 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package batch import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Job represents the configuration of a single job. type Job struct { metav1.TypeMeta // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta // Specification of the desired behavior of a job. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec JobSpec // Current status of a job. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status JobStatus } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // JobList is a collection of jobs. type JobList struct { metav1.TypeMeta // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta // items is the list of Jobs. Items []Job } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // JobTemplate describes a template for creating copies of a predefined pod. type JobTemplate struct { metav1.TypeMeta // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta // Defines jobs that will be created from this template. // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec } // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta // Specification of the desired behavior of the job. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec JobSpec } // JobSpec describes how the job execution will look like. type JobSpec struct { // Specifies the maximum desired number of pods the job should // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. // +optional Parallelism *int32 // Specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. 
// +optional Completions *int32 // Optional duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer // +optional ActiveDeadlineSeconds *int64 // Optional number of retries before marking this job failed. // Defaults to 6 // +optional BackoffLimit *int32 // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed // Optional number of failed pods to retain. // +optional // FailedPodsLimit *int32 // A label query over pods that should match the pod count. // Normally, the system sets this field for you. // +optional Selector *metav1.LabelSelector // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. // When false or unset, the system pick labels unique to this job // and appends those labels to the pod template. When true, // the user is responsible for picking unique labels and specifying // the selector. Failure to pick a unique label may cause this // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. // +optional ManualSelector *bool // Describes the pod that will be created when executing a job. Template api.PodTemplateSpec } // JobStatus represents the current state of a Job. type JobStatus struct { // The latest available observations of an object's current state. // +optional Conditions []JobCondition // Represents time when the job was acknowledged by the job controller. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional StartTime *metav1.Time // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. // +optional CompletionTime *metav1.Time // The number of actively running pods. // +optional Active int32 // The number of pods which reached phase Succeeded. // +optional Succeeded int32 // The number of pods which reached phase Failed. // +optional Failed int32 } type JobConditionType string // These are valid conditions of a job. const ( // JobComplete means the job has completed its execution. JobComplete JobConditionType = "Complete" // JobFailed means the job has failed its execution. JobFailed JobConditionType = "Failed" ) // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. Type JobConditionType // Status of the condition, one of True, False, Unknown. Status api.ConditionStatus // Last time the condition was checked. // +optional LastProbeTime metav1.Time // Last time the condition transit from one status to another. // +optional LastTransitionTime metav1.Time // (brief) reason for the condition's last transition. // +optional Reason string // Human readable message indicating details about last transition. // +optional Message string } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CronJob represents the configuration of a single cron job. type CronJob struct { metav1.TypeMeta // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta // Specification of the desired behavior of a cron job, including the schedule. 
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec CronJobSpec // Current status of a cron job. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status CronJobStatus } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CronJobList is a collection of cron jobs. type CronJobList struct { metav1.TypeMeta // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta // items is the list of CronJobs. Items []CronJob } // CronJobSpec describes how the job execution will look like and when it will actually run. type CronJobSpec struct { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Schedule string // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional StartingDeadlineSeconds *int64 // Specifies how to treat concurrent executions of a Job. // Valid values are: // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy // This flag tells the controller to suspend subsequent executions, it does // not apply to already started executions. Defaults to false. // +optional Suspend *bool // Specifies the job that will be created when executing a CronJob. JobTemplate JobTemplateSpec // The number of successful finished jobs to retain. // This is a pointer to distinguish between explicit zero and not specified. // +optional SuccessfulJobsHistoryLimit *int32 // The number of failed finished jobs to retain. // This is a pointer to distinguish between explicit zero and not specified. // +optional FailedJobsHistoryLimit *int32 } // ConcurrencyPolicy describes how the job will be handled. // Only one of the following concurrent policies may be specified. // If none of the following policies is specified, the default one // is AllowConcurrent. type ConcurrencyPolicy string const ( // AllowConcurrent allows CronJobs to run concurrently. AllowConcurrent ConcurrencyPolicy = "Allow" // ForbidConcurrent forbids concurrent runs, skipping next run if previous // hasn't finished yet. ForbidConcurrent ConcurrencyPolicy = "Forbid" // ReplaceConcurrent cancels currently running job and replaces it with a new one. ReplaceConcurrent ConcurrencyPolicy = "Replace" ) // CronJobStatus represents the current state of a cron job. type CronJobStatus struct { // A list of pointers to currently running jobs. // +optional Active []api.ObjectReference // Information when was the last time the job was successfully scheduled. // +optional LastScheduleTime *metav1.Time }
pkg/apis/batch/types.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.000676326104439795, 0.00021866668248549104, 0.0001645900192670524, 0.00017055000353138894, 0.0001254960079677403 ]
{ "id": 3, "code_window": [ "\tgenericConfig.SwaggerConfig = nil\n", "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with aggregator defaults and registry\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 79 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package app does all of the work necessary to create a Kubernetes // APIServer by binding together the API, master and APIServer infrastructure. // It can be configured and called directly or via the hyperkube framework. package app import ( "fmt" "io/ioutil" "net/http" "strings" "sync" "github.com/golang/glog" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" genericoptions "k8s.io/apiserver/pkg/server/options" kubeexternalinformers "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" "k8s.io/kube-aggregator/pkg/apis/apiregistration" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme" apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion" informers "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion" "k8s.io/kube-aggregator/pkg/controllers/autoregister" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/master/controller/crdregistration" ) func createAggregatorConfig( kubeAPIServerConfig genericapiserver.Config, commandOptions *options.ServerRunOptions, externalInformers kubeexternalinformers.SharedInformerFactory, serviceResolver aggregatorapiserver.ServiceResolver, proxyTransport *http.Transport, pluginInitializers []admission.PluginInitializer, ) (*aggregatorapiserver.Config, error) { // make a shallow copy to let us twiddle a few things // most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the aggregator genericConfig := kubeAPIServerConfig // override genericConfig.AdmissionControl with kube-aggregator's scheme, // because aggregator apiserver should use its own scheme to convert its own resources. commandOptions.Admission.ApplyTo( &genericConfig, externalInformers, genericConfig.LoopbackClientConfig, aggregatorscheme.Scheme, pluginInitializers...) // the aggregator doesn't wire these up. It just delegates them to the kubeapiserver genericConfig.EnableSwaggerUI = false genericConfig.SwaggerConfig = nil // copy the etcd options so we don't mutate originals. 
etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion) genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} // override MergedResourceConfig with aggregator defaults and registry if err := commandOptions.APIEnablement.ApplyTo( &genericConfig, aggregatorapiserver.DefaultAPIResourceConfigSource(), aggregatorscheme.Scheme); err != nil { return nil, err } var err error var certBytes, keyBytes []byte if len(commandOptions.ProxyClientCertFile) > 0 && len(commandOptions.ProxyClientKeyFile) > 0 { certBytes, err = ioutil.ReadFile(commandOptions.ProxyClientCertFile) if err != nil { return nil, err } keyBytes, err = ioutil.ReadFile(commandOptions.ProxyClientKeyFile) if err != nil { return nil, err } } aggregatorConfig := &aggregatorapiserver.Config{ GenericConfig: &genericapiserver.RecommendedConfig{ Config: genericConfig, SharedInformerFactory: externalInformers, }, ExtraConfig: aggregatorapiserver.ExtraConfig{ ProxyClientCert: certBytes, ProxyClientKey: keyBytes, ServiceResolver: serviceResolver, ProxyTransport: proxyTransport, }, } return aggregatorConfig, nil } func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) { aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer) if err != nil { return nil, err } // create controllers for auto-registration apiRegistrationClient, err := apiregistrationclient.NewForConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) if err != nil { return nil, err } autoRegistrationController := autoregister.NewAutoRegisterController(aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), apiRegistrationClient) apiServices := apiServicesToRegister(delegateAPIServer, autoRegistrationController) crdRegistrationController := crdregistration.NewAutoRegistrationController( apiExtensionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(), autoRegistrationController) aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error { go crdRegistrationController.Run(5, context.StopCh) go func() { // let the CRD controller process the initial set of CRDs before starting the autoregistration controller. // this prevents the autoregistration controller's initial sync from deleting APIServices for CRDs that still exist. // we only need to do this if CRDs are enabled on this server. We can't use discovery because we are the source for discovery. 
if aggregatorConfig.GenericConfig.MergedResourceConfig.AnyVersionForGroupEnabled("apiextensions.k8s.io") { crdRegistrationController.WaitForInitialSync() } autoRegistrationController.Run(5, context.StopCh) }() return nil }) aggregatorServer.GenericAPIServer.AddHealthzChecks( makeAPIServiceAvailableHealthzCheck( "autoregister-completion", apiServices, aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), ), ) return aggregatorServer, nil } func makeAPIService(gv schema.GroupVersion) *apiregistration.APIService { apiServicePriority, ok := apiVersionPriorities[gv] if !ok { // if we aren't found, then we shouldn't register ourselves because it could result in a CRD group version // being permanently stuck in the APIServices list. glog.Infof("Skipping APIService creation for %v", gv) return nil } return &apiregistration.APIService{ ObjectMeta: metav1.ObjectMeta{Name: gv.Version + "." + gv.Group}, Spec: apiregistration.APIServiceSpec{ Group: gv.Group, Version: gv.Version, GroupPriorityMinimum: apiServicePriority.group, VersionPriority: apiServicePriority.version, }, } } // makeAPIServiceAvailableHealthzCheck returns a healthz check that returns healthy // once all of the specified services have been observed to be available at least once. func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistration.APIService, apiServiceInformer informers.APIServiceInformer) healthz.HealthzChecker { // Track the auto-registered API services that have not been observed to be available yet pendingServiceNamesLock := &sync.RWMutex{} pendingServiceNames := sets.NewString() for _, service := range apiServices { pendingServiceNames.Insert(service.Name) } // When an APIService in the list is seen as available, remove it from the pending list handleAPIServiceChange := func(service *apiregistration.APIService) { pendingServiceNamesLock.Lock() defer pendingServiceNamesLock.Unlock() if !pendingServiceNames.Has(service.Name) { return } if apiregistration.IsAPIServiceConditionTrue(service, apiregistration.Available) { pendingServiceNames.Delete(service.Name) } } // Watch add/update events for APIServices apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { handleAPIServiceChange(obj.(*apiregistration.APIService)) }, UpdateFunc: func(old, new interface{}) { handleAPIServiceChange(new.(*apiregistration.APIService)) }, }) // Don't return healthy until the pending list is empty return healthz.NamedCheck(name, func(r *http.Request) error { pendingServiceNamesLock.RLock() defer pendingServiceNamesLock.RUnlock() if pendingServiceNames.Len() > 0 { return fmt.Errorf("missing APIService: %v", pendingServiceNames.List()) } return nil }) } // priority defines group priority that is used in discovery. This controls // group position in the kubectl output. type priority struct { // group indicates the order of the group relative to other groups. group int32 // version indicates the relative order of the version inside of its group. version int32 } // The proper way to resolve this letting the aggregator know the desired group and version-within-group order of the underlying servers // is to refactor the genericapiserver.DelegationTarget to include a list of priorities based on which APIs were installed. // This requires the APIGroupInfo struct to evolve and include the concept of priorities and to avoid mistakes, the core storage map there needs to be updated. 
// That ripples out every bit as far as you'd expect, so for 1.7 we'll include the list here instead of being built up during storage. var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "", Version: "v1"}: {group: 18000, version: 1}, // extensions is above the rest for CLI compatibility, though the level of unqualified resource compatibility we // can reasonably expect seems questionable. {Group: "extensions", Version: "v1beta1"}: {group: 17900, version: 1}, // to my knowledge, nothing below here collides {Group: "apps", Version: "v1beta1"}: {group: 17800, version: 1}, {Group: "apps", Version: "v1beta2"}: {group: 17800, version: 9}, {Group: "apps", Version: "v1"}: {group: 17800, version: 15}, {Group: "events.k8s.io", Version: "v1beta1"}: {group: 17750, version: 5}, {Group: "authentication.k8s.io", Version: "v1"}: {group: 17700, version: 15}, {Group: "authentication.k8s.io", Version: "v1beta1"}: {group: 17700, version: 9}, {Group: "authorization.k8s.io", Version: "v1"}: {group: 17600, version: 15}, {Group: "authorization.k8s.io", Version: "v1beta1"}: {group: 17600, version: 9}, {Group: "autoscaling", Version: "v1"}: {group: 17500, version: 15}, {Group: "autoscaling", Version: "v2beta1"}: {group: 17500, version: 9}, {Group: "batch", Version: "v1"}: {group: 17400, version: 15}, {Group: "batch", Version: "v1beta1"}: {group: 17400, version: 9}, {Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9}, {Group: "certificates.k8s.io", Version: "v1beta1"}: {group: 17300, version: 9}, {Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15}, {Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9}, {Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1"}: {group: 17000, version: 12}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1"}: {group: 17000, version: 9}, {Group: "settings.k8s.io", Version: "v1alpha1"}: {group: 16900, version: 9}, {Group: "storage.k8s.io", Version: "v1"}: {group: 16800, version: 15}, {Group: "storage.k8s.io", Version: "v1beta1"}: {group: 16800, version: 9}, {Group: "storage.k8s.io", Version: "v1alpha1"}: {group: 16800, version: 1}, {Group: "apiextensions.k8s.io", Version: "v1beta1"}: {group: 16700, version: 9}, {Group: "admissionregistration.k8s.io", Version: "v1"}: {group: 16700, version: 15}, {Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9}, {Group: "scheduling.k8s.io", Version: "v1beta1"}: {group: 16600, version: 12}, {Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9}, {Group: "coordination.k8s.io", Version: "v1beta1"}: {group: 16500, version: 9}, // Append a new group to the end of the list if unsure. // You can use min(existing group)-100 as the initial value for a group. // Version can be set to 9 (to have space around) for a new group. 
} func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService { apiServices := []*apiregistration.APIService{} for _, curr := range delegateAPIServer.ListedPaths() { if curr == "/api/v1" { apiService := makeAPIService(schema.GroupVersion{Group: "", Version: "v1"}) registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) continue } if !strings.HasPrefix(curr, "/apis/") { continue } // this comes back in a list that looks like /apis/rbac.authorization.k8s.io/v1alpha1 tokens := strings.Split(curr, "/") if len(tokens) != 4 { continue } apiService := makeAPIService(schema.GroupVersion{Group: tokens[2], Version: tokens[3]}) if apiService == nil { continue } registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) } return apiServices }
cmd/kube-apiserver/app/aggregator.go
1
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.998471200466156, 0.046990975737571716, 0.00016489397967234254, 0.00017429303261451423, 0.1907375156879425 ]
{ "id": 3, "code_window": [ "\tgenericConfig.SwaggerConfig = nil\n", "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with aggregator defaults and registry\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 79 }
# Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. FROM BASEIMAGE USER 1001
test/images/mounttest-user/Dockerfile
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.0001784650085028261, 0.00017766086966730654, 0.00017685674538370222, 0.00017766086966730654, 8.04131559561938e-7 ]
{ "id": 3, "code_window": [ "\tgenericConfig.SwaggerConfig = nil\n", "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with aggregator defaults and registry\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 79 }
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by lister-gen. DO NOT EDIT. package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" rbac "k8s.io/kubernetes/pkg/apis/rbac" ) // ClusterRoleLister helps list ClusterRoles. type ClusterRoleLister interface { // List lists all ClusterRoles in the indexer. List(selector labels.Selector) (ret []*rbac.ClusterRole, err error) // Get retrieves the ClusterRole from the index for a given name. Get(name string) (*rbac.ClusterRole, error) ClusterRoleListerExpansion } // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { indexer cache.Indexer } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { return &clusterRoleLister{indexer: indexer} } // List lists all ClusterRoles in the indexer. func (s *clusterRoleLister) List(selector labels.Selector) (ret []*rbac.ClusterRole, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { ret = append(ret, m.(*rbac.ClusterRole)) }) return ret, err } // Get retrieves the ClusterRole from the index for a given name. func (s *clusterRoleLister) Get(name string) (*rbac.ClusterRole, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { return nil, errors.NewNotFound(rbac.Resource("clusterrole"), name) } return obj.(*rbac.ClusterRole), nil }
pkg/client/listers/rbac/internalversion/clusterrole.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.0001793069241102785, 0.00017062628467101604, 0.00016354078252334148, 0.00016832903202157468, 0.000005724696620745817 ]
{ "id": 3, "code_window": [ "\tgenericConfig.SwaggerConfig = nil\n", "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with aggregator defaults and registry\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/aggregator.go", "type": "add", "edit_start_line_idx": 79 }
kind: ReplicationController apiVersion: v1 metadata: name: etcd labels: etcd: "true" spec: replicas: 1 selector: etcd: "true" template: metadata: labels: etcd: "true" spec: containers: - name: etcd image: quay.io/coreos/etcd:v3.0.15 command: - "etcd" - "--listen-client-urls=https://0.0.0.0:4001" - "--advertise-client-urls=https://etcd.kube-public.svc:4001" - "--trusted-ca-file=/var/run/serving-ca/ca.crt" - "--cert-file=/var/run/serving-cert/tls.crt" - "--key-file=/var/run/serving-cert/tls.key" - "--client-cert-auth=true" - "--listen-peer-urls=https://0.0.0.0:7001" - "--initial-advertise-peer-urls=https://etcd.kube-public.svc:7001" - "--peer-trusted-ca-file=/var/run/serving-ca/ca.crt" - "--peer-cert-file=/var/run/serving-cert/tls.crt" - "--peer-key-file=/var/run/serving-cert/tls.key" - "--peer-client-cert-auth=true" - "--initial-cluster=default=https://etcd.kube-public.svc:7001" ports: - containerPort: 4001 volumeMounts: - mountPath: /var/run/serving-cert name: volume-serving-cert - mountPath: /var/run/serving-ca name: volume-etcd-ca volumes: - secret: defaultMode: 420 secretName: serving-etcd name: volume-serving-cert - configMap: defaultMode: 420 name: etcd-ca name: volume-etcd-ca
staging/src/k8s.io/kube-aggregator/artifacts/self-contained/etcd-pod.yaml
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00022277738025877625, 0.00017969733744394034, 0.0001675346284173429, 0.00017207411292474717, 0.000019384186089155264 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1\"\n", "\tapiextensionsapiserver \"k8s.io/apiextensions-apiserver/pkg/apiserver\"\n", "\tapiextensionsoptions \"k8s.io/apiextensions-apiserver/pkg/cmd/server/options\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 26 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package app does all of the work necessary to create a Kubernetes // APIServer by binding together the API, master and APIServer infrastructure. // It can be configured and called directly or via the hyperkube framework. package app import ( "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver" apiextensionsoptions "k8s.io/apiextensions-apiserver/pkg/cmd/server/options" "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" genericoptions "k8s.io/apiserver/pkg/server/options" kubeexternalinformers "k8s.io/client-go/informers" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" ) func createAPIExtensionsConfig( kubeAPIServerConfig genericapiserver.Config, externalInformers kubeexternalinformers.SharedInformerFactory, pluginInitializers []admission.PluginInitializer, commandOptions *options.ServerRunOptions, masterCount int, ) (*apiextensionsapiserver.Config, error) { // make a shallow copy to let us twiddle a few things // most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the apiextensions genericConfig := kubeAPIServerConfig // override genericConfig.AdmissionControl with apiextensions' scheme, // because apiextentions apiserver should use its own scheme to convert resources. commandOptions.Admission.ApplyTo( &genericConfig, externalInformers, genericConfig.LoopbackClientConfig, apiextensionsapiserver.Scheme, pluginInitializers...) // copy the etcd options so we don't mutate originals. etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion) genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} // override MergedResourceConfig with apiextensions defaults and registry if err := commandOptions.APIEnablement.ApplyTo( &genericConfig, apiextensionsapiserver.DefaultAPIResourceConfigSource(), apiextensionsapiserver.Scheme); err != nil { return nil, err } apiextensionsConfig := &apiextensionsapiserver.Config{ GenericConfig: &genericapiserver.RecommendedConfig{ Config: genericConfig, SharedInformerFactory: externalInformers, }, ExtraConfig: apiextensionsapiserver.ExtraConfig{ CRDRESTOptionsGetter: apiextensionsoptions.NewCRDRESTOptionsGetter(etcdOptions), MasterCount: masterCount, }, } return apiextensionsConfig, nil } func createAPIExtensionsServer(apiextensionsConfig *apiextensionsapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget) (*apiextensionsapiserver.CustomResourceDefinitions, error) { return apiextensionsConfig.Complete().New(delegateAPIServer) }
cmd/kube-apiserver/app/apiextensions.go
1
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.1809377372264862, 0.028799057006835938, 0.0001688175107119605, 0.007987563498318195, 0.05464847758412361 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1\"\n", "\tapiextensionsapiserver \"k8s.io/apiextensions-apiserver/pkg/apiserver\"\n", "\tapiextensionsoptions \"k8s.io/apiextensions-apiserver/pkg/cmd/server/options\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 26 }
package netlink import ( "errors" ) var ( // ErrAttrHeaderTruncated is returned when a netlink attribute's header is // truncated. ErrAttrHeaderTruncated = errors.New("attribute header truncated") // ErrAttrBodyTruncated is returned when a netlink attribute's body is // truncated. ErrAttrBodyTruncated = errors.New("attribute body truncated") ) type Fou struct { Family int Port int Protocol int EncapType int }
vendor/github.com/vishvananda/netlink/fou.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00026404645177535713, 0.00020201881125103682, 0.00016998403589241207, 0.0001720259606372565, 0.00004386808359413408 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1\"\n", "\tapiextensionsapiserver \"k8s.io/apiextensions-apiserver/pkg/apiserver\"\n", "\tapiextensionsoptions \"k8s.io/apiextensions-apiserver/pkg/cmd/server/options\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 26 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package anonymous import ( "testing" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" ) func TestAnonymous(t *testing.T) { var a authenticator.Request = NewAuthenticator() u, ok, err := a.AuthenticateRequest(nil) if err != nil { t.Fatalf("Unexpected error %v", err) } if !ok { t.Fatalf("Unexpectedly unauthenticated") } if u.GetName() != user.Anonymous { t.Fatalf("Expected username %s, got %s", user.Anonymous, u.GetName()) } if !sets.NewString(u.GetGroups()...).Equal(sets.NewString(user.AllUnauthenticated)) { t.Fatalf("Expected group %s, got %v", user.AllUnauthenticated, u.GetGroups()) } }
staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous_test.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.0017953661736100912, 0.0004990017041563988, 0.00017206498887389898, 0.0001770475646480918, 0.0006481852033175528 ]
{ "id": 4, "code_window": [ "\t\"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1\"\n", "\tapiextensionsapiserver \"k8s.io/apiextensions-apiserver/pkg/apiserver\"\n", "\tapiextensionsoptions \"k8s.io/apiextensions-apiserver/pkg/cmd/server/options\"\n", "\t\"k8s.io/apiserver/pkg/admission\"\n", "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep" ], "after_edit": [ "\t\"k8s.io/apiserver/pkg/features\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 26 }
approvers: - erictune - liggitt - deads2k - mikedanese reviewers: - erictune - liggitt - deads2k - ericchiang - enj - mikedanese
plugin/pkg/auth/OWNERS
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.0001742134481901303, 0.00017223726899828762, 0.00017026108980644494, 0.00017223726899828762, 0.000001976179191842675 ]
{ "id": 5, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/kubernetes/cmd/kube-apiserver/app/options\"\n", ")\n", "\n" ], "labels": [ "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 28 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package app does all of the work necessary to create a Kubernetes // APIServer by binding together the API, master and APIServer infrastructure. // It can be configured and called directly or via the hyperkube framework. package app import ( "fmt" "io/ioutil" "net/http" "strings" "sync" "github.com/golang/glog" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" genericoptions "k8s.io/apiserver/pkg/server/options" kubeexternalinformers "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" "k8s.io/kube-aggregator/pkg/apis/apiregistration" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme" apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion" informers "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion" "k8s.io/kube-aggregator/pkg/controllers/autoregister" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/master/controller/crdregistration" ) func createAggregatorConfig( kubeAPIServerConfig genericapiserver.Config, commandOptions *options.ServerRunOptions, externalInformers kubeexternalinformers.SharedInformerFactory, serviceResolver aggregatorapiserver.ServiceResolver, proxyTransport *http.Transport, pluginInitializers []admission.PluginInitializer, ) (*aggregatorapiserver.Config, error) { // make a shallow copy to let us twiddle a few things // most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the aggregator genericConfig := kubeAPIServerConfig // override genericConfig.AdmissionControl with kube-aggregator's scheme, // because aggregator apiserver should use its own scheme to convert its own resources. commandOptions.Admission.ApplyTo( &genericConfig, externalInformers, genericConfig.LoopbackClientConfig, aggregatorscheme.Scheme, pluginInitializers...) // the aggregator doesn't wire these up. It just delegates them to the kubeapiserver genericConfig.EnableSwaggerUI = false genericConfig.SwaggerConfig = nil // copy the etcd options so we don't mutate originals. 
etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion) genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} // override MergedResourceConfig with aggregator defaults and registry if err := commandOptions.APIEnablement.ApplyTo( &genericConfig, aggregatorapiserver.DefaultAPIResourceConfigSource(), aggregatorscheme.Scheme); err != nil { return nil, err } var err error var certBytes, keyBytes []byte if len(commandOptions.ProxyClientCertFile) > 0 && len(commandOptions.ProxyClientKeyFile) > 0 { certBytes, err = ioutil.ReadFile(commandOptions.ProxyClientCertFile) if err != nil { return nil, err } keyBytes, err = ioutil.ReadFile(commandOptions.ProxyClientKeyFile) if err != nil { return nil, err } } aggregatorConfig := &aggregatorapiserver.Config{ GenericConfig: &genericapiserver.RecommendedConfig{ Config: genericConfig, SharedInformerFactory: externalInformers, }, ExtraConfig: aggregatorapiserver.ExtraConfig{ ProxyClientCert: certBytes, ProxyClientKey: keyBytes, ServiceResolver: serviceResolver, ProxyTransport: proxyTransport, }, } return aggregatorConfig, nil } func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) { aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer) if err != nil { return nil, err } // create controllers for auto-registration apiRegistrationClient, err := apiregistrationclient.NewForConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) if err != nil { return nil, err } autoRegistrationController := autoregister.NewAutoRegisterController(aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), apiRegistrationClient) apiServices := apiServicesToRegister(delegateAPIServer, autoRegistrationController) crdRegistrationController := crdregistration.NewAutoRegistrationController( apiExtensionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(), autoRegistrationController) aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error { go crdRegistrationController.Run(5, context.StopCh) go func() { // let the CRD controller process the initial set of CRDs before starting the autoregistration controller. // this prevents the autoregistration controller's initial sync from deleting APIServices for CRDs that still exist. // we only need to do this if CRDs are enabled on this server. We can't use discovery because we are the source for discovery. 
if aggregatorConfig.GenericConfig.MergedResourceConfig.AnyVersionForGroupEnabled("apiextensions.k8s.io") { crdRegistrationController.WaitForInitialSync() } autoRegistrationController.Run(5, context.StopCh) }() return nil }) aggregatorServer.GenericAPIServer.AddHealthzChecks( makeAPIServiceAvailableHealthzCheck( "autoregister-completion", apiServices, aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), ), ) return aggregatorServer, nil } func makeAPIService(gv schema.GroupVersion) *apiregistration.APIService { apiServicePriority, ok := apiVersionPriorities[gv] if !ok { // if we aren't found, then we shouldn't register ourselves because it could result in a CRD group version // being permanently stuck in the APIServices list. glog.Infof("Skipping APIService creation for %v", gv) return nil } return &apiregistration.APIService{ ObjectMeta: metav1.ObjectMeta{Name: gv.Version + "." + gv.Group}, Spec: apiregistration.APIServiceSpec{ Group: gv.Group, Version: gv.Version, GroupPriorityMinimum: apiServicePriority.group, VersionPriority: apiServicePriority.version, }, } } // makeAPIServiceAvailableHealthzCheck returns a healthz check that returns healthy // once all of the specified services have been observed to be available at least once. func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistration.APIService, apiServiceInformer informers.APIServiceInformer) healthz.HealthzChecker { // Track the auto-registered API services that have not been observed to be available yet pendingServiceNamesLock := &sync.RWMutex{} pendingServiceNames := sets.NewString() for _, service := range apiServices { pendingServiceNames.Insert(service.Name) } // When an APIService in the list is seen as available, remove it from the pending list handleAPIServiceChange := func(service *apiregistration.APIService) { pendingServiceNamesLock.Lock() defer pendingServiceNamesLock.Unlock() if !pendingServiceNames.Has(service.Name) { return } if apiregistration.IsAPIServiceConditionTrue(service, apiregistration.Available) { pendingServiceNames.Delete(service.Name) } } // Watch add/update events for APIServices apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { handleAPIServiceChange(obj.(*apiregistration.APIService)) }, UpdateFunc: func(old, new interface{}) { handleAPIServiceChange(new.(*apiregistration.APIService)) }, }) // Don't return healthy until the pending list is empty return healthz.NamedCheck(name, func(r *http.Request) error { pendingServiceNamesLock.RLock() defer pendingServiceNamesLock.RUnlock() if pendingServiceNames.Len() > 0 { return fmt.Errorf("missing APIService: %v", pendingServiceNames.List()) } return nil }) } // priority defines group priority that is used in discovery. This controls // group position in the kubectl output. type priority struct { // group indicates the order of the group relative to other groups. group int32 // version indicates the relative order of the version inside of its group. version int32 } // The proper way to resolve this letting the aggregator know the desired group and version-within-group order of the underlying servers // is to refactor the genericapiserver.DelegationTarget to include a list of priorities based on which APIs were installed. // This requires the APIGroupInfo struct to evolve and include the concept of priorities and to avoid mistakes, the core storage map there needs to be updated. 
// That ripples out every bit as far as you'd expect, so for 1.7 we'll include the list here instead of being built up during storage. var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "", Version: "v1"}: {group: 18000, version: 1}, // extensions is above the rest for CLI compatibility, though the level of unqualified resource compatibility we // can reasonably expect seems questionable. {Group: "extensions", Version: "v1beta1"}: {group: 17900, version: 1}, // to my knowledge, nothing below here collides {Group: "apps", Version: "v1beta1"}: {group: 17800, version: 1}, {Group: "apps", Version: "v1beta2"}: {group: 17800, version: 9}, {Group: "apps", Version: "v1"}: {group: 17800, version: 15}, {Group: "events.k8s.io", Version: "v1beta1"}: {group: 17750, version: 5}, {Group: "authentication.k8s.io", Version: "v1"}: {group: 17700, version: 15}, {Group: "authentication.k8s.io", Version: "v1beta1"}: {group: 17700, version: 9}, {Group: "authorization.k8s.io", Version: "v1"}: {group: 17600, version: 15}, {Group: "authorization.k8s.io", Version: "v1beta1"}: {group: 17600, version: 9}, {Group: "autoscaling", Version: "v1"}: {group: 17500, version: 15}, {Group: "autoscaling", Version: "v2beta1"}: {group: 17500, version: 9}, {Group: "batch", Version: "v1"}: {group: 17400, version: 15}, {Group: "batch", Version: "v1beta1"}: {group: 17400, version: 9}, {Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9}, {Group: "certificates.k8s.io", Version: "v1beta1"}: {group: 17300, version: 9}, {Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15}, {Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9}, {Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15}, {Group: "rbac.authorization.k8s.io", Version: "v1beta1"}: {group: 17000, version: 12}, {Group: "rbac.authorization.k8s.io", Version: "v1alpha1"}: {group: 17000, version: 9}, {Group: "settings.k8s.io", Version: "v1alpha1"}: {group: 16900, version: 9}, {Group: "storage.k8s.io", Version: "v1"}: {group: 16800, version: 15}, {Group: "storage.k8s.io", Version: "v1beta1"}: {group: 16800, version: 9}, {Group: "storage.k8s.io", Version: "v1alpha1"}: {group: 16800, version: 1}, {Group: "apiextensions.k8s.io", Version: "v1beta1"}: {group: 16700, version: 9}, {Group: "admissionregistration.k8s.io", Version: "v1"}: {group: 16700, version: 15}, {Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9}, {Group: "scheduling.k8s.io", Version: "v1beta1"}: {group: 16600, version: 12}, {Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9}, {Group: "coordination.k8s.io", Version: "v1beta1"}: {group: 16500, version: 9}, // Append a new group to the end of the list if unsure. // You can use min(existing group)-100 as the initial value for a group. // Version can be set to 9 (to have space around) for a new group. 
} func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService { apiServices := []*apiregistration.APIService{} for _, curr := range delegateAPIServer.ListedPaths() { if curr == "/api/v1" { apiService := makeAPIService(schema.GroupVersion{Group: "", Version: "v1"}) registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) continue } if !strings.HasPrefix(curr, "/apis/") { continue } // this comes back in a list that looks like /apis/rbac.authorization.k8s.io/v1alpha1 tokens := strings.Split(curr, "/") if len(tokens) != 4 { continue } apiService := makeAPIService(schema.GroupVersion{Group: tokens[2], Version: tokens[3]}) if apiService == nil { continue } registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) } return apiServices }
cmd/kube-apiserver/app/aggregator.go
1
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.07339320331811905, 0.0040325261652469635, 0.000161862961249426, 0.00017633356037549675, 0.013659128919243813 ]
{ "id": 5, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/kubernetes/cmd/kube-apiserver/app/options\"\n", ")\n", "\n" ], "labels": [ "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 28 }
// Copyright ©2015 Steve Francia <[email protected]> // Portions Copyright ©2015 The Hugo Authors // Portions Copyright 2016-present Bjørn Erik Pedersen <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package afero import ( "bytes" "fmt" "io" "log" "os" "path/filepath" "strings" "unicode" "golang.org/x/text/transform" "golang.org/x/text/unicode/norm" ) // Filepath separator defined by os.Separator. const FilePathSeparator = string(filepath.Separator) // Takes a reader and a path and writes the content func (a Afero) WriteReader(path string, r io.Reader) (err error) { return WriteReader(a.Fs, path, r) } func WriteReader(fs Fs, path string, r io.Reader) (err error) { dir, _ := filepath.Split(path) ospath := filepath.FromSlash(dir) if ospath != "" { err = fs.MkdirAll(ospath, 0777) // rwx, rw, r if err != nil { if err != os.ErrExist { log.Panicln(err) } } } file, err := fs.Create(path) if err != nil { return } defer file.Close() _, err = io.Copy(file, r) return } // Same as WriteReader but checks to see if file/directory already exists. func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { return SafeWriteReader(a.Fs, path, r) } func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { dir, _ := filepath.Split(path) ospath := filepath.FromSlash(dir) if ospath != "" { err = fs.MkdirAll(ospath, 0777) // rwx, rw, r if err != nil { return } } exists, err := Exists(fs, path) if err != nil { return } if exists { return fmt.Errorf("%v already exists", path) } file, err := fs.Create(path) if err != nil { return } defer file.Close() _, err = io.Copy(file, r) return } func (a Afero) GetTempDir(subPath string) string { return GetTempDir(a.Fs, subPath) } // GetTempDir returns the default temp directory with trailing slash // if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx func GetTempDir(fs Fs, subPath string) string { addSlash := func(p string) string { if FilePathSeparator != p[len(p)-1:] { p = p + FilePathSeparator } return p } dir := addSlash(os.TempDir()) if subPath != "" { // preserve windows backslash :-( if FilePathSeparator == "\\" { subPath = strings.Replace(subPath, "\\", "____", -1) } dir = dir + UnicodeSanitize((subPath)) if FilePathSeparator == "\\" { dir = strings.Replace(dir, "____", "\\", -1) } if exists, _ := Exists(fs, dir); exists { return addSlash(dir) } err := fs.MkdirAll(dir, 0777) if err != nil { panic(err) } dir = addSlash(dir) } return dir } // Rewrite string to remove non-standard path characters func UnicodeSanitize(s string) string { source := []rune(s) target := make([]rune, 0, len(source)) for _, r := range source { if unicode.IsLetter(r) || unicode.IsDigit(r) || unicode.IsMark(r) || r == '.' 
|| r == '/' || r == '\\' || r == '_' || r == '-' || r == '%' || r == ' ' || r == '#' { target = append(target, r) } } return string(target) } // Transform characters with accents into plan forms func NeuterAccents(s string) string { t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) result, _, _ := transform.String(t, string(s)) return result } func isMn(r rune) bool { return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks } func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { return FileContainsBytes(a.Fs, filename, subslice) } // Check if a file contains a specified byte slice. func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { f, err := fs.Open(filename) if err != nil { return false, err } defer f.Close() return readerContainsAny(f, subslice), nil } func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { return FileContainsAnyBytes(a.Fs, filename, subslices) } // Check if a file contains any of the specified byte slices. func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { f, err := fs.Open(filename) if err != nil { return false, err } defer f.Close() return readerContainsAny(f, subslices...), nil } // readerContains reports whether any of the subslices is within r. func readerContainsAny(r io.Reader, subslices ...[]byte) bool { if r == nil || len(subslices) == 0 { return false } largestSlice := 0 for _, sl := range subslices { if len(sl) > largestSlice { largestSlice = len(sl) } } if largestSlice == 0 { return false } bufflen := largestSlice * 4 halflen := bufflen / 2 buff := make([]byte, bufflen) var err error var n, i int for { i++ if i == 1 { n, err = io.ReadAtLeast(r, buff[:halflen], halflen) } else { if i != 2 { // shift left to catch overlapping matches copy(buff[:], buff[halflen:]) } n, err = io.ReadAtLeast(r, buff[halflen:], halflen) } if n > 0 { for _, sl := range subslices { if bytes.Contains(buff, sl) { return true } } } if err != nil { break } } return false } func (a Afero) DirExists(path string) (bool, error) { return DirExists(a.Fs, path) } // DirExists checks if a path exists and is a directory. func DirExists(fs Fs, path string) (bool, error) { fi, err := fs.Stat(path) if err == nil && fi.IsDir() { return true, nil } if os.IsNotExist(err) { return false, nil } return false, err } func (a Afero) IsDir(path string) (bool, error) { return IsDir(a.Fs, path) } // IsDir checks if a given path is a directory. func IsDir(fs Fs, path string) (bool, error) { fi, err := fs.Stat(path) if err != nil { return false, err } return fi.IsDir(), nil } func (a Afero) IsEmpty(path string) (bool, error) { return IsEmpty(a.Fs, path) } // IsEmpty checks if a given file or directory is empty. func IsEmpty(fs Fs, path string) (bool, error) { if b, _ := Exists(fs, path); !b { return false, fmt.Errorf("%q path does not exist", path) } fi, err := fs.Stat(path) if err != nil { return false, err } if fi.IsDir() { f, err := fs.Open(path) defer f.Close() if err != nil { return false, err } list, err := f.Readdir(-1) return len(list) == 0, nil } return fi.Size() == 0, nil } func (a Afero) Exists(path string) (bool, error) { return Exists(a.Fs, path) } // Check if a file or directory exists. 
func Exists(fs Fs, path string) (bool, error) { _, err := fs.Stat(path) if err == nil { return true, nil } if os.IsNotExist(err) { return false, nil } return false, err } func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { combinedPath := filepath.Join(basePathFs.path, relativePath) if parent, ok := basePathFs.source.(*BasePathFs); ok { return FullBaseFsPath(parent, combinedPath) } return combinedPath }
vendor/github.com/spf13/afero/util.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00022362179879564792, 0.00017389521235600114, 0.00016445087385363877, 0.00016991447773762047, 0.000011592169357754756 ]
{ "id": 5, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/kubernetes/cmd/kube-apiserver/app/options\"\n", ")\n", "\n" ], "labels": [ "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 28 }
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "fmt" "os" "path/filepath" "github.com/golang/glog" "github.com/spf13/cobra" ) const ( versionFilename = "version.txt" defaultPort uint64 = 18629 ) var ( migrateCmd = &cobra.Command{ Short: "Upgrade/downgrade etcd data across multiple versions", Long: `Upgrade or downgrade etcd data across multiple versions to the target version Given a 'bin-dir' directory of etcd and etcdctl binaries, an etcd 'data-dir' with a 'version.txt' file and a target etcd version, this tool will upgrade or downgrade the etcd data from the version specified in 'version.txt' to the target version. `, Run: func(cmd *cobra.Command, args []string) { runMigrate() }, } opts = migrateOpts{} ) type migrateOpts struct { name string port uint64 peerListenUrls string peerAdvertiseUrls string binDir string dataDir string bundledVersionString string etcdDataPrefix string ttlKeysDirectory string initialCluster string targetVersion string targetStorage string etcdServerArgs string } func main() { flags := migrateCmd.Flags() flags.StringVar(&opts.name, "name", "", "etcd cluster member name. Defaults to etcd-{hostname}") flags.Uint64Var(&opts.port, "port", defaultPort, "etcd client port to use during migration operations. This should be a different port than typically used by etcd to avoid clients accidentally connecting during upgrade/downgrade operations.") flags.StringVar(&opts.peerListenUrls, "listen-peer-urls", "", "etcd --listen-peer-urls flag, required for HA clusters") flags.StringVar(&opts.peerAdvertiseUrls, "initial-advertise-peer-urls", "", "etcd --initial-advertise-peer-urls flag, required for HA clusters") flags.StringVar(&opts.binDir, "bin-dir", "/usr/local/bin", "directory of etcd and etcdctl binaries, must contain etcd-<version> and etcdctl-<version> for each version listed in bindled-versions") flags.StringVar(&opts.dataDir, "data-dir", "", "etcd data directory of etcd server to migrate") flags.StringVar(&opts.bundledVersionString, "bundled-versions", "", "comma separated list of etcd binary versions present under the bin-dir") flags.StringVar(&opts.etcdDataPrefix, "etcd-data-prefix", "/registry", "etcd key prefix under which all objects are kept") flags.StringVar(&opts.ttlKeysDirectory, "ttl-keys-directory", "", "etcd key prefix under which all keys with TTLs are kept. Defaults to {etcd-data-prefix}/events") flags.StringVar(&opts.initialCluster, "initial-cluster", "", "comma separated list of name=endpoint pairs. Defaults to etcd-{hostname}=http://localhost:2380") flags.StringVar(&opts.targetVersion, "target-version", "", "version of etcd to migrate to. 
Format must be '<major>.<minor>.<patch>'") flags.StringVar(&opts.targetStorage, "target-storage", "", "storage version of etcd to migrate to, one of: etcd2, etcd3") flags.StringVar(&opts.etcdServerArgs, "etcd-server-extra-args", "", "additional etcd server args for starting etcd servers during migration steps, --peer-* TLS cert flags should be added for etcd clusters with more than 1 member that use mutual TLS for peer communication.") migrateCmd.Execute() } // runMigrate validates the command line flags and starts the migration. func runMigrate() { if opts.name == "" { hostname, err := os.Hostname() if err != nil { glog.Errorf("Error while getting hostname to supply default --name: %v", err) os.Exit(1) } opts.name = fmt.Sprintf("etcd-%s", hostname) } if opts.ttlKeysDirectory == "" { opts.ttlKeysDirectory = fmt.Sprintf("%s/events", opts.etcdDataPrefix) } if opts.initialCluster == "" { opts.initialCluster = fmt.Sprintf("%s=http://localhost:2380", opts.name) } if opts.targetStorage == "" { glog.Errorf("--target-storage is required") os.Exit(1) } if opts.targetVersion == "" { glog.Errorf("--target-version is required") os.Exit(1) } if opts.dataDir == "" { glog.Errorf("--data-dir is required") os.Exit(1) } if opts.bundledVersionString == "" { glog.Errorf("--bundled-versions is required") os.Exit(1) } bundledVersions, err := ParseSupportedVersions(opts.bundledVersionString) if err != nil { glog.Errorf("Failed to parse --supported-versions: %v", err) } err = validateBundledVersions(bundledVersions, opts.binDir) if err != nil { glog.Errorf("Failed to validate that 'etcd-<version>' and 'etcdctl-<version>' binaries exist in --bin-dir '%s' for all --bundled-verions '%s': %v", opts.binDir, opts.bundledVersionString, err) os.Exit(1) } target := &EtcdVersionPair{ version: MustParseEtcdVersion(opts.targetVersion), storageVersion: MustParseEtcdStorageVersion(opts.targetStorage), } migrate(opts.name, opts.port, opts.peerListenUrls, opts.peerAdvertiseUrls, opts.binDir, opts.dataDir, opts.etcdDataPrefix, opts.ttlKeysDirectory, opts.initialCluster, target, bundledVersions, opts.etcdServerArgs) } // migrate opens or initializes the etcd data directory, configures the migrator, and starts the migration. func migrate(name string, port uint64, peerListenUrls string, peerAdvertiseUrls string, binPath string, dataDirPath string, etcdDataPrefix string, ttlKeysDirectory string, initialCluster string, target *EtcdVersionPair, bundledVersions SupportedVersions, etcdServerArgs string) { dataDir, err := OpenOrCreateDataDirectory(dataDirPath) if err != nil { glog.Errorf("Error opening or creating data directory %s: %v", dataDirPath, err) os.Exit(1) } cfg := &EtcdMigrateCfg{ binPath: binPath, name: name, port: port, peerListenUrls: peerListenUrls, peerAdvertiseUrls: peerAdvertiseUrls, etcdDataPrefix: etcdDataPrefix, ttlKeysDirectory: ttlKeysDirectory, initialCluster: initialCluster, supportedVersions: bundledVersions, dataDirectory: dataDirPath, etcdServerArgs: etcdServerArgs, } client, err := NewEtcdMigrateClient(cfg) if err != nil { glog.Errorf("Migration failed: %v", err) os.Exit(1) } defer client.Close() migrator := &Migrator{cfg, dataDir, client} err = migrator.MigrateIfNeeded(target) if err != nil { glog.Errorf("Migration failed: %v", err) os.Exit(1) } } // validateBundledVersions checks that 'etcd-<version>' and 'etcdctl-<version>' binaries exist in the binDir // for each version in the bundledVersions list. 
func validateBundledVersions(bundledVersions SupportedVersions, binDir string) error { for _, v := range bundledVersions { for _, binaryName := range []string{"etcd", "etcdctl"} { fn := filepath.Join(binDir, fmt.Sprintf("%s-%s", binaryName, v)) if _, err := os.Stat(fn); err != nil { return fmt.Errorf("failed to validate '%s' binary exists for bundled-version '%s': %v", fn, v, err) } } } return nil }
cluster/images/etcd/migrate/migrate.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.0004702886217273772, 0.00019141612574458122, 0.00016533715825062245, 0.0001718980638543144, 0.00006712051253998652 ]
{ "id": 5, "code_window": [ "\tgenericapiserver \"k8s.io/apiserver/pkg/server\"\n", "\tgenericoptions \"k8s.io/apiserver/pkg/server/options\"\n", "\tkubeexternalinformers \"k8s.io/client-go/informers\"\n", "\t\"k8s.io/kubernetes/cmd/kube-apiserver/app/options\"\n", ")\n", "\n" ], "labels": [ "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilfeature \"k8s.io/apiserver/pkg/util/feature\"\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 28 }
package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", ) go_library( name = "go_default_library", srcs = ["storage.go"], importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage", deps = [ "//pkg/apis/admissionregistration:go_default_library", "//pkg/registry/admissionregistration/initializerconfiguration:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", ], ) filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], visibility = ["//visibility:private"], ) filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], )
pkg/registry/admissionregistration/initializerconfiguration/storage/BUILD
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.0008057276718318462, 0.00033089728094637394, 0.00016669425531290472, 0.00017558355466462672, 0.0002741706557571888 ]
{ "id": 6, "code_window": [ "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with apiextensions defaults and registry\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 54 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package app does all of the work necessary to create a Kubernetes // APIServer by binding together the API, master and APIServer infrastructure. // It can be configured and called directly or via the hyperkube framework. package app import ( "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver" apiextensionsoptions "k8s.io/apiextensions-apiserver/pkg/cmd/server/options" "k8s.io/apiserver/pkg/admission" genericapiserver "k8s.io/apiserver/pkg/server" genericoptions "k8s.io/apiserver/pkg/server/options" kubeexternalinformers "k8s.io/client-go/informers" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" ) func createAPIExtensionsConfig( kubeAPIServerConfig genericapiserver.Config, externalInformers kubeexternalinformers.SharedInformerFactory, pluginInitializers []admission.PluginInitializer, commandOptions *options.ServerRunOptions, masterCount int, ) (*apiextensionsapiserver.Config, error) { // make a shallow copy to let us twiddle a few things // most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the apiextensions genericConfig := kubeAPIServerConfig // override genericConfig.AdmissionControl with apiextensions' scheme, // because apiextentions apiserver should use its own scheme to convert resources. commandOptions.Admission.ApplyTo( &genericConfig, externalInformers, genericConfig.LoopbackClientConfig, apiextensionsapiserver.Scheme, pluginInitializers...) // copy the etcd options so we don't mutate originals. etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion) genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} // override MergedResourceConfig with apiextensions defaults and registry if err := commandOptions.APIEnablement.ApplyTo( &genericConfig, apiextensionsapiserver.DefaultAPIResourceConfigSource(), apiextensionsapiserver.Scheme); err != nil { return nil, err } apiextensionsConfig := &apiextensionsapiserver.Config{ GenericConfig: &genericapiserver.RecommendedConfig{ Config: genericConfig, SharedInformerFactory: externalInformers, }, ExtraConfig: apiextensionsapiserver.ExtraConfig{ CRDRESTOptionsGetter: apiextensionsoptions.NewCRDRESTOptionsGetter(etcdOptions), MasterCount: masterCount, }, } return apiextensionsConfig, nil } func createAPIExtensionsServer(apiextensionsConfig *apiextensionsapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget) (*apiextensionsapiserver.CustomResourceDefinitions, error) { return apiextensionsConfig.Complete().New(delegateAPIServer) }
cmd/kube-apiserver/app/apiextensions.go
1
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.9990386962890625, 0.22301989793777466, 0.00017296461737714708, 0.001641527283936739, 0.41466379165649414 ]
{ "id": 6, "code_window": [ "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with apiextensions defaults and registry\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 54 }
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package scanner implements a scanner for gcfg configuration text. // It takes a []byte as source which can then be tokenized // through repeated calls to the Scan method. // // Note that the API for the scanner package may change to accommodate new // features or implementation changes in gcfg. // package scanner import ( "fmt" "path/filepath" "unicode" "unicode/utf8" ) import ( "gopkg.in/gcfg.v1/token" ) // An ErrorHandler may be provided to Scanner.Init. If a syntax error is // encountered and a handler was installed, the handler is called with a // position and an error message. The position points to the beginning of // the offending token. // type ErrorHandler func(pos token.Position, msg string) // A Scanner holds the scanner's internal state while processing // a given text. It can be allocated as part of another data // structure but must be initialized via Init before use. // type Scanner struct { // immutable state file *token.File // source file handle dir string // directory portion of file.Name() src []byte // source err ErrorHandler // error reporting; or nil mode Mode // scanning mode // scanning state ch rune // current character offset int // character offset rdOffset int // reading offset (position after current character) lineOffset int // current line offset nextVal bool // next token is expected to be a value // public state - ok to modify ErrorCount int // number of errors encountered } // Read the next Unicode char into s.ch. // s.ch < 0 means end-of-file. // func (s *Scanner) next() { if s.rdOffset < len(s.src) { s.offset = s.rdOffset if s.ch == '\n' { s.lineOffset = s.offset s.file.AddLine(s.offset) } r, w := rune(s.src[s.rdOffset]), 1 switch { case r == 0: s.error(s.offset, "illegal character NUL") case r >= 0x80: // not ASCII r, w = utf8.DecodeRune(s.src[s.rdOffset:]) if r == utf8.RuneError && w == 1 { s.error(s.offset, "illegal UTF-8 encoding") } } s.rdOffset += w s.ch = r } else { s.offset = len(s.src) if s.ch == '\n' { s.lineOffset = s.offset s.file.AddLine(s.offset) } s.ch = -1 // eof } } // A mode value is a set of flags (or 0). // They control scanner behavior. // type Mode uint const ( ScanComments Mode = 1 << iota // return comments as COMMENT tokens ) // Init prepares the scanner s to tokenize the text src by setting the // scanner at the beginning of src. The scanner uses the file set file // for position information and it adds line information for each line. // It is ok to re-use the same file when re-scanning the same file as // line information which is already present is ignored. Init causes a // panic if the file size does not match the src size. // // Calls to Scan will invoke the error handler err if they encounter a // syntax error and err is not nil. Also, for each error encountered, // the Scanner field ErrorCount is incremented by one. The mode parameter // determines how comments are handled. // // Note that Init may call err if there is an error in the first character // of the file. // func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { // Explicitly initialize all fields since a scanner may be reused. 
if file.Size() != len(src) { panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) } s.file = file s.dir, _ = filepath.Split(file.Name()) s.src = src s.err = err s.mode = mode s.ch = ' ' s.offset = 0 s.rdOffset = 0 s.lineOffset = 0 s.ErrorCount = 0 s.nextVal = false s.next() } func (s *Scanner) error(offs int, msg string) { if s.err != nil { s.err(s.file.Position(s.file.Pos(offs)), msg) } s.ErrorCount++ } func (s *Scanner) scanComment() string { // initial [;#] already consumed offs := s.offset - 1 // position of initial [;#] for s.ch != '\n' && s.ch >= 0 { s.next() } return string(s.src[offs:s.offset]) } func isLetter(ch rune) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) } func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) } func (s *Scanner) scanIdentifier() string { offs := s.offset for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { s.next() } return string(s.src[offs:s.offset]) } func (s *Scanner) scanEscape(val bool) { offs := s.offset ch := s.ch s.next() // always make progress switch ch { case '\\', '"': // ok case 'n', 't': if val { break // ok } fallthrough default: s.error(offs, "unknown escape sequence") } } func (s *Scanner) scanString() string { // '"' opening already consumed offs := s.offset - 1 for s.ch != '"' { ch := s.ch s.next() if ch == '\n' || ch < 0 { s.error(offs, "string not terminated") break } if ch == '\\' { s.scanEscape(false) } } s.next() return string(s.src[offs:s.offset]) } func stripCR(b []byte) []byte { c := make([]byte, len(b)) i := 0 for _, ch := range b { if ch != '\r' { c[i] = ch i++ } } return c[:i] } func (s *Scanner) scanValString() string { offs := s.offset hasCR := false end := offs inQuote := false loop: for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { ch := s.ch s.next() switch { case inQuote && ch == '\\': s.scanEscape(true) case !inQuote && ch == '\\': if s.ch == '\r' { hasCR = true s.next() } if s.ch != '\n' { s.error(offs, "unquoted '\\' must be followed by new line") break loop } s.next() case ch == '"': inQuote = !inQuote case ch == '\r': hasCR = true case ch < 0 || inQuote && ch == '\n': s.error(offs, "string not terminated") break loop } if inQuote || !isWhiteSpace(ch) { end = s.offset } } lit := s.src[offs:end] if hasCR { lit = stripCR(lit) } return string(lit) } func isWhiteSpace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\r' } func (s *Scanner) skipWhitespace() { for isWhiteSpace(s.ch) { s.next() } } // Scan scans the next token and returns the token position, the token, // and its literal string if applicable. The source end is indicated by // token.EOF. // // If the returned token is a literal (token.IDENT, token.STRING) or // token.COMMENT, the literal string has the corresponding value. // // If the returned token is token.ILLEGAL, the literal string is the // offending character. // // In all other cases, Scan returns an empty literal string. // // For more tolerant parsing, Scan will return a valid token if // possible even if a syntax error was encountered. Thus, even // if the resulting token sequence contains no illegal tokens, // a client may not assume that no error occurred. Instead it // must check the scanner's ErrorCount or the number of calls // of the error handler, if there was one installed. // // Scan adds line information to the file added to the file // set with Init. 
Token positions are relative to that file // and thus relative to the file set. // func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { scanAgain: s.skipWhitespace() // current token start pos = s.file.Pos(s.offset) // determine token value switch ch := s.ch; { case s.nextVal: lit = s.scanValString() tok = token.STRING s.nextVal = false case isLetter(ch): lit = s.scanIdentifier() tok = token.IDENT default: s.next() // always make progress switch ch { case -1: tok = token.EOF case '\n': tok = token.EOL case '"': tok = token.STRING lit = s.scanString() case '[': tok = token.LBRACK case ']': tok = token.RBRACK case ';', '#': // comment lit = s.scanComment() if s.mode&ScanComments == 0 { // skip comment goto scanAgain } tok = token.COMMENT case '=': tok = token.ASSIGN s.nextVal = true default: s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) tok = token.ILLEGAL lit = string(ch) } } return }
vendor/gopkg.in/gcfg.v1/scanner/scanner.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00018047950288746506, 0.00017404250684194267, 0.00016021213377825916, 0.00017447894788347185, 0.000004140120836382266 ]
{ "id": 6, "code_window": [ "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with apiextensions defaults and registry\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 54 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package generators import ( "github.com/golang/glog" "k8s.io/gengo/types" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if // it exists, the value is boolean. If the tag did not exist, it returns // false. func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { glog.Fatal(err) } return val }
staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00017903205298352987, 0.00017614600074011832, 0.00017149792984127998, 0.00017702701734378934, 0.0000031388426577905193 ]
{ "id": 6, "code_window": [ "\n", "\t// copy the etcd options so we don't mutate originals.\n", "\tetcdOptions := *commandOptions.Etcd\n", "\tetcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)\n", "\tgenericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}\n", "\n", "\t// override MergedResourceConfig with apiextensions defaults and registry\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tetcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)\n" ], "file_path": "cmd/kube-apiserver/app/apiextensions.go", "type": "add", "edit_start_line_idx": 54 }
package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", "go_test", ) go_library( name = "go_default_library", srcs = ["namespaced_resources_deleter.go"], importpath = "k8s.io/kubernetes/pkg/controller/namespace/deletion", deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], ) go_test( name = "go_default_test", srcs = ["namespaced_resources_deleter_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", ], ) filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], visibility = ["//visibility:private"], ) filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], )
pkg/controller/namespace/deletion/BUILD
0
https://github.com/kubernetes/kubernetes/commit/70f9ca0c1a53de126583e49390efb8a0acaacb88
[ 0.00017937157826963812, 0.000175734210642986, 0.00016979595238808542, 0.0001756886049406603, 0.000002835794703059946 ]
{ "id": 0, "code_window": [ " \"//pkg/util/dbterror\",\n", " \"//pkg/util/generic\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/size\",\n", " \"@com_github_google_uuid//:uuid\",\n", " \"@com_github_pingcap_errors//:errors\",\n", " \"@com_github_pingcap_failpoint//:failpoint\",\n", " \"@com_github_tikv_client_go_v2//util\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/memory\",\n" ], "file_path": "pkg/ddl/ingest/BUILD.bazel", "type": "add", "edit_start_line_idx": 42 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "ingest", srcs = [ "backend.go", "backend_mgr.go", "checkpoint.go", "config.go", "disk_root.go", "engine.go", "engine_mgr.go", "env.go", "mem_root.go", "message.go", "mock.go", ], importpath = "github.com/pingcap/tidb/pkg/ddl/ingest", visibility = ["//visibility:public"], deps = [ "//br/pkg/lightning/backend", "//br/pkg/lightning/backend/encode", "//br/pkg/lightning/backend/kv", "//br/pkg/lightning/backend/local", "//br/pkg/lightning/checkpoints", "//br/pkg/lightning/common", "//br/pkg/lightning/config", "//br/pkg/lightning/errormanager", "//br/pkg/lightning/log", "//pkg/config", "//pkg/ddl/internal/session", "//pkg/ddl/util", "//pkg/kv", "//pkg/meta", "//pkg/parser/mysql", "//pkg/sessionctx", "//pkg/sessionctx/variable", "//pkg/table", "//pkg/util", "//pkg/util/dbterror", "//pkg/util/generic", "//pkg/util/logutil", "//pkg/util/size", "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_tikv_client_go_v2//util", "@io_etcd_go_etcd_client_v3//:client", "@io_etcd_go_etcd_client_v3//concurrency", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], ) go_test( name = "ingest_test", timeout = "short", srcs = [ "checkpoint_test.go", "env_test.go", "integration_test.go", "main_test.go", "mem_root_test.go", ], embed = [":ingest"], flaky = True, race = "on", shard_count = 15, deps = [ "//pkg/config", "//pkg/ddl", "//pkg/ddl/ingest/testutil", "//pkg/ddl/internal/session", "//pkg/ddl/testutil", "//pkg/ddl/util/callback", "//pkg/domain", "//pkg/errno", "//pkg/parser/model", "//pkg/testkit", "//tests/realtikvtest", "@com_github_ngaut_pools//:pools", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", ], )
pkg/ddl/ingest/BUILD.bazel
1
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.8514969944953918, 0.09485675394535065, 0.00016739859711378813, 0.0001777385186869651, 0.26751279830932617 ]
{ "id": 0, "code_window": [ " \"//pkg/util/dbterror\",\n", " \"//pkg/util/generic\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/size\",\n", " \"@com_github_google_uuid//:uuid\",\n", " \"@com_github_pingcap_errors//:errors\",\n", " \"@com_github_pingcap_failpoint//:failpoint\",\n", " \"@com_github_tikv_client_go_v2//util\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/memory\",\n" ], "file_path": "pkg/ddl/ingest/BUILD.bazel", "type": "add", "edit_start_line_idx": 42 }
load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "testutil", srcs = ["testutil.go"], importpath = "github.com/pingcap/tidb/pkg/statistics/handle/cache/internal/testutil", visibility = ["//pkg/statistics/handle/cache:__subpackages__"], deps = [ "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/statistics", "//pkg/types", ], )
pkg/statistics/handle/cache/internal/testutil/BUILD.bazel
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00018467710469849408, 0.00018143808119930327, 0.0001781990722520277, 0.00018143808119930327, 0.000003239016223233193 ]
{ "id": 0, "code_window": [ " \"//pkg/util/dbterror\",\n", " \"//pkg/util/generic\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/size\",\n", " \"@com_github_google_uuid//:uuid\",\n", " \"@com_github_pingcap_errors//:errors\",\n", " \"@com_github_pingcap_failpoint//:failpoint\",\n", " \"@com_github_tikv_client_go_v2//util\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/memory\",\n" ], "file_path": "pkg/ddl/ingest/BUILD.bazel", "type": "add", "edit_start_line_idx": 42 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package autoid_test import ( "context" "strconv" "strings" "testing" "github.com/pingcap/failpoint" _ "github.com/pingcap/tidb/pkg/autoid_service" ddltestutil "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testutil" "github.com/stretchr/testify/require" ) // Test filter different kind of allocators. // In special ddl type, for example: // 1: ActionRenameTable : it will abandon all the old allocators. // 2: ActionRebaseAutoID : it will drop row-id-type allocator. // 3: ActionModifyTableAutoIdCache : it will drop row-id-type allocator. // 3: ActionRebaseAutoRandomBase : it will drop auto-rand-type allocator. func TestFilterDifferentAllocators(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("drop table if exists t1") for _, str := range []string{"", " AUTO_ID_CACHE 1"} { tk.MustExec("create table t(a bigint auto_random(5) key, b int auto_increment unique)" + str) tk.MustExec("insert into t values()") tk.MustQuery("select b from t").Check(testkit.Rows("1")) allHandles, err := ddltestutil.ExtractAllTableHandles(tk.Session(), "test", "t") require.NoError(t, err) require.Equal(t, 1, len(allHandles)) orderedHandles := testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) require.Equal(t, int64(1), orderedHandles[0]) tk.MustExec("delete from t") // Test rebase auto_increment. tk.MustExec("alter table t auto_increment 3000000") tk.MustExec("insert into t values()") tk.MustQuery("select b from t").Check(testkit.Rows("3000000")) allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Session(), "test", "t") require.NoError(t, err) require.Equal(t, 1, len(allHandles)) orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) require.Equal(t, int64(2), orderedHandles[0]) tk.MustExec("delete from t") // Test rebase auto_random. tk.MustExec("alter table t auto_random_base 3000000") tk.MustExec("insert into t values()") tk.MustQuery("select b from t").Check(testkit.Rows("3000001")) allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Session(), "test", "t") require.NoError(t, err) require.Equal(t, 1, len(allHandles)) orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) require.Equal(t, int64(3000000), orderedHandles[0]) tk.MustExec("delete from t") // Test rename table. 
tk.MustExec("rename table t to t1") tk.MustExec("insert into t1 values()") res := tk.MustQuery("select b from t1") strInt64, err := strconv.ParseInt(res.Rows()[0][0].(string), 10, 64) require.NoError(t, err) require.GreaterOrEqual(t, strInt64, int64(3000002)) allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Session(), "test", "t1") require.NoError(t, err) require.Equal(t, 1, len(allHandles)) orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) require.Greater(t, orderedHandles[0], int64(3000001)) tk.MustExec("drop table t1") } } func TestInsertWithAutoidSchema(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec(`use test`) tk.MustExec(`create table t1(id int primary key auto_increment, n int);`) tk.MustExec(`create table t2(id int unsigned primary key auto_increment, n int);`) tk.MustExec(`create table t3(id tinyint primary key auto_increment, n int);`) tk.MustExec(`create table t4(id int primary key, n float auto_increment, key I_n(n));`) tk.MustExec(`create table t5(id int primary key, n float unsigned auto_increment, key I_n(n));`) tk.MustExec(`create table t6(id int primary key, n double auto_increment, key I_n(n));`) tk.MustExec(`create table t7(id int primary key, n double unsigned auto_increment, key I_n(n));`) // test for inserting multiple values tk.MustExec(`create table t8(id int primary key auto_increment, n int);`) testInsertWithAutoidSchema(t, tk) // test for auto_id_cache = 1 tk.MustExec(`drop table if exists t1, t2, t3, t4, t5, t6, t7, t8`) tk.MustExec(`create table t1(id int primary key auto_increment, n int) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t2(id int unsigned primary key auto_increment, n int) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t3(id tinyint primary key auto_increment, n int) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t4(id int primary key, n float auto_increment, key I_n(n)) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t5(id int primary key, n float unsigned auto_increment, key I_n(n)) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t6(id int primary key, n double auto_increment, key I_n(n)) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t7(id int primary key, n double unsigned auto_increment, key I_n(n)) AUTO_ID_CACHE 1;`) // test for inserting multiple values tk.MustExec(`create table t8(id int primary key auto_increment, n int);`) testInsertWithAutoidSchema(t, tk) } func testInsertWithAutoidSchema(t *testing.T, tk *testkit.TestKit) { tests := []struct { insert string query string result [][]interface{} }{ { `insert into t1(id, n) values(1, 1)`, `select * from t1 where id = 1`, testkit.Rows(`1 1`), }, { `insert into t1(n) values(2)`, `select * from t1 where id = 2`, testkit.Rows(`2 2`), }, { `insert into t1(n) values(3)`, `select * from t1 where id = 3`, testkit.Rows(`3 3`), }, { `insert into t1(id, n) values(-1, 4)`, `select * from t1 where id = -1`, testkit.Rows(`-1 4`), }, { `insert into t1(n) values(5)`, `select * from t1 where id = 4`, testkit.Rows(`4 5`), }, { `insert into t1(id, n) values('5', 6)`, `select * from t1 where id = 5`, testkit.Rows(`5 6`), }, { `insert into t1(n) values(7)`, `select * from t1 where id = 6`, testkit.Rows(`6 7`), }, { `insert into t1(id, n) values(7.4, 8)`, `select * from t1 where id = 7`, testkit.Rows(`7 8`), }, { `insert into t1(id, n) values(7.5, 9)`, `select * from t1 where id = 8`, testkit.Rows(`8 9`), }, { `insert into t1(n) values(9)`, `select * from t1 where id = 9`, testkit.Rows(`9 9`), }, // test last insert id { `insert into t1 
values(3000, -1), (null, -2)`, `select * from t1 where id = 3000`, testkit.Rows(`3000 -1`), }, { `;`, `select * from t1 where id = 3001`, testkit.Rows(`3001 -2`), }, { `;`, `select last_insert_id()`, testkit.Rows(`3001`), }, { `insert into t2(id, n) values(1, 1)`, `select * from t2 where id = 1`, testkit.Rows(`1 1`), }, { `insert into t2(n) values(2)`, `select * from t2 where id = 2`, testkit.Rows(`2 2`), }, { `insert into t2(n) values(3)`, `select * from t2 where id = 3`, testkit.Rows(`3 3`), }, { `insert into t3(id, n) values(1, 1)`, `select * from t3 where id = 1`, testkit.Rows(`1 1`), }, { `insert into t3(n) values(2)`, `select * from t3 where id = 2`, testkit.Rows(`2 2`), }, { `insert into t3(n) values(3)`, `select * from t3 where id = 3`, testkit.Rows(`3 3`), }, { `insert into t3(id, n) values(-1, 4)`, `select * from t3 where id = -1`, testkit.Rows(`-1 4`), }, { `insert into t3(n) values(5)`, `select * from t3 where id = 4`, testkit.Rows(`4 5`), }, { `insert into t4(id, n) values(1, 1)`, `select * from t4 where id = 1`, testkit.Rows(`1 1`), }, { `insert into t4(id) values(2)`, `select * from t4 where id = 2`, testkit.Rows(`2 2`), }, { `insert into t4(id, n) values(3, -1)`, `select * from t4 where id = 3`, testkit.Rows(`3 -1`), }, { `insert into t4(id) values(4)`, `select * from t4 where id = 4`, testkit.Rows(`4 3`), }, { `insert into t4(id, n) values(5, 5.5)`, `select * from t4 where id = 5`, testkit.Rows(`5 5.5`), }, { `insert into t4(id) values(6)`, `select * from t4 where id = 6`, testkit.Rows(`6 7`), }, { `insert into t4(id, n) values(7, '7.7')`, `select * from t4 where id = 7`, testkit.Rows(`7 7.7`), }, { `insert into t4(id) values(8)`, `select * from t4 where id = 8`, testkit.Rows(`8 9`), }, { `insert into t4(id, n) values(9, 10.4)`, `select * from t4 where id = 9`, testkit.Rows(`9 10.4`), }, { `insert into t4(id) values(10)`, `select * from t4 where id = 10`, testkit.Rows(`10 11`), }, { `insert into t5(id, n) values(1, 1)`, `select * from t5 where id = 1`, testkit.Rows(`1 1`), }, { `insert into t5(id) values(2)`, `select * from t5 where id = 2`, testkit.Rows(`2 2`), }, { `insert into t5(id) values(3)`, `select * from t5 where id = 3`, testkit.Rows(`3 3`), }, { `insert into t6(id, n) values(1, 1)`, `select * from t6 where id = 1`, testkit.Rows(`1 1`), }, { `insert into t6(id) values(2)`, `select * from t6 where id = 2`, testkit.Rows(`2 2`), }, { `insert into t6(id, n) values(3, -1)`, `select * from t6 where id = 3`, testkit.Rows(`3 -1`), }, { `insert into t6(id) values(4)`, `select * from t6 where id = 4`, testkit.Rows(`4 3`), }, { `insert into t6(id, n) values(5, 5.5)`, `select * from t6 where id = 5`, testkit.Rows(`5 5.5`), }, { `insert into t6(id) values(6)`, `select * from t6 where id = 6`, testkit.Rows(`6 7`), }, { `insert into t6(id, n) values(7, '7.7')`, `select * from t4 where id = 7`, testkit.Rows(`7 7.7`), }, { `insert into t6(id) values(8)`, `select * from t4 where id = 8`, testkit.Rows(`8 9`), }, { `insert into t6(id, n) values(9, 10.4)`, `select * from t6 where id = 9`, testkit.Rows(`9 10.4`), }, { `insert into t6(id) values(10)`, `select * from t6 where id = 10`, testkit.Rows(`10 11`), }, { `insert into t7(id, n) values(1, 1)`, `select * from t7 where id = 1`, testkit.Rows(`1 1`), }, { `insert into t7(id) values(2)`, `select * from t7 where id = 2`, testkit.Rows(`2 2`), }, { `insert into t7(id) values(3)`, `select * from t7 where id = 3`, testkit.Rows(`3 3`), }, // the following is test for insert multiple values. 
{ `insert into t8(n) values(1),(2)`, `select * from t8 where id = 1`, testkit.Rows(`1 1`), }, { `;`, `select * from t8 where id = 2`, testkit.Rows(`2 2`), }, { `;`, `select last_insert_id();`, testkit.Rows(`1`), }, // test user rebase and auto alloc mixture. { `insert into t8 values(null, 3),(-1, -1),(null,4),(null, 5)`, `select * from t8 where id = 3`, testkit.Rows(`3 3`), }, // -1 won't rebase allocator here cause -1 < base. { `;`, `select * from t8 where id = -1`, testkit.Rows(`-1 -1`), }, { `;`, `select * from t8 where id = 4`, testkit.Rows(`4 4`), }, { `;`, `select * from t8 where id = 5`, testkit.Rows(`5 5`), }, { `;`, `select last_insert_id();`, testkit.Rows(`3`), }, { `insert into t8 values(null, 6),(10, 7),(null, 8)`, `select * from t8 where id = 6`, testkit.Rows(`6 6`), }, // 10 will rebase allocator here. { `;`, `select * from t8 where id = 10`, testkit.Rows(`10 7`), }, { `;`, `select * from t8 where id = 11`, testkit.Rows(`11 8`), }, { `;`, `select last_insert_id()`, testkit.Rows(`6`), }, // fix bug for last_insert_id should be first allocated id in insert rows (skip the rebase id). { `insert into t8 values(100, 9),(null,10),(null,11)`, `select * from t8 where id = 100`, testkit.Rows(`100 9`), }, { `;`, `select * from t8 where id = 101`, testkit.Rows(`101 10`), }, { `;`, `select * from t8 where id = 102`, testkit.Rows(`102 11`), }, { `;`, `select last_insert_id()`, testkit.Rows(`101`), }, // test with sql_mode: NO_AUTO_VALUE_ON_ZERO. { `;`, `select @@sql_mode`, testkit.Rows(`ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION`), }, { `;`, "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,NO_AUTO_VALUE_ON_ZERO`", nil, }, { `insert into t8 values (0, 12), (null, 13)`, `select * from t8 where id = 0`, testkit.Rows(`0 12`), }, { `;`, `select * from t8 where id = 103`, testkit.Rows(`103 13`), }, { `;`, `select last_insert_id()`, testkit.Rows(`103`), }, // test without sql_mode: NO_AUTO_VALUE_ON_ZERO. { `;`, "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION`", nil, }, // value 0 will be substitute by autoid. { `insert into t8 values (0, 14), (null, 15)`, `select * from t8 where id = 104`, testkit.Rows(`104 14`), }, { `;`, `select * from t8 where id = 105`, testkit.Rows(`105 15`), }, { `;`, `select last_insert_id()`, testkit.Rows(`104`), }, // last test : auto increment allocation can find in retryInfo. { `retry : insert into t8 values (null, 16), (null, 17)`, `select * from t8 where id = 1000`, testkit.Rows(`1000 16`), }, { `;`, `select * from t8 where id = 1001`, testkit.Rows(`1001 17`), }, { `;`, `select last_insert_id()`, // this insert doesn't has the last_insert_id, should be same as the last insert case. testkit.Rows(`104`), }, } for _, tt := range tests { if strings.HasPrefix(tt.insert, "retry : ") { // it's the last retry insert case, change the sessionVars. 
retryInfo := &variable.RetryInfo{Retrying: true} retryInfo.AddAutoIncrementID(1000) retryInfo.AddAutoIncrementID(1001) tk.Session().GetSessionVars().RetryInfo = retryInfo tk.MustExec(tt.insert[8:]) tk.Session().GetSessionVars().RetryInfo = &variable.RetryInfo{} } else { tk.MustExec(tt.insert) } if tt.query == "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,NO_AUTO_VALUE_ON_ZERO`" || tt.query == "set session sql_mode = `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION`" { tk.MustExec(tt.query) } else { tk.MustQuery(tt.query).Check(tt.result) } } } func TestMockAutoIDServiceError(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("USE test;") tk.MustExec("create table t_mock_err (id int key auto_increment) auto_id_cache 1") failpoint.Enable("github.com/pingcap/tidb/pkg/autoid_service/mockErr", `return(true)`) defer failpoint.Disable("github.com/pingcap/tidb/pkg/autoid_service/mockErr") // Cover a bug that the autoid client retry non-retryable errors forever cause dead loop. tk.MustExecToErr("insert into t_mock_err values (),()") // mock error, instead of dead loop } func TestIssue39528(t *testing.T) { // When AUTO_ID_CACHE is 1, it should not affect row id setting when autoid and rowid are separated. store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test;") tk.MustExec("create table issue39528 (id int unsigned key nonclustered auto_increment) shard_row_id_bits=4 auto_id_cache 1;") tk.MustExec("insert into issue39528 values ()") tk.MustExec("insert into issue39528 values ()") ctx := context.Background() var codeRun bool ctx = context.WithValue(ctx, "testIssue39528", &codeRun) _, err := tk.ExecWithContext(ctx, "insert into issue39528 values ()") require.NoError(t, err) // Make sure the code does not visit tikv on allocate path. require.False(t, codeRun) }
pkg/executor/test/autoidtest/autoid_test.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.0006545999203808606, 0.00018491584341973066, 0.00016605462587904185, 0.00017222028691321611, 0.00007177122461143881 ]
{ "id": 0, "code_window": [ " \"//pkg/util/dbterror\",\n", " \"//pkg/util/generic\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/size\",\n", " \"@com_github_google_uuid//:uuid\",\n", " \"@com_github_pingcap_errors//:errors\",\n", " \"@com_github_pingcap_failpoint//:failpoint\",\n", " \"@com_github_tikv_client_go_v2//util\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/memory\",\n" ], "file_path": "pkg/ddl/ingest/BUILD.bazel", "type": "add", "edit_start_line_idx": 42 }
// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import ( "bytes" "crypto/rand" "math" "sort" "testing" "unsafe" "github.com/stretchr/testify/require" ) func randBytes(n int) []byte { b := make([]byte, n) rand.Read(b) return b } func TestNoopKeyAdapter(t *testing.T) { keyAdapter := NoopKeyAdapter{} key := randBytes(32) require.Len(t, key, keyAdapter.EncodedLen(key, ZeroRowID)) encodedKey := keyAdapter.Encode(nil, key, ZeroRowID) require.Equal(t, key, encodedKey) decodedKey, err := keyAdapter.Decode(nil, encodedKey) require.NoError(t, err) require.Equal(t, key, decodedKey) } func TestDupDetectKeyAdapter(t *testing.T) { inputs := []struct { key []byte rowID int64 }{ { []byte{0x0}, 0, }, { randBytes(32), 1, }, { randBytes(32), math.MaxInt32, }, { randBytes(32), math.MinInt32, }, } keyAdapter := DupDetectKeyAdapter{} for _, input := range inputs { encodedRowID := EncodeIntRowID(input.rowID) result := keyAdapter.Encode(nil, input.key, encodedRowID) require.Equal(t, keyAdapter.EncodedLen(input.key, encodedRowID), len(result)) // Decode the result. key, err := keyAdapter.Decode(nil, result) require.NoError(t, err) require.Equal(t, input.key, key) } } func TestDupDetectKeyOrder(t *testing.T) { keys := [][]byte{ {0x0, 0x1, 0x2}, {0x0, 0x1, 0x3}, {0x0, 0x1, 0x3, 0x4}, {0x0, 0x1, 0x3, 0x4, 0x0}, {0x0, 0x1, 0x3, 0x4, 0x0, 0x0, 0x0}, } keyAdapter := DupDetectKeyAdapter{} encodedKeys := make([][]byte, 0, len(keys)) for _, key := range keys { encodedKeys = append(encodedKeys, keyAdapter.Encode(nil, key, EncodeIntRowID(1))) } sorted := sort.SliceIsSorted(encodedKeys, func(i, j int) bool { return bytes.Compare(encodedKeys[i], encodedKeys[j]) < 0 }) require.True(t, sorted) } func TestDupDetectEncodeDupKey(t *testing.T) { keyAdapter := DupDetectKeyAdapter{} key := randBytes(32) result1 := keyAdapter.Encode(nil, key, EncodeIntRowID(10)) result2 := keyAdapter.Encode(nil, key, EncodeIntRowID(20)) require.NotEqual(t, result1, result2) } func startWithSameMemory(x []byte, y []byte) bool { return cap(x) > 0 && cap(y) > 0 && uintptr(unsafe.Pointer(&x[:cap(x)][0])) == uintptr(unsafe.Pointer(&y[:cap(y)][0])) } func TestEncodeKeyToPreAllocatedBuf(t *testing.T) { keyAdapters := []KeyAdapter{NoopKeyAdapter{}, DupDetectKeyAdapter{}} for _, keyAdapter := range keyAdapters { key := randBytes(32) buf := make([]byte, 256) buf2 := keyAdapter.Encode(buf[:4], key, EncodeIntRowID(1)) require.True(t, startWithSameMemory(buf, buf2)) // Verify the encoded result first. 
key2, err := keyAdapter.Decode(nil, buf2[4:]) require.NoError(t, err) require.Equal(t, key, key2) } } func TestDecodeKeyToPreAllocatedBuf(t *testing.T) { data := []byte{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x0, 0x8, } keyAdapters := []KeyAdapter{NoopKeyAdapter{}, DupDetectKeyAdapter{}} for _, keyAdapter := range keyAdapters { key, err := keyAdapter.Decode(nil, data) require.NoError(t, err) buf := make([]byte, 4+len(data)) buf2, err := keyAdapter.Decode(buf[:4], data) require.NoError(t, err) require.True(t, startWithSameMemory(buf, buf2)) require.Equal(t, key, buf2[4:]) } } func TestDecodeKeyDstIsInsufficient(t *testing.T) { data := []byte{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x0, 0x8, } keyAdapters := []KeyAdapter{NoopKeyAdapter{}, DupDetectKeyAdapter{}} for _, keyAdapter := range keyAdapters { key, err := keyAdapter.Decode(nil, data) require.NoError(t, err) buf := make([]byte, 4, 6) copy(buf, []byte{'a', 'b', 'c', 'd'}) buf2, err := keyAdapter.Decode(buf[:4], data) require.NoError(t, err) require.False(t, startWithSameMemory(buf, buf2)) require.Equal(t, buf[:4], buf2[:4]) require.Equal(t, key, buf2[4:]) } }
br/pkg/lightning/common/key_adapter_test.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00017449757433496416, 0.00017032727191690356, 0.00016448914539068937, 0.00017060643585864455, 0.000002956224079753156 ]
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n", "\t\"github.com/pingcap/tidb/pkg/util\"\n", "\t\"github.com/pingcap/tidb/pkg/util/logutil\"\n", "\t\"github.com/pingcap/tidb/pkg/util/size\"\n", "\t\"go.uber.org/zap\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/memory\"\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "add", "edit_start_line_idx": 27 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ingest import ( "context" "os" "path/filepath" "strconv" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/size" "go.uber.org/zap" ) var ( // LitBackCtxMgr is the entry for the lightning backfill process. LitBackCtxMgr BackendCtxMgr // LitMemRoot is used to track the memory usage of the lightning backfill process. LitMemRoot MemRoot // LitDiskRoot is used to track the disk usage of the lightning backfill process. LitDiskRoot DiskRoot // LitRLimit is the max open file number of the lightning backfill process. LitRLimit uint64 // LitSortPath is the sort path for the lightning backfill process. LitSortPath string // LitInitialized is the flag indicates whether the lightning backfill process is initialized. LitInitialized bool ) const maxMemoryQuota = 2 * size.GB // InitGlobalLightningEnv initialize Lightning backfill environment. func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) { log.SetAppLogger(logutil.BgLogger()) globalCfg := config.GetGlobalConfig() if globalCfg.Store != "tikv" { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.String("storage limitation", "only support TiKV storage"), zap.String("current storage", globalCfg.Store), zap.Bool("lightning is initialized", LitInitialized)) return } sPath, err := genLightningDataDir() if err != nil { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.Error(err), zap.Bool("lightning is initialized", LitInitialized)) return } LitSortPath = sPath LitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota) LitRLimit = util.GenRLimit("ddl-ingest") LitInitialized = true logutil.BgLogger().Info(LitInfoEnvInitSucc, zap.String("category", "ddl-ingest"), zap.Uint64("memory limitation", maxMemoryQuota), zap.String("disk usage info", LitDiskRoot.UsageInfo()), zap.Uint64("max open file number", LitRLimit), zap.Bool("lightning is initialized", LitInitialized)) } // Generate lightning local store dir in TiDB data dir. // it will append -port to be tmp_ddl suffix. func genLightningDataDir() (string, error) { sortPath := ConfigSortPath() if _, err := os.Stat(sortPath); err != nil { if !os.IsNotExist(err) { logutil.BgLogger().Error(LitErrStatDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } } err := os.MkdirAll(sortPath, 0o700) if err != nil { logutil.BgLogger().Error(LitErrCreateDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } logutil.BgLogger().Info(LitInfoSortDir, zap.String("category", "ddl-ingest"), zap.String("data path:", sortPath)) return sortPath, nil } // ConfigSortPath returns the sort path for lightning. 
func ConfigSortPath() string { tidbCfg := config.GetGlobalConfig() sortPathSuffix := "/tmp_ddl-" + strconv.Itoa(int(tidbCfg.Port)) sortPath := filepath.Join(tidbCfg.TempDir, sortPathSuffix) return sortPath } // GenLightningDataDirForTest is only used for test. var GenLightningDataDirForTest = genLightningDataDir
pkg/ddl/ingest/env.go
1
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.4663015305995941, 0.039063069969415665, 0.00016570615116506815, 0.0001705464965198189, 0.1288173496723175 ]
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n", "\t\"github.com/pingcap/tidb/pkg/util\"\n", "\t\"github.com/pingcap/tidb/pkg/util/logutil\"\n", "\t\"github.com/pingcap/tidb/pkg/util/size\"\n", "\t\"go.uber.org/zap\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/memory\"\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "add", "edit_start_line_idx": 27 }
// Copyright 2022 PingCAP, Inc. Licensed under Apache-2.0. package streamhelper import ( "context" "github.com/google/uuid" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/owner" clientv3 "go.etcd.io/etcd/client/v3" ) const ( ownerPrompt = "log-backup" ownerPath = "/tidb/br-stream/owner" ) // OnTick advances the inner logic clock for the advancer. // It's synchronous: this would only return after the events triggered by the clock has all been done. // It's generally panic-free, you may not need to trying recover a panic here. func (c *CheckpointAdvancer) OnTick(ctx context.Context) (err error) { defer c.recordTimeCost("tick")() defer utils.PanicToErr(&err) return c.tick(ctx) } // OnStart implements daemon.Interface, which will be called when log backup service starts. func (c *CheckpointAdvancer) OnStart(ctx context.Context) { c.StartTaskListener(ctx) } // OnBecomeOwner implements daemon.Interface. If the tidb-server become owner, this function will be called. func (c *CheckpointAdvancer) OnBecomeOwner(ctx context.Context) { metrics.AdvancerOwner.Set(1.0) c.SpawnSubscriptionHandler(ctx) go func() { <-ctx.Done() c.OnStop() }() } // Name implements daemon.Interface. func (c *CheckpointAdvancer) Name() string { return "LogBackup::Advancer" } func (c *CheckpointAdvancer) OnStop() { metrics.AdvancerOwner.Set(0.0) c.stopSubscriber() } func OwnerManagerForLogBackup(ctx context.Context, etcdCli *clientv3.Client) owner.Manager { id := uuid.New() return owner.NewOwnerManager(ctx, etcdCli, ownerPrompt, id.String(), ownerPath) }
br/pkg/streamhelper/advancer_daemon.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.0014283640775829554, 0.00046773269423283637, 0.000164876357303001, 0.00019856623839586973, 0.0004623158893082291 ]
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n", "\t\"github.com/pingcap/tidb/pkg/util\"\n", "\t\"github.com/pingcap/tidb/pkg/util/logutil\"\n", "\t\"github.com/pingcap/tidb/pkg/util/size\"\n", "\t\"go.uber.org/zap\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/memory\"\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "add", "edit_start_line_idx": 27 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dbutil import ( "testing" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/schemacmp" "github.com/stretchr/testify/require" ) type testCase struct { sql string columns []string indexs []string colLen [][]int colName string fineCol bool } func TestTable(t *testing.T) { testCases := []*testCase{ { ` CREATE TABLE htest ( a int(11) PRIMARY KEY ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin `, []string{"a"}, []string{mysql.PrimaryKeyName}, [][]int{{types.UnspecifiedLength}}, "c", false, }, { ` CREATE TABLE itest (a int(11) NOT NULL, b double NOT NULL DEFAULT '2', c varchar(10) NOT NULL, d time DEFAULT NULL, PRIMARY KEY (a, b), UNIQUE KEY d (d)) `, []string{"a", "b", "c", "d"}, []string{mysql.PrimaryKeyName, "d"}, [][]int{{types.UnspecifiedLength, types.UnspecifiedLength}, {types.UnspecifiedLength}}, "a", true, }, { ` CREATE TABLE jtest ( a int(11) NOT NULL, b varchar(10) DEFAULT NULL, c varchar(255) DEFAULT NULL, PRIMARY KEY (a) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin `, []string{"a", "b", "c"}, []string{mysql.PrimaryKeyName}, [][]int{{types.UnspecifiedLength}}, "c", true, }, { ` CREATE TABLE mtest ( a int(24), KEY test (a)) `, []string{"a"}, []string{"test"}, [][]int{{types.UnspecifiedLength}}, "d", false, }, { ` CREATE TABLE ntest ( a int(24) PRIMARY KEY CLUSTERED ) `, []string{"a"}, []string{mysql.PrimaryKeyName}, [][]int{{types.UnspecifiedLength}}, "d", false, }, { ` CREATE TABLE otest ( a int(11) NOT NULL, b varchar(10) DEFAULT NULL, c varchar(255) DEFAULT NULL, PRIMARY KEY (a) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci `, []string{"a", "b", "c"}, []string{mysql.PrimaryKeyName}, [][]int{{types.UnspecifiedLength}}, "c", true, }, } for _, testCase := range testCases { tableInfo, err := GetTableInfoBySQL(testCase.sql, parser.New()) require.NoError(t, err) for i, column := range tableInfo.Columns { require.Equal(t, column.Name.O, testCase.columns[i]) } require.Len(t, tableInfo.Indices, len(testCase.indexs)) for j, index := range tableInfo.Indices { require.Equal(t, index.Name.O, testCase.indexs[j]) for k, indexCol := range index.Columns { require.Equal(t, testCase.colLen[j][k], indexCol.Length) } } col := FindColumnByName(tableInfo.Columns, testCase.colName) require.Equal(t, col != nil, testCase.fineCol) } } func TestTableStructEqual(t *testing.T) { createTableSQL1 := "CREATE TABLE `test`.`atest` (`id` int(24), `name` varchar(24), `birthday` datetime, `update_time` time, `money` decimal(20,2), primary key(`id`))" tableInfo1, err := GetTableInfoBySQL(createTableSQL1, parser.New()) require.NoError(t, err) createTableSQL2 := "CREATE TABLE `test`.`atest` (`id` int(24) NOT NULL, `name` varchar(24), `birthday` datetime, `update_time` time, `money` decimal(20,2), primary key(`id`))" tableInfo2, err := GetTableInfoBySQL(createTableSQL2, parser.New()) 
require.NoError(t, err) createTableSQL3 := `CREATE TABLE "test"."atest" ("id" int(24), "name" varchar(24), "birthday" datetime, "update_time" time, "money" decimal(20,2), unique key("id"))` p := parser.New() p.SetSQLMode(mysql.ModeANSIQuotes) tableInfo3, err := GetTableInfoBySQL(createTableSQL3, p) require.NoError(t, err) equal, _ := EqualTableInfo(tableInfo1, tableInfo2) require.Equal(t, true, equal) equal, _ = EqualTableInfo(tableInfo1, tableInfo3) require.Equal(t, false, equal) } func TestSchemacmpEncode(t *testing.T) { createTableSQL := "CREATE TABLE `test`.`atest` (`id` int(24), primary key(`id`))" tableInfo, err := GetTableInfoBySQL(createTableSQL, parser.New()) require.NoError(t, err) table := schemacmp.Encode(tableInfo) require.Equal(t, "CREATE TABLE `tbl`(`id` INT(24) NOT NULL, PRIMARY KEY (`id`)) CHARSET UTF8MB4 COLLATE UTF8MB4_BIN", table.String()) }
pkg/util/dbutil/table_test.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.002703616628423333, 0.0003207970876246691, 0.00016098315245471895, 0.00016699779371265322, 0.0005959736299701035 ]
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n", "\t\"github.com/pingcap/tidb/pkg/util\"\n", "\t\"github.com/pingcap/tidb/pkg/util/logutil\"\n", "\t\"github.com/pingcap/tidb/pkg/util/size\"\n", "\t\"go.uber.org/zap\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/memory\"\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "add", "edit_start_line_idx": 27 }
[ { "Name": "TestDAGPlanBuilderSimpleCase", "Cases": [ { "SQL": "select * from t t1 use index(c_d_e)", "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))" }, { "SQL": "select f from t use index() where f = 1", "Best": "TableReader(Table(t)->Sel([eq(test.t.f, 1)]))" }, { "SQL": "select a from t where a between 1 and 2 order by c", "Best": "TableReader(Table(t))->Sort->Projection" }, { "SQL": "select * from t where (t.c > 0 and t.c < 2) or (t.c > 4 and t.c < 6) or (t.c > 8 and t.c < 10) or (t.c > 12 and t.c < 14) or (t.c > 16 and t.c < 18)", "Best": "TableReader(Table(t)->Sel([or(or(and(gt(test.t.c, 0), lt(test.t.c, 2)), and(gt(test.t.c, 4), lt(test.t.c, 6))), or(and(gt(test.t.c, 8), lt(test.t.c, 10)), or(and(gt(test.t.c, 12), lt(test.t.c, 14)), and(gt(test.t.c, 16), lt(test.t.c, 18)))))]))" }, { "SQL": "select * from t where (t.c > 0 and t.c < 1) or (t.c > 2 and t.c < 3) or (t.c > 4 and t.c < 5) or (t.c > 6 and t.c < 7) or (t.c > 9 and t.c < 10)", "Best": "Dual" }, { "SQL": "select * from t where t.c = 1 and t.e = 1 order by t.b limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t))->TopN([test.t.b],0,1)" }, { "SQL": "select * from t where t.e_str is null", "Best": "IndexLookUp(Index(t.e_d_c_str_prefix)[[NULL,NULL]], Table(t))" }, { "SQL": "select * from t where t.c is null", "Best": "Dual" }, { "SQL": "select * from t where t.c = 1 and t.e = 1 order by t.e limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t))->TopN([test.t.e],0,1)" }, { "SQL": "select * from t where t.c = 1 and t.e = 1 order by t.d limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)])->Limit, Table(t))" }, { "SQL": "select c from t where t.c = 1 and t.e = 1 order by t.d limit 1", "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)])->Limit)->Limit->Projection" }, { "SQL": "select c from t order by t.a limit 1", "Best": "TableReader(Table(t)->Limit)->Limit->Projection" }, { "SQL": "select c from t order by t.a + t.b limit 1", "Best": "TableReader(Table(t)->TopN([plus(test.t.a, test.t.b)],0,1))->Projection->TopN([Column#14],0,1)->Projection->Projection" }, { "SQL": "select c from t limit 1", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit" }, { "SQL": "select c from t where c = 1 limit 1", "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Limit)->Limit" }, { "SQL": "select c from t where c = 1", "Best": "IndexReader(Index(t.c_d_e)[[1,1]])" }, { "SQL": "select c from t order by c", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])" }, { "SQL": "select c from t where c = 1 order by e", "Best": "IndexReader(Index(t.c_d_e)[[1,1]])->Sort->Projection" }, { "SQL": "select c, b from t where c = 1 limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Projection" }, { "SQL": "select c, b from t where c = 1 and e = 1 and b = 1 limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)])->Limit)->Limit->Projection" }, { "SQL": "select c from t where c = 1 order by d, c", "Best": "IndexReader(Index(t.c_d_e)[[1,1]])->Sort->Projection" }, { "SQL": "select c_str from t where e_str = '1' order by d_str, c_str", "Best": "IndexLookUp(Index(t.e_d_c_str_prefix)[[\"1\",\"1\"]], Table(t))->Sort->Projection" }, { "SQL": "select c from t where t.c = 1 and t.a > 1 order by t.d limit 1", "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Sel([gt(test.t.a, 1)])->Limit)->Limit->Projection" }, { "SQL": "select c from t where t.c = 1 and t.d = 1 order by t.a limit 1", "Best": 
"IndexReader(Index(t.c_d_e)[[1 1,1 1]]->TopN([test.t.a],0,1))->TopN([test.t.a],0,1)->Projection" }, { "SQL": "select * from t where t.c = 1 and t.a > 1 order by t.d limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([gt(test.t.a, 1)])->Limit, Table(t))" }, { "SQL": "select * from t use index(e_d_c_str_prefix) where t.c_str = 'abcdefghijk' and t.d_str = 'd' and t.e_str = 'e'", "Best": "IndexLookUp(Index(t.e_d_c_str_prefix)[[\"e\" \"d\" \"abcdefghij\",\"e\" \"d\" \"abcdefghij\"]], Table(t)->Sel([eq(test.t.c_str, abcdefghijk)]))" }, { "SQL": "select * from t use index(e_d_c_str_prefix) where t.e_str = b'1110000'", "Best": "IndexLookUp(Index(t.e_d_c_str_prefix)[[\"p\",\"p\"]], Table(t))" }, { "SQL": "select * from (select * from t use index() order by b) t left join t t1 on t.a=t1.a limit 10", "Best": "IndexJoin{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t))}(test.t.a,test.t.a)->Limit" }, { "SQL": "select * from ((SELECT 1 a,3 b) UNION (SELECT 2,1) ORDER BY (SELECT 2)) t order by a,b", "Best": "UnionAll{Dual->Projection->Dual->Projection}->HashAgg->Sort" }, { "SQL": "select * from ((SELECT 1 a,6 b) UNION (SELECT 2,5) UNION (SELECT 2, 4) ORDER BY 1) t order by 1, 2", "Best": "UnionAll{Dual->Projection->Projection->Dual->Projection->Projection->Dual->Projection->Projection}->HashAgg->Projection->Sort" }, { "SQL": "select * from (select *, NULL as xxx from t) t order by xxx", "Best": "TableReader(Table(t))->Projection" }, { "SQL": "select * from t use index(f) where f = 1 and a = 1", "Best": "PointGet(Index(t.f)[KindInt64 1])->Sel([eq(test.t.a, 1)])" }, { "SQL": "select * from t2 use index(b) where b = 1 and a = 1", "Best": "PointGet(Index(t2.b)[KindInt64 1])->Sel([eq(test.t2.a, 1)])" }, { "SQL": "select f from t where a > 1", "Best": "TableReader(Table(t))->Projection" }, { "SQL": "select f from t where a > 1 limit 10", "Best": "TableReader(Table(t)->Limit)->Limit" } ] }, { "Name": "TestDAGPlanBuilderJoin", "Cases": [ { "SQL": "select * from t t1 join t t2 on t1.a = t2.c_str", "Best": "LeftHashJoin{TableReader(Table(t))->Projection->TableReader(Table(t))->Projection}(Column#25,Column#26)" }, { "SQL": "select * from t t1 join t t2 on t1.b = t2.a", "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)" }, { "SQL": "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a", "Best": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.b = t3.a", "Best": "LeftHashJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.b,test.t.a)" }, { "SQL": "select * from t t1 join t t2 on t1.b = t2.a order by t1.a", "Best": "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)" }, { "SQL": "select * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1", "Best": "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)->Limit" }, { "SQL": "select /*+ TIDB_HJ(t1, t2) */ * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1", "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)->TopN([test.t.a],0,1)" }, { "SQL": "select * from t t1 left join t t2 on t1.b = t2.a where 1 = 1 limit 1", "Best": "IndexJoin{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t))}(test.t.b,test.t.a)->Limit" }, { "SQL": "select * from t t1 join t t2 on t1.b = t2.a and t1.c = 1 
and t1.d = 1 and t1.e = 1 order by t1.a limit 1", "Best": "IndexJoin{PointGet(Index(t.c_d_e)[KindInt64 1 KindInt64 1 KindInt64 1])->TableReader(Table(t))}(test.t.b,test.t.a)->TopN([test.t.a],0,1)" }, { "SQL": "select * from t t1 join t t2 on t1.b = t2.b join t t3 on t1.b = t3.b", "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.b)->TableReader(Table(t))}(test.t.b,test.t.b)" }, { "SQL": "select * from t t1 join t t2 on t1.a = t2.a order by t1.a", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select * from t t1 left outer join t t2 on t1.a = t2.a right outer join t t3 on t1.a = t3.a", "Best": "MergeRightOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a and t1.b = 1 and t3.c = 1", "Best": "IndexJoin{IndexJoin{TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)->Projection" }, { "SQL": "select * from t where t.c in (select b from t s where s.a = t.a)", "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)(test.t.c,test.t.b)" }, { "SQL": "select t.c in (select b from t s where s.a = t.a) from t", "Best": "LeftHashJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.a)(test.t.c,test.t.b)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.b", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))->Sort}(test.t.a,test.t.b)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.b = t2.b order by t2.a", "Best": "MergeInnerJoin{TableReader(Table(t))->Sort->TableReader(Table(t))->Sort}(test.t.b,test.t.b)->Sort" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a desc", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.b = t2.b order by t2.b desc", "Best": "MergeInnerJoin{TableReader(Table(t))->Sort->TableReader(Table(t))->Sort}(test.t.b,test.t.b)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a", "Best": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.a = t2.b and t2.a = t3.b", "Best": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))->Sort}(test.t.a,test.t.b)->Sort->TableReader(Table(t))->Sort}(test.t.a,test.t.b)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d", "Best": "MergeInnerJoin{MergeInnerJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], 
Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d order by t1.c", "Best": "MergeInnerJoin{MergeInnerJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t2.a = t3.a", "Best": "MergeLeftOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Sort->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t1.a = t3.a", "Best": "MergeLeftOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t1, t2) */ * from t t1, t t2 where t1.a = t2.a", "Best": "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1, t t2 where t1.a = t2.c", "Best": "IndexJoin{TableReader(Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,NULL]], Table(t))}(test.t.a,test.t.c)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ t1.a , t2.a from t t1, t t2 where t1.a = t2.c", "Best": "IndexJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)" }, { "SQL": "select /*+ TIDB_INLJ(t1, t2) */ t1.a, t2.a from t t1, t t2 where t1.a = t2.a order by t1.c", "Best": "IndexJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.a)->Projection" }, { "SQL": "select /*+ TIDB_INLJ(t1, t2) */ t1.a, t2.a from t t1, t t2 where t1.a = t2.a order by t2.c", "Best": "IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.a)->Projection" }, { "SQL": "select /*+ TIDB_INLJ(t1) */ t1.a , t2.a from t t1, t t2 where t1.a = t2.c", "Best": "IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.c,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t1, t2) */ * from t t1 left outer join t t2 on t1.a = t2.a and t2.b < 1", "Best": "IndexJoin{TableReader(Table(t))->TableReader(Table(t)->Sel([lt(test.t.b, 1)]))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t1, t2) */ * from t t1 join t t2 on t1.d=t2.d and t2.c = 1", "Best": "IndexJoin{IndexLookUp(Index(t.c_d_e)[[1 NULL,1 NULL]], Table(t))->TableReader(Table(t))}(test.t.d,test.t.d)->Projection" }, { "SQL": "select /*+ TIDB_INLJ(t1, t2) */ * from t t1 left outer join t t2 on t1.a = t2.b", "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.b)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 right outer join t t2 on t1.a = t2.b", "Best": "RightHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.b)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 where t1.a in (select a from t t2)", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t1) */ * from t t1 where t1.a in (select a from t t2)", "Best": "IndexJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t1.c=t2.c and t1.f=t2.f", "Best": 
"IndexJoin{TableReader(Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,NULL]], Table(t))}(test.t.c,test.t.c)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t1.a = t2.a and t1.f=t2.f", "Best": "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t1.f=t2.f and t1.a=t2.a", "Best": "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t1.a=t2.a and t2.a in (1, 2)", "Best": "IndexJoin{BatchPointGet(Handle(t.a)[1 2])->TableReader(Table(t)->Sel([in(test.t.a, 1, 2)]))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t1.b=t2.c and t1.b=1 and t2.d > t1.d-10 and t2.d < t1.d+10", "Best": "IndexJoin{TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->IndexLookUp(Index(t.c_d_e)[[1 NULL,1 NULL]], Table(t))}" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t1.b=t2.b and t1.c=1 and t2.c=1 and t2.d > t1.d-10 and t2.d < t1.d+10", "Best": "LeftHashJoin{IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t))->IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t))}(test.t.b,test.t.b)" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t2.c > t1.d-10 and t2.c < t1.d+10", "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t1.b = t2.c and t2.c=1 and t2.d=2 and t2.e=4", "Best": "RightHashJoin{PointGet(Index(t.c_d_e)[KindInt64 1 KindInt64 2 KindInt64 4])->TableReader(Table(t)->Sel([eq(test.t.b, 1)]))}->Projection" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 where t2.c=1 and t2.d=1 and t2.e > 10 and t2.e < 20", "Best": "RightHashJoin{IndexLookUp(Index(t.c_d_e)[(1 1 10,1 1 20)], Table(t))->TableReader(Table(t))}->Projection" } ] }, { "Name": "TestDAGPlanBuilderSubquery", "Cases": [ { "SQL": "select * from t where exists (select s.a from t s having sum(s.a) = t.a )", "Best": "LeftHashJoin{TableReader(Table(t))->Projection->IndexReader(Index(t.f)[[NULL,+inf]]->StreamAgg)->StreamAgg}(Column#27,Column#25)" }, { "SQL": "select * from t where exists (select s.a from t s having sum(s.a) = t.a ) order by t.a", "Best": "LeftHashJoin{TableReader(Table(t))->Projection->IndexReader(Index(t.f)[[NULL,+inf]]->StreamAgg)->StreamAgg}(Column#27,Column#25)->Sort" }, { "SQL": "select * from t where a in (select s.a from t s) order by t.a", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select * from t where exists (select s.a from t s where s.c in (select c from t as k where k.d = s.d) having sum(s.a) = t.a )", "Best": "LeftHashJoin{TableReader(Table(t))->Projection->MergeSemiJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.c,test.t.c)(test.t.d,test.t.d)->Projection->StreamAgg}(Column#39,Column#37)" }, { "SQL": "select * from t where a in (select a from t) order by b", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Sort" }, { "SQL": "select t.c in (select count(*) from t s, t t1 where s.a = t.a and s.a = t1.a) from t", "Best": "Apply{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->StreamAgg}->Projection" }, { "SQL": "select (select count(*) from t s, t t1 where s.a = t.a and s.a = t1.a) from t", "Best": 
"MergeLeftOuterJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Projection}(test.t.a,test.t.a)->Projection" }, { "SQL": "select (select count(*) from t s, t t1 where s.a = t.a and s.a = t1.a) from t order by t.a", "Best": "MergeLeftOuterJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Projection}(test.t.a,test.t.a)->Projection->Projection" } ] }, { "Name": "TestDAGPlanTopN", "Cases": [ { "SQL": "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.a limit 1", "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.a],0,1)->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.a],0,1)" }, { "SQL": "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.b limit 1", "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->TopN([test.t.b],0,1))->TopN([test.t.b],0,1)->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.b],0,1)->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.b],0,1)" }, { "SQL": "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b limit 1", "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t))}(test.t.b,test.t.b)->Limit->TableReader(Table(t))}(test.t.b,test.t.b)->Limit" }, { "SQL": "select * from t where b = 1 and c = 1 order by c limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t)->Sel([eq(test.t.b, 1)]))->Limit" }, { "SQL": "select * from t where c = 1 order by c limit 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))" }, { "SQL": "select * from t order by a limit 1", "Best": "TableReader(Table(t)->Limit)->Limit" }, { "SQL": "select c from t order by c limit 1", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit" } ] }, { "Name": "TestDAGPlanBuilderBasePhysicalPlan", "Cases": [ { "SQL": "select * from t order by b limit 1 for update", "Best": "TableReader(Table(t)->TopN([test.t.b],0,1))->TopN([test.t.b],0,1)->Lock", "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`), limit_to_cop(@`sel_1`)" }, { "SQL": "update t set a = 5 where b < 1 order by d limit 1", "Best": "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Update", "Hints": "use_index(@`upd_1` `test`.`t` ), no_order_index(@`upd_1` `test`.`t` `primary`), limit_to_cop(@`upd_1`)" }, { "SQL": "update t set a = 5", "Best": "TableReader(Table(t))->Update", "Hints": "use_index(@`upd_1` `test`.`t` ), no_order_index(@`upd_1` `test`.`t` `primary`)" }, { "SQL": "delete /*+ TIDB_INLJ(t1, t2) */ t1 from t t1, t t2 where t1.c=t2.c", "Best": "IndexJoin{TableReader(Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,NULL]], Table(t))}(test.t.c,test.t.c)->Delete", "Hints": "inl_join(@`del_1` `test`.`t2`), use_index(@`del_1` `test`.`t1` ), no_order_index(@`del_1` `test`.`t1` `primary`), use_index(@`del_1` `test`.`t2` `c_d_e`), no_order_index(@`del_1` `test`.`t2` `c_d_e`)" }, { "SQL": "delete /*+ TIDB_SMJ(t1, t2) */ from t1 using t t1, t t2 where t1.c=t2.c", "Best": "MergeInnerJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)->Delete", "Hints": "merge_join(@`del_1` `test`.`t1`), use_index(@`del_1` `test`.`t1` `c_d_e`), order_index(@`del_1` `test`.`t1` `c_d_e`), use_index(@`del_1` `test`.`t2` `c_d_e`), 
order_index(@`del_1` `test`.`t2` `c_d_e`)" }, { "SQL": "update /*+ TIDB_SMJ(t1, t2) */ t t1, t t2 set t1.c=1, t2.c=1 where t1.a=t2.a", "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Update", "Hints": "merge_join(@`upd_1` `test`.`t1`), use_index(@`upd_1` `test`.`t1` ), order_index(@`upd_1` `test`.`t1` `primary`), use_index(@`upd_1` `test`.`t2` ), order_index(@`upd_1` `test`.`t2` `primary`)" }, { "SQL": "update /*+ TIDB_HJ(t1, t2) */ t t1, t t2 set t1.c=1, t2.c=1 where t1.a=t2.a", "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Update", "Hints": "hash_join(@`upd_1` `test`.`t1`), use_index(@`upd_1` `test`.`t1` ), no_order_index(@`upd_1` `test`.`t1` `primary`), use_index(@`upd_1` `test`.`t2` ), no_order_index(@`upd_1` `test`.`t2` `primary`)" }, { "SQL": "delete from t where b < 1 order by d limit 1", "Best": "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Delete", "Hints": "use_index(@`del_1` `test`.`t` ), no_order_index(@`del_1` `test`.`t` `primary`), limit_to_cop(@`del_1`)" }, { "SQL": "delete from t", "Best": "TableReader(Table(t))->Delete", "Hints": "use_index(@`del_1` `test`.`t` ), no_order_index(@`del_1` `test`.`t` `primary`)" }, { "SQL": "delete from t use index(c_d_e) where b = 1", "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t)->Sel([eq(test.t.b, 1)]))->Delete", "Hints": "use_index(@`del_1` `test`.`t` `c_d_e`), no_order_index(@`del_1` `test`.`t` `c_d_e`)" }, { "SQL": "insert into t select * from t where b < 1 order by d limit 1", "Best": "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Insert", "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`), limit_to_cop(@`sel_1`)" }, { "SQL": "insert into t (a, b, c, e, f, g) values(0,0,0,0,0,0)", "Best": "Insert", "Hints": "" }, { "SQL": "select 1", "Best": "Dual->Projection", "Hints": "" }, { "SQL": "select * from t where false", "Best": "Dual", "Hints": "" }, { "SQL": "show tables", "Best": "Show", "Hints": "" } ] }, { "Name": "TestDAGPlanBuilderUnion", "Cases": [ { "SQL": "select * from t union all select * from t", "Best": "UnionAll{TableReader(Table(t))->TableReader(Table(t))}" }, { "SQL": "select * from t union all (select * from t) order by a ", "Best": "UnionAll{TableReader(Table(t))->TableReader(Table(t))}->Sort" }, { "SQL": "select * from t union all (select * from t) limit 1", "Best": "UnionAll{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t)->Limit)->Limit}->Limit" }, { "SQL": "select a from t union all (select c from t) order by a limit 1", "Best": "UnionAll{TableReader(Table(t)->Limit)->Limit->IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit}->TopN([Column#25],0,1)" } ] }, { "Name": "TestDAGPlanBuilderUnionScan", "Cases": [ { "SQL": "select * from t", "Best": "TableReader(Table(t))->UnionScan([])->Projection" }, { "SQL": "select * from t where b = 1", "Best": "TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->UnionScan([eq(test.t.b, 1)])->Projection" }, { "SQL": "select * from t where a = 1", "Best": "TableReader(Table(t)->Sel([eq(test.t.a, 1)]))->UnionScan([eq(test.t.a, 1)])->Projection" }, { "SQL": "select * from t where a = 1 order by a", "Best": "TableReader(Table(t)->Sel([eq(test.t.a, 1)]))->UnionScan([eq(test.t.a, 1)])->Projection->Sort" }, { "SQL": "select * from t where a = 1 order by b", "Best": "TableReader(Table(t)->Sel([eq(test.t.a, 1)]))->UnionScan([eq(test.t.a, 1)])->Projection->Sort" }, { 
"SQL": "select * from t where a = 1 limit 1", "Best": "TableReader(Table(t)->Sel([eq(test.t.a, 1)]))->UnionScan([eq(test.t.a, 1)])->Limit" }, { "SQL": "select * from t where c = 1", "Best": "TableReader(Table(t)->Sel([eq(test.t.c, 1)]))->UnionScan([eq(test.t.c, 1)])->Projection" }, { "SQL": "select c from t where c = 1", "Best": "TableReader(Table(t)->Sel([eq(test.t.c, 1)]))->UnionScan([eq(test.t.c, 1)])->Projection" } ] }, { "Name": "TestDAGPlanBuilderAgg", "Cases": [ { "SQL": "select distinct b from t", "Best": "TableReader(Table(t)->HashAgg)->HashAgg" }, { "SQL": "select count(*) from (select * from t order by b) t group by b", "Best": "TableReader(Table(t)->HashAgg)->HashAgg" }, { "SQL": "select count(*), x from (select b as bbb, a + 1 as x from (select * from t order by b) t) t group by bbb", "Best": "TableReader(Table(t)->HashAgg)->HashAgg" }, { "SQL": "select sum(a), avg(b + c) from t group by d", "Best": "TableReader(Table(t)->HashAgg)->HashAgg" }, { "SQL": "select sum(distinct a), avg(b + c) from t group by d", "Best": "TableReader(Table(t)->HashAgg)->HashAgg" }, { "SQL": "select sum(e), avg(e + c) from t where c = 1 group by (c + d)", "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->HashAgg)->HashAgg" }, { "SQL": "select sum(e), avg(e + c) from t where c = 1 group by c", "Best": "IndexReader(Index(t.c_d_e)[[1,1]])->Projection->StreamAgg" }, { "SQL": "select sum(e), avg(e + c) from t where c = 1 group by e", "Best": "IndexReader(Index(t.c_d_e)[[1,1]])->Projection->HashAgg" }, { "SQL": "select sum(e), avg(b + c) from t where c = 1 and e = 1 group by d", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t))->Projection->Projection->StreamAgg" }, { "SQL": "select sum(e), avg(b + c) from t where c = 1 and b = 1", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t)->Sel([eq(test.t.b, 1)]))->Projection->StreamAgg" }, { "SQL": "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by d order by k", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)]))->Projection->Projection->StreamAgg->Sort" }, { "SQL": "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by c order by k", "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)]))->Projection->Projection->StreamAgg->Sort" }, { "SQL": "select sum(to_base64(e)) from t where c = 1", "Best": "IndexReader(Index(t.c_d_e)[[1,1]])->Projection->StreamAgg" }, { "SQL": "select (select count(1) k from t s where s.a = t.a having k != 0) from t", "Best": "MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))->Projection}(test.t.a,test.t.a)->Projection" }, { "SQL": "select sum(to_base64(e)) from t group by e,d,c order by c", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])->Projection->StreamAgg->Projection" }, { "SQL": "select sum(e+1) from t group by e,d,c order by c", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->StreamAgg)->StreamAgg->Projection" }, { "SQL": "select sum(to_base64(e)) from t group by e,d,c order by c,e", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])->Projection->StreamAgg->Sort->Projection" }, { "SQL": "select sum(e+1) from t group by e,d,c order by c,e", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->StreamAgg)->StreamAgg->Sort->Projection" }, { "SQL": "select count(*) from t group by g order by g limit 10", "Best": "IndexReader(Index(t.g)[[NULL,+inf]]->StreamAgg)->StreamAgg->Limit->Projection" }, { "SQL": "select count(*) from t group by g limit 10", 
"Best": "IndexReader(Index(t.g)[[NULL,+inf]]->StreamAgg)->StreamAgg->Limit" }, { "SQL": "select count(*) from t group by g order by g", "Best": "IndexReader(Index(t.g)[[NULL,+inf]]->StreamAgg)->StreamAgg->Projection" }, { "SQL": "select count(*) from t group by g order by g desc limit 1", "Best": "IndexReader(Index(t.g)[[NULL,+inf]]->StreamAgg)->StreamAgg->Limit->Projection" }, { "SQL": "select count(*) from t group by b order by b limit 10", "Best": "TableReader(Table(t)->HashAgg)->HashAgg->TopN([test.t.b],0,10)->Projection" }, { "SQL": "select count(*) from t group by b order by b", "Best": "TableReader(Table(t)->HashAgg)->HashAgg->Sort->Projection" }, { "SQL": "select count(*) from t group by b limit 10", "Best": "TableReader(Table(t)->HashAgg)->HashAgg->Limit" }, { "SQL": "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g group by a.g", "Best": "MergeInnerJoin{IndexReader(Index(t.g)[[NULL,+inf]])->IndexReader(Index(t.g)[[NULL,+inf]])}(test.t.g,test.t.g)->Projection->StreamAgg" }, { "SQL": "select /*+ tidb_inlj(a,b) */ sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.g > 60 group by a.g order by a.g limit 1", "Best": "IndexJoin{IndexReader(Index(t.g)[(60,+inf]])->IndexReader(Index(t.g)[[NULL,NULL]]->Sel([gt(test.t.g, 60)]))}(test.t.g,test.t.g)->Projection->StreamAgg->Limit->Projection" }, { "SQL": "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.a>5 group by a.g order by a.g limit 1", "Best": "MergeInnerJoin{IndexReader(Index(t.g)[[NULL,+inf]]->Sel([gt(test.t.a, 5)]))->IndexReader(Index(t.g)[[NULL,+inf]])}(test.t.g,test.t.g)->Projection->StreamAgg->Limit->Projection" }, { "SQL": "select sum(d) from t", "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->StreamAgg)->StreamAgg" } ] }, { "Name": "TestDAGPlanBuilderWindow", "Cases": [ { "SQL": "select lead(a, 1) over (partition by null) as c from t", "Best": "IndexReader(Index(t.f)[[NULL,+inf]])->Window(lead(test.t.a, 1)->Column#14 over())->Projection" } ] }, { "Name": "TestDAGPlanBuilderWindowParallel", "Cases": [ { "SQL": "select lead(a, 1) over (partition by null) as c from t", "Best": "IndexReader(Index(t.f)[[NULL,+inf]])->Window(lead(test.t.a, 1)->Column#14 over())->Projection" }, { "SQL": "select lead(a, 1) over (partition by b) as c from t", "Best": "TableReader(Table(t))->Sort->Window(lead(test.t.a, 1)->Column#14 over(partition by test.t.b))->Partition(execution info: concurrency:4, data sources:[TableReader_10])->Projection" } ] } ]
pkg/planner/core/casetest/dag/testdata/plan_suite_out.json
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00017724839563015848, 0.00017202654271386564, 0.00016552407760173082, 0.0001723038003547117, 0.0000026363859433331527 ]
{ "id": 2, "code_window": [ "\t// LitInitialized is the flag indicates whether the lightning backfill process is initialized.\n", "\tLitInitialized bool\n", ")\n", "\n", "const maxMemoryQuota = 2 * size.GB\n", "\n", "// InitGlobalLightningEnv initialize Lightning backfill environment.\n", "func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) {\n", "\tlog.SetAppLogger(logutil.BgLogger())\n", "\tglobalCfg := config.GetGlobalConfig()\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const defaultMemoryQuota = 2 * size.GB\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ingest import ( "context" "os" "path/filepath" "strconv" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/size" "go.uber.org/zap" ) var ( // LitBackCtxMgr is the entry for the lightning backfill process. LitBackCtxMgr BackendCtxMgr // LitMemRoot is used to track the memory usage of the lightning backfill process. LitMemRoot MemRoot // LitDiskRoot is used to track the disk usage of the lightning backfill process. LitDiskRoot DiskRoot // LitRLimit is the max open file number of the lightning backfill process. LitRLimit uint64 // LitSortPath is the sort path for the lightning backfill process. LitSortPath string // LitInitialized is the flag indicates whether the lightning backfill process is initialized. LitInitialized bool ) const maxMemoryQuota = 2 * size.GB // InitGlobalLightningEnv initialize Lightning backfill environment. func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) { log.SetAppLogger(logutil.BgLogger()) globalCfg := config.GetGlobalConfig() if globalCfg.Store != "tikv" { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.String("storage limitation", "only support TiKV storage"), zap.String("current storage", globalCfg.Store), zap.Bool("lightning is initialized", LitInitialized)) return } sPath, err := genLightningDataDir() if err != nil { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.Error(err), zap.Bool("lightning is initialized", LitInitialized)) return } LitSortPath = sPath LitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota) LitRLimit = util.GenRLimit("ddl-ingest") LitInitialized = true logutil.BgLogger().Info(LitInfoEnvInitSucc, zap.String("category", "ddl-ingest"), zap.Uint64("memory limitation", maxMemoryQuota), zap.String("disk usage info", LitDiskRoot.UsageInfo()), zap.Uint64("max open file number", LitRLimit), zap.Bool("lightning is initialized", LitInitialized)) } // Generate lightning local store dir in TiDB data dir. // it will append -port to be tmp_ddl suffix. func genLightningDataDir() (string, error) { sortPath := ConfigSortPath() if _, err := os.Stat(sortPath); err != nil { if !os.IsNotExist(err) { logutil.BgLogger().Error(LitErrStatDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } } err := os.MkdirAll(sortPath, 0o700) if err != nil { logutil.BgLogger().Error(LitErrCreateDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } logutil.BgLogger().Info(LitInfoSortDir, zap.String("category", "ddl-ingest"), zap.String("data path:", sortPath)) return sortPath, nil } // ConfigSortPath returns the sort path for lightning. 
func ConfigSortPath() string { tidbCfg := config.GetGlobalConfig() sortPathSuffix := "/tmp_ddl-" + strconv.Itoa(int(tidbCfg.Port)) sortPath := filepath.Join(tidbCfg.TempDir, sortPathSuffix) return sortPath } // GenLightningDataDirForTest is only used for test. var GenLightningDataDirForTest = genLightningDataDir
pkg/ddl/ingest/env.go
1
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.999184787273407, 0.33923575282096863, 0.00017185758042614907, 0.01142638549208641, 0.46600279211997986 ]
{ "id": 2, "code_window": [ "\t// LitInitialized is the flag indicates whether the lightning backfill process is initialized.\n", "\tLitInitialized bool\n", ")\n", "\n", "const maxMemoryQuota = 2 * size.GB\n", "\n", "// InitGlobalLightningEnv initialize Lightning backfill environment.\n", "func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) {\n", "\tlog.SetAppLogger(logutil.BgLogger())\n", "\tglobalCfg := config.GetGlobalConfig()\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const defaultMemoryQuota = 2 * size.GB\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package session import ( "context" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/terror" ) // Advisory Locks are the locks in GET_LOCK() and RELEASE_LOCK(). // We implement them in TiDB by using an INSERT into mysql.advisory_locks // inside of a pessimistic transaction that is never committed. // // Each advisory lock requires its own session, since the pessimistic locks // can be rolled back in any order (transactions can't release random locks // like this even if savepoints was supported). // // We use referenceCount to track the number of references to the lock in the session. // A little known feature of advisory locks is that you can call GET_LOCK // multiple times on the same lock, and it will only be released when // the reference count reaches zero. type advisoryLock struct { ctx context.Context session *session referenceCount int owner uint64 } // IncrReferences increments the reference count for the advisory lock. func (a *advisoryLock) IncrReferences() { a.referenceCount++ } // DecrReferences decrements the reference count for the advisory lock. func (a *advisoryLock) DecrReferences() { a.referenceCount-- } // ReferenceCount returns the current reference count for the advisory lock. func (a *advisoryLock) ReferenceCount() int { return a.referenceCount } // Close releases the advisory lock, which includes // rolling back the transaction and closing the session. func (a *advisoryLock) Close() { _, err := a.session.ExecuteInternal(a.ctx, "ROLLBACK") terror.Log(err) a.session.Close() } // GetLock acquires a new advisory lock using a pessimistic transaction. // The timeout is implemented by using the pessimistic lock timeout. // We will never COMMIT the transaction, but the err indicates // if the lock was successfully acquired. func (a *advisoryLock) GetLock(lockName string, timeout int64) error { a.ctx = kv.WithInternalSourceType(a.ctx, kv.InternalTxnOthers) _, err := a.session.ExecuteInternal(a.ctx, "SET innodb_lock_wait_timeout = %?", timeout) if err != nil { return err } _, err = a.session.ExecuteInternal(a.ctx, "BEGIN PESSIMISTIC") if err != nil { return err } _, err = a.session.ExecuteInternal(a.ctx, "INSERT INTO mysql.advisory_locks (lock_name) VALUES (%?)", lockName) if err != nil { // We couldn't acquire the LOCK so we close the session cleanly // and return the error to the caller. The caller will need to interpret // this differently if it is lock wait timeout or a deadlock. 
a.Close() return err } a.referenceCount++ return nil } // IsUsedLock checks if a lockName is already in use func (a *advisoryLock) IsUsedLock(lockName string) error { defer a.Close() // Rollback a.ctx = kv.WithInternalSourceType(a.ctx, kv.InternalTxnOthers) _, err := a.session.ExecuteInternal(a.ctx, "SET innodb_lock_wait_timeout = 1") if err != nil { return err } _, err = a.session.ExecuteInternal(a.ctx, "BEGIN PESSIMISTIC") if err != nil { return err } _, err = a.session.ExecuteInternal(a.ctx, "INSERT INTO mysql.advisory_locks (lock_name) VALUES (%?)", lockName) if err != nil { return err } return nil }
pkg/session/advisory_locks.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.000989448744803667, 0.0002954983210656792, 0.00016515832976438105, 0.00017741965712048113, 0.00024419938563369215 ]
{ "id": 2, "code_window": [ "\t// LitInitialized is the flag indicates whether the lightning backfill process is initialized.\n", "\tLitInitialized bool\n", ")\n", "\n", "const maxMemoryQuota = 2 * size.GB\n", "\n", "// InitGlobalLightningEnv initialize Lightning backfill environment.\n", "func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) {\n", "\tlog.SetAppLogger(logutil.BgLogger())\n", "\tglobalCfg := config.GetGlobalConfig()\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const defaultMemoryQuota = 2 * size.GB\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package readonlytest import ( "testing" "github.com/pingcap/tidb/pkg/testkit/testsetup" "go.uber.org/goleak" ) func TestMain(m *testing.M) { testsetup.SetupForCommonTest() opts := []goleak.Option{ goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"), goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), } goleak.VerifyTestMain(m, opts...) }
tests/readonlytest/main_test.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00017568677139934152, 0.00017125325393863022, 0.00016648138989694417, 0.00017142243450507522, 0.000003836442374449689 ]
{ "id": 2, "code_window": [ "\t// LitInitialized is the flag indicates whether the lightning backfill process is initialized.\n", "\tLitInitialized bool\n", ")\n", "\n", "const maxMemoryQuota = 2 * size.GB\n", "\n", "// InitGlobalLightningEnv initialize Lightning backfill environment.\n", "func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) {\n", "\tlog.SetAppLogger(logutil.BgLogger())\n", "\tglobalCfg := config.GetGlobalConfig()\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "const defaultMemoryQuota = 2 * size.GB\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 46 }
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package parser_test import ( "testing" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/stretchr/testify/require" ) func TestParseHint(t *testing.T) { testCases := []struct { input string mode mysql.SQLMode output []*ast.TableOptimizerHint errs []string }{ { input: "", errs: []string{`Optimizer hint syntax error at line 1 `}, }, { input: "MEMORY_QUOTA(8 MB) MEMORY_QUOTA(6 GB)", output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("MEMORY_QUOTA"), HintData: int64(8 * 1024 * 1024), }, { HintName: model.NewCIStr("MEMORY_QUOTA"), HintData: int64(6 * 1024 * 1024 * 1024), }, }, }, { input: "QB_NAME(qb1) QB_NAME(`qb2`), QB_NAME(TRUE) QB_NAME(\"ANSI quoted\") QB_NAME(_utf8), QB_NAME(0b10) QB_NAME(0x1a)", mode: mysql.ModeANSIQuotes, output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("QB_NAME"), QBName: model.NewCIStr("qb1"), }, { HintName: model.NewCIStr("QB_NAME"), QBName: model.NewCIStr("qb2"), }, { HintName: model.NewCIStr("QB_NAME"), QBName: model.NewCIStr("TRUE"), }, { HintName: model.NewCIStr("QB_NAME"), QBName: model.NewCIStr("ANSI quoted"), }, { HintName: model.NewCIStr("QB_NAME"), QBName: model.NewCIStr("_utf8"), }, { HintName: model.NewCIStr("QB_NAME"), QBName: model.NewCIStr("0b10"), }, { HintName: model.NewCIStr("QB_NAME"), QBName: model.NewCIStr("0x1a"), }, }, }, { input: "QB_NAME(1)", errs: []string{`Optimizer hint syntax error at line 1 `}, }, { input: "QB_NAME('string literal')", errs: []string{`Optimizer hint syntax error at line 1 `}, }, { input: "QB_NAME(many identifiers)", errs: []string{`Optimizer hint syntax error at line 1 `}, }, { input: "QB_NAME(@qb1)", errs: []string{`Optimizer hint syntax error at line 1 `}, }, { input: "QB_NAME(b'10')", errs: []string{ `Cannot use bit-value literal`, `Optimizer hint syntax error at line 1 `, }, }, { input: "QB_NAME(x'1a')", errs: []string{ `Cannot use hexadecimal literal`, `Optimizer hint syntax error at line 1 `, }, }, { input: "JOIN_FIXED_ORDER() BKA()", errs: []string{ `Optimizer hint JOIN_FIXED_ORDER is not supported`, `Optimizer hint BKA is not supported`, }, }, { input: "HASH_JOIN() TIDB_HJ(@qb1) INL_JOIN(x, `y y`.z) MERGE_JOIN(w@`First QB`)", output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("HASH_JOIN"), }, { HintName: model.NewCIStr("TIDB_HJ"), QBName: model.NewCIStr("qb1"), }, { HintName: model.NewCIStr("INL_JOIN"), Tables: []ast.HintTable{ {TableName: model.NewCIStr("x")}, {DBName: model.NewCIStr("y y"), TableName: model.NewCIStr("z")}, }, }, { HintName: model.NewCIStr("MERGE_JOIN"), Tables: []ast.HintTable{ {TableName: model.NewCIStr("w"), QBName: model.NewCIStr("First QB")}, }, }, }, }, { input: "USE_INDEX_MERGE(@qb1 tbl1 x, y, z) IGNORE_INDEX(tbl2@qb2) USE_INDEX(tbl3 PRIMARY) FORCE_INDEX(tbl4@qb3 c1)", output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("USE_INDEX_MERGE"), Tables: []ast.HintTable{{TableName: model.NewCIStr("tbl1")}}, QBName: 
model.NewCIStr("qb1"), Indexes: []model.CIStr{model.NewCIStr("x"), model.NewCIStr("y"), model.NewCIStr("z")}, }, { HintName: model.NewCIStr("IGNORE_INDEX"), Tables: []ast.HintTable{{TableName: model.NewCIStr("tbl2"), QBName: model.NewCIStr("qb2")}}, }, { HintName: model.NewCIStr("USE_INDEX"), Tables: []ast.HintTable{{TableName: model.NewCIStr("tbl3")}}, Indexes: []model.CIStr{model.NewCIStr("PRIMARY")}, }, { HintName: model.NewCIStr("FORCE_INDEX"), Tables: []ast.HintTable{{TableName: model.NewCIStr("tbl4"), QBName: model.NewCIStr("qb3")}}, Indexes: []model.CIStr{model.NewCIStr("c1")}, }, }, }, { input: "USE_INDEX(@qb1 tbl1 partition(p0) x) USE_INDEX_MERGE(@qb2 tbl2@qb2 partition(p0, p1) x, y, z)", output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("USE_INDEX"), Tables: []ast.HintTable{{ TableName: model.NewCIStr("tbl1"), PartitionList: []model.CIStr{model.NewCIStr("p0")}, }}, QBName: model.NewCIStr("qb1"), Indexes: []model.CIStr{model.NewCIStr("x")}, }, { HintName: model.NewCIStr("USE_INDEX_MERGE"), Tables: []ast.HintTable{{ TableName: model.NewCIStr("tbl2"), QBName: model.NewCIStr("qb2"), PartitionList: []model.CIStr{model.NewCIStr("p0"), model.NewCIStr("p1")}, }}, QBName: model.NewCIStr("qb2"), Indexes: []model.CIStr{model.NewCIStr("x"), model.NewCIStr("y"), model.NewCIStr("z")}, }, }, }, { input: `SET_VAR(sbs = 16M) SET_VAR(fkc=OFF) SET_VAR(os="mcb=off") set_var(abc=1) set_var(os2='mcb2=off')`, output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("SET_VAR"), HintData: ast.HintSetVar{ VarName: "sbs", Value: "16M", }, }, { HintName: model.NewCIStr("SET_VAR"), HintData: ast.HintSetVar{ VarName: "fkc", Value: "OFF", }, }, { HintName: model.NewCIStr("SET_VAR"), HintData: ast.HintSetVar{ VarName: "os", Value: "mcb=off", }, }, { HintName: model.NewCIStr("set_var"), HintData: ast.HintSetVar{ VarName: "abc", Value: "1", }, }, { HintName: model.NewCIStr("set_var"), HintData: ast.HintSetVar{ VarName: "os2", Value: "mcb2=off", }, }, }, }, { input: "USE_TOJA(TRUE) IGNORE_PLAN_CACHE() USE_CASCADES(TRUE) QUERY_TYPE(@qb1 OLAP) QUERY_TYPE(OLTP) NO_INDEX_MERGE() RESOURCE_GROUP(rg1)", output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("USE_TOJA"), HintData: true, }, { HintName: model.NewCIStr("IGNORE_PLAN_CACHE"), }, { HintName: model.NewCIStr("USE_CASCADES"), HintData: true, }, { HintName: model.NewCIStr("QUERY_TYPE"), QBName: model.NewCIStr("qb1"), HintData: model.NewCIStr("OLAP"), }, { HintName: model.NewCIStr("QUERY_TYPE"), HintData: model.NewCIStr("OLTP"), }, { HintName: model.NewCIStr("NO_INDEX_MERGE"), }, { HintName: model.NewCIStr("RESOURCE_GROUP"), HintData: "rg1", }, }, }, { input: "READ_FROM_STORAGE(@foo TIKV[a, b], TIFLASH[c, d]) HASH_AGG() SEMI_JOIN_REWRITE() READ_FROM_STORAGE(TIKV[e])", output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("READ_FROM_STORAGE"), HintData: model.NewCIStr("TIKV"), QBName: model.NewCIStr("foo"), Tables: []ast.HintTable{ {TableName: model.NewCIStr("a")}, {TableName: model.NewCIStr("b")}, }, }, { HintName: model.NewCIStr("READ_FROM_STORAGE"), HintData: model.NewCIStr("TIFLASH"), QBName: model.NewCIStr("foo"), Tables: []ast.HintTable{ {TableName: model.NewCIStr("c")}, {TableName: model.NewCIStr("d")}, }, }, { HintName: model.NewCIStr("HASH_AGG"), }, { HintName: model.NewCIStr("SEMI_JOIN_REWRITE"), }, { HintName: model.NewCIStr("READ_FROM_STORAGE"), HintData: model.NewCIStr("TIKV"), Tables: []ast.HintTable{ {TableName: model.NewCIStr("e")}, }, }, }, }, { input: "unknown_hint()", errs: []string{`Optimizer hint syntax error at 
line 1 `}, }, { input: "set_var(timestamp = 1.5)", errs: []string{ `Cannot use decimal number`, `Optimizer hint syntax error at line 1 `, }, }, { input: "set_var(timestamp = _utf8mb4'1234')", // Optimizer hint doesn't recognize _charset'strings'. errs: []string{`Optimizer hint syntax error at line 1 `}, }, { input: "set_var(timestamp = 9999999999999999999999999999999999999)", errs: []string{ `integer value is out of range`, `Optimizer hint syntax error at line 1 `, }, }, { input: "time_range('2020-02-20 12:12:12',456)", errs: []string{ `Optimizer hint syntax error at line 1 `, }, }, { input: "time_range(456,'2020-02-20 12:12:12')", errs: []string{ `Optimizer hint syntax error at line 1 `, }, }, { input: "TIME_RANGE('2020-02-20 12:12:12','2020-02-20 13:12:12')", output: []*ast.TableOptimizerHint{ { HintName: model.NewCIStr("TIME_RANGE"), HintData: ast.HintTimeRange{ From: "2020-02-20 12:12:12", To: "2020-02-20 13:12:12", }, }, }, }, } for _, tc := range testCases { output, errs := parser.ParseHint("/*+"+tc.input+"*/", tc.mode, parser.Pos{Line: 1}) require.Lenf(t, errs, len(tc.errs), "input = %s,\n... errs = %q", tc.input, errs) for i, err := range errs { require.Errorf(t, err, "input = %s, i = %d", tc.input, i) require.Containsf(t, err.Error(), tc.errs[i], "input = %s, i = %d", tc.input, i) } require.Equalf(t, tc.output, output, "input = %s,\n... output = %q", tc.input, output) } }
pkg/parser/hintparser_test.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00030901405261829495, 0.00017258171283174306, 0.00016666612646076828, 0.0001679942215560004, 0.000022834456103737466 ]
{ "id": 3, "code_window": [ "\t\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\t\tzap.Error(err), zap.Bool(\"lightning is initialized\", LitInitialized))\n", "\t\treturn\n", "\t}\n", "\tLitSortPath = sPath\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota)\n", "\tLitRLimit = util.GenRLimit(\"ddl-ingest\")\n", "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tmemTotal, err := memory.MemTotal()\n", "\tif err != nil {\n", "\t\tlogutil.BgLogger().Warn(\"get total memory fail\", zap.Error(err))\n", "\t\tmemTotal = defaultMemoryQuota\n", "\t} else {\n", "\t\tmemTotal = memTotal / 2\n", "\t}\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, memTotal)\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 68 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ingest import ( "context" "os" "path/filepath" "strconv" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/size" "go.uber.org/zap" ) var ( // LitBackCtxMgr is the entry for the lightning backfill process. LitBackCtxMgr BackendCtxMgr // LitMemRoot is used to track the memory usage of the lightning backfill process. LitMemRoot MemRoot // LitDiskRoot is used to track the disk usage of the lightning backfill process. LitDiskRoot DiskRoot // LitRLimit is the max open file number of the lightning backfill process. LitRLimit uint64 // LitSortPath is the sort path for the lightning backfill process. LitSortPath string // LitInitialized is the flag indicates whether the lightning backfill process is initialized. LitInitialized bool ) const maxMemoryQuota = 2 * size.GB // InitGlobalLightningEnv initialize Lightning backfill environment. func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) { log.SetAppLogger(logutil.BgLogger()) globalCfg := config.GetGlobalConfig() if globalCfg.Store != "tikv" { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.String("storage limitation", "only support TiKV storage"), zap.String("current storage", globalCfg.Store), zap.Bool("lightning is initialized", LitInitialized)) return } sPath, err := genLightningDataDir() if err != nil { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.Error(err), zap.Bool("lightning is initialized", LitInitialized)) return } LitSortPath = sPath LitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota) LitRLimit = util.GenRLimit("ddl-ingest") LitInitialized = true logutil.BgLogger().Info(LitInfoEnvInitSucc, zap.String("category", "ddl-ingest"), zap.Uint64("memory limitation", maxMemoryQuota), zap.String("disk usage info", LitDiskRoot.UsageInfo()), zap.Uint64("max open file number", LitRLimit), zap.Bool("lightning is initialized", LitInitialized)) } // Generate lightning local store dir in TiDB data dir. // it will append -port to be tmp_ddl suffix. func genLightningDataDir() (string, error) { sortPath := ConfigSortPath() if _, err := os.Stat(sortPath); err != nil { if !os.IsNotExist(err) { logutil.BgLogger().Error(LitErrStatDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } } err := os.MkdirAll(sortPath, 0o700) if err != nil { logutil.BgLogger().Error(LitErrCreateDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } logutil.BgLogger().Info(LitInfoSortDir, zap.String("category", "ddl-ingest"), zap.String("data path:", sortPath)) return sortPath, nil } // ConfigSortPath returns the sort path for lightning. 
func ConfigSortPath() string { tidbCfg := config.GetGlobalConfig() sortPathSuffix := "/tmp_ddl-" + strconv.Itoa(int(tidbCfg.Port)) sortPath := filepath.Join(tidbCfg.TempDir, sortPathSuffix) return sortPath } // GenLightningDataDirForTest is only used for test. var GenLightningDataDirForTest = genLightningDataDir
pkg/ddl/ingest/env.go
1
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.9974685907363892, 0.17808888852596283, 0.00016469723777845502, 0.002979094162583351, 0.36755460500717163 ]
{ "id": 3, "code_window": [ "\t\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\t\tzap.Error(err), zap.Bool(\"lightning is initialized\", LitInitialized))\n", "\t\treturn\n", "\t}\n", "\tLitSortPath = sPath\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota)\n", "\tLitRLimit = util.GenRLimit(\"ddl-ingest\")\n", "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tmemTotal, err := memory.MemTotal()\n", "\tif err != nil {\n", "\t\tlogutil.BgLogger().Warn(\"get total memory fail\", zap.Error(err))\n", "\t\tmemTotal = defaultMemoryQuota\n", "\t} else {\n", "\t\tmemTotal = memTotal / 2\n", "\t}\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, memTotal)\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 68 }
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package core_test import ( "fmt" "strconv" "testing" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" ) func TestRowSizeInMPP(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("set tidb_cost_model_version=2") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a varchar(10), b varchar(20), c varchar(256))") tk.MustExec("insert into t values (space(10), space(20), space(256))") tk.MustExec("analyze table t") // Create virtual tiflash replica info. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() db, exists := is.SchemaByName(model.NewCIStr("test")) require.True(t, exists) for _, tblInfo := range db.Tables { if tblInfo.Name.L == "t" { tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ Count: 1, Available: true, } } } tk.MustExec(`set @@tidb_opt_tiflash_concurrency_factor=1`) tk.MustExec(`set @@tidb_allow_mpp=1`) var costs [3]float64 for i, col := range []string{"a", "b", "c"} { rs := tk.MustQuery(fmt.Sprintf(`explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ %v from t`, col)).Rows() cost, err := strconv.ParseFloat(rs[0][2].(string), 64) require.NoError(t, err) costs[i] = cost } require.True(t, costs[0] < costs[1] && costs[1] < costs[2]) // rowSize can affect the final cost }
pkg/planner/core/enforce_mpp_test.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00017806827963795513, 0.0001753234537318349, 0.00017029796435963362, 0.0001763290201779455, 0.0000026524512577452697 ]
{ "id": 3, "code_window": [ "\t\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\t\tzap.Error(err), zap.Bool(\"lightning is initialized\", LitInitialized))\n", "\t\treturn\n", "\t}\n", "\tLitSortPath = sPath\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota)\n", "\tLitRLimit = util.GenRLimit(\"ddl-ingest\")\n", "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tmemTotal, err := memory.MemTotal()\n", "\tif err != nil {\n", "\t\tlogutil.BgLogger().Warn(\"get total memory fail\", zap.Error(err))\n", "\t\tmemTotal = defaultMemoryQuota\n", "\t} else {\n", "\t\tmemTotal = memTotal / 2\n", "\t}\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, memTotal)\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 68 }
// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package statistics import ( "context" "encoding/json" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" ) // BuildExtendedStats build extended stats for column groups if needed based on the column samples. func BuildExtendedStats(sctx sessionctx.Context, tableID int64, cols []*model.ColumnInfo, collectors []*SampleCollector) (*ExtendedStatsColl, error) { const sql = "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)" sqlExec, ok := sctx.(sqlexec.RestrictedSQLExecutor) if !ok { return nil, errors.Errorf("invalid sql executor") } rows, _, err := sqlExec.ExecRestrictedSQL(kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats), nil, sql, tableID, ExtendedStatsAnalyzed, ExtendedStatsInited) if err != nil { return nil, errors.Trace(err) } if len(rows) == 0 { return nil, nil } statsColl := NewExtendedStatsColl() for _, row := range rows { name := row.GetString(0) item := &ExtendedStatsItem{Tp: uint8(row.GetInt64(1))} colIDs := row.GetString(2) err := json.Unmarshal([]byte(colIDs), &item.ColIDs) if err != nil { logutil.BgLogger().Error("invalid column_ids in mysql.stats_extended, skip collecting extended stats for this row", zap.String("column_ids", colIDs), zap.Error(err)) continue } item = fillExtendedStatsItemVals(sctx, item, cols, collectors) if item != nil { statsColl.Stats[name] = item } } if len(statsColl.Stats) == 0 { return nil, nil } return statsColl, nil } func fillExtendedStatsItemVals(sctx sessionctx.Context, item *ExtendedStatsItem, cols []*model.ColumnInfo, collectors []*SampleCollector) *ExtendedStatsItem { switch item.Tp { case ast.StatsTypeCardinality, ast.StatsTypeDependency: return nil case ast.StatsTypeCorrelation: return fillExtStatsCorrVals(sctx, item, cols, collectors) } return nil } func fillExtStatsCorrVals(sctx sessionctx.Context, item *ExtendedStatsItem, cols []*model.ColumnInfo, collectors []*SampleCollector) *ExtendedStatsItem { colOffsets := make([]int, 0, 2) for _, id := range item.ColIDs { for i, col := range cols { if col.ID == id { colOffsets = append(colOffsets, i) break } } } if len(colOffsets) != 2 { return nil } // samplesX and samplesY are in order of handle, i.e, their SampleItem.Ordinals are in order. samplesX := collectors[colOffsets[0]].Samples // We would modify Ordinal of samplesY, so we make a deep copy. 
samplesY := CopySampleItems(collectors[colOffsets[1]].Samples) sampleNum := min(len(samplesX), len(samplesY)) if sampleNum == 1 { item.ScalarVals = 1 return item } if sampleNum <= 0 { item.ScalarVals = 0 return item } sc := sctx.GetSessionVars().StmtCtx var err error samplesX, err = SortSampleItems(sc, samplesX) if err != nil { return nil } samplesYInXOrder := make([]*SampleItem, 0, sampleNum) for i, itemX := range samplesX { if itemX.Ordinal >= len(samplesY) { continue } itemY := samplesY[itemX.Ordinal] itemY.Ordinal = i samplesYInXOrder = append(samplesYInXOrder, itemY) } samplesYInYOrder, err := SortSampleItems(sc, samplesYInXOrder) if err != nil { return nil } var corrXYSum float64 for i := 1; i < len(samplesYInYOrder); i++ { corrXYSum += float64(i) * float64(samplesYInYOrder[i].Ordinal) } // X means the ordinal of the item in original sequence, Y means the oridnal of the item in the // sorted sequence, we know that X and Y value sets are both: // 0, 1, ..., sampleNum-1 // we can simply compute sum(X) = sum(Y) = // (sampleNum-1)*sampleNum / 2 // and sum(X^2) = sum(Y^2) = // (sampleNum-1)*sampleNum*(2*sampleNum-1) / 6 // We use "Pearson correlation coefficient" to compute the order correlation of columns, // the formula is based on https://en.wikipedia.org/wiki/Pearson_correlation_coefficient. // Note that (itemsCount*corrX2Sum - corrXSum*corrXSum) would never be zero when sampleNum is larger than 1. itemsCount := float64(sampleNum) corrXSum := (itemsCount - 1) * itemsCount / 2.0 corrX2Sum := (itemsCount - 1) * itemsCount * (2*itemsCount - 1) / 6.0 item.ScalarVals = (itemsCount*corrXYSum - corrXSum*corrXSum) / (itemsCount*corrX2Sum - corrXSum*corrXSum) return item }
pkg/statistics/builder_ext_stats.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.001174767385236919, 0.0002421854151180014, 0.00016625961870886385, 0.00017586442118044943, 0.0002493114734534174 ]
{ "id": 3, "code_window": [ "\t\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\t\tzap.Error(err), zap.Bool(\"lightning is initialized\", LitInitialized))\n", "\t\treturn\n", "\t}\n", "\tLitSortPath = sPath\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota)\n", "\tLitRLimit = util.GenRLimit(\"ddl-ingest\")\n", "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tmemTotal, err := memory.MemTotal()\n", "\tif err != nil {\n", "\t\tlogutil.BgLogger().Warn(\"get total memory fail\", zap.Error(err))\n", "\t\tmemTotal = defaultMemoryQuota\n", "\t} else {\n", "\t\tmemTotal = memTotal / 2\n", "\t}\n", "\tLitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, memTotal)\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 68 }
// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package driver import ( "context" "testing" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/stretchr/testify/require" ) type mockErrInterceptor struct { err error } func (m *mockErrInterceptor) OnGet(_ context.Context, _ kv.Snapshot, _ kv.Key) ([]byte, error) { return nil, m.err } func (m *mockErrInterceptor) OnBatchGet(_ context.Context, _ kv.Snapshot, _ []kv.Key) (map[string][]byte, error) { return nil, m.err } func (m *mockErrInterceptor) OnIter(_ kv.Snapshot, _ kv.Key, _ kv.Key) (kv.Iterator, error) { return nil, m.err } func (m *mockErrInterceptor) OnIterReverse(_ kv.Snapshot, _ kv.Key, _ kv.Key) (kv.Iterator, error) { return nil, m.err } func TestTxnGet(t *testing.T) { store, err := mockstore.NewMockStore() require.NoError(t, err) defer func() { require.NoError(t, store.Close()) }() clearStoreData(t, store) prepareSnapshot(t, store, [][]interface{}{{"k1", "v1"}}) txn, err := store.Begin() require.NoError(t, err) require.NotNil(t, txn) // should return snapshot value if no dirty data v, err := txn.Get(context.Background(), kv.Key("k1")) require.NoError(t, err) require.Equal(t, []byte("v1"), v) // insert but not commit err = txn.Set(kv.Key("k1"), kv.Key("v1+")) require.NoError(t, err) // should return dirty data if dirty data exists v, err = txn.Get(context.Background(), kv.Key("k1")) require.NoError(t, err) require.Equal(t, []byte("v1+"), v) err = txn.Set(kv.Key("k2"), []byte("v2+")) require.NoError(t, err) // should return dirty data if dirty data exists v, err = txn.Get(context.Background(), kv.Key("k2")) require.NoError(t, err) require.Equal(t, []byte("v2+"), v) // delete but not commit err = txn.Delete(kv.Key("k1")) require.NoError(t, err) // should return kv.ErrNotExist if deleted v, err = txn.Get(context.Background(), kv.Key("k1")) require.Nil(t, v) require.True(t, kv.ErrNotExist.Equal(err)) // should return kv.ErrNotExist if not exist v, err = txn.Get(context.Background(), kv.Key("kn")) require.Nil(t, v) require.True(t, kv.ErrNotExist.Equal(err)) // make snapshot returns error errInterceptor := &mockErrInterceptor{err: errors.New("error")} txn.SetOption(kv.SnapInterceptor, errInterceptor) // should return kv.ErrNotExist because k1 is deleted in memBuff v, err = txn.Get(context.Background(), kv.Key("k1")) require.Nil(t, v) require.True(t, kv.ErrNotExist.Equal(err)) // should return dirty data because k2 is in memBuff v, err = txn.Get(context.Background(), kv.Key("k2")) require.NoError(t, err) require.Equal(t, []byte("v2+"), v) // should return error because kn is read from snapshot v, err = txn.Get(context.Background(), kv.Key("kn")) require.Nil(t, v) require.Equal(t, errInterceptor.err, err) } func TestTxnBatchGet(t *testing.T) { store, err := mockstore.NewMockStore() require.NoError(t, err) defer func() { require.NoError(t, store.Close()) }() clearStoreData(t, store) prepareSnapshot(t, store, [][]interface{}{{"k1", "v1"}, {"k2", 
"v2"}, {"k3", "v3"}, {"k4", "v4"}}) txn, err := store.Begin() require.NoError(t, err) result, err := txn.BatchGet(context.Background(), []kv.Key{kv.Key("k1"), kv.Key("k2"), kv.Key("k3"), kv.Key("kn")}) require.NoError(t, err) require.Equal(t, 3, len(result)) require.Equal(t, []byte("v1"), result["k1"]) require.Equal(t, []byte("v2"), result["k2"]) require.Equal(t, []byte("v3"), result["k3"]) // make some dirty data err = txn.Set(kv.Key("k1"), []byte("v1+")) require.NoError(t, err) err = txn.Set(kv.Key("k4"), []byte("v4+")) require.NoError(t, err) err = txn.Delete(kv.Key("k2")) require.NoError(t, err) result, err = txn.BatchGet(context.Background(), []kv.Key{kv.Key("k1"), kv.Key("k2"), kv.Key("k3"), kv.Key("k4"), kv.Key("kn")}) require.NoError(t, err) require.Equal(t, 3, len(result)) require.Equal(t, []byte("v1+"), result["k1"]) require.Equal(t, []byte("v3"), result["k3"]) require.Equal(t, []byte("v4+"), result["k4"]) // return data if not read from snapshot result, err = txn.BatchGet(context.Background(), []kv.Key{kv.Key("k1"), kv.Key("k4")}) require.NoError(t, err) require.Equal(t, 2, len(result)) require.Equal(t, []byte("v1+"), result["k1"]) require.Equal(t, []byte("v4+"), result["k4"]) // make snapshot returns error errInterceptor := &mockErrInterceptor{err: errors.New("error")} txn.SetOption(kv.SnapInterceptor, errInterceptor) // fails if read from snapshot result, err = txn.BatchGet(context.Background(), []kv.Key{kv.Key("k3")}) require.Nil(t, result) require.Equal(t, errInterceptor.err, err) result, err = txn.BatchGet(context.Background(), []kv.Key{kv.Key("k1"), kv.Key("k3"), kv.Key("k4")}) require.Nil(t, result) require.Equal(t, errInterceptor.err, err) result, err = txn.BatchGet(context.Background(), []kv.Key{kv.Key("k1"), kv.Key("k4"), kv.Key("kn")}) require.Nil(t, result) require.Equal(t, errInterceptor.err, err) } func TestTxnScan(t *testing.T) { store, err := mockstore.NewMockStore() require.NoError(t, err) defer func() { require.NoError(t, store.Close()) }() clearStoreData(t, store) prepareSnapshot(t, store, [][]interface{}{{"k1", "v1"}, {"k3", "v3"}, {"k5", "v5"}, {"k7", "v7"}, {"k9", "v9"}}) txn, err := store.Begin() require.NoError(t, err) iter, err := txn.Iter(kv.Key("k3"), kv.Key("k9")) require.NoError(t, err) checkIter(t, iter, [][]interface{}{{"k3", "v3"}, {"k5", "v5"}, {"k7", "v7"}}) iter, err = txn.IterReverse(kv.Key("k9"), nil) require.NoError(t, err) checkIter(t, iter, [][]interface{}{{"k7", "v7"}, {"k5", "v5"}, {"k3", "v3"}, {"k1", "v1"}}) iter, err = txn.IterReverse(kv.Key("k9"), kv.Key("k3")) require.NoError(t, err) checkIter(t, iter, [][]interface{}{{"k7", "v7"}, {"k5", "v5"}, {"k3", "v3"}}) // make some dirty data err = txn.Set(kv.Key("k1"), []byte("v1+")) require.NoError(t, err) err = txn.Set(kv.Key("k3"), []byte("v3+")) require.NoError(t, err) err = txn.Set(kv.Key("k31"), []byte("v31+")) require.NoError(t, err) err = txn.Delete(kv.Key("k5")) require.NoError(t, err) iter, err = txn.Iter(kv.Key("k3"), kv.Key("k9")) require.NoError(t, err) checkIter(t, iter, [][]interface{}{{"k3", "v3+"}, {"k31", "v31+"}, {"k7", "v7"}}) iter, err = txn.IterReverse(kv.Key("k9"), nil) require.NoError(t, err) checkIter(t, iter, [][]interface{}{{"k7", "v7"}, {"k31", "v31+"}, {"k3", "v3+"}, {"k1", "v1+"}}) // make snapshot returns error errInterceptor := &mockErrInterceptor{err: errors.New("error")} txn.SetOption(kv.SnapInterceptor, errInterceptor) iter, err = txn.Iter(kv.Key("k1"), kv.Key("k2")) require.Equal(t, errInterceptor.err, err) require.Nil(t, iter) }
pkg/store/driver/txn_test.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00031626649433746934, 0.00018286447448190302, 0.00015709572471678257, 0.00017268018564209342, 0.00004251510472386144 ]
{ "id": 4, "code_window": [ "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\tzap.Uint64(\"memory limitation\", maxMemoryQuota),\n", "\t\tzap.String(\"disk usage info\", LitDiskRoot.UsageInfo()),\n", "\t\tzap.Uint64(\"max open file number\", LitRLimit),\n", "\t\tzap.Bool(\"lightning is initialized\", LitInitialized))\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tzap.Uint64(\"memory limitation\", memTotal),\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 73 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ingest import ( "context" "os" "path/filepath" "strconv" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/size" "go.uber.org/zap" ) var ( // LitBackCtxMgr is the entry for the lightning backfill process. LitBackCtxMgr BackendCtxMgr // LitMemRoot is used to track the memory usage of the lightning backfill process. LitMemRoot MemRoot // LitDiskRoot is used to track the disk usage of the lightning backfill process. LitDiskRoot DiskRoot // LitRLimit is the max open file number of the lightning backfill process. LitRLimit uint64 // LitSortPath is the sort path for the lightning backfill process. LitSortPath string // LitInitialized is the flag indicates whether the lightning backfill process is initialized. LitInitialized bool ) const maxMemoryQuota = 2 * size.GB // InitGlobalLightningEnv initialize Lightning backfill environment. func InitGlobalLightningEnv(ctx context.Context, sctx sessionctx.Context) { log.SetAppLogger(logutil.BgLogger()) globalCfg := config.GetGlobalConfig() if globalCfg.Store != "tikv" { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.String("storage limitation", "only support TiKV storage"), zap.String("current storage", globalCfg.Store), zap.Bool("lightning is initialized", LitInitialized)) return } sPath, err := genLightningDataDir() if err != nil { logutil.BgLogger().Warn(LitWarnEnvInitFail, zap.String("category", "ddl-ingest"), zap.Error(err), zap.Bool("lightning is initialized", LitInitialized)) return } LitSortPath = sPath LitBackCtxMgr = newLitBackendCtxMgr(ctx, sctx, LitSortPath, maxMemoryQuota) LitRLimit = util.GenRLimit("ddl-ingest") LitInitialized = true logutil.BgLogger().Info(LitInfoEnvInitSucc, zap.String("category", "ddl-ingest"), zap.Uint64("memory limitation", maxMemoryQuota), zap.String("disk usage info", LitDiskRoot.UsageInfo()), zap.Uint64("max open file number", LitRLimit), zap.Bool("lightning is initialized", LitInitialized)) } // Generate lightning local store dir in TiDB data dir. // it will append -port to be tmp_ddl suffix. func genLightningDataDir() (string, error) { sortPath := ConfigSortPath() if _, err := os.Stat(sortPath); err != nil { if !os.IsNotExist(err) { logutil.BgLogger().Error(LitErrStatDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } } err := os.MkdirAll(sortPath, 0o700) if err != nil { logutil.BgLogger().Error(LitErrCreateDirFail, zap.String("category", "ddl-ingest"), zap.String("sort path", sortPath), zap.Error(err)) return "", err } logutil.BgLogger().Info(LitInfoSortDir, zap.String("category", "ddl-ingest"), zap.String("data path:", sortPath)) return sortPath, nil } // ConfigSortPath returns the sort path for lightning. 
func ConfigSortPath() string { tidbCfg := config.GetGlobalConfig() sortPathSuffix := "/tmp_ddl-" + strconv.Itoa(int(tidbCfg.Port)) sortPath := filepath.Join(tidbCfg.TempDir, sortPathSuffix) return sortPath } // GenLightningDataDirForTest is only used for test. var GenLightningDataDirForTest = genLightningDataDir
pkg/ddl/ingest/env.go
1
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.9970500469207764, 0.0856340155005455, 0.00016494051669724286, 0.0010847629746422172, 0.2748211622238159 ]
{ "id": 4, "code_window": [ "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\tzap.Uint64(\"memory limitation\", maxMemoryQuota),\n", "\t\tzap.String(\"disk usage info\", LitDiskRoot.UsageInfo()),\n", "\t\tzap.Uint64(\"max open file number\", LitRLimit),\n", "\t\tzap.Bool(\"lightning is initialized\", LitInitialized))\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tzap.Uint64(\"memory limitation\", memTotal),\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 73 }
// Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "context" "crypto/tls" "fmt" "strings" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/extension" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/planner/core" servererr "github.com/pingcap/tidb/pkg/server/err" "github.com/pingcap/tidb/pkg/server/internal/column" "github.com/pingcap/tidb/pkg/server/internal/resultset" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/sessionstates" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/pingcap/tidb/pkg/util/topsql/stmtstats" ) // TiDBDriver implements IDriver. type TiDBDriver struct { store kv.Storage } // NewTiDBDriver creates a new TiDBDriver. func NewTiDBDriver(store kv.Storage) *TiDBDriver { driver := &TiDBDriver{ store: store, } return driver } // TiDBContext implements QueryCtx. type TiDBContext struct { session.Session stmts map[int]*TiDBStatement } // TiDBStatement implements PreparedStatement. type TiDBStatement struct { id uint32 numParams int boundParams [][]byte paramsType []byte ctx *TiDBContext // this result set should have been closed before stored here. Only the `rowIterator` are used here. This field is // not moved out to reuse the logic inside functions `writeResultSet...` // TODO: move the `fetchedRows` into the statement, and remove the `ResultSet` from statement. rs resultset.CursorResultSet // the `rowContainer` should contain all pre-fetched results of the statement in `EXECUTE` command. // it's stored here to be closed in RESET and CLOSE command rowContainer *chunk.RowContainer sql string hasActiveCursor bool } // ID implements PreparedStatement ID method. func (ts *TiDBStatement) ID() int { return int(ts.id) } // Execute implements PreparedStatement Execute method. func (ts *TiDBStatement) Execute(ctx context.Context, args []expression.Expression) (rs resultset.ResultSet, err error) { tidbRecordset, err := ts.ctx.ExecutePreparedStmt(ctx, ts.id, args) if err != nil { return nil, err } if tidbRecordset == nil { return } rs = resultset.New(tidbRecordset, ts.ctx.GetSessionVars().PreparedStmts[ts.id].(*core.PlanCacheStmt)) return } // AppendParam implements PreparedStatement AppendParam method. func (ts *TiDBStatement) AppendParam(paramID int, data []byte) error { if paramID >= len(ts.boundParams) { return mysql.NewErr(mysql.ErrWrongArguments, "stmt_send_longdata") } // If len(data) is 0, append an empty byte slice to the end to distinguish no data and no parameter. if len(data) == 0 { ts.boundParams[paramID] = []byte{} } else { ts.boundParams[paramID] = append(ts.boundParams[paramID], data...) } return nil } // NumParams implements PreparedStatement NumParams method. 
func (ts *TiDBStatement) NumParams() int { return ts.numParams } // BoundParams implements PreparedStatement BoundParams method. func (ts *TiDBStatement) BoundParams() [][]byte { return ts.boundParams } // SetParamsType implements PreparedStatement SetParamsType method. func (ts *TiDBStatement) SetParamsType(paramsType []byte) { ts.paramsType = paramsType } // GetParamsType implements PreparedStatement GetParamsType method. func (ts *TiDBStatement) GetParamsType() []byte { return ts.paramsType } // StoreResultSet stores ResultSet for stmt fetching func (ts *TiDBStatement) StoreResultSet(rs resultset.CursorResultSet) { // the original reset set should have been closed, and it's only used to store the iterator through the rowContainer // so it's fine to just overwrite it. ts.rs = rs } // GetResultSet gets ResultSet associated this statement func (ts *TiDBStatement) GetResultSet() resultset.CursorResultSet { return ts.rs } // Reset implements PreparedStatement Reset method. func (ts *TiDBStatement) Reset() error { for i := range ts.boundParams { ts.boundParams[i] = nil } ts.hasActiveCursor = false if ts.rs != nil && ts.rs.GetRowContainerReader() != nil { ts.rs.GetRowContainerReader().Close() } ts.rs = nil if ts.rowContainer != nil { ts.rowContainer.GetMemTracker().Detach() ts.rowContainer.GetDiskTracker().Detach() rc := ts.rowContainer ts.rowContainer = nil err := rc.Close() if err != nil { return err } } return nil } // Close implements PreparedStatement Close method. func (ts *TiDBStatement) Close() error { if ts.rs != nil && ts.rs.GetRowContainerReader() != nil { ts.rs.GetRowContainerReader().Close() } if ts.rowContainer != nil { ts.rowContainer.GetMemTracker().Detach() ts.rowContainer.GetDiskTracker().Detach() err := ts.rowContainer.Close() if err != nil { return err } } // TODO close at tidb level if ts.ctx.GetSessionVars().TxnCtx != nil && ts.ctx.GetSessionVars().TxnCtx.CouldRetry { err := ts.ctx.DropPreparedStmt(ts.id) if err != nil { return err } } else { if ts.ctx.GetSessionVars().EnablePreparedPlanCache { preparedPointer := ts.ctx.GetSessionVars().PreparedStmts[ts.id] preparedObj, ok := preparedPointer.(*core.PlanCacheStmt) if !ok { return errors.Errorf("invalid PlanCacheStmt type") } bindSQL, _ := core.GetBindSQL4PlanCache(ts.ctx, preparedObj) cacheKey, err := core.NewPlanCacheKey(ts.ctx.GetSessionVars(), preparedObj.StmtText, preparedObj.StmtDB, preparedObj.PreparedAst.SchemaVersion, 0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load()) if err != nil { return err } if !ts.ctx.GetSessionVars().IgnorePreparedCacheCloseStmt { // keep the plan in cache ts.ctx.GetSessionPlanCache().Delete(cacheKey) } } ts.ctx.GetSessionVars().RemovePreparedStmt(ts.id) } delete(ts.ctx.stmts, int(ts.id)) return nil } // GetCursorActive implements PreparedStatement GetCursorActive method. func (ts *TiDBStatement) GetCursorActive() bool { return ts.hasActiveCursor } // SetCursorActive implements PreparedStatement SetCursorActive method. func (ts *TiDBStatement) SetCursorActive(fetchEnd bool) { ts.hasActiveCursor = fetchEnd } // StoreRowContainer stores a row container into the prepared statement func (ts *TiDBStatement) StoreRowContainer(c *chunk.RowContainer) { ts.rowContainer = c } // GetRowContainer returns the row container of the statement func (ts *TiDBStatement) GetRowContainer() *chunk.RowContainer { return ts.rowContainer } // OpenCtx implements IDriver. 
func (qd *TiDBDriver) OpenCtx(connID uint64, capability uint32, collation uint8, _ string, tlsState *tls.ConnectionState, extensions *extension.SessionExtensions) (*TiDBContext, error) { se, err := session.CreateSession(qd.store) if err != nil { return nil, err } se.SetTLSState(tlsState) err = se.SetCollation(int(collation)) if err != nil { return nil, err } se.SetClientCapability(capability) se.SetConnectionID(connID) tc := &TiDBContext{ Session: se, stmts: make(map[int]*TiDBStatement), } se.SetSessionStatesHandler(sessionstates.StatePrepareStmt, tc) se.SetExtensions(extensions) return tc, nil } // GetWarnings implements QueryCtx GetWarnings method. func (tc *TiDBContext) GetWarnings() []stmtctx.SQLWarn { return tc.GetSessionVars().StmtCtx.GetWarnings() } // WarningCount implements QueryCtx WarningCount method. func (tc *TiDBContext) WarningCount() uint16 { return tc.GetSessionVars().StmtCtx.WarningCount() } func (tc *TiDBContext) checkSandBoxMode(stmt ast.StmtNode) error { if !tc.Session.GetSessionVars().InRestrictedSQL && tc.InSandBoxMode() { switch stmt.(type) { case *ast.SetPwdStmt, *ast.AlterUserStmt: default: return servererr.ErrMustChangePassword.GenWithStackByArgs() } } return nil } // ExecuteStmt implements QueryCtx interface. func (tc *TiDBContext) ExecuteStmt(ctx context.Context, stmt ast.StmtNode) (resultset.ResultSet, error) { var rs sqlexec.RecordSet var err error if err = tc.checkSandBoxMode(stmt); err != nil { return nil, err } if s, ok := stmt.(*ast.NonTransactionalDMLStmt); ok { rs, err = session.HandleNonTransactionalDML(ctx, s, tc.Session) } else { rs, err = tc.Session.ExecuteStmt(ctx, stmt) } if err != nil { tc.Session.GetSessionVars().StmtCtx.AppendError(err) return nil, err } if rs == nil { return nil, nil } return resultset.New(rs, nil), nil } // Close implements QueryCtx Close method. func (tc *TiDBContext) Close() error { // close PreparedStatement associated with this connection for _, v := range tc.stmts { terror.Call(v.Close) } tc.Session.Close() return nil } // FieldList implements QueryCtx FieldList method. func (tc *TiDBContext) FieldList(table string) (columns []*column.Info, err error) { fields, err := tc.Session.FieldList(table) if err != nil { return nil, err } columns = make([]*column.Info, 0, len(fields)) for _, f := range fields { columns = append(columns, column.ConvertColumnInfo(f)) } return columns, nil } // GetStatement implements QueryCtx GetStatement method. func (tc *TiDBContext) GetStatement(stmtID int) PreparedStatement { tcStmt := tc.stmts[stmtID] if tcStmt != nil { return tcStmt } return nil } // Prepare implements QueryCtx Prepare method. func (tc *TiDBContext) Prepare(sql string) (statement PreparedStatement, columns, params []*column.Info, err error) { stmtID, paramCount, fields, err := tc.Session.PrepareStmt(sql) if err != nil { return } stmt := &TiDBStatement{ sql: sql, id: stmtID, numParams: paramCount, boundParams: make([][]byte, paramCount), ctx: tc, } statement = stmt columns = make([]*column.Info, len(fields)) for i := range fields { columns[i] = column.ConvertColumnInfo(fields[i]) } params = make([]*column.Info, paramCount) for i := range params { params[i] = &column.Info{ Type: mysql.TypeBlob, } } tc.stmts[int(stmtID)] = stmt return } // GetStmtStats implements the sessionctx.Context interface. func (tc *TiDBContext) GetStmtStats() *stmtstats.StatementStats { return tc.Session.GetStmtStats() } // EncodeSessionStates implements SessionStatesHandler.EncodeSessionStates interface. 
func (tc *TiDBContext) EncodeSessionStates(_ context.Context, _ sessionctx.Context, sessionStates *sessionstates.SessionStates) error { sessionVars := tc.Session.GetSessionVars() sessionStates.PreparedStmts = make(map[uint32]*sessionstates.PreparedStmtInfo, len(sessionVars.PreparedStmts)) for preparedID, preparedObj := range sessionVars.PreparedStmts { preparedStmt, ok := preparedObj.(*core.PlanCacheStmt) if !ok { return errors.Errorf("invalid PlanCacheStmt type") } sessionStates.PreparedStmts[preparedID] = &sessionstates.PreparedStmtInfo{ StmtText: preparedStmt.StmtText, StmtDB: preparedStmt.StmtDB, } } for name, id := range sessionVars.PreparedStmtNameToID { // Only text protocol statements have names. if preparedStmtInfo, ok := sessionStates.PreparedStmts[id]; ok { preparedStmtInfo.Name = name } } for id, stmt := range tc.stmts { // Only binary protocol statements have paramTypes. preparedStmtInfo, ok := sessionStates.PreparedStmts[uint32(id)] if !ok { return errors.Errorf("prepared statement %d not found", id) } // Bound params are sent by CMD_STMT_SEND_LONG_DATA, the proxy can wait for COM_STMT_EXECUTE. for _, boundParam := range stmt.BoundParams() { if boundParam != nil { return sessionstates.ErrCannotMigrateSession.GenWithStackByArgs("prepared statements have bound params") } } if stmt.GetCursorActive() { return sessionstates.ErrCannotMigrateSession.GenWithStackByArgs("prepared statements have unfetched rows") } preparedStmtInfo.ParamTypes = stmt.GetParamsType() } return nil } // DecodeSessionStates implements SessionStatesHandler.DecodeSessionStates interface. func (tc *TiDBContext) DecodeSessionStates(ctx context.Context, _ sessionctx.Context, sessionStates *sessionstates.SessionStates) error { if len(sessionStates.PreparedStmts) == 0 { return nil } sessionVars := tc.Session.GetSessionVars() savedPreparedStmtID := sessionVars.GetNextPreparedStmtID() savedCurrentDB := sessionVars.CurrentDB defer func() { sessionVars.SetNextPreparedStmtID(savedPreparedStmtID - 1) sessionVars.CurrentDB = savedCurrentDB }() for id, preparedStmtInfo := range sessionStates.PreparedStmts { // Set the next id and currentDB manually. sessionVars.SetNextPreparedStmtID(id - 1) sessionVars.CurrentDB = preparedStmtInfo.StmtDB if preparedStmtInfo.Name == "" { // Binary protocol: add to sessionVars.PreparedStmts and TiDBContext.stmts. stmt, _, _, err := tc.Prepare(preparedStmtInfo.StmtText) if err != nil { return err } // Only binary protocol uses paramsType, which is passed from the first COM_STMT_EXECUTE. stmt.SetParamsType(preparedStmtInfo.ParamTypes) } else { // Text protocol: add to sessionVars.PreparedStmts and sessionVars.PreparedStmtNameToID. stmtText := strings.ReplaceAll(preparedStmtInfo.StmtText, "\\", "\\\\") stmtText = strings.ReplaceAll(stmtText, "'", "\\'") // Add single quotes because the sql_mode might contain ANSI_QUOTES. sql := fmt.Sprintf("PREPARE `%s` FROM '%s'", preparedStmtInfo.Name, stmtText) stmts, err := tc.Parse(ctx, sql) if err != nil { return err } if _, err = tc.ExecuteStmt(ctx, stmts[0]); err != nil { return err } } } return nil }
pkg/server/driver_tidb.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.0007817887235432863, 0.00018465348694007844, 0.0001608111779205501, 0.00017229426885023713, 0.00008911928307497874 ]
{ "id": 4, "code_window": [ "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\tzap.Uint64(\"memory limitation\", maxMemoryQuota),\n", "\t\tzap.String(\"disk usage info\", LitDiskRoot.UsageInfo()),\n", "\t\tzap.Uint64(\"max open file number\", LitRLimit),\n", "\t\tzap.Bool(\"lightning is initialized\", LitInitialized))\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tzap.Uint64(\"memory limitation\", memTotal),\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 73 }
// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package copr import ( "context" "math/rand" "runtime" "sync/atomic" "time" "github.com/pingcap/errors" tidb_config "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/store/driver/backoff" derr "github.com/pingcap/tidb/pkg/store/driver/error" "github.com/tikv/client-go/v2/config" "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/tikvrpc" ) type kvStore struct { store *tikv.KVStore mppStoreCnt *mppStoreCnt } // GetRegionCache returns the region cache instance. func (s *kvStore) GetRegionCache() *RegionCache { return &RegionCache{s.store.GetRegionCache()} } // CheckVisibility checks if it is safe to read using given ts. func (s *kvStore) CheckVisibility(startTime uint64) error { err := s.store.CheckVisibility(startTime) return derr.ToTiDBErr(err) } // GetTiKVClient gets the client instance. func (s *kvStore) GetTiKVClient() tikv.Client { client := s.store.GetTiKVClient() return &tikvClient{c: client} } type tikvClient struct { c tikv.Client } func (c *tikvClient) Close() error { err := c.c.Close() return derr.ToTiDBErr(err) } func (c *tikvClient) CloseAddr(addr string) error { err := c.c.CloseAddr(addr) return derr.ToTiDBErr(err) } // SendRequest sends Request. func (c *tikvClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { res, err := c.c.SendRequest(ctx, addr, req, timeout) return res, derr.ToTiDBErr(err) } // Store wraps tikv.KVStore and provides coprocessor utilities. type Store struct { *kvStore coprCache *coprCache replicaReadSeed uint32 numcpu int } // NewStore creates a new store instance. func NewStore(s *tikv.KVStore, coprCacheConfig *config.CoprocessorCache) (*Store, error) { coprCache, err := newCoprCache(coprCacheConfig) if err != nil { return nil, errors.Trace(err) } /* #nosec G404 */ return &Store{ kvStore: &kvStore{store: s, mppStoreCnt: &mppStoreCnt{}}, coprCache: coprCache, replicaReadSeed: rand.Uint32(), numcpu: runtime.GOMAXPROCS(0), }, nil } // Close releases resources allocated for coprocessor. func (s *Store) Close() { if s.coprCache != nil { s.coprCache.cache.Close() } } func (s *Store) nextReplicaReadSeed() uint32 { return atomic.AddUint32(&s.replicaReadSeed, 1) } // GetClient gets a client instance. func (s *Store) GetClient() kv.Client { return &CopClient{ store: s, replicaReadSeed: s.nextReplicaReadSeed(), } } // GetMPPClient gets a mpp client instance. func (s *Store) GetMPPClient() kv.MPPClient { return &MPPClient{ store: s.kvStore, } } func getEndPointType(t kv.StoreType) tikvrpc.EndpointType { switch t { case kv.TiKV: return tikvrpc.TiKV case kv.TiFlash: if tidb_config.GetGlobalConfig().DisaggregatedTiFlash { return tikvrpc.TiFlashCompute } return tikvrpc.TiFlash case kv.TiDB: return tikvrpc.TiDB default: return tikvrpc.TiKV } } // Backoffer wraps tikv.Backoffer and converts the error which returns by the functions of tikv.Backoffer to tidb error. 
type Backoffer = backoff.Backoffer
pkg/store/copr/store.go
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00017864123219624162, 0.0001709375937934965, 0.00015902513405308127, 0.00017323216889053583, 0.000005189610874367645 ]
{ "id": 4, "code_window": [ "\tLitInitialized = true\n", "\tlogutil.BgLogger().Info(LitInfoEnvInitSucc,\n", "\t\tzap.String(\"category\", \"ddl-ingest\"),\n", "\t\tzap.Uint64(\"memory limitation\", maxMemoryQuota),\n", "\t\tzap.String(\"disk usage info\", LitDiskRoot.UsageInfo()),\n", "\t\tzap.Uint64(\"max open file number\", LitRLimit),\n", "\t\tzap.Bool(\"lightning is initialized\", LitInitialized))\n", "}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tzap.Uint64(\"memory limitation\", memTotal),\n" ], "file_path": "pkg/ddl/ingest/env.go", "type": "replace", "edit_start_line_idx": 73 }
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") go_library( name = "goyacc_lib", srcs = [ "format_yacc.go", "main.go", ], importpath = "github.com/pingcap/tidb/pkg/parser/goyacc", visibility = ["//visibility:private"], deps = [ "//pkg/parser/format", "@com_github_cznic_mathutil//:mathutil", "@com_github_cznic_sortutil//:sortutil", "@com_github_cznic_strutil//:strutil", "@com_github_pingcap_errors//:errors", "@org_golang_x_exp//slices", "@org_modernc_parser//yacc", "@org_modernc_y//:y", ], ) go_binary( name = "goyacc", embed = [":goyacc_lib"], visibility = ["//visibility:public"], )
pkg/parser/goyacc/BUILD.bazel
0
https://github.com/pingcap/tidb/commit/5fe83fcc8ee476ac904e868042eea56529febc81
[ 0.00017745351942721754, 0.00017678877338767052, 0.00017547415336593986, 0.00017743863281793892, 9.295932272834762e-7 ]
{ "id": 0, "code_window": [ " \"stmtctx_test.go\",\n", " ],\n", " embed = [\":stmtctx\"],\n", " flaky = True,\n", " shard_count = 11,\n", " deps = [\n", " \"//pkg/kv\",\n", " \"//pkg/sessionctx/variable\",\n", " \"//pkg/testkit\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " shard_count = 12,\n" ], "file_path": "pkg/sessionctx/stmtctx/BUILD.bazel", "type": "replace", "edit_start_line_idx": 43 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stmtctx import ( "bytes" "encoding/json" "fmt" "io" "math" "slices" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain/resourcegroup" "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/linter/constructor" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/nocopy" "github.com/pingcap/tidb/pkg/util/resourcegrouptag" "github.com/pingcap/tidb/pkg/util/topsql/stmtstats" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/tikv/client-go/v2/tikvrpc" "github.com/tikv/client-go/v2/util" atomic2 "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/exp/maps" ) const ( // WarnLevelError represents level "Error" for 'SHOW WARNINGS' syntax. WarnLevelError = "Error" // WarnLevelWarning represents level "Warning" for 'SHOW WARNINGS' syntax. WarnLevelWarning = "Warning" // WarnLevelNote represents level "Note" for 'SHOW WARNINGS' syntax. WarnLevelNote = "Note" ) var taskIDAlloc uint64 // AllocateTaskID allocates a new unique ID for a statement execution func AllocateTaskID() uint64 { return atomic.AddUint64(&taskIDAlloc, 1) } // SQLWarn relates a sql warning and it's level. type SQLWarn struct { Level string Err error } type jsonSQLWarn struct { Level string `json:"level"` SQLErr *terror.Error `json:"err,omitempty"` Msg string `json:"msg,omitempty"` } // MarshalJSON implements the Marshaler.MarshalJSON interface. func (warn *SQLWarn) MarshalJSON() ([]byte, error) { w := &jsonSQLWarn{ Level: warn.Level, } e := errors.Cause(warn.Err) switch x := e.(type) { case *terror.Error: // Omit outter errors because only the most inner error matters. w.SQLErr = x default: w.Msg = e.Error() } return json.Marshal(w) } // UnmarshalJSON implements the Unmarshaler.UnmarshalJSON interface. func (warn *SQLWarn) UnmarshalJSON(data []byte) error { var w jsonSQLWarn if err := json.Unmarshal(data, &w); err != nil { return err } warn.Level = w.Level if w.SQLErr != nil { warn.Err = w.SQLErr } else { warn.Err = errors.New(w.Msg) } return nil } // ReferenceCount indicates the reference count of StmtCtx. type ReferenceCount int32 const ( // ReferenceCountIsFrozen indicates the current StmtCtx is resetting, it'll refuse all the access from other sessions. ReferenceCountIsFrozen int32 = -1 // ReferenceCountNoReference indicates the current StmtCtx is not accessed by other sessions. ReferenceCountNoReference int32 = 0 ) // TryIncrease tries to increase the reference count. 
// There is a small chance that TryIncrease returns true while TryFreeze and // UnFreeze are invoked successfully during the execution of TryIncrease. func (rf *ReferenceCount) TryIncrease() bool { refCnt := atomic.LoadInt32((*int32)(rf)) for ; refCnt != ReferenceCountIsFrozen && !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt+1); refCnt = atomic.LoadInt32((*int32)(rf)) { } return refCnt != ReferenceCountIsFrozen } // Decrease decreases the reference count. func (rf *ReferenceCount) Decrease() { for refCnt := atomic.LoadInt32((*int32)(rf)); !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt-1); refCnt = atomic.LoadInt32((*int32)(rf)) { } } // TryFreeze tries to freeze the StmtCtx to frozen before resetting the old StmtCtx. func (rf *ReferenceCount) TryFreeze() bool { return atomic.LoadInt32((*int32)(rf)) == ReferenceCountNoReference && atomic.CompareAndSwapInt32((*int32)(rf), ReferenceCountNoReference, ReferenceCountIsFrozen) } // UnFreeze unfreeze the frozen StmtCtx thus the other session can access this StmtCtx. func (rf *ReferenceCount) UnFreeze() { atomic.StoreInt32((*int32)(rf), ReferenceCountNoReference) } var stmtCtxIDGenerator atomic.Uint64 // StatementContext contains variables for a statement. // It should be reset before executing a statement. type StatementContext struct { // NoCopy indicates that this struct cannot be copied because // copying this object will make the copied TypeCtx field to refer a wrong `AppendWarnings` func. _ nocopy.NoCopy _ constructor.Constructor `ctor:"NewStmtCtx,NewStmtCtxWithTimeZone,Reset"` ctxID uint64 // typeCtx is used to indicate how to make the type conversation. typeCtx types.Context // errCtx is used to indicate how to handle the errors errCtx errctx.Context // Set the following variables before execution StmtHints // IsDDLJobInQueue is used to mark whether the DDL job is put into the queue. // If IsDDLJobInQueue is true, it means the DDL job is in the queue of storage, and it can be handled by the DDL worker. IsDDLJobInQueue bool DDLJobID int64 InInsertStmt bool InUpdateStmt bool InDeleteStmt bool InSelectStmt bool InLoadDataStmt bool InExplainStmt bool InExplainAnalyzeStmt bool ExplainFormat string InCreateOrAlterStmt bool InSetSessionStatesStmt bool InPreparedPlanBuilding bool DupKeyAsWarning bool BadNullAsWarning bool DividedByZeroAsWarning bool ErrAutoincReadFailedAsWarning bool InShowWarning bool UseCache bool CacheType PlanCacheType BatchCheck bool InNullRejectCheck bool IgnoreNoPartition bool IgnoreExplainIDSuffix bool MultiSchemaInfo *model.MultiSchemaInfo // If the select statement was like 'select * from t as of timestamp ...' or in a stale read transaction // or is affected by the tidb_read_staleness session variable, then the statement will be makred as isStaleness // in stmtCtx IsStaleness bool InRestrictedSQL bool ViewDepth int32 // mu struct holds variables that change during execution. mu struct { sync.Mutex affectedRows uint64 foundRows uint64 /* following variables are ported from 'COPY_INFO' struct of MySQL server source, they are used to count rows for INSERT/REPLACE/UPDATE queries: If a row is inserted then the copied variable is incremented. If a row is updated by the INSERT ... ON DUPLICATE KEY UPDATE and the new data differs from the old one then the copied and the updated variables are incremented. The touched variable is incremented if a row was touched by the update part of the INSERT ... ON DUPLICATE KEY UPDATE no matter whether the row was actually changed or not. 
see https://github.com/mysql/mysql-server/blob/d2029238d6d9f648077664e4cdd611e231a6dc14/sql/sql_data_change.h#L60 for more details */ records uint64 deleted uint64 updated uint64 copied uint64 touched uint64 message string warnings []SQLWarn // extraWarnings record the extra warnings and are only used by the slow log only now. // If a warning is expected to be output only under some conditions (like in EXPLAIN or EXPLAIN VERBOSE) but it's // not under such conditions now, it is considered as an extra warning. // extraWarnings would not be printed through SHOW WARNINGS, but we want to always output them through the slow // log to help diagnostics, so we store them here separately. extraWarnings []SQLWarn execDetails execdetails.ExecDetails detailsSummary execdetails.P90Summary } // PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows). PrevAffectedRows int64 // PrevLastInsertID is the last insert ID of previous statement. PrevLastInsertID uint64 // LastInsertID is the auto-generated ID in the current statement. LastInsertID uint64 // InsertID is the given insert ID of an auto_increment column. InsertID uint64 BaseRowID int64 MaxRowID int64 // Copied from SessionVars.TimeZone. Priority mysql.PriorityEnum NotFillCache bool MemTracker *memory.Tracker DiskTracker *disk.Tracker // per statement resource group name // hint /* +ResourceGroup(name) */ can change the statement group name ResourceGroupName string RunawayChecker *resourcegroup.RunawayChecker IsTiFlash atomic2.Bool RuntimeStatsColl *execdetails.RuntimeStatsColl TableIDs []int64 IndexNames []string StmtType string OriginalSQL string digestMemo struct { sync.Once normalized string digest *parser.Digest } // BindSQL used to construct the key for plan cache. It records the binding used by the stmt. // If the binding is not used by the stmt, the value is empty BindSQL string // The several fields below are mainly for some diagnostic features, like stmt summary and slow query. // We cache the values here to avoid calculating them multiple times. // Note: // Avoid accessing these fields directly, use their Setter/Getter methods instead. // Other fields should be the zero value or be consistent with the plan field. // TODO: more clearly distinguish between the value is empty and the value has not been set planNormalized string planDigest *parser.Digest encodedPlan string planHint string planHintSet bool binaryPlan string // To avoid cycle import, we use interface{} for the following two fields. // flatPlan should be a *plannercore.FlatPhysicalPlan if it's not nil flatPlan interface{} // plan should be a plannercore.Plan if it's not nil plan interface{} Tables []TableEntry PointExec bool // for point update cached execution, Constant expression need to set "paramMarker" lockWaitStartTime int64 // LockWaitStartTime stores the pessimistic lock wait start time PessimisticLockWaited int32 LockKeysDuration int64 LockKeysCount int32 LockTableIDs map[int64]struct{} // table IDs need to be locked, empty for lock all tables TblInfo2UnionScan map[*model.TableInfo]bool TaskID uint64 // unique ID for an execution of a statement TaskMapBakTS uint64 // counter for // stmtCache is used to store some statement-related values. // add mutex to protect stmtCache concurrent access // https://github.com/pingcap/tidb/issues/36159 stmtCache struct { mu sync.Mutex data map[StmtCacheKey]interface{} } // Map to store all CTE storages of current SQL. // Will clean up at the end of the execution. 
CTEStorageMap interface{} SetVarHintRestore map[string]string // If the statement read from table cache, this flag is set. ReadFromTableCache bool // cache is used to reduce object allocation. cache struct { execdetails.RuntimeStatsColl MemTracker memory.Tracker DiskTracker disk.Tracker LogOnExceed [2]memory.LogOnExceed } // InVerboseExplain indicates the statement is "explain format='verbose' ...". InVerboseExplain bool // EnableOptimizeTrace indicates whether enable optimizer trace by 'trace plan statement' EnableOptimizeTrace bool // OptimizeTracer indicates the tracer for optimize OptimizeTracer *tracing.OptimizeTracer // EnableOptimizerCETrace indicate if cardinality estimation internal process needs to be traced. // CE Trace is currently a submodule of the optimizer trace and is controlled by a separated option. EnableOptimizerCETrace bool OptimizerCETrace []*tracing.CETraceRecord EnableOptimizerDebugTrace bool OptimizerDebugTrace interface{} // WaitLockLeaseTime is the duration of cached table read lease expiration time. WaitLockLeaseTime time.Duration // KvExecCounter is created from SessionVars.StmtStats to count the number of SQL // executions of the kv layer during the current execution of the statement. // Its life cycle is limited to this execution, and a new KvExecCounter is // always created during each statement execution. KvExecCounter *stmtstats.KvExecCounter // WeakConsistency is true when read consistency is weak and in a read statement and not in a transaction. WeakConsistency bool StatsLoad struct { // Timeout to wait for sync-load Timeout time.Duration // NeededItems stores the columns/indices whose stats are needed for planner. NeededItems []model.TableItemID // ResultCh to receive stats loading results ResultCh chan StatsLoadResult // LoadStartTime is to record the load start time to calculate latency LoadStartTime time.Time } // SysdateIsNow indicates whether sysdate() is an alias of now() in this statement SysdateIsNow bool // RCCheckTS indicates the current read-consistency read select statement will use `RCCheckTS` path. RCCheckTS bool // IsSQLRegistered uses to indicate whether the SQL has been registered for TopSQL. IsSQLRegistered atomic2.Bool // IsSQLAndPlanRegistered uses to indicate whether the SQL and plan has been registered for TopSQL. IsSQLAndPlanRegistered atomic2.Bool // IsReadOnly uses to indicate whether the SQL is read-only. IsReadOnly bool // usedStatsInfo records version of stats of each table used in the query. // It's a map of table physical id -> *UsedStatsInfoForTable usedStatsInfo map[int64]*UsedStatsInfoForTable // IsSyncStatsFailed indicates whether any failure happened during sync stats IsSyncStatsFailed bool // UseDynamicPruneMode indicates whether use UseDynamicPruneMode in query stmt UseDynamicPruneMode bool // ColRefFromPlan mark the column ref used by assignment in update statement. ColRefFromUpdatePlan []int64 // RangeFallback indicates that building complete ranges exceeds the memory limit so it falls back to less accurate ranges such as full range. RangeFallback bool // IsExplainAnalyzeDML is true if the statement is "explain analyze DML executors", before responding the explain // results to the client, the transaction should be committed first. See issue #37373 for more details. IsExplainAnalyzeDML bool // InHandleForeignKeyTrigger indicates currently are handling foreign key trigger. InHandleForeignKeyTrigger bool // ForeignKeyTriggerCtx is the contain information for foreign key cascade execution. 
ForeignKeyTriggerCtx struct { // The SavepointName is use to do rollback when handle foreign key cascade failed. SavepointName string HasFKCascades bool } // MPPQueryInfo stores some id and timestamp of current MPP query statement. MPPQueryInfo struct { QueryID atomic2.Uint64 QueryTS atomic2.Uint64 AllocatedMPPTaskID atomic2.Int64 AllocatedMPPGatherID atomic2.Uint64 } // TableStats stores the visited runtime table stats by table id during query TableStats map[int64]interface{} // useChunkAlloc indicates whether statement use chunk alloc useChunkAlloc bool // Check if TiFlash read engine is removed due to strict sql mode. TiFlashEngineRemovedDueToStrictSQLMode bool // StaleTSOProvider is used to provide stale timestamp oracle for read-only transactions. StaleTSOProvider struct { sync.Mutex value *uint64 eval func() (uint64, error) } } // NewStmtCtx creates a new statement context func NewStmtCtx() *StatementContext { return NewStmtCtxWithTimeZone(time.UTC) } // NewStmtCtxWithTimeZone creates a new StatementContext with the given timezone func NewStmtCtxWithTimeZone(tz *time.Location) *StatementContext { intest.AssertNotNil(tz) sc := &StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), } sc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc) return sc } // Reset resets a statement context func (sc *StatementContext) Reset() { *sc = StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), typeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc), } } // CtxID returns the context id of the statement func (sc *StatementContext) CtxID() uint64 { return sc.ctxID } // TimeZone returns the timezone of the type context func (sc *StatementContext) TimeZone() *time.Location { intest.AssertNotNil(sc) if sc == nil { return time.UTC } return sc.typeCtx.Location() } // SetTimeZone sets the timezone func (sc *StatementContext) SetTimeZone(tz *time.Location) { intest.AssertNotNil(tz) sc.typeCtx = sc.typeCtx.WithLocation(tz) } // TypeCtx returns the type context func (sc *StatementContext) TypeCtx() types.Context { return sc.typeCtx } // ErrCtx returns the error context // TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime. func (sc *StatementContext) ErrCtx() errctx.Context { ctx := errctx.NewContext(sc) if sc.TypeFlags().IgnoreTruncateErr() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore) } else if sc.TypeFlags().TruncateAsWarning() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn) } return ctx } // TypeFlags returns the type flags func (sc *StatementContext) TypeFlags() types.Flags { return sc.typeCtx.Flags() } // SetTypeFlags sets the type flags func (sc *StatementContext) SetTypeFlags(flags types.Flags) { sc.typeCtx = sc.typeCtx.WithFlags(flags) } // HandleTruncate ignores or returns the error based on the TypeContext inside. // TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect. 
func (sc *StatementContext) HandleTruncate(err error) error { return sc.typeCtx.HandleTruncate(err) } // HandleError handles the error based on `ErrCtx()` func (sc *StatementContext) HandleError(err error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleError(err) } // HandleErrorWithAlias handles the error based on `ErrCtx()` func (sc *StatementContext) HandleErrorWithAlias(internalErr, err, warnErr error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleErrorWithAlias(internalErr, err, warnErr) } // StmtHints are SessionVars related sql hints. type StmtHints struct { // Hint Information MemQuotaQuery int64 MaxExecutionTime uint64 ReplicaRead byte AllowInSubqToJoinAndAgg bool NoIndexMergeHint bool StraightJoinOrder bool // EnableCascadesPlanner is use cascades planner for a single query only. EnableCascadesPlanner bool // ForceNthPlan indicates the PlanCounterTp number for finding physical plan. // -1 for disable. ForceNthPlan int64 ResourceGroup string // Hint flags HasAllowInSubqToJoinAndAggHint bool HasMemQuotaHint bool HasReplicaReadHint bool HasMaxExecutionTime bool HasEnableCascadesPlannerHint bool HasResourceGroup bool SetVars map[string]string // the original table hints OriginalTableHints []*ast.TableOptimizerHint } // TaskMapNeedBackUp indicates that whether we need to back up taskMap during physical optimizing. func (sh *StmtHints) TaskMapNeedBackUp() bool { return sh.ForceNthPlan != -1 } // Clone the StmtHints struct and returns the pointer of the new one. func (sh *StmtHints) Clone() *StmtHints { var ( vars map[string]string tableHints []*ast.TableOptimizerHint ) if len(sh.SetVars) > 0 { vars = make(map[string]string, len(sh.SetVars)) for k, v := range sh.SetVars { vars[k] = v } } if len(sh.OriginalTableHints) > 0 { tableHints = make([]*ast.TableOptimizerHint, len(sh.OriginalTableHints)) copy(tableHints, sh.OriginalTableHints) } return &StmtHints{ MemQuotaQuery: sh.MemQuotaQuery, MaxExecutionTime: sh.MaxExecutionTime, ReplicaRead: sh.ReplicaRead, AllowInSubqToJoinAndAgg: sh.AllowInSubqToJoinAndAgg, NoIndexMergeHint: sh.NoIndexMergeHint, StraightJoinOrder: sh.StraightJoinOrder, EnableCascadesPlanner: sh.EnableCascadesPlanner, ForceNthPlan: sh.ForceNthPlan, ResourceGroup: sh.ResourceGroup, HasAllowInSubqToJoinAndAggHint: sh.HasAllowInSubqToJoinAndAggHint, HasMemQuotaHint: sh.HasMemQuotaHint, HasReplicaReadHint: sh.HasReplicaReadHint, HasMaxExecutionTime: sh.HasMaxExecutionTime, HasEnableCascadesPlannerHint: sh.HasEnableCascadesPlannerHint, HasResourceGroup: sh.HasResourceGroup, SetVars: vars, OriginalTableHints: tableHints, } } // StmtCacheKey represents the key type in the StmtCache. type StmtCacheKey int const ( // StmtNowTsCacheKey is a variable for now/current_timestamp calculation/cache of one stmt. StmtNowTsCacheKey StmtCacheKey = iota // StmtSafeTSCacheKey is a variable for safeTS calculation/cache of one stmt. StmtSafeTSCacheKey // StmtExternalTSCacheKey is a variable for externalTS calculation/cache of one stmt. StmtExternalTSCacheKey ) // GetOrStoreStmtCache gets the cached value of the given key if it exists, otherwise stores the value. 
func (sc *StatementContext) GetOrStoreStmtCache(key StmtCacheKey, value interface{}) interface{} { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { sc.stmtCache.data[key] = value } return sc.stmtCache.data[key] } // GetOrEvaluateStmtCache gets the cached value of the given key if it exists, otherwise calculate the value. func (sc *StatementContext) GetOrEvaluateStmtCache(key StmtCacheKey, valueEvaluator func() (interface{}, error)) (interface{}, error) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { value, err := valueEvaluator() if err != nil { return nil, err } sc.stmtCache.data[key] = value } return sc.stmtCache.data[key], nil } // ResetInStmtCache resets the cache of given key. func (sc *StatementContext) ResetInStmtCache(key StmtCacheKey) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() delete(sc.stmtCache.data, key) } // ResetStmtCache resets all cached values. func (sc *StatementContext) ResetStmtCache() { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } // SQLDigest gets normalized and digest for provided sql. // it will cache result after first calling. func (sc *StatementContext) SQLDigest() (normalized string, sqlDigest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(sc.OriginalSQL) }) return sc.digestMemo.normalized, sc.digestMemo.digest } // InitSQLDigest sets the normalized and digest for sql. func (sc *StatementContext) InitSQLDigest(normalized string, digest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = normalized, digest }) } // ResetSQLDigest sets the normalized and digest for sql anyway, **DO NOT USE THIS UNLESS YOU KNOW WHAT YOU ARE DOING NOW**. func (sc *StatementContext) ResetSQLDigest(s string) { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(s) } // GetPlanDigest gets the normalized plan and plan digest. func (sc *StatementContext) GetPlanDigest() (normalized string, planDigest *parser.Digest) { return sc.planNormalized, sc.planDigest } // GetPlan gets the plan field of stmtctx func (sc *StatementContext) GetPlan() interface{} { return sc.plan } // SetPlan sets the plan field of stmtctx func (sc *StatementContext) SetPlan(plan interface{}) { sc.plan = plan } // GetFlatPlan gets the flatPlan field of stmtctx func (sc *StatementContext) GetFlatPlan() interface{} { return sc.flatPlan } // SetFlatPlan sets the flatPlan field of stmtctx func (sc *StatementContext) SetFlatPlan(flat interface{}) { sc.flatPlan = flat } // GetBinaryPlan gets the binaryPlan field of stmtctx func (sc *StatementContext) GetBinaryPlan() string { return sc.binaryPlan } // SetBinaryPlan sets the binaryPlan field of stmtctx func (sc *StatementContext) SetBinaryPlan(binaryPlan string) { sc.binaryPlan = binaryPlan } // GetResourceGroupTagger returns the implementation of tikvrpc.ResourceGroupTagger related to self. 
func (sc *StatementContext) GetResourceGroupTagger() tikvrpc.ResourceGroupTagger { normalized, digest := sc.SQLDigest() planDigest := sc.planDigest return func(req *tikvrpc.Request) { if req == nil { return } if len(normalized) == 0 { return } req.ResourceGroupTag = resourcegrouptag.EncodeResourceGroupTag(digest, planDigest, resourcegrouptag.GetResourceGroupLabelByKey(resourcegrouptag.GetFirstKeyFromRequest(req))) } } // SetUseChunkAlloc set use chunk alloc status func (sc *StatementContext) SetUseChunkAlloc() { sc.useChunkAlloc = true } // ClearUseChunkAlloc clear useChunkAlloc status func (sc *StatementContext) ClearUseChunkAlloc() { sc.useChunkAlloc = false } // GetUseChunkAllocStatus returns useChunkAlloc status func (sc *StatementContext) GetUseChunkAllocStatus() bool { return sc.useChunkAlloc } // SetPlanDigest sets the normalized plan and plan digest. func (sc *StatementContext) SetPlanDigest(normalized string, planDigest *parser.Digest) { if planDigest != nil { sc.planNormalized, sc.planDigest = normalized, planDigest } } // GetEncodedPlan gets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) GetEncodedPlan() string { return sc.encodedPlan } // SetEncodedPlan sets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) SetEncodedPlan(encodedPlan string) { sc.encodedPlan = encodedPlan } // GetPlanHint gets the hint string generated from the plan. func (sc *StatementContext) GetPlanHint() (string, bool) { return sc.planHint, sc.planHintSet } // InitDiskTracker initializes the sc.DiskTracker, use cache to avoid allocation. func (sc *StatementContext) InitDiskTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.DiskTracker, label, bytesLimit, &sc.cache.LogOnExceed[0]) sc.DiskTracker = &sc.cache.DiskTracker } // InitMemTracker initializes the sc.MemTracker, use cache to avoid allocation. func (sc *StatementContext) InitMemTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.MemTracker, label, bytesLimit, &sc.cache.LogOnExceed[1]) sc.MemTracker = &sc.cache.MemTracker } // SetPlanHint sets the hint for the plan. func (sc *StatementContext) SetPlanHint(hint string) { sc.planHintSet = true sc.planHint = hint } // PlanCacheType is the flag of plan cache type PlanCacheType int const ( // DefaultNoCache no cache DefaultNoCache PlanCacheType = iota // SessionPrepared session prepared plan cache SessionPrepared // SessionNonPrepared session non-prepared plan cache SessionNonPrepared ) // SetSkipPlanCache sets to skip the plan cache and records the reason. func (sc *StatementContext) SetSkipPlanCache(reason error) { if !sc.UseCache { return // avoid unnecessary warnings } sc.UseCache = false switch sc.CacheType { case DefaultNoCache: sc.AppendWarning(errors.NewNoStackError("unknown cache type")) case SessionPrepared: sc.AppendWarning(errors.NewNoStackErrorf("skip prepared plan-cache: %s", reason.Error())) case SessionNonPrepared: if sc.InExplainStmt && sc.ExplainFormat == "plan_cache" { // use "plan_cache" rather than types.ExplainFormatPlanCache to avoid import cycle sc.AppendWarning(errors.NewNoStackErrorf("skip non-prepared plan-cache: %s", reason.Error())) } } } // TableEntry presents table in db. type TableEntry struct { DB string Table string } // AddAffectedRows adds affected rows. func (sc *StatementContext) AddAffectedRows(rows uint64) { if sc.InHandleForeignKeyTrigger { // For compatibility with MySQL, not add the affected row cause by the foreign key trigger. 
return } sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows += rows } // SetAffectedRows sets affected rows. func (sc *StatementContext) SetAffectedRows(rows uint64) { sc.mu.Lock() sc.mu.affectedRows = rows sc.mu.Unlock() } // AffectedRows gets affected rows. func (sc *StatementContext) AffectedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.affectedRows } // FoundRows gets found rows. func (sc *StatementContext) FoundRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.foundRows } // AddFoundRows adds found rows. func (sc *StatementContext) AddFoundRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.foundRows += rows } // RecordRows is used to generate info message func (sc *StatementContext) RecordRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.records } // AddRecordRows adds record rows. func (sc *StatementContext) AddRecordRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.records += rows } // DeletedRows is used to generate info message func (sc *StatementContext) DeletedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.deleted } // AddDeletedRows adds record rows. func (sc *StatementContext) AddDeletedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.deleted += rows } // UpdatedRows is used to generate info message func (sc *StatementContext) UpdatedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.updated } // AddUpdatedRows adds updated rows. func (sc *StatementContext) AddUpdatedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.updated += rows } // CopiedRows is used to generate info message func (sc *StatementContext) CopiedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.copied } // AddCopiedRows adds copied rows. func (sc *StatementContext) AddCopiedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.copied += rows } // TouchedRows is used to generate info message func (sc *StatementContext) TouchedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.touched } // AddTouchedRows adds touched rows. func (sc *StatementContext) AddTouchedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.touched += rows } // GetMessage returns the extra message of the last executed command, if there is no message, it returns empty string func (sc *StatementContext) GetMessage() string { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.message } // SetMessage sets the info message generated by some commands func (sc *StatementContext) SetMessage(msg string) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.message = msg } // GetWarnings gets warnings. func (sc *StatementContext) GetWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.warnings } // TruncateWarnings truncates warnings begin from start and returns the truncated warnings. func (sc *StatementContext) TruncateWarnings(start int) []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() sz := len(sc.mu.warnings) - start if sz <= 0 { return nil } ret := make([]SQLWarn, sz) copy(ret, sc.mu.warnings[start:]) sc.mu.warnings = sc.mu.warnings[:start] return ret } // WarningCount gets warning count. func (sc *StatementContext) WarningCount() uint16 { if sc.InShowWarning { return 0 } sc.mu.Lock() defer sc.mu.Unlock() return uint16(len(sc.mu.warnings)) } // NumErrorWarnings gets warning and error count. 
func (sc *StatementContext) NumErrorWarnings() (ec uint16, wc int) { sc.mu.Lock() defer sc.mu.Unlock() for _, w := range sc.mu.warnings { if w.Level == WarnLevelError { ec++ } } wc = len(sc.mu.warnings) return } // SetWarnings sets warnings. func (sc *StatementContext) SetWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.warnings = warns } // AppendWarning appends a warning with level 'Warning'. func (sc *StatementContext) AppendWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendWarnings appends some warnings. func (sc *StatementContext) AppendWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, warns...) } } // AppendNote appends a warning with level 'Note'. func (sc *StatementContext) AppendNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelNote, warn}) } } // AppendError appends a warning with level 'Error'. func (sc *StatementContext) AppendError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelError, warn}) } } // GetExtraWarnings gets extra warnings. func (sc *StatementContext) GetExtraWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.extraWarnings } // SetExtraWarnings sets extra warnings. func (sc *StatementContext) SetExtraWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.extraWarnings = warns } // AppendExtraWarning appends an extra warning with level 'Warning'. func (sc *StatementContext) AppendExtraWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendExtraNote appends an extra warning with level 'Note'. func (sc *StatementContext) AppendExtraNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelNote, warn}) } } // AppendExtraError appends an extra warning with level 'Error'. func (sc *StatementContext) AppendExtraError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelError, warn}) } } // resetMuForRetry resets the changed states of sc.mu during execution. func (sc *StatementContext) resetMuForRetry() { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows = 0 sc.mu.foundRows = 0 sc.mu.records = 0 sc.mu.deleted = 0 sc.mu.updated = 0 sc.mu.copied = 0 sc.mu.touched = 0 sc.mu.message = "" sc.mu.warnings = nil sc.mu.execDetails = execdetails.ExecDetails{} sc.mu.detailsSummary.Reset() } // ResetForRetry resets the changed states during execution. func (sc *StatementContext) ResetForRetry() { sc.resetMuForRetry() sc.MaxRowID = 0 sc.BaseRowID = 0 sc.TableIDs = sc.TableIDs[:0] sc.IndexNames = sc.IndexNames[:0] sc.TaskID = AllocateTaskID() } // MergeExecDetails merges a single region execution details into self, used to print // the information in slow query log. 
func (sc *StatementContext) MergeExecDetails(details *execdetails.ExecDetails, commitDetails *util.CommitDetails) { sc.mu.Lock() defer sc.mu.Unlock() if details != nil { sc.mu.execDetails.CopTime += details.CopTime sc.mu.execDetails.BackoffTime += details.BackoffTime sc.mu.execDetails.RequestCount++ sc.MergeScanDetail(details.ScanDetail) sc.MergeTimeDetail(details.TimeDetail) detail := &execdetails.DetailsNeedP90{ BackoffSleep: details.BackoffSleep, BackoffTimes: details.BackoffTimes, CalleeAddress: details.CalleeAddress, TimeDetail: details.TimeDetail, } sc.mu.detailsSummary.Merge(detail) } if commitDetails != nil { if sc.mu.execDetails.CommitDetail == nil { sc.mu.execDetails.CommitDetail = commitDetails } else { sc.mu.execDetails.CommitDetail.Merge(commitDetails) } } } // MergeScanDetail merges scan details into self. func (sc *StatementContext) MergeScanDetail(scanDetail *util.ScanDetail) { // Currently TiFlash cop task does not fill scanDetail, so need to skip it if scanDetail is nil if scanDetail == nil { return } if sc.mu.execDetails.ScanDetail == nil { sc.mu.execDetails.ScanDetail = &util.ScanDetail{} } sc.mu.execDetails.ScanDetail.Merge(scanDetail) } // MergeTimeDetail merges time details into self. func (sc *StatementContext) MergeTimeDetail(timeDetail util.TimeDetail) { sc.mu.execDetails.TimeDetail.ProcessTime += timeDetail.ProcessTime sc.mu.execDetails.TimeDetail.WaitTime += timeDetail.WaitTime } // MergeLockKeysExecDetails merges lock keys execution details into self. func (sc *StatementContext) MergeLockKeysExecDetails(lockKeys *util.LockKeysDetails) { sc.mu.Lock() defer sc.mu.Unlock() if sc.mu.execDetails.LockKeysDetail == nil { sc.mu.execDetails.LockKeysDetail = lockKeys } else { sc.mu.execDetails.LockKeysDetail.Merge(lockKeys) } } // GetExecDetails gets the execution details for the statement. func (sc *StatementContext) GetExecDetails() execdetails.ExecDetails { var details execdetails.ExecDetails sc.mu.Lock() defer sc.mu.Unlock() details = sc.mu.execDetails details.LockKeysDuration = time.Duration(atomic.LoadInt64(&sc.LockKeysDuration)) return details } // PushDownFlags converts StatementContext to tipb.SelectRequest.Flags. func (sc *StatementContext) PushDownFlags() uint64 { var flags uint64 if sc.InInsertStmt { flags |= model.FlagInInsertStmt } else if sc.InUpdateStmt || sc.InDeleteStmt { flags |= model.FlagInUpdateOrDeleteStmt } else if sc.InSelectStmt { flags |= model.FlagInSelectStmt } if sc.TypeFlags().IgnoreTruncateErr() { flags |= model.FlagIgnoreTruncate } else if sc.TypeFlags().TruncateAsWarning() { flags |= model.FlagTruncateAsWarning // TODO: remove this flag from TiKV. flags |= model.FlagOverflowAsWarning } if sc.TypeFlags().IgnoreZeroInDate() { flags |= model.FlagIgnoreZeroInDate } if sc.DividedByZeroAsWarning { flags |= model.FlagDividedByZeroAsWarning } if sc.InLoadDataStmt { flags |= model.FlagInLoadDataStmt } if sc.InRestrictedSQL { flags |= model.FlagInRestrictedSQL } return flags } // CopTasksDetails returns some useful information of cop-tasks during execution. 
func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { sc.mu.Lock() defer sc.mu.Unlock() n := sc.mu.detailsSummary.NumCopTasks d := &CopTasksDetails{ NumCopTasks: n, MaxBackoffTime: make(map[string]time.Duration), AvgBackoffTime: make(map[string]time.Duration), P90BackoffTime: make(map[string]time.Duration), TotBackoffTime: make(map[string]time.Duration), TotBackoffTimes: make(map[string]int), MaxBackoffAddress: make(map[string]string), } if n == 0 { return d } d.AvgProcessTime = sc.mu.execDetails.TimeDetail.ProcessTime / time.Duration(n) d.AvgWaitTime = sc.mu.execDetails.TimeDetail.WaitTime / time.Duration(n) d.P90ProcessTime = time.Duration((sc.mu.detailsSummary.ProcessTimePercentile.GetPercentile(0.9))) d.MaxProcessTime = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().D d.MaxProcessAddress = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().Addr d.P90WaitTime = time.Duration((sc.mu.detailsSummary.WaitTimePercentile.GetPercentile(0.9))) d.MaxWaitTime = sc.mu.detailsSummary.WaitTimePercentile.GetMax().D d.MaxWaitAddress = sc.mu.detailsSummary.WaitTimePercentile.GetMax().Addr for backoff, items := range sc.mu.detailsSummary.BackoffInfo { if items == nil { continue } n := items.ReqTimes d.MaxBackoffAddress[backoff] = items.BackoffPercentile.GetMax().Addr d.MaxBackoffTime[backoff] = items.BackoffPercentile.GetMax().D d.P90BackoffTime[backoff] = time.Duration(items.BackoffPercentile.GetPercentile(0.9)) d.AvgBackoffTime[backoff] = items.TotBackoffTime / time.Duration(n) d.TotBackoffTime[backoff] = items.TotBackoffTime d.TotBackoffTimes[backoff] = items.TotBackoffTimes } return d } // InitFromPBFlagAndTz set the flag and timezone of StatementContext from a `tipb.SelectRequest.Flags` and `*time.Location`. func (sc *StatementContext) InitFromPBFlagAndTz(flags uint64, tz *time.Location) { sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 sc.SetTimeZone(tz) sc.SetTypeFlags(types.DefaultStmtFlags. WithIgnoreTruncateErr((flags & model.FlagIgnoreTruncate) > 0). WithTruncateAsWarning((flags & model.FlagTruncateAsWarning) > 0). WithIgnoreZeroInDate((flags & model.FlagIgnoreZeroInDate) > 0). WithAllowNegativeToUnsigned(!sc.InInsertStmt)) } // GetLockWaitStartTime returns the statement pessimistic lock wait start time func (sc *StatementContext) GetLockWaitStartTime() time.Time { startTime := atomic.LoadInt64(&sc.lockWaitStartTime) if startTime == 0 { startTime = time.Now().UnixNano() atomic.StoreInt64(&sc.lockWaitStartTime, startTime) } return time.Unix(0, startTime) } // RecordRangeFallback records range fallback. func (sc *StatementContext) RecordRangeFallback(rangeMaxSize int64) { // If range fallback happens, it means ether the query is unreasonable(for example, several long IN lists) or tidb_opt_range_max_size is too small // and the generated plan is probably suboptimal. In that case we don't put it into plan cache. if sc.UseCache { sc.SetSkipPlanCache(errors.Errorf("in-list is too long")) } if !sc.RangeFallback { sc.AppendWarning(errors.Errorf("Memory capacity of %v bytes for 'tidb_opt_range_max_size' exceeded when building ranges. 
Less accurate ranges such as full range are chosen", rangeMaxSize)) sc.RangeFallback = true } } // UseDynamicPartitionPrune indicates whether dynamic partition is used during the query func (sc *StatementContext) UseDynamicPartitionPrune() bool { return sc.UseDynamicPruneMode } // DetachMemDiskTracker detaches the memory and disk tracker from the sessionTracker. func (sc *StatementContext) DetachMemDiskTracker() { if sc == nil { return } if sc.MemTracker != nil { sc.MemTracker.Detach() } if sc.DiskTracker != nil { sc.DiskTracker.Detach() } } // SetStaleTSOProvider sets the stale TSO provider. func (sc *StatementContext) SetStaleTSOProvider(eval func() (uint64, error)) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() sc.StaleTSOProvider.value = nil sc.StaleTSOProvider.eval = eval } // GetStaleTSO returns the TSO for stale-read usage which calculate from PD's last response. func (sc *StatementContext) GetStaleTSO() (uint64, error) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() if sc.StaleTSOProvider.value != nil { return *sc.StaleTSOProvider.value, nil } if sc.StaleTSOProvider.eval == nil { return 0, nil } tso, err := sc.StaleTSOProvider.eval() if err != nil { return 0, err } sc.StaleTSOProvider.value = &tso return tso, nil } // AddSetVarHintRestore records the variables which are affected by SET_VAR hint. And restore them to the old value later. func (sc *StatementContext) AddSetVarHintRestore(name, val string) { if sc.SetVarHintRestore == nil { sc.SetVarHintRestore = make(map[string]string) } sc.SetVarHintRestore[name] = val } // CopTasksDetails collects some useful information of cop-tasks during execution. type CopTasksDetails struct { NumCopTasks int AvgProcessTime time.Duration P90ProcessTime time.Duration MaxProcessAddress string MaxProcessTime time.Duration AvgWaitTime time.Duration P90WaitTime time.Duration MaxWaitAddress string MaxWaitTime time.Duration MaxBackoffTime map[string]time.Duration MaxBackoffAddress map[string]string AvgBackoffTime map[string]time.Duration P90BackoffTime map[string]time.Duration TotBackoffTime map[string]time.Duration TotBackoffTimes map[string]int } // ToZapFields wraps the CopTasksDetails as zap.Fileds. func (d *CopTasksDetails) ToZapFields() (fields []zap.Field) { if d.NumCopTasks == 0 { return } fields = make([]zap.Field, 0, 10) fields = append(fields, zap.Int("num_cop_tasks", d.NumCopTasks)) fields = append(fields, zap.String("process_avg_time", strconv.FormatFloat(d.AvgProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_p90_time", strconv.FormatFloat(d.P90ProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_time", strconv.FormatFloat(d.MaxProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_addr", d.MaxProcessAddress)) fields = append(fields, zap.String("wait_avg_time", strconv.FormatFloat(d.AvgWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_p90_time", strconv.FormatFloat(d.P90WaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_time", strconv.FormatFloat(d.MaxWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_addr", d.MaxWaitAddress)) return fields } // GetUsedStatsInfo returns the map for recording the used stats during query. // If initIfNil is true, it will initialize it when this map is nil. 
func (sc *StatementContext) GetUsedStatsInfo(initIfNil bool) map[int64]*UsedStatsInfoForTable { if sc.usedStatsInfo == nil && initIfNil { sc.usedStatsInfo = make(map[int64]*UsedStatsInfoForTable) } return sc.usedStatsInfo } // RecordedStatsLoadStatusCnt returns the total number of recorded column/index stats status, which is not full loaded. func (sc *StatementContext) RecordedStatsLoadStatusCnt() (cnt int) { allStatus := sc.GetUsedStatsInfo(false) for _, status := range allStatus { if status == nil { continue } cnt += status.recordedColIdxCount() } return } // TypeCtxOrDefault returns the reference to the `TypeCtx` inside the statement context. // If the statement context is nil, it'll return a newly created default type context. // **don't** use this function if you can make sure the `sc` is not nil. We should limit the usage of this function as // little as possible. func (sc *StatementContext) TypeCtxOrDefault() types.Context { if sc != nil { return sc.typeCtx } return types.DefaultStmtNoWarningContext } // UsedStatsInfoForTable records stats that are used during query and their information. type UsedStatsInfoForTable struct { Name string TblInfo *model.TableInfo Version uint64 RealtimeCount int64 ModifyCount int64 ColumnStatsLoadStatus map[int64]string IndexStatsLoadStatus map[int64]string } // FormatForExplain format the content in the format expected to be printed in the execution plan. // case 1: if stats version is 0, print stats:pseudo. // case 2: if stats version is not 0, and there are column/index stats that are not full loaded, // print stats:partial, then print status of 3 column/index status at most. For the rest, only // the count will be printed, in the format like (more: 1 onlyCmsEvicted, 2 onlyHistRemained). func (s *UsedStatsInfoForTable) FormatForExplain() string { // statistics.PseudoVersion == 0 if s.Version == 0 { return "stats:pseudo" } var b strings.Builder if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) == 0 { return "" } b.WriteString("stats:partial") outputNumsLeft := 3 statusCnt := make(map[string]uint64, 1) var strs []string strs = append(strs, s.collectFromColOrIdxStatus(false, &outputNumsLeft, statusCnt)...) strs = append(strs, s.collectFromColOrIdxStatus(true, &outputNumsLeft, statusCnt)...) b.WriteString("[") b.WriteString(strings.Join(strs, ", ")) if len(statusCnt) > 0 { b.WriteString("...(more: ") keys := maps.Keys(statusCnt) slices.Sort(keys) var cntStrs []string for _, key := range keys { cntStrs = append(cntStrs, strconv.FormatUint(statusCnt[key], 10)+" "+key) } b.WriteString(strings.Join(cntStrs, ", ")) b.WriteString(")") } b.WriteString("]") return b.String() } // WriteToSlowLog format the content in the format expected to be printed to the slow log, then write to w. // The format is table name partition name:version[realtime row count;modify count][index load status][column load status]. func (s *UsedStatsInfoForTable) WriteToSlowLog(w io.Writer) { ver := "pseudo" // statistics.PseudoVersion == 0 if s.Version != 0 { ver = strconv.FormatUint(s.Version, 10) } fmt.Fprintf(w, "%s:%s[%d;%d]", s.Name, ver, s.RealtimeCount, s.ModifyCount) if ver == "pseudo" { return } if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) > 0 { fmt.Fprintf(w, "[%s][%s]", strings.Join(s.collectFromColOrIdxStatus(false, nil, nil), ","), strings.Join(s.collectFromColOrIdxStatus(true, nil, nil), ","), ) } } // collectFromColOrIdxStatus prints the status of column or index stats to a slice // of the string in the format of "col/idx name:status". 
// If outputNumsLeft is not nil, this function will output outputNumsLeft column/index // status at most, the rest will be counted in statusCnt, which is a map of status->count. func (s *UsedStatsInfoForTable) collectFromColOrIdxStatus( forColumn bool, outputNumsLeft *int, statusCnt map[string]uint64, ) []string { var status map[int64]string if forColumn { status = s.ColumnStatsLoadStatus } else { status = s.IndexStatsLoadStatus } keys := maps.Keys(status) slices.Sort(keys) strs := make([]string, 0, len(status)) for _, id := range keys { if outputNumsLeft == nil || *outputNumsLeft > 0 { var name string if s.TblInfo != nil { if forColumn { name = s.TblInfo.FindColumnNameByID(id) } else { name = s.TblInfo.FindIndexNameByID(id) } } if len(name) == 0 { name = "ID " + strconv.FormatInt(id, 10) } strs = append(strs, name+":"+status[id]) if outputNumsLeft != nil { *outputNumsLeft-- } } else if statusCnt != nil { statusCnt[status[id]] = statusCnt[status[id]] + 1 } } return strs } func (s *UsedStatsInfoForTable) recordedColIdxCount() int { return len(s.IndexStatsLoadStatus) + len(s.ColumnStatsLoadStatus) } // StatsLoadResult indicates result for StatsLoad type StatsLoadResult struct { Item model.TableItemID Error error } // HasError returns whether result has error func (r StatsLoadResult) HasError() bool { return r.Error != nil } // ErrorMsg returns StatsLoadResult err msg func (r StatsLoadResult) ErrorMsg() string { if r.Error == nil { return "" } b := bytes.NewBufferString("tableID:") b.WriteString(strconv.FormatInt(r.Item.TableID, 10)) b.WriteString(", id:") b.WriteString(strconv.FormatInt(r.Item.ID, 10)) b.WriteString(", isIndex:") b.WriteString(strconv.FormatBool(r.Item.IsIndex)) b.WriteString(", err:") b.WriteString(r.Error.Error()) return b.String() }
pkg/sessionctx/stmtctx/stmtctx.go
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.015588043257594109, 0.0008386305999010801, 0.0001622531417524442, 0.00017301179468631744, 0.002303197281435132 ]
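The StatementContext in the file above memoizes per-statement values (current timestamp, safe TS, external TS) behind GetOrStoreStmtCache / GetOrEvaluateStmtCache. A minimal caller sketch follows; it assumes the package is importable as github.com/pingcap/tidb/pkg/sessionctx/stmtctx (derived from the file path recorded for this record) and is an illustration, not code taken from the commit.

package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
)

func main() {
	sc := stmtctx.NewStmtCtx()

	// First call runs the evaluator and caches the result under the key.
	v, err := sc.GetOrEvaluateStmtCache(stmtctx.StmtNowTsCacheKey, func() (interface{}, error) {
		return time.Now(), nil
	})
	if err != nil {
		panic(err)
	}
	first := v.(time.Time)

	// A second lookup with the same key returns the cached value; this new
	// evaluator is never invoked.
	v, _ = sc.GetOrEvaluateStmtCache(stmtctx.StmtNowTsCacheKey, func() (interface{}, error) {
		return time.Time{}, nil
	})
	fmt.Println(first.Equal(v.(time.Time))) // true

	// ResetInStmtCache drops a single cached entry so it will be re-evaluated.
	sc.ResetInStmtCache(stmtctx.StmtNowTsCacheKey)
}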
{ "id": 0, "code_window": [ " \"stmtctx_test.go\",\n", " ],\n", " embed = [\":stmtctx\"],\n", " flaky = True,\n", " shard_count = 11,\n", " deps = [\n", " \"//pkg/kv\",\n", " \"//pkg/sessionctx/variable\",\n", " \"//pkg/testkit\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " shard_count = 12,\n" ], "file_path": "pkg/sessionctx/stmtctx/BUILD.bazel", "type": "replace", "edit_start_line_idx": 43 }
// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package external import ( "context" "io" "net/http/httptest" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/johannesboyne/gofakes3" "github.com/johannesboyne/gofakes3/backend/s3mem" "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/membuf" "github.com/pingcap/tidb/br/pkg/storage" "github.com/stretchr/testify/require" "golang.org/x/exp/rand" ) // mockExtStore is only used for test. type mockExtStore struct { src []byte idx uint64 } func (s *mockExtStore) Read(p []byte) (n int, err error) { // Read from src to p. if s.idx >= uint64(len(s.src)) { return 0, io.EOF } n = copy(p, s.src[s.idx:]) s.idx += uint64(n) return n, nil } func (s *mockExtStore) Seek(_ int64, _ int) (int64, error) { return 0, errors.Errorf("unsupported operation") } func (*mockExtStore) Close() error { return nil } func (s *mockExtStore) GetFileSize() (int64, error) { return int64(len(s.src)), nil } func TestByteReader(t *testing.T) { st, clean := NewS3WithBucketAndPrefix(t, "test", "testprefix") defer clean() // Prepare err := st.WriteFile(context.Background(), "testfile", []byte("abcde")) require.NoError(t, err) newRsc := func() storage.ExternalFileReader { rsc, err := st.Open(context.Background(), "testfile", nil) require.NoError(t, err) return rsc } // Test basic next() usage. br, err := newByteReader(context.Background(), newRsc(), 3) require.NoError(t, err) n, bs := br.next(1) require.Equal(t, 1, n) require.Equal(t, [][]byte{{'a'}}, bs) n, bs = br.next(2) require.Equal(t, 2, n) require.Equal(t, [][]byte{{'b', 'c'}}, bs) require.NoError(t, br.Close()) // Test basic readNBytes() usage. br, err = newByteReader(context.Background(), newRsc(), 3) require.NoError(t, err) x, err := br.readNBytes(2) require.NoError(t, err) require.Equal(t, 2, len(x)) require.Equal(t, byte('a'), x[0]) require.Equal(t, byte('b'), x[1]) require.NoError(t, br.Close()) br, err = newByteReader(context.Background(), newRsc(), 3) require.NoError(t, err) x, err = br.readNBytes(5) // Read all the data. require.NoError(t, err) require.Equal(t, 5, len(x)) require.Equal(t, byte('e'), x[4]) _, err = br.readNBytes(1) // EOF require.ErrorIs(t, err, io.EOF) require.NoError(t, br.Close()) br, err = newByteReader(context.Background(), newRsc(), 3) require.NoError(t, err) _, err = br.readNBytes(7) // EOF require.ErrorIs(t, err, io.ErrUnexpectedEOF) err = st.WriteFile(context.Background(), "testfile", []byte("abcdef")) require.NoError(t, err) ms := &mockExtStore{src: []byte("abcdef")} br, err = newByteReader(context.Background(), ms, 2) require.NoError(t, err) x, err = br.readNBytes(3) require.NoError(t, err) // Pollute mockExtStore to verify if the slice is not affected. 
copy(ms.src, "xyz") require.Equal(t, 3, len(x)) require.Equal(t, byte('c'), x[2]) require.NoError(t, br.Close()) ms = &mockExtStore{src: []byte("abcdef")} br, err = newByteReader(context.Background(), ms, 2) require.NoError(t, err) x, err = br.readNBytes(2) require.NoError(t, err) // Pollute mockExtStore to verify if the slice is not affected. copy(ms.src, "xyz") require.Equal(t, 2, len(x)) require.Equal(t, byte('b'), x[1]) require.NoError(t, br.Close()) } func TestByteReaderAuxBuf(t *testing.T) { ms := &mockExtStore{src: []byte("0123456789")} br, err := newByteReader(context.Background(), ms, 1) require.NoError(t, err) y1, err := br.readNBytes(1) require.NoError(t, err) require.Equal(t, []byte("0"), y1) y2, err := br.readNBytes(2) require.NoError(t, err) require.Equal(t, []byte("12"), y2) y3, err := br.readNBytes(1) require.NoError(t, err) require.Equal(t, []byte("3"), y3) y4, err := br.readNBytes(2) require.NoError(t, err) require.Equal(t, []byte("45"), y4) } func TestUnexpectedEOF(t *testing.T) { st, clean := NewS3WithBucketAndPrefix(t, "test", "testprefix") defer func() { clean() }() // Prepare err := st.WriteFile(context.Background(), "testfile", []byte("0123456789")) require.NoError(t, err) newRsc := func() storage.ExternalFileReader { rsc, err := st.Open(context.Background(), "testfile", nil) require.NoError(t, err) return rsc } br, err := newByteReader(context.Background(), newRsc(), 3) require.NoError(t, err) _, err = br.readNBytes(100) require.ErrorIs(t, err, io.ErrUnexpectedEOF) br, err = newByteReader(context.Background(), newRsc(), 3) require.NoError(t, err) _, err = br.readNBytes(100) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } func TestEmptyContent(t *testing.T) { ms := &mockExtStore{src: []byte{}} _, err := newByteReader(context.Background(), ms, 100) require.Equal(t, io.EOF, err) st, clean := NewS3WithBucketAndPrefix(t, "test", "testprefix") defer clean() // Prepare err = st.WriteFile(context.Background(), "testfile", []byte("")) require.NoError(t, err) newRsc := func() storage.ExternalFileReader { rsc, err := st.Open(context.Background(), "testfile", nil) require.NoError(t, err) return rsc } _, err = newByteReader(context.Background(), newRsc(), 100) require.Equal(t, io.EOF, err) } func TestSwitchMode(t *testing.T) { seed := time.Now().Unix() rand.Seed(uint64(seed)) t.Logf("seed: %d", seed) st := storage.NewMemStorage() // Prepare ctx := context.Background() writer := NewWriterBuilder(). SetPropSizeDistance(100). SetPropKeysDistance(2). 
BuildOneFile(st, "/test", "0") err := writer.Init(ctx, 5*1024*1024) require.NoError(t, err) kvCnt := 1000000 kvs := make([]common.KvPair, kvCnt) for i := 0; i < kvCnt; i++ { randLen := rand.Intn(10) + 1 kvs[i].Key = make([]byte, randLen) _, err := rand.Read(kvs[i].Key) require.NoError(t, err) randLen = rand.Intn(10) + 1 kvs[i].Val = make([]byte, randLen) _, err = rand.Read(kvs[i].Val) require.NoError(t, err) } for _, item := range kvs { err := writer.WriteRow(ctx, item.Key, item.Val) require.NoError(t, err) } err = writer.Close(ctx) require.NoError(t, err) pool := membuf.NewPool() ConcurrentReaderBufferSizePerConc = rand.Intn(100) + 1 kvReader, err := newKVReader(context.Background(), "/test/0/one-file", st, 0, 64*1024) require.NoError(t, err) kvReader.byteReader.enableConcurrentRead(st, "/test/0/one-file", 100, ConcurrentReaderBufferSizePerConc, pool.NewBuffer()) modeUseCon := false i := 0 for { if rand.Intn(5) == 0 { if modeUseCon { kvReader.byteReader.switchConcurrentMode(false) modeUseCon = false } else { kvReader.byteReader.switchConcurrentMode(true) modeUseCon = true } } key, val, err := kvReader.nextKV() if err == io.EOF { break } require.NoError(t, err) require.Equal(t, kvs[i].Key, key) require.Equal(t, kvs[i].Val, val) i++ } } // NewS3WithBucketAndPrefix creates a new S3Storage for testing. func NewS3WithBucketAndPrefix(t *testing.T, bucketName, prefixName string) (*storage.S3Storage, func()) { backend := s3mem.New() faker := gofakes3.New(backend) ts := httptest.NewServer(faker.Server()) err := backend.CreateBucket("test") require.NoError(t, err) config := aws.NewConfig() config.WithEndpoint(ts.URL) config.WithRegion("region") config.WithCredentials(credentials.NewStaticCredentials("dummy-access", "dummy-secret", "")) config.WithS3ForcePathStyle(true) // Removes need for subdomain svc := s3.New(session.New(), config) st := storage.NewS3StorageForTest(svc, &backuppb.S3{ Region: "region", Bucket: bucketName, Prefix: prefixName, Acl: "acl", Sse: "sse", StorageClass: "sc", }) return st, ts.Close }
br/pkg/lightning/backend/external/byte_reader_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.0002217279834439978, 0.0001736630074447021, 0.00016308965859934688, 0.0001727835478959605, 0.000009918154319166206 ]
{ "id": 0, "code_window": [ " \"stmtctx_test.go\",\n", " ],\n", " embed = [\":stmtctx\"],\n", " flaky = True,\n", " shard_count = 11,\n", " deps = [\n", " \"//pkg/kv\",\n", " \"//pkg/sessionctx/variable\",\n", " \"//pkg/testkit\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " shard_count = 12,\n" ], "file_path": "pkg/sessionctx/stmtctx/BUILD.bazel", "type": "replace", "edit_start_line_idx": 43 }
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
	"net"
	"net/url"
	"strings"

	"github.com/pingcap/errors"
)

// ParseHostPortAddr returns a scheme://host:port or host:port list
func ParseHostPortAddr(s string) ([]string, error) {
	strs := strings.Split(s, ",")
	addrs := make([]string, 0, len(strs))

	for _, str := range strs {
		str = strings.TrimSpace(str)

		// str may look like 127.0.0.1:8000
		if _, _, err := net.SplitHostPort(str); err == nil {
			addrs = append(addrs, str)
			continue
		}

		u, err := url.Parse(str)
		if err != nil {
			return nil, errors.Errorf("parse url %s failed %v", str, err)
		}
		if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
			return nil, errors.Errorf("URL scheme must be http, https, unix, or unixs: %s", str)
		}
		if _, _, err := net.SplitHostPort(u.Host); err != nil {
			return nil, errors.Errorf(`URL address does not have the form "host:port": %s`, str)
		}
		if u.Path != "" {
			return nil, errors.Errorf("URL must not contain a path: %s", str)
		}
		addrs = append(addrs, u.String())
	}

	return addrs, nil
}
pkg/util/urls.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00026821394567377865, 0.0001895335881272331, 0.00017059229139704257, 0.00017423706594854593, 0.00003526975342538208 ]
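ParseHostPortAddr above accepts a comma-separated list mixing bare host:port entries and scheme://host:port URLs. A short usage sketch, assuming the package imports as github.com/pingcap/tidb/pkg/util (derived from the recorded file path); the hostnames are made up for illustration.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/util"
)

func main() {
	// Bare host:port entries are accepted directly; entries with a scheme must
	// use http/https/unix/unixs, carry a host:port, and have no path.
	addrs, err := util.ParseHostPortAddr("127.0.0.1:2379, https://pd-1.example.com:2379")
	if err != nil {
		panic(err)
	}
	fmt.Println(addrs) // [127.0.0.1:2379 https://pd-1.example.com:2379]

	// An entry that carries a path is rejected.
	_, err = util.ParseHostPortAddr("https://pd-1.example.com:2379/pd")
	fmt.Println(err != nil) // true
}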
{ "id": 0, "code_window": [ " \"stmtctx_test.go\",\n", " ],\n", " embed = [\":stmtctx\"],\n", " flaky = True,\n", " shard_count = 11,\n", " deps = [\n", " \"//pkg/kv\",\n", " \"//pkg/sessionctx/variable\",\n", " \"//pkg/testkit\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ " shard_count = 12,\n" ], "file_path": "pkg/sessionctx/stmtctx/BUILD.bazel", "type": "replace", "edit_start_line_idx": 43 }
[lightning]
check-requirements=true

[mydumper.csv]
header = true
header-schema-match = true

[tikv-importer]
duplicate-resolution = 'remove'
br/tests/lightning_config_skip_csv_header/err_config.toml
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00017679772281553596, 0.00017679772281553596, 0.00017679772281553596, 0.00017679772281553596, 0 ]
{ "id": 1, "code_window": [ "\tintest.AssertNotNil(tz)\n", "\tsc := &StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t}\n", "\tsc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc)\n", "\treturn sc\n", "}\n", "\n", "// Reset resets a statement context\n", "func (sc *StatementContext) Reset() {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 444 }
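This hunk inserts a call to sc.initErrCtx() right after the type context is built in NewStmtCtxWithTimeZone. The body of initErrCtx is not part of this record; a plausible sketch, assuming it merely precomputes and caches the context that the ErrCtx() method shown earlier rebuilds from the type flags on every call:

// Hypothetical sketch only: initErrCtx is named by the hunk above, but its body
// is not included in this record. Assuming it caches the same context that
// ErrCtx() derives from the type flags, it could look like this.
func (sc *StatementContext) initErrCtx() {
	ctx := errctx.NewContext(sc)
	if sc.TypeFlags().IgnoreTruncateErr() {
		ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore)
	} else if sc.TypeFlags().TruncateAsWarning() {
		ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn)
	}
	sc.errCtx = ctx
}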
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stmtctx import ( "bytes" "encoding/json" "fmt" "io" "math" "slices" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain/resourcegroup" "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/linter/constructor" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/nocopy" "github.com/pingcap/tidb/pkg/util/resourcegrouptag" "github.com/pingcap/tidb/pkg/util/topsql/stmtstats" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/tikv/client-go/v2/tikvrpc" "github.com/tikv/client-go/v2/util" atomic2 "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/exp/maps" ) const ( // WarnLevelError represents level "Error" for 'SHOW WARNINGS' syntax. WarnLevelError = "Error" // WarnLevelWarning represents level "Warning" for 'SHOW WARNINGS' syntax. WarnLevelWarning = "Warning" // WarnLevelNote represents level "Note" for 'SHOW WARNINGS' syntax. WarnLevelNote = "Note" ) var taskIDAlloc uint64 // AllocateTaskID allocates a new unique ID for a statement execution func AllocateTaskID() uint64 { return atomic.AddUint64(&taskIDAlloc, 1) } // SQLWarn relates a sql warning and it's level. type SQLWarn struct { Level string Err error } type jsonSQLWarn struct { Level string `json:"level"` SQLErr *terror.Error `json:"err,omitempty"` Msg string `json:"msg,omitempty"` } // MarshalJSON implements the Marshaler.MarshalJSON interface. func (warn *SQLWarn) MarshalJSON() ([]byte, error) { w := &jsonSQLWarn{ Level: warn.Level, } e := errors.Cause(warn.Err) switch x := e.(type) { case *terror.Error: // Omit outter errors because only the most inner error matters. w.SQLErr = x default: w.Msg = e.Error() } return json.Marshal(w) } // UnmarshalJSON implements the Unmarshaler.UnmarshalJSON interface. func (warn *SQLWarn) UnmarshalJSON(data []byte) error { var w jsonSQLWarn if err := json.Unmarshal(data, &w); err != nil { return err } warn.Level = w.Level if w.SQLErr != nil { warn.Err = w.SQLErr } else { warn.Err = errors.New(w.Msg) } return nil } // ReferenceCount indicates the reference count of StmtCtx. type ReferenceCount int32 const ( // ReferenceCountIsFrozen indicates the current StmtCtx is resetting, it'll refuse all the access from other sessions. ReferenceCountIsFrozen int32 = -1 // ReferenceCountNoReference indicates the current StmtCtx is not accessed by other sessions. ReferenceCountNoReference int32 = 0 ) // TryIncrease tries to increase the reference count. 
// There is a small chance that TryIncrease returns true while TryFreeze and // UnFreeze are invoked successfully during the execution of TryIncrease. func (rf *ReferenceCount) TryIncrease() bool { refCnt := atomic.LoadInt32((*int32)(rf)) for ; refCnt != ReferenceCountIsFrozen && !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt+1); refCnt = atomic.LoadInt32((*int32)(rf)) { } return refCnt != ReferenceCountIsFrozen } // Decrease decreases the reference count. func (rf *ReferenceCount) Decrease() { for refCnt := atomic.LoadInt32((*int32)(rf)); !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt-1); refCnt = atomic.LoadInt32((*int32)(rf)) { } } // TryFreeze tries to freeze the StmtCtx to frozen before resetting the old StmtCtx. func (rf *ReferenceCount) TryFreeze() bool { return atomic.LoadInt32((*int32)(rf)) == ReferenceCountNoReference && atomic.CompareAndSwapInt32((*int32)(rf), ReferenceCountNoReference, ReferenceCountIsFrozen) } // UnFreeze unfreeze the frozen StmtCtx thus the other session can access this StmtCtx. func (rf *ReferenceCount) UnFreeze() { atomic.StoreInt32((*int32)(rf), ReferenceCountNoReference) } var stmtCtxIDGenerator atomic.Uint64 // StatementContext contains variables for a statement. // It should be reset before executing a statement. type StatementContext struct { // NoCopy indicates that this struct cannot be copied because // copying this object will make the copied TypeCtx field to refer a wrong `AppendWarnings` func. _ nocopy.NoCopy _ constructor.Constructor `ctor:"NewStmtCtx,NewStmtCtxWithTimeZone,Reset"` ctxID uint64 // typeCtx is used to indicate how to make the type conversation. typeCtx types.Context // errCtx is used to indicate how to handle the errors errCtx errctx.Context // Set the following variables before execution StmtHints // IsDDLJobInQueue is used to mark whether the DDL job is put into the queue. // If IsDDLJobInQueue is true, it means the DDL job is in the queue of storage, and it can be handled by the DDL worker. IsDDLJobInQueue bool DDLJobID int64 InInsertStmt bool InUpdateStmt bool InDeleteStmt bool InSelectStmt bool InLoadDataStmt bool InExplainStmt bool InExplainAnalyzeStmt bool ExplainFormat string InCreateOrAlterStmt bool InSetSessionStatesStmt bool InPreparedPlanBuilding bool DupKeyAsWarning bool BadNullAsWarning bool DividedByZeroAsWarning bool ErrAutoincReadFailedAsWarning bool InShowWarning bool UseCache bool CacheType PlanCacheType BatchCheck bool InNullRejectCheck bool IgnoreNoPartition bool IgnoreExplainIDSuffix bool MultiSchemaInfo *model.MultiSchemaInfo // If the select statement was like 'select * from t as of timestamp ...' or in a stale read transaction // or is affected by the tidb_read_staleness session variable, then the statement will be makred as isStaleness // in stmtCtx IsStaleness bool InRestrictedSQL bool ViewDepth int32 // mu struct holds variables that change during execution. mu struct { sync.Mutex affectedRows uint64 foundRows uint64 /* following variables are ported from 'COPY_INFO' struct of MySQL server source, they are used to count rows for INSERT/REPLACE/UPDATE queries: If a row is inserted then the copied variable is incremented. If a row is updated by the INSERT ... ON DUPLICATE KEY UPDATE and the new data differs from the old one then the copied and the updated variables are incremented. The touched variable is incremented if a row was touched by the update part of the INSERT ... ON DUPLICATE KEY UPDATE no matter whether the row was actually changed or not. 
see https://github.com/mysql/mysql-server/blob/d2029238d6d9f648077664e4cdd611e231a6dc14/sql/sql_data_change.h#L60 for more details */ records uint64 deleted uint64 updated uint64 copied uint64 touched uint64 message string warnings []SQLWarn // extraWarnings record the extra warnings and are only used by the slow log only now. // If a warning is expected to be output only under some conditions (like in EXPLAIN or EXPLAIN VERBOSE) but it's // not under such conditions now, it is considered as an extra warning. // extraWarnings would not be printed through SHOW WARNINGS, but we want to always output them through the slow // log to help diagnostics, so we store them here separately. extraWarnings []SQLWarn execDetails execdetails.ExecDetails detailsSummary execdetails.P90Summary } // PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows). PrevAffectedRows int64 // PrevLastInsertID is the last insert ID of previous statement. PrevLastInsertID uint64 // LastInsertID is the auto-generated ID in the current statement. LastInsertID uint64 // InsertID is the given insert ID of an auto_increment column. InsertID uint64 BaseRowID int64 MaxRowID int64 // Copied from SessionVars.TimeZone. Priority mysql.PriorityEnum NotFillCache bool MemTracker *memory.Tracker DiskTracker *disk.Tracker // per statement resource group name // hint /* +ResourceGroup(name) */ can change the statement group name ResourceGroupName string RunawayChecker *resourcegroup.RunawayChecker IsTiFlash atomic2.Bool RuntimeStatsColl *execdetails.RuntimeStatsColl TableIDs []int64 IndexNames []string StmtType string OriginalSQL string digestMemo struct { sync.Once normalized string digest *parser.Digest } // BindSQL used to construct the key for plan cache. It records the binding used by the stmt. // If the binding is not used by the stmt, the value is empty BindSQL string // The several fields below are mainly for some diagnostic features, like stmt summary and slow query. // We cache the values here to avoid calculating them multiple times. // Note: // Avoid accessing these fields directly, use their Setter/Getter methods instead. // Other fields should be the zero value or be consistent with the plan field. // TODO: more clearly distinguish between the value is empty and the value has not been set planNormalized string planDigest *parser.Digest encodedPlan string planHint string planHintSet bool binaryPlan string // To avoid cycle import, we use interface{} for the following two fields. // flatPlan should be a *plannercore.FlatPhysicalPlan if it's not nil flatPlan interface{} // plan should be a plannercore.Plan if it's not nil plan interface{} Tables []TableEntry PointExec bool // for point update cached execution, Constant expression need to set "paramMarker" lockWaitStartTime int64 // LockWaitStartTime stores the pessimistic lock wait start time PessimisticLockWaited int32 LockKeysDuration int64 LockKeysCount int32 LockTableIDs map[int64]struct{} // table IDs need to be locked, empty for lock all tables TblInfo2UnionScan map[*model.TableInfo]bool TaskID uint64 // unique ID for an execution of a statement TaskMapBakTS uint64 // counter for // stmtCache is used to store some statement-related values. // add mutex to protect stmtCache concurrent access // https://github.com/pingcap/tidb/issues/36159 stmtCache struct { mu sync.Mutex data map[StmtCacheKey]interface{} } // Map to store all CTE storages of current SQL. // Will clean up at the end of the execution. 
CTEStorageMap interface{} SetVarHintRestore map[string]string // If the statement read from table cache, this flag is set. ReadFromTableCache bool // cache is used to reduce object allocation. cache struct { execdetails.RuntimeStatsColl MemTracker memory.Tracker DiskTracker disk.Tracker LogOnExceed [2]memory.LogOnExceed } // InVerboseExplain indicates the statement is "explain format='verbose' ...". InVerboseExplain bool // EnableOptimizeTrace indicates whether enable optimizer trace by 'trace plan statement' EnableOptimizeTrace bool // OptimizeTracer indicates the tracer for optimize OptimizeTracer *tracing.OptimizeTracer // EnableOptimizerCETrace indicate if cardinality estimation internal process needs to be traced. // CE Trace is currently a submodule of the optimizer trace and is controlled by a separated option. EnableOptimizerCETrace bool OptimizerCETrace []*tracing.CETraceRecord EnableOptimizerDebugTrace bool OptimizerDebugTrace interface{} // WaitLockLeaseTime is the duration of cached table read lease expiration time. WaitLockLeaseTime time.Duration // KvExecCounter is created from SessionVars.StmtStats to count the number of SQL // executions of the kv layer during the current execution of the statement. // Its life cycle is limited to this execution, and a new KvExecCounter is // always created during each statement execution. KvExecCounter *stmtstats.KvExecCounter // WeakConsistency is true when read consistency is weak and in a read statement and not in a transaction. WeakConsistency bool StatsLoad struct { // Timeout to wait for sync-load Timeout time.Duration // NeededItems stores the columns/indices whose stats are needed for planner. NeededItems []model.TableItemID // ResultCh to receive stats loading results ResultCh chan StatsLoadResult // LoadStartTime is to record the load start time to calculate latency LoadStartTime time.Time } // SysdateIsNow indicates whether sysdate() is an alias of now() in this statement SysdateIsNow bool // RCCheckTS indicates the current read-consistency read select statement will use `RCCheckTS` path. RCCheckTS bool // IsSQLRegistered uses to indicate whether the SQL has been registered for TopSQL. IsSQLRegistered atomic2.Bool // IsSQLAndPlanRegistered uses to indicate whether the SQL and plan has been registered for TopSQL. IsSQLAndPlanRegistered atomic2.Bool // IsReadOnly uses to indicate whether the SQL is read-only. IsReadOnly bool // usedStatsInfo records version of stats of each table used in the query. // It's a map of table physical id -> *UsedStatsInfoForTable usedStatsInfo map[int64]*UsedStatsInfoForTable // IsSyncStatsFailed indicates whether any failure happened during sync stats IsSyncStatsFailed bool // UseDynamicPruneMode indicates whether use UseDynamicPruneMode in query stmt UseDynamicPruneMode bool // ColRefFromPlan mark the column ref used by assignment in update statement. ColRefFromUpdatePlan []int64 // RangeFallback indicates that building complete ranges exceeds the memory limit so it falls back to less accurate ranges such as full range. RangeFallback bool // IsExplainAnalyzeDML is true if the statement is "explain analyze DML executors", before responding the explain // results to the client, the transaction should be committed first. See issue #37373 for more details. IsExplainAnalyzeDML bool // InHandleForeignKeyTrigger indicates currently are handling foreign key trigger. InHandleForeignKeyTrigger bool // ForeignKeyTriggerCtx is the contain information for foreign key cascade execution. 
ForeignKeyTriggerCtx struct { // The SavepointName is use to do rollback when handle foreign key cascade failed. SavepointName string HasFKCascades bool } // MPPQueryInfo stores some id and timestamp of current MPP query statement. MPPQueryInfo struct { QueryID atomic2.Uint64 QueryTS atomic2.Uint64 AllocatedMPPTaskID atomic2.Int64 AllocatedMPPGatherID atomic2.Uint64 } // TableStats stores the visited runtime table stats by table id during query TableStats map[int64]interface{} // useChunkAlloc indicates whether statement use chunk alloc useChunkAlloc bool // Check if TiFlash read engine is removed due to strict sql mode. TiFlashEngineRemovedDueToStrictSQLMode bool // StaleTSOProvider is used to provide stale timestamp oracle for read-only transactions. StaleTSOProvider struct { sync.Mutex value *uint64 eval func() (uint64, error) } } // NewStmtCtx creates a new statement context func NewStmtCtx() *StatementContext { return NewStmtCtxWithTimeZone(time.UTC) } // NewStmtCtxWithTimeZone creates a new StatementContext with the given timezone func NewStmtCtxWithTimeZone(tz *time.Location) *StatementContext { intest.AssertNotNil(tz) sc := &StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), } sc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc) return sc } // Reset resets a statement context func (sc *StatementContext) Reset() { *sc = StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), typeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc), } } // CtxID returns the context id of the statement func (sc *StatementContext) CtxID() uint64 { return sc.ctxID } // TimeZone returns the timezone of the type context func (sc *StatementContext) TimeZone() *time.Location { intest.AssertNotNil(sc) if sc == nil { return time.UTC } return sc.typeCtx.Location() } // SetTimeZone sets the timezone func (sc *StatementContext) SetTimeZone(tz *time.Location) { intest.AssertNotNil(tz) sc.typeCtx = sc.typeCtx.WithLocation(tz) } // TypeCtx returns the type context func (sc *StatementContext) TypeCtx() types.Context { return sc.typeCtx } // ErrCtx returns the error context // TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime. func (sc *StatementContext) ErrCtx() errctx.Context { ctx := errctx.NewContext(sc) if sc.TypeFlags().IgnoreTruncateErr() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore) } else if sc.TypeFlags().TruncateAsWarning() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn) } return ctx } // TypeFlags returns the type flags func (sc *StatementContext) TypeFlags() types.Flags { return sc.typeCtx.Flags() } // SetTypeFlags sets the type flags func (sc *StatementContext) SetTypeFlags(flags types.Flags) { sc.typeCtx = sc.typeCtx.WithFlags(flags) } // HandleTruncate ignores or returns the error based on the TypeContext inside. // TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect. 
func (sc *StatementContext) HandleTruncate(err error) error { return sc.typeCtx.HandleTruncate(err) } // HandleError handles the error based on `ErrCtx()` func (sc *StatementContext) HandleError(err error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleError(err) } // HandleErrorWithAlias handles the error based on `ErrCtx()` func (sc *StatementContext) HandleErrorWithAlias(internalErr, err, warnErr error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleErrorWithAlias(internalErr, err, warnErr) } // StmtHints are SessionVars related sql hints. type StmtHints struct { // Hint Information MemQuotaQuery int64 MaxExecutionTime uint64 ReplicaRead byte AllowInSubqToJoinAndAgg bool NoIndexMergeHint bool StraightJoinOrder bool // EnableCascadesPlanner is use cascades planner for a single query only. EnableCascadesPlanner bool // ForceNthPlan indicates the PlanCounterTp number for finding physical plan. // -1 for disable. ForceNthPlan int64 ResourceGroup string // Hint flags HasAllowInSubqToJoinAndAggHint bool HasMemQuotaHint bool HasReplicaReadHint bool HasMaxExecutionTime bool HasEnableCascadesPlannerHint bool HasResourceGroup bool SetVars map[string]string // the original table hints OriginalTableHints []*ast.TableOptimizerHint } // TaskMapNeedBackUp indicates that whether we need to back up taskMap during physical optimizing. func (sh *StmtHints) TaskMapNeedBackUp() bool { return sh.ForceNthPlan != -1 } // Clone the StmtHints struct and returns the pointer of the new one. func (sh *StmtHints) Clone() *StmtHints { var ( vars map[string]string tableHints []*ast.TableOptimizerHint ) if len(sh.SetVars) > 0 { vars = make(map[string]string, len(sh.SetVars)) for k, v := range sh.SetVars { vars[k] = v } } if len(sh.OriginalTableHints) > 0 { tableHints = make([]*ast.TableOptimizerHint, len(sh.OriginalTableHints)) copy(tableHints, sh.OriginalTableHints) } return &StmtHints{ MemQuotaQuery: sh.MemQuotaQuery, MaxExecutionTime: sh.MaxExecutionTime, ReplicaRead: sh.ReplicaRead, AllowInSubqToJoinAndAgg: sh.AllowInSubqToJoinAndAgg, NoIndexMergeHint: sh.NoIndexMergeHint, StraightJoinOrder: sh.StraightJoinOrder, EnableCascadesPlanner: sh.EnableCascadesPlanner, ForceNthPlan: sh.ForceNthPlan, ResourceGroup: sh.ResourceGroup, HasAllowInSubqToJoinAndAggHint: sh.HasAllowInSubqToJoinAndAggHint, HasMemQuotaHint: sh.HasMemQuotaHint, HasReplicaReadHint: sh.HasReplicaReadHint, HasMaxExecutionTime: sh.HasMaxExecutionTime, HasEnableCascadesPlannerHint: sh.HasEnableCascadesPlannerHint, HasResourceGroup: sh.HasResourceGroup, SetVars: vars, OriginalTableHints: tableHints, } } // StmtCacheKey represents the key type in the StmtCache. type StmtCacheKey int const ( // StmtNowTsCacheKey is a variable for now/current_timestamp calculation/cache of one stmt. StmtNowTsCacheKey StmtCacheKey = iota // StmtSafeTSCacheKey is a variable for safeTS calculation/cache of one stmt. StmtSafeTSCacheKey // StmtExternalTSCacheKey is a variable for externalTS calculation/cache of one stmt. StmtExternalTSCacheKey ) // GetOrStoreStmtCache gets the cached value of the given key if it exists, otherwise stores the value. 
func (sc *StatementContext) GetOrStoreStmtCache(key StmtCacheKey, value interface{}) interface{} { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { sc.stmtCache.data[key] = value } return sc.stmtCache.data[key] } // GetOrEvaluateStmtCache gets the cached value of the given key if it exists, otherwise calculate the value. func (sc *StatementContext) GetOrEvaluateStmtCache(key StmtCacheKey, valueEvaluator func() (interface{}, error)) (interface{}, error) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { value, err := valueEvaluator() if err != nil { return nil, err } sc.stmtCache.data[key] = value } return sc.stmtCache.data[key], nil } // ResetInStmtCache resets the cache of given key. func (sc *StatementContext) ResetInStmtCache(key StmtCacheKey) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() delete(sc.stmtCache.data, key) } // ResetStmtCache resets all cached values. func (sc *StatementContext) ResetStmtCache() { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } // SQLDigest gets normalized and digest for provided sql. // it will cache result after first calling. func (sc *StatementContext) SQLDigest() (normalized string, sqlDigest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(sc.OriginalSQL) }) return sc.digestMemo.normalized, sc.digestMemo.digest } // InitSQLDigest sets the normalized and digest for sql. func (sc *StatementContext) InitSQLDigest(normalized string, digest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = normalized, digest }) } // ResetSQLDigest sets the normalized and digest for sql anyway, **DO NOT USE THIS UNLESS YOU KNOW WHAT YOU ARE DOING NOW**. func (sc *StatementContext) ResetSQLDigest(s string) { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(s) } // GetPlanDigest gets the normalized plan and plan digest. func (sc *StatementContext) GetPlanDigest() (normalized string, planDigest *parser.Digest) { return sc.planNormalized, sc.planDigest } // GetPlan gets the plan field of stmtctx func (sc *StatementContext) GetPlan() interface{} { return sc.plan } // SetPlan sets the plan field of stmtctx func (sc *StatementContext) SetPlan(plan interface{}) { sc.plan = plan } // GetFlatPlan gets the flatPlan field of stmtctx func (sc *StatementContext) GetFlatPlan() interface{} { return sc.flatPlan } // SetFlatPlan sets the flatPlan field of stmtctx func (sc *StatementContext) SetFlatPlan(flat interface{}) { sc.flatPlan = flat } // GetBinaryPlan gets the binaryPlan field of stmtctx func (sc *StatementContext) GetBinaryPlan() string { return sc.binaryPlan } // SetBinaryPlan sets the binaryPlan field of stmtctx func (sc *StatementContext) SetBinaryPlan(binaryPlan string) { sc.binaryPlan = binaryPlan } // GetResourceGroupTagger returns the implementation of tikvrpc.ResourceGroupTagger related to self. 
func (sc *StatementContext) GetResourceGroupTagger() tikvrpc.ResourceGroupTagger { normalized, digest := sc.SQLDigest() planDigest := sc.planDigest return func(req *tikvrpc.Request) { if req == nil { return } if len(normalized) == 0 { return } req.ResourceGroupTag = resourcegrouptag.EncodeResourceGroupTag(digest, planDigest, resourcegrouptag.GetResourceGroupLabelByKey(resourcegrouptag.GetFirstKeyFromRequest(req))) } } // SetUseChunkAlloc set use chunk alloc status func (sc *StatementContext) SetUseChunkAlloc() { sc.useChunkAlloc = true } // ClearUseChunkAlloc clear useChunkAlloc status func (sc *StatementContext) ClearUseChunkAlloc() { sc.useChunkAlloc = false } // GetUseChunkAllocStatus returns useChunkAlloc status func (sc *StatementContext) GetUseChunkAllocStatus() bool { return sc.useChunkAlloc } // SetPlanDigest sets the normalized plan and plan digest. func (sc *StatementContext) SetPlanDigest(normalized string, planDigest *parser.Digest) { if planDigest != nil { sc.planNormalized, sc.planDigest = normalized, planDigest } } // GetEncodedPlan gets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) GetEncodedPlan() string { return sc.encodedPlan } // SetEncodedPlan sets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) SetEncodedPlan(encodedPlan string) { sc.encodedPlan = encodedPlan } // GetPlanHint gets the hint string generated from the plan. func (sc *StatementContext) GetPlanHint() (string, bool) { return sc.planHint, sc.planHintSet } // InitDiskTracker initializes the sc.DiskTracker, use cache to avoid allocation. func (sc *StatementContext) InitDiskTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.DiskTracker, label, bytesLimit, &sc.cache.LogOnExceed[0]) sc.DiskTracker = &sc.cache.DiskTracker } // InitMemTracker initializes the sc.MemTracker, use cache to avoid allocation. func (sc *StatementContext) InitMemTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.MemTracker, label, bytesLimit, &sc.cache.LogOnExceed[1]) sc.MemTracker = &sc.cache.MemTracker } // SetPlanHint sets the hint for the plan. func (sc *StatementContext) SetPlanHint(hint string) { sc.planHintSet = true sc.planHint = hint } // PlanCacheType is the flag of plan cache type PlanCacheType int const ( // DefaultNoCache no cache DefaultNoCache PlanCacheType = iota // SessionPrepared session prepared plan cache SessionPrepared // SessionNonPrepared session non-prepared plan cache SessionNonPrepared ) // SetSkipPlanCache sets to skip the plan cache and records the reason. func (sc *StatementContext) SetSkipPlanCache(reason error) { if !sc.UseCache { return // avoid unnecessary warnings } sc.UseCache = false switch sc.CacheType { case DefaultNoCache: sc.AppendWarning(errors.NewNoStackError("unknown cache type")) case SessionPrepared: sc.AppendWarning(errors.NewNoStackErrorf("skip prepared plan-cache: %s", reason.Error())) case SessionNonPrepared: if sc.InExplainStmt && sc.ExplainFormat == "plan_cache" { // use "plan_cache" rather than types.ExplainFormatPlanCache to avoid import cycle sc.AppendWarning(errors.NewNoStackErrorf("skip non-prepared plan-cache: %s", reason.Error())) } } } // TableEntry presents table in db. type TableEntry struct { DB string Table string } // AddAffectedRows adds affected rows. func (sc *StatementContext) AddAffectedRows(rows uint64) { if sc.InHandleForeignKeyTrigger { // For compatibility with MySQL, not add the affected row cause by the foreign key trigger. 
return } sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows += rows } // SetAffectedRows sets affected rows. func (sc *StatementContext) SetAffectedRows(rows uint64) { sc.mu.Lock() sc.mu.affectedRows = rows sc.mu.Unlock() } // AffectedRows gets affected rows. func (sc *StatementContext) AffectedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.affectedRows } // FoundRows gets found rows. func (sc *StatementContext) FoundRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.foundRows } // AddFoundRows adds found rows. func (sc *StatementContext) AddFoundRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.foundRows += rows } // RecordRows is used to generate info message func (sc *StatementContext) RecordRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.records } // AddRecordRows adds record rows. func (sc *StatementContext) AddRecordRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.records += rows } // DeletedRows is used to generate info message func (sc *StatementContext) DeletedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.deleted } // AddDeletedRows adds record rows. func (sc *StatementContext) AddDeletedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.deleted += rows } // UpdatedRows is used to generate info message func (sc *StatementContext) UpdatedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.updated } // AddUpdatedRows adds updated rows. func (sc *StatementContext) AddUpdatedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.updated += rows } // CopiedRows is used to generate info message func (sc *StatementContext) CopiedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.copied } // AddCopiedRows adds copied rows. func (sc *StatementContext) AddCopiedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.copied += rows } // TouchedRows is used to generate info message func (sc *StatementContext) TouchedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.touched } // AddTouchedRows adds touched rows. func (sc *StatementContext) AddTouchedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.touched += rows } // GetMessage returns the extra message of the last executed command, if there is no message, it returns empty string func (sc *StatementContext) GetMessage() string { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.message } // SetMessage sets the info message generated by some commands func (sc *StatementContext) SetMessage(msg string) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.message = msg } // GetWarnings gets warnings. func (sc *StatementContext) GetWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.warnings } // TruncateWarnings truncates warnings begin from start and returns the truncated warnings. func (sc *StatementContext) TruncateWarnings(start int) []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() sz := len(sc.mu.warnings) - start if sz <= 0 { return nil } ret := make([]SQLWarn, sz) copy(ret, sc.mu.warnings[start:]) sc.mu.warnings = sc.mu.warnings[:start] return ret } // WarningCount gets warning count. func (sc *StatementContext) WarningCount() uint16 { if sc.InShowWarning { return 0 } sc.mu.Lock() defer sc.mu.Unlock() return uint16(len(sc.mu.warnings)) } // NumErrorWarnings gets warning and error count. 
func (sc *StatementContext) NumErrorWarnings() (ec uint16, wc int) { sc.mu.Lock() defer sc.mu.Unlock() for _, w := range sc.mu.warnings { if w.Level == WarnLevelError { ec++ } } wc = len(sc.mu.warnings) return } // SetWarnings sets warnings. func (sc *StatementContext) SetWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.warnings = warns } // AppendWarning appends a warning with level 'Warning'. func (sc *StatementContext) AppendWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendWarnings appends some warnings. func (sc *StatementContext) AppendWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, warns...) } } // AppendNote appends a warning with level 'Note'. func (sc *StatementContext) AppendNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelNote, warn}) } } // AppendError appends a warning with level 'Error'. func (sc *StatementContext) AppendError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelError, warn}) } } // GetExtraWarnings gets extra warnings. func (sc *StatementContext) GetExtraWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.extraWarnings } // SetExtraWarnings sets extra warnings. func (sc *StatementContext) SetExtraWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.extraWarnings = warns } // AppendExtraWarning appends an extra warning with level 'Warning'. func (sc *StatementContext) AppendExtraWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendExtraNote appends an extra warning with level 'Note'. func (sc *StatementContext) AppendExtraNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelNote, warn}) } } // AppendExtraError appends an extra warning with level 'Error'. func (sc *StatementContext) AppendExtraError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelError, warn}) } } // resetMuForRetry resets the changed states of sc.mu during execution. func (sc *StatementContext) resetMuForRetry() { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows = 0 sc.mu.foundRows = 0 sc.mu.records = 0 sc.mu.deleted = 0 sc.mu.updated = 0 sc.mu.copied = 0 sc.mu.touched = 0 sc.mu.message = "" sc.mu.warnings = nil sc.mu.execDetails = execdetails.ExecDetails{} sc.mu.detailsSummary.Reset() } // ResetForRetry resets the changed states during execution. func (sc *StatementContext) ResetForRetry() { sc.resetMuForRetry() sc.MaxRowID = 0 sc.BaseRowID = 0 sc.TableIDs = sc.TableIDs[:0] sc.IndexNames = sc.IndexNames[:0] sc.TaskID = AllocateTaskID() } // MergeExecDetails merges a single region execution details into self, used to print // the information in slow query log. 
func (sc *StatementContext) MergeExecDetails(details *execdetails.ExecDetails, commitDetails *util.CommitDetails) { sc.mu.Lock() defer sc.mu.Unlock() if details != nil { sc.mu.execDetails.CopTime += details.CopTime sc.mu.execDetails.BackoffTime += details.BackoffTime sc.mu.execDetails.RequestCount++ sc.MergeScanDetail(details.ScanDetail) sc.MergeTimeDetail(details.TimeDetail) detail := &execdetails.DetailsNeedP90{ BackoffSleep: details.BackoffSleep, BackoffTimes: details.BackoffTimes, CalleeAddress: details.CalleeAddress, TimeDetail: details.TimeDetail, } sc.mu.detailsSummary.Merge(detail) } if commitDetails != nil { if sc.mu.execDetails.CommitDetail == nil { sc.mu.execDetails.CommitDetail = commitDetails } else { sc.mu.execDetails.CommitDetail.Merge(commitDetails) } } } // MergeScanDetail merges scan details into self. func (sc *StatementContext) MergeScanDetail(scanDetail *util.ScanDetail) { // Currently TiFlash cop task does not fill scanDetail, so need to skip it if scanDetail is nil if scanDetail == nil { return } if sc.mu.execDetails.ScanDetail == nil { sc.mu.execDetails.ScanDetail = &util.ScanDetail{} } sc.mu.execDetails.ScanDetail.Merge(scanDetail) } // MergeTimeDetail merges time details into self. func (sc *StatementContext) MergeTimeDetail(timeDetail util.TimeDetail) { sc.mu.execDetails.TimeDetail.ProcessTime += timeDetail.ProcessTime sc.mu.execDetails.TimeDetail.WaitTime += timeDetail.WaitTime } // MergeLockKeysExecDetails merges lock keys execution details into self. func (sc *StatementContext) MergeLockKeysExecDetails(lockKeys *util.LockKeysDetails) { sc.mu.Lock() defer sc.mu.Unlock() if sc.mu.execDetails.LockKeysDetail == nil { sc.mu.execDetails.LockKeysDetail = lockKeys } else { sc.mu.execDetails.LockKeysDetail.Merge(lockKeys) } } // GetExecDetails gets the execution details for the statement. func (sc *StatementContext) GetExecDetails() execdetails.ExecDetails { var details execdetails.ExecDetails sc.mu.Lock() defer sc.mu.Unlock() details = sc.mu.execDetails details.LockKeysDuration = time.Duration(atomic.LoadInt64(&sc.LockKeysDuration)) return details } // PushDownFlags converts StatementContext to tipb.SelectRequest.Flags. func (sc *StatementContext) PushDownFlags() uint64 { var flags uint64 if sc.InInsertStmt { flags |= model.FlagInInsertStmt } else if sc.InUpdateStmt || sc.InDeleteStmt { flags |= model.FlagInUpdateOrDeleteStmt } else if sc.InSelectStmt { flags |= model.FlagInSelectStmt } if sc.TypeFlags().IgnoreTruncateErr() { flags |= model.FlagIgnoreTruncate } else if sc.TypeFlags().TruncateAsWarning() { flags |= model.FlagTruncateAsWarning // TODO: remove this flag from TiKV. flags |= model.FlagOverflowAsWarning } if sc.TypeFlags().IgnoreZeroInDate() { flags |= model.FlagIgnoreZeroInDate } if sc.DividedByZeroAsWarning { flags |= model.FlagDividedByZeroAsWarning } if sc.InLoadDataStmt { flags |= model.FlagInLoadDataStmt } if sc.InRestrictedSQL { flags |= model.FlagInRestrictedSQL } return flags } // CopTasksDetails returns some useful information of cop-tasks during execution. 
func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { sc.mu.Lock() defer sc.mu.Unlock() n := sc.mu.detailsSummary.NumCopTasks d := &CopTasksDetails{ NumCopTasks: n, MaxBackoffTime: make(map[string]time.Duration), AvgBackoffTime: make(map[string]time.Duration), P90BackoffTime: make(map[string]time.Duration), TotBackoffTime: make(map[string]time.Duration), TotBackoffTimes: make(map[string]int), MaxBackoffAddress: make(map[string]string), } if n == 0 { return d } d.AvgProcessTime = sc.mu.execDetails.TimeDetail.ProcessTime / time.Duration(n) d.AvgWaitTime = sc.mu.execDetails.TimeDetail.WaitTime / time.Duration(n) d.P90ProcessTime = time.Duration((sc.mu.detailsSummary.ProcessTimePercentile.GetPercentile(0.9))) d.MaxProcessTime = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().D d.MaxProcessAddress = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().Addr d.P90WaitTime = time.Duration((sc.mu.detailsSummary.WaitTimePercentile.GetPercentile(0.9))) d.MaxWaitTime = sc.mu.detailsSummary.WaitTimePercentile.GetMax().D d.MaxWaitAddress = sc.mu.detailsSummary.WaitTimePercentile.GetMax().Addr for backoff, items := range sc.mu.detailsSummary.BackoffInfo { if items == nil { continue } n := items.ReqTimes d.MaxBackoffAddress[backoff] = items.BackoffPercentile.GetMax().Addr d.MaxBackoffTime[backoff] = items.BackoffPercentile.GetMax().D d.P90BackoffTime[backoff] = time.Duration(items.BackoffPercentile.GetPercentile(0.9)) d.AvgBackoffTime[backoff] = items.TotBackoffTime / time.Duration(n) d.TotBackoffTime[backoff] = items.TotBackoffTime d.TotBackoffTimes[backoff] = items.TotBackoffTimes } return d } // InitFromPBFlagAndTz set the flag and timezone of StatementContext from a `tipb.SelectRequest.Flags` and `*time.Location`. func (sc *StatementContext) InitFromPBFlagAndTz(flags uint64, tz *time.Location) { sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 sc.SetTimeZone(tz) sc.SetTypeFlags(types.DefaultStmtFlags. WithIgnoreTruncateErr((flags & model.FlagIgnoreTruncate) > 0). WithTruncateAsWarning((flags & model.FlagTruncateAsWarning) > 0). WithIgnoreZeroInDate((flags & model.FlagIgnoreZeroInDate) > 0). WithAllowNegativeToUnsigned(!sc.InInsertStmt)) } // GetLockWaitStartTime returns the statement pessimistic lock wait start time func (sc *StatementContext) GetLockWaitStartTime() time.Time { startTime := atomic.LoadInt64(&sc.lockWaitStartTime) if startTime == 0 { startTime = time.Now().UnixNano() atomic.StoreInt64(&sc.lockWaitStartTime, startTime) } return time.Unix(0, startTime) } // RecordRangeFallback records range fallback. func (sc *StatementContext) RecordRangeFallback(rangeMaxSize int64) { // If range fallback happens, it means ether the query is unreasonable(for example, several long IN lists) or tidb_opt_range_max_size is too small // and the generated plan is probably suboptimal. In that case we don't put it into plan cache. if sc.UseCache { sc.SetSkipPlanCache(errors.Errorf("in-list is too long")) } if !sc.RangeFallback { sc.AppendWarning(errors.Errorf("Memory capacity of %v bytes for 'tidb_opt_range_max_size' exceeded when building ranges. 
Less accurate ranges such as full range are chosen", rangeMaxSize)) sc.RangeFallback = true } } // UseDynamicPartitionPrune indicates whether dynamic partition is used during the query func (sc *StatementContext) UseDynamicPartitionPrune() bool { return sc.UseDynamicPruneMode } // DetachMemDiskTracker detaches the memory and disk tracker from the sessionTracker. func (sc *StatementContext) DetachMemDiskTracker() { if sc == nil { return } if sc.MemTracker != nil { sc.MemTracker.Detach() } if sc.DiskTracker != nil { sc.DiskTracker.Detach() } } // SetStaleTSOProvider sets the stale TSO provider. func (sc *StatementContext) SetStaleTSOProvider(eval func() (uint64, error)) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() sc.StaleTSOProvider.value = nil sc.StaleTSOProvider.eval = eval } // GetStaleTSO returns the TSO for stale-read usage which calculate from PD's last response. func (sc *StatementContext) GetStaleTSO() (uint64, error) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() if sc.StaleTSOProvider.value != nil { return *sc.StaleTSOProvider.value, nil } if sc.StaleTSOProvider.eval == nil { return 0, nil } tso, err := sc.StaleTSOProvider.eval() if err != nil { return 0, err } sc.StaleTSOProvider.value = &tso return tso, nil } // AddSetVarHintRestore records the variables which are affected by SET_VAR hint. And restore them to the old value later. func (sc *StatementContext) AddSetVarHintRestore(name, val string) { if sc.SetVarHintRestore == nil { sc.SetVarHintRestore = make(map[string]string) } sc.SetVarHintRestore[name] = val } // CopTasksDetails collects some useful information of cop-tasks during execution. type CopTasksDetails struct { NumCopTasks int AvgProcessTime time.Duration P90ProcessTime time.Duration MaxProcessAddress string MaxProcessTime time.Duration AvgWaitTime time.Duration P90WaitTime time.Duration MaxWaitAddress string MaxWaitTime time.Duration MaxBackoffTime map[string]time.Duration MaxBackoffAddress map[string]string AvgBackoffTime map[string]time.Duration P90BackoffTime map[string]time.Duration TotBackoffTime map[string]time.Duration TotBackoffTimes map[string]int } // ToZapFields wraps the CopTasksDetails as zap.Fileds. func (d *CopTasksDetails) ToZapFields() (fields []zap.Field) { if d.NumCopTasks == 0 { return } fields = make([]zap.Field, 0, 10) fields = append(fields, zap.Int("num_cop_tasks", d.NumCopTasks)) fields = append(fields, zap.String("process_avg_time", strconv.FormatFloat(d.AvgProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_p90_time", strconv.FormatFloat(d.P90ProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_time", strconv.FormatFloat(d.MaxProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_addr", d.MaxProcessAddress)) fields = append(fields, zap.String("wait_avg_time", strconv.FormatFloat(d.AvgWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_p90_time", strconv.FormatFloat(d.P90WaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_time", strconv.FormatFloat(d.MaxWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_addr", d.MaxWaitAddress)) return fields } // GetUsedStatsInfo returns the map for recording the used stats during query. // If initIfNil is true, it will initialize it when this map is nil. 
func (sc *StatementContext) GetUsedStatsInfo(initIfNil bool) map[int64]*UsedStatsInfoForTable { if sc.usedStatsInfo == nil && initIfNil { sc.usedStatsInfo = make(map[int64]*UsedStatsInfoForTable) } return sc.usedStatsInfo } // RecordedStatsLoadStatusCnt returns the total number of recorded column/index stats status, which is not full loaded. func (sc *StatementContext) RecordedStatsLoadStatusCnt() (cnt int) { allStatus := sc.GetUsedStatsInfo(false) for _, status := range allStatus { if status == nil { continue } cnt += status.recordedColIdxCount() } return } // TypeCtxOrDefault returns the reference to the `TypeCtx` inside the statement context. // If the statement context is nil, it'll return a newly created default type context. // **don't** use this function if you can make sure the `sc` is not nil. We should limit the usage of this function as // little as possible. func (sc *StatementContext) TypeCtxOrDefault() types.Context { if sc != nil { return sc.typeCtx } return types.DefaultStmtNoWarningContext } // UsedStatsInfoForTable records stats that are used during query and their information. type UsedStatsInfoForTable struct { Name string TblInfo *model.TableInfo Version uint64 RealtimeCount int64 ModifyCount int64 ColumnStatsLoadStatus map[int64]string IndexStatsLoadStatus map[int64]string } // FormatForExplain format the content in the format expected to be printed in the execution plan. // case 1: if stats version is 0, print stats:pseudo. // case 2: if stats version is not 0, and there are column/index stats that are not full loaded, // print stats:partial, then print status of 3 column/index status at most. For the rest, only // the count will be printed, in the format like (more: 1 onlyCmsEvicted, 2 onlyHistRemained). func (s *UsedStatsInfoForTable) FormatForExplain() string { // statistics.PseudoVersion == 0 if s.Version == 0 { return "stats:pseudo" } var b strings.Builder if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) == 0 { return "" } b.WriteString("stats:partial") outputNumsLeft := 3 statusCnt := make(map[string]uint64, 1) var strs []string strs = append(strs, s.collectFromColOrIdxStatus(false, &outputNumsLeft, statusCnt)...) strs = append(strs, s.collectFromColOrIdxStatus(true, &outputNumsLeft, statusCnt)...) b.WriteString("[") b.WriteString(strings.Join(strs, ", ")) if len(statusCnt) > 0 { b.WriteString("...(more: ") keys := maps.Keys(statusCnt) slices.Sort(keys) var cntStrs []string for _, key := range keys { cntStrs = append(cntStrs, strconv.FormatUint(statusCnt[key], 10)+" "+key) } b.WriteString(strings.Join(cntStrs, ", ")) b.WriteString(")") } b.WriteString("]") return b.String() } // WriteToSlowLog format the content in the format expected to be printed to the slow log, then write to w. // The format is table name partition name:version[realtime row count;modify count][index load status][column load status]. func (s *UsedStatsInfoForTable) WriteToSlowLog(w io.Writer) { ver := "pseudo" // statistics.PseudoVersion == 0 if s.Version != 0 { ver = strconv.FormatUint(s.Version, 10) } fmt.Fprintf(w, "%s:%s[%d;%d]", s.Name, ver, s.RealtimeCount, s.ModifyCount) if ver == "pseudo" { return } if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) > 0 { fmt.Fprintf(w, "[%s][%s]", strings.Join(s.collectFromColOrIdxStatus(false, nil, nil), ","), strings.Join(s.collectFromColOrIdxStatus(true, nil, nil), ","), ) } } // collectFromColOrIdxStatus prints the status of column or index stats to a slice // of the string in the format of "col/idx name:status". 
// If outputNumsLeft is not nil, this function will output outputNumsLeft column/index // status at most, the rest will be counted in statusCnt, which is a map of status->count. func (s *UsedStatsInfoForTable) collectFromColOrIdxStatus( forColumn bool, outputNumsLeft *int, statusCnt map[string]uint64, ) []string { var status map[int64]string if forColumn { status = s.ColumnStatsLoadStatus } else { status = s.IndexStatsLoadStatus } keys := maps.Keys(status) slices.Sort(keys) strs := make([]string, 0, len(status)) for _, id := range keys { if outputNumsLeft == nil || *outputNumsLeft > 0 { var name string if s.TblInfo != nil { if forColumn { name = s.TblInfo.FindColumnNameByID(id) } else { name = s.TblInfo.FindIndexNameByID(id) } } if len(name) == 0 { name = "ID " + strconv.FormatInt(id, 10) } strs = append(strs, name+":"+status[id]) if outputNumsLeft != nil { *outputNumsLeft-- } } else if statusCnt != nil { statusCnt[status[id]] = statusCnt[status[id]] + 1 } } return strs } func (s *UsedStatsInfoForTable) recordedColIdxCount() int { return len(s.IndexStatsLoadStatus) + len(s.ColumnStatsLoadStatus) } // StatsLoadResult indicates result for StatsLoad type StatsLoadResult struct { Item model.TableItemID Error error } // HasError returns whether result has error func (r StatsLoadResult) HasError() bool { return r.Error != nil } // ErrorMsg returns StatsLoadResult err msg func (r StatsLoadResult) ErrorMsg() string { if r.Error == nil { return "" } b := bytes.NewBufferString("tableID:") b.WriteString(strconv.FormatInt(r.Item.TableID, 10)) b.WriteString(", id:") b.WriteString(strconv.FormatInt(r.Item.ID, 10)) b.WriteString(", isIndex:") b.WriteString(strconv.FormatBool(r.Item.IsIndex)) b.WriteString(", err:") b.WriteString(r.Error.Error()) return b.String() }
pkg/sessionctx/stmtctx/stmtctx.go
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.9974810481071472, 0.014769960194826126, 0.00015980415628291667, 0.001440611551515758, 0.08002551645040512 ]
{ "id": 1, "code_window": [ "\tintest.AssertNotNil(tz)\n", "\tsc := &StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t}\n", "\tsc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc)\n", "\treturn sc\n", "}\n", "\n", "// Reset resets a statement context\n", "func (sc *StatementContext) Reset() {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 444 }
// Copyright 2022 PingCAP, Inc. Licensed under Apache-2.0. package restore import ( "bytes" "context" "crypto/sha256" "encoding/hex" "strings" "testing" "time" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/stream" "github.com/pingcap/tidb/pkg/util/codec" "github.com/stretchr/testify/require" ) func TestStartWithComparator(t *testing.T) { comparator := NewStartWithComparator() require.True(t, comparator.Compare([]byte("aa_key"), []byte("aa"))) require.False(t, comparator.Compare([]byte("aa_key"), []byte("aak"))) require.False(t, comparator.Compare([]byte("aa_key"), []byte("bb"))) } func fakeStorage(t *testing.T) storage.ExternalStorage { baseDir := t.TempDir() s, err := storage.NewLocalStorage(baseDir) require.NoError(t, err) return s } func encodeKey(key string, ts int64) []byte { encodedKey := codec.EncodeBytes([]byte{}, []byte(key)) return codec.EncodeUintDesc(encodedKey, uint64(ts)) } func encodeShortValue(val string, ts int64) []byte { flagShortValuePrefix := byte('v') valBytes := []byte(val) buff := make([]byte, 0, 11+len(valBytes)) buff = append(buff, byte('P')) buff = codec.EncodeUvarint(buff, uint64(ts)) buff = append(buff, flagShortValuePrefix) buff = append(buff, byte(len(valBytes))) buff = append(buff, valBytes...) return buff } type cf struct { key string startTs int64 commitTS int64 val string } func fakeCFs() (defaultCFs, writeCFs []*cf) { defaultCFs = []*cf{ { key: "aa_big_key_1", startTs: time.Now().UnixNano(), val: "aa_big_val_1", }, { key: "bb_big_key_1", startTs: time.Now().UnixNano(), val: "bb_big_val_1", }, { key: "cc_big_key_1", startTs: time.Now().UnixNano(), val: "cc_big_val_1", }, } writeCFs = []*cf{ { key: "aa_small_key_1", startTs: time.Now().UnixNano(), commitTS: time.Now().UnixNano(), val: "aa_small_val_1", }, { key: "aa_big_key_1", startTs: defaultCFs[0].startTs, commitTS: time.Now().UnixNano(), val: "aa_short_val_1", }, { key: "bb_small_key_1", startTs: time.Now().UnixNano(), commitTS: time.Now().UnixNano(), val: "bb_small_val_1", }, { key: "bb_big_key_1", startTs: defaultCFs[1].startTs, commitTS: time.Now().UnixNano(), val: "bb_short_val_1", }, } return } func fakeDataFile(t *testing.T, s storage.ExternalStorage) (defaultCFDataFile, writeCFDataFile *backuppb.DataFileInfo) { const ( defaultCFFile = "default_cf" writeCFFile = "write_cf" ) defaultCFs, writeCFs := fakeCFs() ctx := context.Background() defaultCFBuf := bytes.NewBuffer([]byte{}) for _, defaultCF := range defaultCFs { defaultCFBuf.Write(stream.EncodeKVEntry(encodeKey(defaultCF.key, defaultCF.startTs), []byte(defaultCF.val))) } err := s.WriteFile(ctx, defaultCFFile, defaultCFBuf.Bytes()) require.NoError(t, err) defaultCFCheckSum := sha256.Sum256(defaultCFBuf.Bytes()) defaultCFDataFile = &backuppb.DataFileInfo{ Path: defaultCFFile, Cf: defaultCFName, Sha256: defaultCFCheckSum[:], } writeCFBuf := bytes.NewBuffer([]byte{}) for _, writeCF := range writeCFs { writeCFBuf.Write(stream.EncodeKVEntry(encodeKey(writeCF.key, writeCF.commitTS), encodeShortValue(writeCF.val, writeCF.startTs))) } err = s.WriteFile(ctx, writeCFFile, writeCFBuf.Bytes()) require.NoError(t, err) writeCFCheckSum := sha256.Sum256(writeCFBuf.Bytes()) writeCFDataFile = &backuppb.DataFileInfo{ Path: writeCFFile, Cf: writeCFName, Sha256: writeCFCheckSum[:], } return } func TestSearchFromDataFile(t *testing.T) { s := fakeStorage(t) defaultCFDataFile, writeCFDataFile := fakeDataFile(t, s) comparator := NewStartWithComparator() searchKey := []byte("aa_big_key_1") bs := 
NewStreamBackupSearch(s, comparator, searchKey) ch := make(chan *StreamKVInfo, 16) ctx := context.Background() err := bs.searchFromDataFile(ctx, defaultCFDataFile, ch) require.NoError(t, err) err = bs.searchFromDataFile(ctx, writeCFDataFile, ch) require.NoError(t, err) close(ch) hexSearchKey := strings.ToUpper(hex.EncodeToString(searchKey)) searchKeyCount := 0 for kvEntry := range ch { require.True(t, strings.HasPrefix(kvEntry.Key, hexSearchKey)) searchKeyCount++ } require.Equal(t, 2, searchKeyCount) } func TestMergeCFEntries(t *testing.T) { defaultCFs, writeCFs := fakeCFs() defaultCFEntries := make(map[string]*StreamKVInfo, 8) writeCFEntries := make(map[string]*StreamKVInfo, 8) for _, defaultCF := range defaultCFs { encodedKey := hex.EncodeToString(encodeKey(defaultCF.key, defaultCF.startTs)) defaultCFEntries[encodedKey] = &StreamKVInfo{ Key: hex.EncodeToString([]byte(defaultCF.key)), EncodedKey: encodedKey, StartTs: uint64(defaultCF.startTs), CFName: defaultCFName, Value: defaultCF.val, } } for _, writeCF := range writeCFs { encodedKey := hex.EncodeToString(encodeKey(writeCF.key, writeCF.commitTS)) writeCFEntries[encodedKey] = &StreamKVInfo{ Key: hex.EncodeToString([]byte(writeCF.key)), EncodedKey: encodedKey, StartTs: uint64(writeCF.startTs), CommitTs: uint64(writeCF.commitTS), CFName: writeCFName, Value: writeCF.val, } } s := fakeStorage(t) comparator := NewStartWithComparator() bs := NewStreamBackupSearch(s, comparator, []byte{}) kvEntries := bs.mergeCFEntries(defaultCFEntries, writeCFEntries) require.Equal(t, len(writeCFs)+1, len(kvEntries)) }
br/pkg/restore/search_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00025687439483590424, 0.0001793117553461343, 0.00016573081666138023, 0.0001730015646899119, 0.00002415032395219896 ]
{ "id": 1, "code_window": [ "\tintest.AssertNotNil(tz)\n", "\tsc := &StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t}\n", "\tsc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc)\n", "\treturn sc\n", "}\n", "\n", "// Reset resets a statement context\n", "func (sc *StatementContext) Reset() {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 444 }
// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cbotest import ( "flag" "testing" "github.com/pingcap/tidb/pkg/testkit/testdata" "github.com/pingcap/tidb/pkg/testkit/testmain" "github.com/pingcap/tidb/pkg/testkit/testsetup" "go.uber.org/goleak" ) var testDataMap = make(testdata.BookKeeper) func TestMain(m *testing.M) { testsetup.SetupForCommonTest() flag.Parse() testDataMap.LoadTestSuiteData("testdata", "analyze_suite") opts := []goleak.Option{ goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"), goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), goleak.IgnoreTopFunction("gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun"), goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"), goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), } callback := func(i int) int { testDataMap.GenerateOutputIfNeeded() return i } goleak.VerifyTestMain(testmain.WrapTestingM(m, callback), opts...) } func GetAnalyzeSuiteData() testdata.TestData { return testDataMap["analyze_suite"] }
pkg/planner/core/casetest/cbotest/main_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.0001792472758097574, 0.00017519656103104353, 0.0001705966715235263, 0.00017561507411301136, 0.000002921784016507445 ]
{ "id": 1, "code_window": [ "\tintest.AssertNotNil(tz)\n", "\tsc := &StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t}\n", "\tsc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc)\n", "\treturn sc\n", "}\n", "\n", "// Reset resets a statement context\n", "func (sc *StatementContext) Reset() {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 444 }
// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "bytes" "encoding/json" "io" "time" "github.com/klauspost/compress/gzip" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/types" compressutil "github.com/pingcap/tidb/pkg/util/compress" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/memory" "go.uber.org/zap" ) func dumpJSONExtendedStats(statsColl *statistics.ExtendedStatsColl) []*util.JSONExtendedStats { if statsColl == nil || len(statsColl.Stats) == 0 { return nil } stats := make([]*util.JSONExtendedStats, 0, len(statsColl.Stats)) for name, item := range statsColl.Stats { js := &util.JSONExtendedStats{ StatsName: name, ColIDs: item.ColIDs, Tp: item.Tp, ScalarVals: item.ScalarVals, StringVals: item.StringVals, } stats = append(stats, js) } return stats } func extendedStatsFromJSON(statsColl []*util.JSONExtendedStats) *statistics.ExtendedStatsColl { if len(statsColl) == 0 { return nil } stats := statistics.NewExtendedStatsColl() for _, js := range statsColl { item := &statistics.ExtendedStatsItem{ ColIDs: js.ColIDs, Tp: js.Tp, ScalarVals: js.ScalarVals, StringVals: js.StringVals, } stats.Stats[js.StatsName] = item } return stats } func dumpJSONCol(hist *statistics.Histogram, cmsketch *statistics.CMSketch, topn *statistics.TopN, fmsketch *statistics.FMSketch, statsVer *int64) *util.JSONColumn { jsonCol := &util.JSONColumn{ Histogram: statistics.HistogramToProto(hist), NullCount: hist.NullCount, TotColSize: hist.TotColSize, LastUpdateVersion: hist.LastUpdateVersion, Correlation: hist.Correlation, StatsVer: statsVer, } if cmsketch != nil || topn != nil { jsonCol.CMSketch = statistics.CMSketchToProto(cmsketch, topn) } if fmsketch != nil { jsonCol.FMSketch = statistics.FMSketchToProto(fmsketch) } return jsonCol } // GenJSONTableFromStats generate jsonTable from tableInfo and stats func GenJSONTableFromStats(sctx sessionctx.Context, dbName string, tableInfo *model.TableInfo, tbl *statistics.Table) (*util.JSONTable, error) { tracker := memory.NewTracker(memory.LabelForAnalyzeMemory, -1) tracker.AttachTo(sctx.GetSessionVars().MemTracker) defer tracker.Detach() jsonTbl := &util.JSONTable{ DatabaseName: dbName, TableName: tableInfo.Name.L, Columns: make(map[string]*util.JSONColumn, len(tbl.Columns)), Indices: make(map[string]*util.JSONColumn, len(tbl.Indices)), Count: tbl.RealtimeCount, ModifyCount: tbl.ModifyCount, Version: tbl.Version, } for _, col := range tbl.Columns { sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) hist, err := col.ConvertTo(sc, types.NewFieldType(mysql.TypeBlob)) if err != nil { return nil, errors.Trace(err) } proto := dumpJSONCol(hist, col.CMSketch, col.TopN, col.FMSketch, &col.StatsVer) 
tracker.Consume(proto.TotalMemoryUsage()) if err := sctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { return nil, err } jsonTbl.Columns[col.Info.Name.L] = proto col.FMSketch.DestroyAndPutToPool() hist.DestroyAndPutToPool() } for _, idx := range tbl.Indices { proto := dumpJSONCol(&idx.Histogram, idx.CMSketch, idx.TopN, nil, &idx.StatsVer) tracker.Consume(proto.TotalMemoryUsage()) if err := sctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { return nil, err } jsonTbl.Indices[idx.Info.Name.L] = proto } jsonTbl.ExtStats = dumpJSONExtendedStats(tbl.ExtendedStats) return jsonTbl, nil } // TableStatsFromJSON loads statistic from JSONTable and return the Table of statistic. func TableStatsFromJSON(tableInfo *model.TableInfo, physicalID int64, jsonTbl *util.JSONTable) (*statistics.Table, error) { newHistColl := statistics.HistColl{ PhysicalID: physicalID, HavePhysicalID: true, RealtimeCount: jsonTbl.Count, ModifyCount: jsonTbl.ModifyCount, Columns: make(map[int64]*statistics.Column, len(jsonTbl.Columns)), Indices: make(map[int64]*statistics.Index, len(jsonTbl.Indices)), } tbl := &statistics.Table{ HistColl: newHistColl, } for id, jsonIdx := range jsonTbl.Indices { for _, idxInfo := range tableInfo.Indices { if idxInfo.Name.L != id { continue } hist := statistics.HistogramFromProto(jsonIdx.Histogram) hist.ID, hist.NullCount, hist.LastUpdateVersion, hist.Correlation = idxInfo.ID, jsonIdx.NullCount, jsonIdx.LastUpdateVersion, jsonIdx.Correlation cm, topN := statistics.CMSketchAndTopNFromProto(jsonIdx.CMSketch) statsVer := int64(statistics.Version0) if jsonIdx.StatsVer != nil { statsVer = *jsonIdx.StatsVer } else if jsonIdx.Histogram.Ndv > 0 || jsonIdx.NullCount > 0 { // If the statistics are collected without setting stats version(which happens in v4.0 and earlier versions), // we set it to 1. statsVer = int64(statistics.Version1) } idx := &statistics.Index{ Histogram: *hist, CMSketch: cm, TopN: topN, Info: idxInfo, StatsVer: statsVer, PhysicalID: physicalID, StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } tbl.Indices[idx.ID] = idx } } for id, jsonCol := range jsonTbl.Columns { for _, colInfo := range tableInfo.Columns { if colInfo.Name.L != id { continue } hist := statistics.HistogramFromProto(jsonCol.Histogram) sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) tmpFT := colInfo.FieldType // For new collation data, when storing the bounds of the histogram, we store the collate key instead of the // original value. // But there's additional conversion logic for new collation data, and the collate key might be longer than // the FieldType.flen. // If we use the original FieldType here, there might be errors like "Invalid utf8mb4 character string" // or "Data too long". // So we change it to TypeBlob to bypass those logics here. 
if colInfo.FieldType.EvalType() == types.ETString && colInfo.FieldType.GetType() != mysql.TypeEnum && colInfo.FieldType.GetType() != mysql.TypeSet { tmpFT = *types.NewFieldType(mysql.TypeBlob) } hist, err := hist.ConvertTo(sc, &tmpFT) if err != nil { return nil, errors.Trace(err) } cm, topN := statistics.CMSketchAndTopNFromProto(jsonCol.CMSketch) fms := statistics.FMSketchFromProto(jsonCol.FMSketch) hist.ID, hist.NullCount, hist.LastUpdateVersion, hist.TotColSize, hist.Correlation = colInfo.ID, jsonCol.NullCount, jsonCol.LastUpdateVersion, jsonCol.TotColSize, jsonCol.Correlation statsVer := int64(statistics.Version0) if jsonCol.StatsVer != nil { statsVer = *jsonCol.StatsVer } else if jsonCol.Histogram.Ndv > 0 || jsonCol.NullCount > 0 { // If the statistics are collected without setting stats version(which happens in v4.0 and earlier versions), // we set it to 1. statsVer = int64(statistics.Version1) } col := &statistics.Column{ PhysicalID: physicalID, Histogram: *hist, CMSketch: cm, TopN: topN, FMSketch: fms, Info: colInfo, IsHandle: tableInfo.PKIsHandle && mysql.HasPriKeyFlag(colInfo.GetFlag()), StatsVer: statsVer, StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } tbl.Columns[col.ID] = col } } tbl.ExtendedStats = extendedStatsFromJSON(jsonTbl.ExtStats) return tbl, nil } // JSONTableToBlocks convert JSONTable to json, then compresses it to blocks by gzip. func JSONTableToBlocks(jsTable *util.JSONTable, blockSize int) ([][]byte, error) { data, err := json.Marshal(jsTable) if err != nil { return nil, errors.Trace(err) } var gzippedData bytes.Buffer gzipWriter := compressutil.GzipWriterPool.Get().(*gzip.Writer) defer compressutil.GzipWriterPool.Put(gzipWriter) gzipWriter.Reset(&gzippedData) if _, err := gzipWriter.Write(data); err != nil { return nil, errors.Trace(err) } if err := gzipWriter.Close(); err != nil { return nil, errors.Trace(err) } blocksNum := gzippedData.Len() / blockSize if gzippedData.Len()%blockSize != 0 { blocksNum = blocksNum + 1 } blocks := make([][]byte, blocksNum) for i := 0; i < blocksNum-1; i++ { blocks[i] = gzippedData.Bytes()[blockSize*i : blockSize*(i+1)] } blocks[blocksNum-1] = gzippedData.Bytes()[blockSize*(blocksNum-1):] return blocks, nil } // BlocksToJSONTable convert gzip-compressed blocks to JSONTable func BlocksToJSONTable(blocks [][]byte) (*util.JSONTable, error) { if len(blocks) == 0 { return nil, errors.New("Block empty error") } data := blocks[0] for i := 1; i < len(blocks); i++ { data = append(data, blocks[i]...) } gzippedData := bytes.NewReader(data) gzipReader := compressutil.GzipReaderPool.Get().(*gzip.Reader) if err := gzipReader.Reset(gzippedData); err != nil { compressutil.GzipReaderPool.Put(gzipReader) return nil, err } defer func() { compressutil.GzipReaderPool.Put(gzipReader) }() if err := gzipReader.Close(); err != nil { return nil, err } jsonStr, err := io.ReadAll(gzipReader) if err != nil { return nil, errors.Trace(err) } jsonTbl := util.JSONTable{} err = json.Unmarshal(jsonStr, &jsonTbl) if err != nil { return nil, errors.Trace(err) } return &jsonTbl, nil } // TableHistoricalStatsToJSON converts the historical stats of a table to JSONTable. func TableHistoricalStatsToJSON(sctx sessionctx.Context, physicalID int64, snapshot uint64) (jt *util.JSONTable, exist bool, err error) { // get meta version rows, _, err := util.ExecRows(sctx, "select distinct version from mysql.stats_meta_history where table_id = %? and version <= %? 
order by version desc limit 1", physicalID, snapshot) if err != nil { return nil, false, errors.AddStack(err) } if len(rows) < 1 { logutil.BgLogger().Warn("failed to get records of stats_meta_history", zap.Int64("table-id", physicalID), zap.Uint64("snapshotTS", snapshot)) return nil, false, nil } statsMetaVersion := rows[0].GetInt64(0) // get stats meta rows, _, err = util.ExecRows(sctx, "select modify_count, count from mysql.stats_meta_history where table_id = %? and version = %?", physicalID, statsMetaVersion) if err != nil { return nil, false, errors.AddStack(err) } modifyCount, count := rows[0].GetInt64(0), rows[0].GetInt64(1) // get stats version rows, _, err = util.ExecRows(sctx, "select distinct version from mysql.stats_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) if err != nil { return nil, false, errors.AddStack(err) } if len(rows) < 1 { logutil.BgLogger().Warn("failed to get record of stats_history", zap.Int64("table-id", physicalID), zap.Uint64("snapshotTS", snapshot)) return nil, false, nil } statsVersion := rows[0].GetInt64(0) // get stats rows, _, err = util.ExecRows(sctx, "select stats_data from mysql.stats_history where table_id = %? and version = %? order by seq_no", physicalID, statsVersion) if err != nil { return nil, false, errors.AddStack(err) } blocks := make([][]byte, 0) for _, row := range rows { blocks = append(blocks, row.GetBytes(0)) } jsonTbl, err := BlocksToJSONTable(blocks) if err != nil { return nil, false, errors.AddStack(err) } jsonTbl.Count = count jsonTbl.ModifyCount = modifyCount jsonTbl.IsHistoricalStats = true return jsonTbl, true, nil }
pkg/statistics/handle/storage/json.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.9787524342536926, 0.029286479577422142, 0.00016639442765153944, 0.00017458145157434046, 0.16528426110744476 ]
{ "id": 2, "code_window": [ "func (sc *StatementContext) Reset() {\n", "\t*sc = StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t\ttypeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc),\n", "\t}\n", "}\n", "\n", "// CtxID returns the context id of the statement\n", "func (sc *StatementContext) CtxID() uint64 {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 453 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "stmtctx", srcs = ["stmtctx.go"], importpath = "github.com/pingcap/tidb/pkg/sessionctx/stmtctx", visibility = ["//visibility:public"], deps = [ "//pkg/domain/resourcegroup", "//pkg/errctx", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/types", "//pkg/util/disk", "//pkg/util/execdetails", "//pkg/util/intest", "//pkg/util/linter/constructor", "//pkg/util/memory", "//pkg/util/nocopy", "//pkg/util/resourcegrouptag", "//pkg/util/topsql/stmtstats", "//pkg/util/tracing", "@com_github_pingcap_errors//:errors", "@com_github_tikv_client_go_v2//tikvrpc", "@com_github_tikv_client_go_v2//util", "@org_golang_x_exp//maps", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], ) go_test( name = "stmtctx_test", timeout = "short", srcs = [ "main_test.go", "stmtctx_test.go", ], embed = [":stmtctx"], flaky = True, shard_count = 11, deps = [ "//pkg/kv", "//pkg/sessionctx/variable", "//pkg/testkit", "//pkg/testkit/testsetup", "//pkg/types", "//pkg/util/execdetails", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//util", "@org_uber_go_goleak//:goleak", ], )
pkg/sessionctx/stmtctx/BUILD.bazel
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.0002256937586935237, 0.00018205858941655606, 0.00016872771084308624, 0.00017486183787696064, 0.00001968816650332883 ]
{ "id": 2, "code_window": [ "func (sc *StatementContext) Reset() {\n", "\t*sc = StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t\ttypeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc),\n", "\t}\n", "}\n", "\n", "// CtxID returns the context id of the statement\n", "func (sc *StatementContext) CtxID() uint64 {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 453 }
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggfuncs import ( "encoding/binary" "math" "unsafe" "github.com/dgryski/go-farm" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/set" "github.com/pingcap/tidb/pkg/util/stringutil" ) const ( // DefPartialResult4CountDistinctIntSize is the size of partialResult4CountDistinctInt DefPartialResult4CountDistinctIntSize = int64(unsafe.Sizeof(partialResult4CountDistinctInt{})) // DefPartialResult4CountDistinctRealSize is the size of partialResult4CountDistinctReal DefPartialResult4CountDistinctRealSize = int64(unsafe.Sizeof(partialResult4CountDistinctReal{})) // DefPartialResult4CountDistinctDecimalSize is the size of partialResult4CountDistinctDecimal DefPartialResult4CountDistinctDecimalSize = int64(unsafe.Sizeof(partialResult4CountDistinctDecimal{})) // DefPartialResult4CountDistinctDurationSize is the size of partialResult4CountDistinctDuration DefPartialResult4CountDistinctDurationSize = int64(unsafe.Sizeof(partialResult4CountDistinctDuration{})) // DefPartialResult4CountDistinctStringSize is the size of partialResult4CountDistinctString DefPartialResult4CountDistinctStringSize = int64(unsafe.Sizeof(partialResult4CountDistinctString{})) // DefPartialResult4CountWithDistinctSize is the size of partialResult4CountWithDistinct DefPartialResult4CountWithDistinctSize = int64(unsafe.Sizeof(partialResult4CountWithDistinct{})) // DefPartialResult4ApproxCountDistinctSize is the size of partialResult4ApproxCountDistinct DefPartialResult4ApproxCountDistinctSize = int64(unsafe.Sizeof(partialResult4ApproxCountDistinct{})) ) type partialResult4CountDistinctInt struct { valSet set.Int64SetWithMemoryUsage } type countOriginalWithDistinct4Int struct { baseCount } func (*countOriginalWithDistinct4Int) AllocPartialResult() (pr PartialResult, memDelta int64) { valSet, setSize := set.NewInt64SetWithMemoryUsage() return PartialResult(&partialResult4CountDistinctInt{ valSet: valSet, }), DefPartialResult4CountDistinctIntSize + setSize } func (*countOriginalWithDistinct4Int) ResetPartialResult(pr PartialResult) { p := (*partialResult4CountDistinctInt)(pr) p.valSet, _ = set.NewInt64SetWithMemoryUsage() } func (e *countOriginalWithDistinct4Int) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4CountDistinctInt)(pr) chk.AppendInt64(e.ordinal, int64(p.valSet.Count())) return nil } func (e *countOriginalWithDistinct4Int) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4CountDistinctInt)(pr) for _, row := range rowsInGroup { input, isNull, err := e.args[0].EvalInt(sctx, row) if err != nil { return 
memDelta, err } if isNull { continue } if p.valSet.Exist(input) { continue } memDelta += p.valSet.Insert(input) } return memDelta, nil } type partialResult4CountDistinctReal struct { valSet set.Float64SetWithMemoryUsage } type countOriginalWithDistinct4Real struct { baseCount } func (*countOriginalWithDistinct4Real) AllocPartialResult() (pr PartialResult, memDelta int64) { valSet, setSize := set.NewFloat64SetWithMemoryUsage() return PartialResult(&partialResult4CountDistinctReal{ valSet: valSet, }), DefPartialResult4CountDistinctRealSize + setSize } func (*countOriginalWithDistinct4Real) ResetPartialResult(pr PartialResult) { p := (*partialResult4CountDistinctReal)(pr) p.valSet, _ = set.NewFloat64SetWithMemoryUsage() } func (e *countOriginalWithDistinct4Real) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4CountDistinctReal)(pr) chk.AppendInt64(e.ordinal, int64(p.valSet.Count())) return nil } func (e *countOriginalWithDistinct4Real) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4CountDistinctReal)(pr) for _, row := range rowsInGroup { input, isNull, err := e.args[0].EvalReal(sctx, row) if err != nil { return memDelta, err } if isNull { continue } if p.valSet.Exist(input) { continue } memDelta += p.valSet.Insert(input) } return memDelta, nil } type partialResult4CountDistinctDecimal struct { valSet set.StringSetWithMemoryUsage } type countOriginalWithDistinct4Decimal struct { baseCount } func (*countOriginalWithDistinct4Decimal) AllocPartialResult() (pr PartialResult, memDelta int64) { valSet, setSize := set.NewStringSetWithMemoryUsage() return PartialResult(&partialResult4CountDistinctDecimal{ valSet: valSet, }), DefPartialResult4CountDistinctDecimalSize + setSize } func (*countOriginalWithDistinct4Decimal) ResetPartialResult(pr PartialResult) { p := (*partialResult4CountDistinctDecimal)(pr) p.valSet, _ = set.NewStringSetWithMemoryUsage() } func (e *countOriginalWithDistinct4Decimal) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4CountDistinctDecimal)(pr) chk.AppendInt64(e.ordinal, int64(p.valSet.Count())) return nil } func (e *countOriginalWithDistinct4Decimal) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4CountDistinctDecimal)(pr) for _, row := range rowsInGroup { input, isNull, err := e.args[0].EvalDecimal(sctx, row) if err != nil { return memDelta, err } if isNull { continue } hash, err := input.ToHashKey() if err != nil { return memDelta, err } decStr := string(hack.String(hash)) if p.valSet.Exist(decStr) { continue } memDelta += p.valSet.Insert(decStr) memDelta += int64(len(decStr)) } return memDelta, nil } type partialResult4CountDistinctDuration struct { valSet set.Int64SetWithMemoryUsage } type countOriginalWithDistinct4Duration struct { baseCount } func (*countOriginalWithDistinct4Duration) AllocPartialResult() (pr PartialResult, memDelta int64) { valSet, setSize := set.NewInt64SetWithMemoryUsage() return PartialResult(&partialResult4CountDistinctDuration{ valSet: valSet, }), DefPartialResult4CountDistinctDurationSize + setSize } func (*countOriginalWithDistinct4Duration) ResetPartialResult(pr PartialResult) { p := (*partialResult4CountDistinctDuration)(pr) p.valSet, _ = set.NewInt64SetWithMemoryUsage() } func (e *countOriginalWithDistinct4Duration) 
AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4CountDistinctDuration)(pr) chk.AppendInt64(e.ordinal, int64(p.valSet.Count())) return nil } func (e *countOriginalWithDistinct4Duration) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4CountDistinctDuration)(pr) for _, row := range rowsInGroup { input, isNull, err := e.args[0].EvalDuration(sctx, row) if err != nil { return memDelta, err } if isNull { continue } if p.valSet.Exist(int64(input.Duration)) { continue } memDelta += p.valSet.Insert(int64(input.Duration)) } return memDelta, nil } type partialResult4CountDistinctString struct { valSet set.StringSetWithMemoryUsage } type countOriginalWithDistinct4String struct { baseCount } func (*countOriginalWithDistinct4String) AllocPartialResult() (pr PartialResult, memDelta int64) { valSet, setSize := set.NewStringSetWithMemoryUsage() return PartialResult(&partialResult4CountDistinctString{ valSet: valSet, }), DefPartialResult4CountDistinctStringSize + setSize } func (*countOriginalWithDistinct4String) ResetPartialResult(pr PartialResult) { p := (*partialResult4CountDistinctString)(pr) p.valSet, _ = set.NewStringSetWithMemoryUsage() } func (e *countOriginalWithDistinct4String) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4CountDistinctString)(pr) chk.AppendInt64(e.ordinal, int64(p.valSet.Count())) return nil } func (e *countOriginalWithDistinct4String) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4CountDistinctString)(pr) collator := collate.GetCollator(e.args[0].GetType().GetCollate()) for _, row := range rowsInGroup { input, isNull, err := e.args[0].EvalString(sctx, row) if err != nil { return memDelta, err } if isNull { continue } input = string(collator.Key(input)) if p.valSet.Exist(input) { continue } input = stringutil.Copy(input) memDelta += p.valSet.Insert(input) memDelta += int64(len(input)) } return memDelta, nil } type countOriginalWithDistinct struct { baseCount } type partialResult4CountWithDistinct struct { valSet set.StringSetWithMemoryUsage } func (*countOriginalWithDistinct) AllocPartialResult() (pr PartialResult, memDelta int64) { valSet, setSize := set.NewStringSetWithMemoryUsage() return PartialResult(&partialResult4CountWithDistinct{ valSet: valSet, }), DefPartialResult4CountWithDistinctSize + setSize } func (*countOriginalWithDistinct) ResetPartialResult(pr PartialResult) { p := (*partialResult4CountWithDistinct)(pr) p.valSet, _ = set.NewStringSetWithMemoryUsage() } func (e *countOriginalWithDistinct) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4CountWithDistinct)(pr) chk.AppendInt64(e.ordinal, int64(p.valSet.Count())) return nil } func (e *countOriginalWithDistinct) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4CountWithDistinct)(pr) encodedBytes := make([]byte, 0) collators := make([]collate.Collator, 0, len(e.args)) for _, arg := range e.args { collators = append(collators, collate.GetCollator(arg.GetType().GetCollate())) } // decimal struct is the biggest type we will use. 
buf := make([]byte, types.MyDecimalStructSize) for _, row := range rowsInGroup { var err error var hasNull, isNull bool encodedBytes = encodedBytes[:0] for i := 0; i < len(e.args) && !hasNull; i++ { encodedBytes, isNull, err = evalAndEncode(sctx, e.args[i], collators[i], row, buf, encodedBytes) if err != nil { return memDelta, err } if isNull { hasNull = true break } } encodedString := string(encodedBytes) if hasNull || p.valSet.Exist(encodedString) { continue } memDelta += p.valSet.Insert(encodedString) memDelta += int64(len(encodedString)) } return memDelta, nil } // evalAndEncode eval one row with an expression and encode value to bytes. func evalAndEncode( sctx sessionctx.Context, arg expression.Expression, collator collate.Collator, row chunk.Row, buf, encodedBytes []byte, ) (_ []byte, isNull bool, err error) { switch tp := arg.GetType().EvalType(); tp { case types.ETInt: var val int64 val, isNull, err = arg.EvalInt(sctx, row) if err != nil || isNull { break } encodedBytes = appendInt64(encodedBytes, buf, val) case types.ETReal: var val float64 val, isNull, err = arg.EvalReal(sctx, row) if err != nil || isNull { break } encodedBytes = appendFloat64(encodedBytes, buf, val) case types.ETDecimal: var val *types.MyDecimal val, isNull, err = arg.EvalDecimal(sctx, row) if err != nil || isNull { break } encodedBytes, err = appendDecimal(encodedBytes, val) case types.ETTimestamp, types.ETDatetime: var val types.Time val, isNull, err = arg.EvalTime(sctx, row) if err != nil || isNull { break } encodedBytes = appendTime(encodedBytes, buf, val) case types.ETDuration: var val types.Duration val, isNull, err = arg.EvalDuration(sctx, row) if err != nil || isNull { break } encodedBytes = appendDuration(encodedBytes, buf, val) case types.ETJson: var val types.BinaryJSON val, isNull, err = arg.EvalJSON(sctx, row) if err != nil || isNull { break } encodedBytes = val.HashValue(encodedBytes) case types.ETString: var val string val, isNull, err = arg.EvalString(sctx, row) if err != nil || isNull { break } encodedBytes = codec.EncodeCompactBytes(encodedBytes, collator.Key(val)) default: return nil, false, errors.Errorf("unsupported column type for encode %d", tp) } return encodedBytes, isNull, err } func appendInt64(encodedBytes, buf []byte, val int64) []byte { *(*int64)(unsafe.Pointer(&buf[0])) = val buf = buf[:8] encodedBytes = append(encodedBytes, buf...) return encodedBytes } func appendFloat64(encodedBytes, buf []byte, val float64) []byte { *(*float64)(unsafe.Pointer(&buf[0])) = val buf = buf[:8] encodedBytes = append(encodedBytes, buf...) return encodedBytes } func appendDecimal(encodedBytes []byte, val *types.MyDecimal) ([]byte, error) { hash, err := val.ToHashKey() encodedBytes = append(encodedBytes, hash...) return encodedBytes, err } // WriteTime writes `t` into `buf`. func WriteTime(buf []byte, t types.Time) { binary.BigEndian.PutUint16(buf, uint16(t.Year())) buf[2] = uint8(t.Month()) buf[3] = uint8(t.Day()) buf[4] = uint8(t.Hour()) buf[5] = uint8(t.Minute()) buf[6] = uint8(t.Second()) binary.BigEndian.PutUint32(buf[8:], uint32(t.Microsecond())) buf[12] = t.Type() buf[13] = uint8(t.Fsp()) buf[7], buf[14], buf[15] = uint8(0), uint8(0), uint8(0) } func appendTime(encodedBytes, buf []byte, val types.Time) []byte { WriteTime(buf, val) buf = buf[:16] encodedBytes = append(encodedBytes, buf...) return encodedBytes } func appendDuration(encodedBytes, buf []byte, val types.Duration) []byte { *(*types.Duration)(unsafe.Pointer(&buf[0])) = val buf = buf[:16] encodedBytes = append(encodedBytes, buf...) 
return encodedBytes } func intHash64(x uint64) uint64 { x ^= x >> 33 x *= 0xff51afd7ed558ccd x ^= x >> 33 x *= 0xc4ceb9fe1a85ec53 x ^= x >> 33 return x } type baseApproxCountDistinct struct { baseAggFunc } const ( // The maximum degree of buffer size before the values are discarded uniquesHashMaxSizeDegree uint8 = 17 // The maximum number of elements before the values are discarded uniquesHashMaxSize = uint32(1) << (uniquesHashMaxSizeDegree - 1) // Initial buffer size degree uniquesHashSetInitialSizeDegree uint8 = 4 // The number of least significant bits used for thinning. The remaining high-order bits are used to determine the position in the hash table. uniquesHashBitsForSkip = 32 - uniquesHashMaxSizeDegree ) type approxCountDistinctHashValue uint32 // partialResult4ApproxCountDistinct use `BJKST` algorithm to compute approximate result of count distinct. // According to an experimental survey http://www.vldb.org/pvldb/vol11/p499-harmouch.pdf, the error guarantee of BJKST // was even better than the theoretical lower bounds. // For the calculation state, it uses a sample of element hash values with a size up to uniquesHashMaxSize. Compared // with the widely known HyperLogLog algorithm, this algorithm is less effective in terms of accuracy and // memory consumption (even up to proportionality), but it is adaptive. This means that with fairly high accuracy, it // consumes less memory during simultaneous computation of cardinality for a large number of data sets whose cardinality // has power law distribution (i.e. in cases when most of the data sets are small). // This algorithm is also very accurate for data sets with small cardinality and very efficient on CPU. If number of // distinct element is more than 2^32, relative error may be high. type partialResult4ApproxCountDistinct struct { size uint32 // Number of elements. sizeDegree uint8 // The size of the table as a power of 2. skipDegree uint8 // Skip elements not divisible by 2 ^ skipDegree. hasZero bool // The hash table contains an element with a hash value of 0. buf []approxCountDistinctHashValue } // NewPartialResult4ApproxCountDistinct builds a partial result for agg function ApproxCountDistinct. func NewPartialResult4ApproxCountDistinct() *partialResult4ApproxCountDistinct { p := &partialResult4ApproxCountDistinct{} p.reset() return p } func (p *partialResult4ApproxCountDistinct) InsertHash64(x uint64) { // no need to rehash, just cast into uint32 p.insertHash(approxCountDistinctHashValue(x)) } func (p *partialResult4ApproxCountDistinct) MemUsage() int64 { return int64(len(p.buf)) * DefUint32Size } func (p *partialResult4ApproxCountDistinct) alloc(newSizeDegree uint8) { p.size = 0 p.skipDegree = 0 p.hasZero = false p.buf = make([]approxCountDistinctHashValue, uint32(1)<<newSizeDegree) p.sizeDegree = newSizeDegree } func (p *partialResult4ApproxCountDistinct) reset() { p.alloc(uniquesHashSetInitialSizeDegree) } func (p *partialResult4ApproxCountDistinct) bufSize() uint32 { return uint32(1) << p.sizeDegree } func (p *partialResult4ApproxCountDistinct) mask() uint32 { return p.bufSize() - 1 } func (p *partialResult4ApproxCountDistinct) place(x approxCountDistinctHashValue) uint32 { return uint32(x>>uniquesHashBitsForSkip) & p.mask() } // Increase the size of the buffer 2 times or up to new size degree. 
func (p *partialResult4ApproxCountDistinct) resize(newSizeDegree uint8) { oldSize := p.bufSize() oldBuf := p.buf if 0 == newSizeDegree { newSizeDegree = p.sizeDegree + 1 } p.buf = make([]approxCountDistinctHashValue, uint32(1)<<newSizeDegree) p.sizeDegree = newSizeDegree // Move some items to new locations. for i := uint32(0); i < oldSize; i++ { x := oldBuf[i] if x != 0 { p.reinsertImpl(x) } } } func (p *partialResult4ApproxCountDistinct) readAndMerge(rb []byte) error { rhsSkipDegree := rb[0] rb = rb[1:] if rhsSkipDegree > p.skipDegree { p.skipDegree = rhsSkipDegree p.rehash() } rb, rhsSize, err := codec.DecodeUvarint(rb) if err != nil { return err } if rhsSize > uint64(uniquesHashMaxSize) { return errors.New("Cannot read partialResult4ApproxCountDistinct: too large size degree") } if p.bufSize() < uint32(rhsSize) { newSizeDegree := max(uniquesHashSetInitialSizeDegree, uint8(math.Log2(float64(rhsSize-1)))+2) p.resize(newSizeDegree) } for i := uint32(0); i < uint32(rhsSize); i++ { x := *(*approxCountDistinctHashValue)(unsafe.Pointer(&rb[0])) rb = rb[4:] p.insertHash(x) } return err } // Correct system errors due to collisions during hashing in uint32. func (p *partialResult4ApproxCountDistinct) fixedSize() uint64 { if 0 == p.skipDegree { return uint64(p.size) } res := uint64(p.size) * (uint64(1) << p.skipDegree) // Pseudo-random remainder. res += intHash64(uint64(p.size)) & ((uint64(1) << p.skipDegree) - 1) // When different elements randomly scattered across 2^32 buckets, filled buckets with average of `res` obtained. p32 := uint64(1) << 32 fixedRes := math.Round(float64(p32) * (math.Log(float64(p32)) - math.Log(float64(p32-res)))) return uint64(fixedRes) } func (p *partialResult4ApproxCountDistinct) insertHash(hashValue approxCountDistinctHashValue) { if !p.good(hashValue) { return } p.insertImpl(hashValue) p.shrinkIfNeed() } // The value is divided by 2 ^ skip_degree func (p *partialResult4ApproxCountDistinct) good(hash approxCountDistinctHashValue) bool { return hash == ((hash >> p.skipDegree) << p.skipDegree) } // Insert a value func (p *partialResult4ApproxCountDistinct) insertImpl(x approxCountDistinctHashValue) { if x == 0 { if !p.hasZero { p.size++ } p.hasZero = true return } placeValue := p.place(x) for p.buf[placeValue] != 0 && p.buf[placeValue] != x { placeValue++ placeValue &= p.mask() } if p.buf[placeValue] == x { return } p.buf[placeValue] = x p.size++ } // If the hash table is full enough, then do resize. // If there are too many items, then throw half the pieces until they are small enough. func (p *partialResult4ApproxCountDistinct) shrinkIfNeed() { if p.size > p.maxFill() { if p.size > uniquesHashMaxSize { for p.size > uniquesHashMaxSize { p.skipDegree++ p.rehash() } } else { p.resize(0) } } } func (p *partialResult4ApproxCountDistinct) maxFill() uint32 { return uint32(1) << (p.sizeDegree - 1) } // Delete all values whose hashes do not divide by 2 ^ skip_degree func (p *partialResult4ApproxCountDistinct) rehash() { for i := uint32(0); i < p.bufSize(); i++ { if p.buf[i] != 0 && !p.good(p.buf[i]) { p.buf[i] = 0 p.size-- } } for i := uint32(0); i < p.bufSize(); i++ { if p.buf[i] != 0 && i != p.place(p.buf[i]) { x := p.buf[i] p.buf[i] = 0 p.reinsertImpl(x) } } } // Insert a value into the new buffer that was in the old buffer. // Used when increasing the size of the buffer, as well as when reading from a file. 
func (p *partialResult4ApproxCountDistinct) reinsertImpl(x approxCountDistinctHashValue) { placeValue := p.place(x) for p.buf[placeValue] != 0 { placeValue++ placeValue &= p.mask() } p.buf[placeValue] = x } func (p *partialResult4ApproxCountDistinct) merge(tar *partialResult4ApproxCountDistinct) { if tar.skipDegree > p.skipDegree { p.skipDegree = tar.skipDegree p.rehash() } if !p.hasZero && tar.hasZero { p.hasZero = true p.size++ p.shrinkIfNeed() } for i := uint32(0); i < tar.bufSize(); i++ { if tar.buf[i] != 0 && p.good(tar.buf[i]) { p.insertImpl(tar.buf[i]) p.shrinkIfNeed() } } } func (p *partialResult4ApproxCountDistinct) Serialize() []byte { var buf [4]byte res := make([]byte, 0, 1+binary.MaxVarintLen64+p.size*4) res = append(res, p.skipDegree) res = codec.EncodeUvarint(res, uint64(p.size)) if p.hasZero { binary.LittleEndian.PutUint32(buf[:], 0) res = append(res, buf[:]...) } for i := uint32(0); i < p.bufSize(); i++ { if p.buf[i] != 0 { binary.LittleEndian.PutUint32(buf[:], uint32(p.buf[i])) res = append(res, buf[:]...) } } return res } func (e *baseApproxCountDistinct) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4ApproxCountDistinct)(pr) chk.AppendInt64(e.ordinal, int64(p.fixedSize())) return nil } func (*baseApproxCountDistinct) AllocPartialResult() (pr PartialResult, memDelta int64) { return (PartialResult)(NewPartialResult4ApproxCountDistinct()), DefPartialResult4ApproxCountDistinctSize } func (*baseApproxCountDistinct) ResetPartialResult(pr PartialResult) { p := (*partialResult4ApproxCountDistinct)(pr) p.reset() } func (*baseApproxCountDistinct) MergePartialResult(_ sessionctx.Context, src, dst PartialResult) (memDelta int64, err error) { p1, p2 := (*partialResult4ApproxCountDistinct)(src), (*partialResult4ApproxCountDistinct)(dst) p2.merge(p1) return 0, nil } type approxCountDistinctOriginal struct { baseApproxCountDistinct } func (e *approxCountDistinctOriginal) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4ApproxCountDistinct)(pr) encodedBytes := make([]byte, 0) // decimal struct is the biggest type we will use. 
buf := make([]byte, types.MyDecimalStructSize) collators := make([]collate.Collator, 0, len(e.args)) for _, arg := range e.args { collators = append(collators, collate.GetCollator(arg.GetType().GetCollate())) } for _, row := range rowsInGroup { var err error var hasNull, isNull bool encodedBytes = encodedBytes[:0] for i := 0; i < len(e.args) && !hasNull; i++ { encodedBytes, isNull, err = evalAndEncode(sctx, e.args[i], collators[i], row, buf, encodedBytes) if err != nil { return memDelta, err } if isNull { hasNull = true break } } if hasNull { continue } oldMemUsage := p.MemUsage() x := farm.Hash64(encodedBytes) p.InsertHash64(x) newMemUsage := p.MemUsage() memDelta += newMemUsage - oldMemUsage } return memDelta, nil } type approxCountDistinctPartial1 struct { approxCountDistinctOriginal } func (e *approxCountDistinctPartial1) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { p := (*partialResult4ApproxCountDistinct)(pr) chk.AppendBytes(e.ordinal, p.Serialize()) return nil } type approxCountDistinctPartial2 struct { approxCountDistinctPartial1 } func (e *approxCountDistinctPartial2) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) { p := (*partialResult4ApproxCountDistinct)(pr) for _, row := range rowsInGroup { input, isNull, err := e.args[0].EvalString(sctx, row) if err != nil { return memDelta, err } if isNull { continue } oldMemUsage := p.MemUsage() err = p.readAndMerge(hack.Slice(input)) if err != nil { return memDelta, err } newMemUsage := p.MemUsage() memDelta += newMemUsage - oldMemUsage } return memDelta, nil } type approxCountDistinctFinal struct { approxCountDistinctPartial2 } func (e *approxCountDistinctFinal) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { return e.baseApproxCountDistinct.AppendFinalResult2Chunk(sctx, pr, chk) }
pkg/executor/aggfuncs/func_count_distinct.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.9325301051139832, 0.02178175561130047, 0.0001599654642632231, 0.0001754281111061573, 0.13788510859012604 ]
{ "id": 2, "code_window": [ "func (sc *StatementContext) Reset() {\n", "\t*sc = StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t\ttypeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc),\n", "\t}\n", "}\n", "\n", "// CtxID returns the context id of the statement\n", "func (sc *StatementContext) CtxID() uint64 {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 453 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package variable import ( "testing" "github.com/stretchr/testify/require" ) func TestRemovedOpt(t *testing.T) { require.NoError(t, CheckSysVarIsRemoved(TiDBEnable1PC)) require.False(t, IsRemovedSysVar(TiDBEnable1PC)) require.Error(t, CheckSysVarIsRemoved(tiDBEnableAlterPlacement)) require.True(t, IsRemovedSysVar(tiDBEnableAlterPlacement)) }
pkg/sessionctx/variable/removed_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.0001790940441424027, 0.0001750092051224783, 0.00017009754083119333, 0.0001758360886014998, 0.0000037190541206655325 ]
{ "id": 2, "code_window": [ "func (sc *StatementContext) Reset() {\n", "\t*sc = StatementContext{\n", "\t\tctxID: stmtCtxIDGenerator.Add(1),\n", "\t\ttypeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc),\n", "\t}\n", "}\n", "\n", "// CtxID returns the context id of the statement\n", "func (sc *StatementContext) CtxID() uint64 {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 453 }
// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package charset import ( "bytes" "golang.org/x/text/encoding" ) // EncodingBinImpl is the instance of encodingBin. var EncodingBinImpl = &encodingBin{encodingBase{enc: encoding.Nop}} func init() { EncodingBinImpl.self = EncodingBinImpl } // encodingBin is the binary encoding. type encodingBin struct { encodingBase } // Name implements Encoding interface. func (*encodingBin) Name() string { return CharsetBin } // Tp implements Encoding interface. func (*encodingBin) Tp() EncodingTp { return EncodingTpBin } // Peek implements Encoding interface. func (*encodingBin) Peek(src []byte) []byte { if len(src) == 0 { return src } return src[:1] } // IsValid implements Encoding interface. func (*encodingBin) IsValid(_ []byte) bool { return true } // Foreach implements Encoding interface. func (*encodingBin) Foreach(src []byte, _ Op, fn func(from, to []byte, ok bool) bool) { for i := 0; i < len(src); i++ { if !fn(src[i:i+1], src[i:i+1], true) { return } } } func (*encodingBin) Transform(_ *bytes.Buffer, src []byte, _ Op) ([]byte, error) { return src, nil }
pkg/parser/charset/encoding_bin.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.000255223159911111, 0.00019134335161652416, 0.00016294400847982615, 0.0001767245848895982, 0.00003403122173040174 ]
{ "id": 3, "code_window": [ "// TypeCtx returns the type context\n", "func (sc *StatementContext) TypeCtx() types.Context {\n", "\treturn sc.typeCtx\n", "}\n", "\n", "// ErrCtx returns the error context\n", "// TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime.\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\tctx := errctx.NewContext(sc)\n", "\n", "\tif sc.TypeFlags().IgnoreTruncateErr() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sc *StatementContext) initErrCtx() {\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 481 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stmtctx import ( "bytes" "encoding/json" "fmt" "io" "math" "slices" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain/resourcegroup" "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/linter/constructor" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/nocopy" "github.com/pingcap/tidb/pkg/util/resourcegrouptag" "github.com/pingcap/tidb/pkg/util/topsql/stmtstats" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/tikv/client-go/v2/tikvrpc" "github.com/tikv/client-go/v2/util" atomic2 "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/exp/maps" ) const ( // WarnLevelError represents level "Error" for 'SHOW WARNINGS' syntax. WarnLevelError = "Error" // WarnLevelWarning represents level "Warning" for 'SHOW WARNINGS' syntax. WarnLevelWarning = "Warning" // WarnLevelNote represents level "Note" for 'SHOW WARNINGS' syntax. WarnLevelNote = "Note" ) var taskIDAlloc uint64 // AllocateTaskID allocates a new unique ID for a statement execution func AllocateTaskID() uint64 { return atomic.AddUint64(&taskIDAlloc, 1) } // SQLWarn relates a sql warning and it's level. type SQLWarn struct { Level string Err error } type jsonSQLWarn struct { Level string `json:"level"` SQLErr *terror.Error `json:"err,omitempty"` Msg string `json:"msg,omitempty"` } // MarshalJSON implements the Marshaler.MarshalJSON interface. func (warn *SQLWarn) MarshalJSON() ([]byte, error) { w := &jsonSQLWarn{ Level: warn.Level, } e := errors.Cause(warn.Err) switch x := e.(type) { case *terror.Error: // Omit outter errors because only the most inner error matters. w.SQLErr = x default: w.Msg = e.Error() } return json.Marshal(w) } // UnmarshalJSON implements the Unmarshaler.UnmarshalJSON interface. func (warn *SQLWarn) UnmarshalJSON(data []byte) error { var w jsonSQLWarn if err := json.Unmarshal(data, &w); err != nil { return err } warn.Level = w.Level if w.SQLErr != nil { warn.Err = w.SQLErr } else { warn.Err = errors.New(w.Msg) } return nil } // ReferenceCount indicates the reference count of StmtCtx. type ReferenceCount int32 const ( // ReferenceCountIsFrozen indicates the current StmtCtx is resetting, it'll refuse all the access from other sessions. ReferenceCountIsFrozen int32 = -1 // ReferenceCountNoReference indicates the current StmtCtx is not accessed by other sessions. ReferenceCountNoReference int32 = 0 ) // TryIncrease tries to increase the reference count. 
// There is a small chance that TryIncrease returns true while TryFreeze and // UnFreeze are invoked successfully during the execution of TryIncrease. func (rf *ReferenceCount) TryIncrease() bool { refCnt := atomic.LoadInt32((*int32)(rf)) for ; refCnt != ReferenceCountIsFrozen && !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt+1); refCnt = atomic.LoadInt32((*int32)(rf)) { } return refCnt != ReferenceCountIsFrozen } // Decrease decreases the reference count. func (rf *ReferenceCount) Decrease() { for refCnt := atomic.LoadInt32((*int32)(rf)); !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt-1); refCnt = atomic.LoadInt32((*int32)(rf)) { } } // TryFreeze tries to freeze the StmtCtx to frozen before resetting the old StmtCtx. func (rf *ReferenceCount) TryFreeze() bool { return atomic.LoadInt32((*int32)(rf)) == ReferenceCountNoReference && atomic.CompareAndSwapInt32((*int32)(rf), ReferenceCountNoReference, ReferenceCountIsFrozen) } // UnFreeze unfreeze the frozen StmtCtx thus the other session can access this StmtCtx. func (rf *ReferenceCount) UnFreeze() { atomic.StoreInt32((*int32)(rf), ReferenceCountNoReference) } var stmtCtxIDGenerator atomic.Uint64 // StatementContext contains variables for a statement. // It should be reset before executing a statement. type StatementContext struct { // NoCopy indicates that this struct cannot be copied because // copying this object will make the copied TypeCtx field to refer a wrong `AppendWarnings` func. _ nocopy.NoCopy _ constructor.Constructor `ctor:"NewStmtCtx,NewStmtCtxWithTimeZone,Reset"` ctxID uint64 // typeCtx is used to indicate how to make the type conversation. typeCtx types.Context // errCtx is used to indicate how to handle the errors errCtx errctx.Context // Set the following variables before execution StmtHints // IsDDLJobInQueue is used to mark whether the DDL job is put into the queue. // If IsDDLJobInQueue is true, it means the DDL job is in the queue of storage, and it can be handled by the DDL worker. IsDDLJobInQueue bool DDLJobID int64 InInsertStmt bool InUpdateStmt bool InDeleteStmt bool InSelectStmt bool InLoadDataStmt bool InExplainStmt bool InExplainAnalyzeStmt bool ExplainFormat string InCreateOrAlterStmt bool InSetSessionStatesStmt bool InPreparedPlanBuilding bool DupKeyAsWarning bool BadNullAsWarning bool DividedByZeroAsWarning bool ErrAutoincReadFailedAsWarning bool InShowWarning bool UseCache bool CacheType PlanCacheType BatchCheck bool InNullRejectCheck bool IgnoreNoPartition bool IgnoreExplainIDSuffix bool MultiSchemaInfo *model.MultiSchemaInfo // If the select statement was like 'select * from t as of timestamp ...' or in a stale read transaction // or is affected by the tidb_read_staleness session variable, then the statement will be makred as isStaleness // in stmtCtx IsStaleness bool InRestrictedSQL bool ViewDepth int32 // mu struct holds variables that change during execution. mu struct { sync.Mutex affectedRows uint64 foundRows uint64 /* following variables are ported from 'COPY_INFO' struct of MySQL server source, they are used to count rows for INSERT/REPLACE/UPDATE queries: If a row is inserted then the copied variable is incremented. If a row is updated by the INSERT ... ON DUPLICATE KEY UPDATE and the new data differs from the old one then the copied and the updated variables are incremented. The touched variable is incremented if a row was touched by the update part of the INSERT ... ON DUPLICATE KEY UPDATE no matter whether the row was actually changed or not. 
see https://github.com/mysql/mysql-server/blob/d2029238d6d9f648077664e4cdd611e231a6dc14/sql/sql_data_change.h#L60 for more details */ records uint64 deleted uint64 updated uint64 copied uint64 touched uint64 message string warnings []SQLWarn // extraWarnings record the extra warnings and are only used by the slow log only now. // If a warning is expected to be output only under some conditions (like in EXPLAIN or EXPLAIN VERBOSE) but it's // not under such conditions now, it is considered as an extra warning. // extraWarnings would not be printed through SHOW WARNINGS, but we want to always output them through the slow // log to help diagnostics, so we store them here separately. extraWarnings []SQLWarn execDetails execdetails.ExecDetails detailsSummary execdetails.P90Summary } // PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows). PrevAffectedRows int64 // PrevLastInsertID is the last insert ID of previous statement. PrevLastInsertID uint64 // LastInsertID is the auto-generated ID in the current statement. LastInsertID uint64 // InsertID is the given insert ID of an auto_increment column. InsertID uint64 BaseRowID int64 MaxRowID int64 // Copied from SessionVars.TimeZone. Priority mysql.PriorityEnum NotFillCache bool MemTracker *memory.Tracker DiskTracker *disk.Tracker // per statement resource group name // hint /* +ResourceGroup(name) */ can change the statement group name ResourceGroupName string RunawayChecker *resourcegroup.RunawayChecker IsTiFlash atomic2.Bool RuntimeStatsColl *execdetails.RuntimeStatsColl TableIDs []int64 IndexNames []string StmtType string OriginalSQL string digestMemo struct { sync.Once normalized string digest *parser.Digest } // BindSQL used to construct the key for plan cache. It records the binding used by the stmt. // If the binding is not used by the stmt, the value is empty BindSQL string // The several fields below are mainly for some diagnostic features, like stmt summary and slow query. // We cache the values here to avoid calculating them multiple times. // Note: // Avoid accessing these fields directly, use their Setter/Getter methods instead. // Other fields should be the zero value or be consistent with the plan field. // TODO: more clearly distinguish between the value is empty and the value has not been set planNormalized string planDigest *parser.Digest encodedPlan string planHint string planHintSet bool binaryPlan string // To avoid cycle import, we use interface{} for the following two fields. // flatPlan should be a *plannercore.FlatPhysicalPlan if it's not nil flatPlan interface{} // plan should be a plannercore.Plan if it's not nil plan interface{} Tables []TableEntry PointExec bool // for point update cached execution, Constant expression need to set "paramMarker" lockWaitStartTime int64 // LockWaitStartTime stores the pessimistic lock wait start time PessimisticLockWaited int32 LockKeysDuration int64 LockKeysCount int32 LockTableIDs map[int64]struct{} // table IDs need to be locked, empty for lock all tables TblInfo2UnionScan map[*model.TableInfo]bool TaskID uint64 // unique ID for an execution of a statement TaskMapBakTS uint64 // counter for // stmtCache is used to store some statement-related values. // add mutex to protect stmtCache concurrent access // https://github.com/pingcap/tidb/issues/36159 stmtCache struct { mu sync.Mutex data map[StmtCacheKey]interface{} } // Map to store all CTE storages of current SQL. // Will clean up at the end of the execution. 
CTEStorageMap interface{} SetVarHintRestore map[string]string // If the statement read from table cache, this flag is set. ReadFromTableCache bool // cache is used to reduce object allocation. cache struct { execdetails.RuntimeStatsColl MemTracker memory.Tracker DiskTracker disk.Tracker LogOnExceed [2]memory.LogOnExceed } // InVerboseExplain indicates the statement is "explain format='verbose' ...". InVerboseExplain bool // EnableOptimizeTrace indicates whether enable optimizer trace by 'trace plan statement' EnableOptimizeTrace bool // OptimizeTracer indicates the tracer for optimize OptimizeTracer *tracing.OptimizeTracer // EnableOptimizerCETrace indicate if cardinality estimation internal process needs to be traced. // CE Trace is currently a submodule of the optimizer trace and is controlled by a separated option. EnableOptimizerCETrace bool OptimizerCETrace []*tracing.CETraceRecord EnableOptimizerDebugTrace bool OptimizerDebugTrace interface{} // WaitLockLeaseTime is the duration of cached table read lease expiration time. WaitLockLeaseTime time.Duration // KvExecCounter is created from SessionVars.StmtStats to count the number of SQL // executions of the kv layer during the current execution of the statement. // Its life cycle is limited to this execution, and a new KvExecCounter is // always created during each statement execution. KvExecCounter *stmtstats.KvExecCounter // WeakConsistency is true when read consistency is weak and in a read statement and not in a transaction. WeakConsistency bool StatsLoad struct { // Timeout to wait for sync-load Timeout time.Duration // NeededItems stores the columns/indices whose stats are needed for planner. NeededItems []model.TableItemID // ResultCh to receive stats loading results ResultCh chan StatsLoadResult // LoadStartTime is to record the load start time to calculate latency LoadStartTime time.Time } // SysdateIsNow indicates whether sysdate() is an alias of now() in this statement SysdateIsNow bool // RCCheckTS indicates the current read-consistency read select statement will use `RCCheckTS` path. RCCheckTS bool // IsSQLRegistered uses to indicate whether the SQL has been registered for TopSQL. IsSQLRegistered atomic2.Bool // IsSQLAndPlanRegistered uses to indicate whether the SQL and plan has been registered for TopSQL. IsSQLAndPlanRegistered atomic2.Bool // IsReadOnly uses to indicate whether the SQL is read-only. IsReadOnly bool // usedStatsInfo records version of stats of each table used in the query. // It's a map of table physical id -> *UsedStatsInfoForTable usedStatsInfo map[int64]*UsedStatsInfoForTable // IsSyncStatsFailed indicates whether any failure happened during sync stats IsSyncStatsFailed bool // UseDynamicPruneMode indicates whether use UseDynamicPruneMode in query stmt UseDynamicPruneMode bool // ColRefFromPlan mark the column ref used by assignment in update statement. ColRefFromUpdatePlan []int64 // RangeFallback indicates that building complete ranges exceeds the memory limit so it falls back to less accurate ranges such as full range. RangeFallback bool // IsExplainAnalyzeDML is true if the statement is "explain analyze DML executors", before responding the explain // results to the client, the transaction should be committed first. See issue #37373 for more details. IsExplainAnalyzeDML bool // InHandleForeignKeyTrigger indicates currently are handling foreign key trigger. InHandleForeignKeyTrigger bool // ForeignKeyTriggerCtx is the contain information for foreign key cascade execution. 
ForeignKeyTriggerCtx struct { // The SavepointName is use to do rollback when handle foreign key cascade failed. SavepointName string HasFKCascades bool } // MPPQueryInfo stores some id and timestamp of current MPP query statement. MPPQueryInfo struct { QueryID atomic2.Uint64 QueryTS atomic2.Uint64 AllocatedMPPTaskID atomic2.Int64 AllocatedMPPGatherID atomic2.Uint64 } // TableStats stores the visited runtime table stats by table id during query TableStats map[int64]interface{} // useChunkAlloc indicates whether statement use chunk alloc useChunkAlloc bool // Check if TiFlash read engine is removed due to strict sql mode. TiFlashEngineRemovedDueToStrictSQLMode bool // StaleTSOProvider is used to provide stale timestamp oracle for read-only transactions. StaleTSOProvider struct { sync.Mutex value *uint64 eval func() (uint64, error) } } // NewStmtCtx creates a new statement context func NewStmtCtx() *StatementContext { return NewStmtCtxWithTimeZone(time.UTC) } // NewStmtCtxWithTimeZone creates a new StatementContext with the given timezone func NewStmtCtxWithTimeZone(tz *time.Location) *StatementContext { intest.AssertNotNil(tz) sc := &StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), } sc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc) return sc } // Reset resets a statement context func (sc *StatementContext) Reset() { *sc = StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), typeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc), } } // CtxID returns the context id of the statement func (sc *StatementContext) CtxID() uint64 { return sc.ctxID } // TimeZone returns the timezone of the type context func (sc *StatementContext) TimeZone() *time.Location { intest.AssertNotNil(sc) if sc == nil { return time.UTC } return sc.typeCtx.Location() } // SetTimeZone sets the timezone func (sc *StatementContext) SetTimeZone(tz *time.Location) { intest.AssertNotNil(tz) sc.typeCtx = sc.typeCtx.WithLocation(tz) } // TypeCtx returns the type context func (sc *StatementContext) TypeCtx() types.Context { return sc.typeCtx } // ErrCtx returns the error context // TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime. func (sc *StatementContext) ErrCtx() errctx.Context { ctx := errctx.NewContext(sc) if sc.TypeFlags().IgnoreTruncateErr() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore) } else if sc.TypeFlags().TruncateAsWarning() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn) } return ctx } // TypeFlags returns the type flags func (sc *StatementContext) TypeFlags() types.Flags { return sc.typeCtx.Flags() } // SetTypeFlags sets the type flags func (sc *StatementContext) SetTypeFlags(flags types.Flags) { sc.typeCtx = sc.typeCtx.WithFlags(flags) } // HandleTruncate ignores or returns the error based on the TypeContext inside. // TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect. 
func (sc *StatementContext) HandleTruncate(err error) error { return sc.typeCtx.HandleTruncate(err) } // HandleError handles the error based on `ErrCtx()` func (sc *StatementContext) HandleError(err error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleError(err) } // HandleErrorWithAlias handles the error based on `ErrCtx()` func (sc *StatementContext) HandleErrorWithAlias(internalErr, err, warnErr error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleErrorWithAlias(internalErr, err, warnErr) } // StmtHints are SessionVars related sql hints. type StmtHints struct { // Hint Information MemQuotaQuery int64 MaxExecutionTime uint64 ReplicaRead byte AllowInSubqToJoinAndAgg bool NoIndexMergeHint bool StraightJoinOrder bool // EnableCascadesPlanner is use cascades planner for a single query only. EnableCascadesPlanner bool // ForceNthPlan indicates the PlanCounterTp number for finding physical plan. // -1 for disable. ForceNthPlan int64 ResourceGroup string // Hint flags HasAllowInSubqToJoinAndAggHint bool HasMemQuotaHint bool HasReplicaReadHint bool HasMaxExecutionTime bool HasEnableCascadesPlannerHint bool HasResourceGroup bool SetVars map[string]string // the original table hints OriginalTableHints []*ast.TableOptimizerHint } // TaskMapNeedBackUp indicates that whether we need to back up taskMap during physical optimizing. func (sh *StmtHints) TaskMapNeedBackUp() bool { return sh.ForceNthPlan != -1 } // Clone the StmtHints struct and returns the pointer of the new one. func (sh *StmtHints) Clone() *StmtHints { var ( vars map[string]string tableHints []*ast.TableOptimizerHint ) if len(sh.SetVars) > 0 { vars = make(map[string]string, len(sh.SetVars)) for k, v := range sh.SetVars { vars[k] = v } } if len(sh.OriginalTableHints) > 0 { tableHints = make([]*ast.TableOptimizerHint, len(sh.OriginalTableHints)) copy(tableHints, sh.OriginalTableHints) } return &StmtHints{ MemQuotaQuery: sh.MemQuotaQuery, MaxExecutionTime: sh.MaxExecutionTime, ReplicaRead: sh.ReplicaRead, AllowInSubqToJoinAndAgg: sh.AllowInSubqToJoinAndAgg, NoIndexMergeHint: sh.NoIndexMergeHint, StraightJoinOrder: sh.StraightJoinOrder, EnableCascadesPlanner: sh.EnableCascadesPlanner, ForceNthPlan: sh.ForceNthPlan, ResourceGroup: sh.ResourceGroup, HasAllowInSubqToJoinAndAggHint: sh.HasAllowInSubqToJoinAndAggHint, HasMemQuotaHint: sh.HasMemQuotaHint, HasReplicaReadHint: sh.HasReplicaReadHint, HasMaxExecutionTime: sh.HasMaxExecutionTime, HasEnableCascadesPlannerHint: sh.HasEnableCascadesPlannerHint, HasResourceGroup: sh.HasResourceGroup, SetVars: vars, OriginalTableHints: tableHints, } } // StmtCacheKey represents the key type in the StmtCache. type StmtCacheKey int const ( // StmtNowTsCacheKey is a variable for now/current_timestamp calculation/cache of one stmt. StmtNowTsCacheKey StmtCacheKey = iota // StmtSafeTSCacheKey is a variable for safeTS calculation/cache of one stmt. StmtSafeTSCacheKey // StmtExternalTSCacheKey is a variable for externalTS calculation/cache of one stmt. StmtExternalTSCacheKey ) // GetOrStoreStmtCache gets the cached value of the given key if it exists, otherwise stores the value. 
func (sc *StatementContext) GetOrStoreStmtCache(key StmtCacheKey, value interface{}) interface{} { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { sc.stmtCache.data[key] = value } return sc.stmtCache.data[key] } // GetOrEvaluateStmtCache gets the cached value of the given key if it exists, otherwise calculate the value. func (sc *StatementContext) GetOrEvaluateStmtCache(key StmtCacheKey, valueEvaluator func() (interface{}, error)) (interface{}, error) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { value, err := valueEvaluator() if err != nil { return nil, err } sc.stmtCache.data[key] = value } return sc.stmtCache.data[key], nil } // ResetInStmtCache resets the cache of given key. func (sc *StatementContext) ResetInStmtCache(key StmtCacheKey) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() delete(sc.stmtCache.data, key) } // ResetStmtCache resets all cached values. func (sc *StatementContext) ResetStmtCache() { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } // SQLDigest gets normalized and digest for provided sql. // it will cache result after first calling. func (sc *StatementContext) SQLDigest() (normalized string, sqlDigest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(sc.OriginalSQL) }) return sc.digestMemo.normalized, sc.digestMemo.digest } // InitSQLDigest sets the normalized and digest for sql. func (sc *StatementContext) InitSQLDigest(normalized string, digest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = normalized, digest }) } // ResetSQLDigest sets the normalized and digest for sql anyway, **DO NOT USE THIS UNLESS YOU KNOW WHAT YOU ARE DOING NOW**. func (sc *StatementContext) ResetSQLDigest(s string) { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(s) } // GetPlanDigest gets the normalized plan and plan digest. func (sc *StatementContext) GetPlanDigest() (normalized string, planDigest *parser.Digest) { return sc.planNormalized, sc.planDigest } // GetPlan gets the plan field of stmtctx func (sc *StatementContext) GetPlan() interface{} { return sc.plan } // SetPlan sets the plan field of stmtctx func (sc *StatementContext) SetPlan(plan interface{}) { sc.plan = plan } // GetFlatPlan gets the flatPlan field of stmtctx func (sc *StatementContext) GetFlatPlan() interface{} { return sc.flatPlan } // SetFlatPlan sets the flatPlan field of stmtctx func (sc *StatementContext) SetFlatPlan(flat interface{}) { sc.flatPlan = flat } // GetBinaryPlan gets the binaryPlan field of stmtctx func (sc *StatementContext) GetBinaryPlan() string { return sc.binaryPlan } // SetBinaryPlan sets the binaryPlan field of stmtctx func (sc *StatementContext) SetBinaryPlan(binaryPlan string) { sc.binaryPlan = binaryPlan } // GetResourceGroupTagger returns the implementation of tikvrpc.ResourceGroupTagger related to self. 
func (sc *StatementContext) GetResourceGroupTagger() tikvrpc.ResourceGroupTagger { normalized, digest := sc.SQLDigest() planDigest := sc.planDigest return func(req *tikvrpc.Request) { if req == nil { return } if len(normalized) == 0 { return } req.ResourceGroupTag = resourcegrouptag.EncodeResourceGroupTag(digest, planDigest, resourcegrouptag.GetResourceGroupLabelByKey(resourcegrouptag.GetFirstKeyFromRequest(req))) } } // SetUseChunkAlloc set use chunk alloc status func (sc *StatementContext) SetUseChunkAlloc() { sc.useChunkAlloc = true } // ClearUseChunkAlloc clear useChunkAlloc status func (sc *StatementContext) ClearUseChunkAlloc() { sc.useChunkAlloc = false } // GetUseChunkAllocStatus returns useChunkAlloc status func (sc *StatementContext) GetUseChunkAllocStatus() bool { return sc.useChunkAlloc } // SetPlanDigest sets the normalized plan and plan digest. func (sc *StatementContext) SetPlanDigest(normalized string, planDigest *parser.Digest) { if planDigest != nil { sc.planNormalized, sc.planDigest = normalized, planDigest } } // GetEncodedPlan gets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) GetEncodedPlan() string { return sc.encodedPlan } // SetEncodedPlan sets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) SetEncodedPlan(encodedPlan string) { sc.encodedPlan = encodedPlan } // GetPlanHint gets the hint string generated from the plan. func (sc *StatementContext) GetPlanHint() (string, bool) { return sc.planHint, sc.planHintSet } // InitDiskTracker initializes the sc.DiskTracker, use cache to avoid allocation. func (sc *StatementContext) InitDiskTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.DiskTracker, label, bytesLimit, &sc.cache.LogOnExceed[0]) sc.DiskTracker = &sc.cache.DiskTracker } // InitMemTracker initializes the sc.MemTracker, use cache to avoid allocation. func (sc *StatementContext) InitMemTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.MemTracker, label, bytesLimit, &sc.cache.LogOnExceed[1]) sc.MemTracker = &sc.cache.MemTracker } // SetPlanHint sets the hint for the plan. func (sc *StatementContext) SetPlanHint(hint string) { sc.planHintSet = true sc.planHint = hint } // PlanCacheType is the flag of plan cache type PlanCacheType int const ( // DefaultNoCache no cache DefaultNoCache PlanCacheType = iota // SessionPrepared session prepared plan cache SessionPrepared // SessionNonPrepared session non-prepared plan cache SessionNonPrepared ) // SetSkipPlanCache sets to skip the plan cache and records the reason. func (sc *StatementContext) SetSkipPlanCache(reason error) { if !sc.UseCache { return // avoid unnecessary warnings } sc.UseCache = false switch sc.CacheType { case DefaultNoCache: sc.AppendWarning(errors.NewNoStackError("unknown cache type")) case SessionPrepared: sc.AppendWarning(errors.NewNoStackErrorf("skip prepared plan-cache: %s", reason.Error())) case SessionNonPrepared: if sc.InExplainStmt && sc.ExplainFormat == "plan_cache" { // use "plan_cache" rather than types.ExplainFormatPlanCache to avoid import cycle sc.AppendWarning(errors.NewNoStackErrorf("skip non-prepared plan-cache: %s", reason.Error())) } } } // TableEntry presents table in db. type TableEntry struct { DB string Table string } // AddAffectedRows adds affected rows. func (sc *StatementContext) AddAffectedRows(rows uint64) { if sc.InHandleForeignKeyTrigger { // For compatibility with MySQL, not add the affected row cause by the foreign key trigger. 
return } sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows += rows } // SetAffectedRows sets affected rows. func (sc *StatementContext) SetAffectedRows(rows uint64) { sc.mu.Lock() sc.mu.affectedRows = rows sc.mu.Unlock() } // AffectedRows gets affected rows. func (sc *StatementContext) AffectedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.affectedRows } // FoundRows gets found rows. func (sc *StatementContext) FoundRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.foundRows } // AddFoundRows adds found rows. func (sc *StatementContext) AddFoundRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.foundRows += rows } // RecordRows is used to generate info message func (sc *StatementContext) RecordRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.records } // AddRecordRows adds record rows. func (sc *StatementContext) AddRecordRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.records += rows } // DeletedRows is used to generate info message func (sc *StatementContext) DeletedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.deleted } // AddDeletedRows adds record rows. func (sc *StatementContext) AddDeletedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.deleted += rows } // UpdatedRows is used to generate info message func (sc *StatementContext) UpdatedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.updated } // AddUpdatedRows adds updated rows. func (sc *StatementContext) AddUpdatedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.updated += rows } // CopiedRows is used to generate info message func (sc *StatementContext) CopiedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.copied } // AddCopiedRows adds copied rows. func (sc *StatementContext) AddCopiedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.copied += rows } // TouchedRows is used to generate info message func (sc *StatementContext) TouchedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.touched } // AddTouchedRows adds touched rows. func (sc *StatementContext) AddTouchedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.touched += rows } // GetMessage returns the extra message of the last executed command, if there is no message, it returns empty string func (sc *StatementContext) GetMessage() string { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.message } // SetMessage sets the info message generated by some commands func (sc *StatementContext) SetMessage(msg string) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.message = msg } // GetWarnings gets warnings. func (sc *StatementContext) GetWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.warnings } // TruncateWarnings truncates warnings begin from start and returns the truncated warnings. func (sc *StatementContext) TruncateWarnings(start int) []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() sz := len(sc.mu.warnings) - start if sz <= 0 { return nil } ret := make([]SQLWarn, sz) copy(ret, sc.mu.warnings[start:]) sc.mu.warnings = sc.mu.warnings[:start] return ret } // WarningCount gets warning count. func (sc *StatementContext) WarningCount() uint16 { if sc.InShowWarning { return 0 } sc.mu.Lock() defer sc.mu.Unlock() return uint16(len(sc.mu.warnings)) } // NumErrorWarnings gets warning and error count. 
func (sc *StatementContext) NumErrorWarnings() (ec uint16, wc int) { sc.mu.Lock() defer sc.mu.Unlock() for _, w := range sc.mu.warnings { if w.Level == WarnLevelError { ec++ } } wc = len(sc.mu.warnings) return } // SetWarnings sets warnings. func (sc *StatementContext) SetWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.warnings = warns } // AppendWarning appends a warning with level 'Warning'. func (sc *StatementContext) AppendWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendWarnings appends some warnings. func (sc *StatementContext) AppendWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, warns...) } } // AppendNote appends a warning with level 'Note'. func (sc *StatementContext) AppendNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelNote, warn}) } } // AppendError appends a warning with level 'Error'. func (sc *StatementContext) AppendError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelError, warn}) } } // GetExtraWarnings gets extra warnings. func (sc *StatementContext) GetExtraWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.extraWarnings } // SetExtraWarnings sets extra warnings. func (sc *StatementContext) SetExtraWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.extraWarnings = warns } // AppendExtraWarning appends an extra warning with level 'Warning'. func (sc *StatementContext) AppendExtraWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendExtraNote appends an extra warning with level 'Note'. func (sc *StatementContext) AppendExtraNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelNote, warn}) } } // AppendExtraError appends an extra warning with level 'Error'. func (sc *StatementContext) AppendExtraError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelError, warn}) } } // resetMuForRetry resets the changed states of sc.mu during execution. func (sc *StatementContext) resetMuForRetry() { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows = 0 sc.mu.foundRows = 0 sc.mu.records = 0 sc.mu.deleted = 0 sc.mu.updated = 0 sc.mu.copied = 0 sc.mu.touched = 0 sc.mu.message = "" sc.mu.warnings = nil sc.mu.execDetails = execdetails.ExecDetails{} sc.mu.detailsSummary.Reset() } // ResetForRetry resets the changed states during execution. func (sc *StatementContext) ResetForRetry() { sc.resetMuForRetry() sc.MaxRowID = 0 sc.BaseRowID = 0 sc.TableIDs = sc.TableIDs[:0] sc.IndexNames = sc.IndexNames[:0] sc.TaskID = AllocateTaskID() } // MergeExecDetails merges a single region execution details into self, used to print // the information in slow query log. 
func (sc *StatementContext) MergeExecDetails(details *execdetails.ExecDetails, commitDetails *util.CommitDetails) { sc.mu.Lock() defer sc.mu.Unlock() if details != nil { sc.mu.execDetails.CopTime += details.CopTime sc.mu.execDetails.BackoffTime += details.BackoffTime sc.mu.execDetails.RequestCount++ sc.MergeScanDetail(details.ScanDetail) sc.MergeTimeDetail(details.TimeDetail) detail := &execdetails.DetailsNeedP90{ BackoffSleep: details.BackoffSleep, BackoffTimes: details.BackoffTimes, CalleeAddress: details.CalleeAddress, TimeDetail: details.TimeDetail, } sc.mu.detailsSummary.Merge(detail) } if commitDetails != nil { if sc.mu.execDetails.CommitDetail == nil { sc.mu.execDetails.CommitDetail = commitDetails } else { sc.mu.execDetails.CommitDetail.Merge(commitDetails) } } } // MergeScanDetail merges scan details into self. func (sc *StatementContext) MergeScanDetail(scanDetail *util.ScanDetail) { // Currently TiFlash cop task does not fill scanDetail, so need to skip it if scanDetail is nil if scanDetail == nil { return } if sc.mu.execDetails.ScanDetail == nil { sc.mu.execDetails.ScanDetail = &util.ScanDetail{} } sc.mu.execDetails.ScanDetail.Merge(scanDetail) } // MergeTimeDetail merges time details into self. func (sc *StatementContext) MergeTimeDetail(timeDetail util.TimeDetail) { sc.mu.execDetails.TimeDetail.ProcessTime += timeDetail.ProcessTime sc.mu.execDetails.TimeDetail.WaitTime += timeDetail.WaitTime } // MergeLockKeysExecDetails merges lock keys execution details into self. func (sc *StatementContext) MergeLockKeysExecDetails(lockKeys *util.LockKeysDetails) { sc.mu.Lock() defer sc.mu.Unlock() if sc.mu.execDetails.LockKeysDetail == nil { sc.mu.execDetails.LockKeysDetail = lockKeys } else { sc.mu.execDetails.LockKeysDetail.Merge(lockKeys) } } // GetExecDetails gets the execution details for the statement. func (sc *StatementContext) GetExecDetails() execdetails.ExecDetails { var details execdetails.ExecDetails sc.mu.Lock() defer sc.mu.Unlock() details = sc.mu.execDetails details.LockKeysDuration = time.Duration(atomic.LoadInt64(&sc.LockKeysDuration)) return details } // PushDownFlags converts StatementContext to tipb.SelectRequest.Flags. func (sc *StatementContext) PushDownFlags() uint64 { var flags uint64 if sc.InInsertStmt { flags |= model.FlagInInsertStmt } else if sc.InUpdateStmt || sc.InDeleteStmt { flags |= model.FlagInUpdateOrDeleteStmt } else if sc.InSelectStmt { flags |= model.FlagInSelectStmt } if sc.TypeFlags().IgnoreTruncateErr() { flags |= model.FlagIgnoreTruncate } else if sc.TypeFlags().TruncateAsWarning() { flags |= model.FlagTruncateAsWarning // TODO: remove this flag from TiKV. flags |= model.FlagOverflowAsWarning } if sc.TypeFlags().IgnoreZeroInDate() { flags |= model.FlagIgnoreZeroInDate } if sc.DividedByZeroAsWarning { flags |= model.FlagDividedByZeroAsWarning } if sc.InLoadDataStmt { flags |= model.FlagInLoadDataStmt } if sc.InRestrictedSQL { flags |= model.FlagInRestrictedSQL } return flags } // CopTasksDetails returns some useful information of cop-tasks during execution. 
func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { sc.mu.Lock() defer sc.mu.Unlock() n := sc.mu.detailsSummary.NumCopTasks d := &CopTasksDetails{ NumCopTasks: n, MaxBackoffTime: make(map[string]time.Duration), AvgBackoffTime: make(map[string]time.Duration), P90BackoffTime: make(map[string]time.Duration), TotBackoffTime: make(map[string]time.Duration), TotBackoffTimes: make(map[string]int), MaxBackoffAddress: make(map[string]string), } if n == 0 { return d } d.AvgProcessTime = sc.mu.execDetails.TimeDetail.ProcessTime / time.Duration(n) d.AvgWaitTime = sc.mu.execDetails.TimeDetail.WaitTime / time.Duration(n) d.P90ProcessTime = time.Duration((sc.mu.detailsSummary.ProcessTimePercentile.GetPercentile(0.9))) d.MaxProcessTime = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().D d.MaxProcessAddress = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().Addr d.P90WaitTime = time.Duration((sc.mu.detailsSummary.WaitTimePercentile.GetPercentile(0.9))) d.MaxWaitTime = sc.mu.detailsSummary.WaitTimePercentile.GetMax().D d.MaxWaitAddress = sc.mu.detailsSummary.WaitTimePercentile.GetMax().Addr for backoff, items := range sc.mu.detailsSummary.BackoffInfo { if items == nil { continue } n := items.ReqTimes d.MaxBackoffAddress[backoff] = items.BackoffPercentile.GetMax().Addr d.MaxBackoffTime[backoff] = items.BackoffPercentile.GetMax().D d.P90BackoffTime[backoff] = time.Duration(items.BackoffPercentile.GetPercentile(0.9)) d.AvgBackoffTime[backoff] = items.TotBackoffTime / time.Duration(n) d.TotBackoffTime[backoff] = items.TotBackoffTime d.TotBackoffTimes[backoff] = items.TotBackoffTimes } return d } // InitFromPBFlagAndTz set the flag and timezone of StatementContext from a `tipb.SelectRequest.Flags` and `*time.Location`. func (sc *StatementContext) InitFromPBFlagAndTz(flags uint64, tz *time.Location) { sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 sc.SetTimeZone(tz) sc.SetTypeFlags(types.DefaultStmtFlags. WithIgnoreTruncateErr((flags & model.FlagIgnoreTruncate) > 0). WithTruncateAsWarning((flags & model.FlagTruncateAsWarning) > 0). WithIgnoreZeroInDate((flags & model.FlagIgnoreZeroInDate) > 0). WithAllowNegativeToUnsigned(!sc.InInsertStmt)) } // GetLockWaitStartTime returns the statement pessimistic lock wait start time func (sc *StatementContext) GetLockWaitStartTime() time.Time { startTime := atomic.LoadInt64(&sc.lockWaitStartTime) if startTime == 0 { startTime = time.Now().UnixNano() atomic.StoreInt64(&sc.lockWaitStartTime, startTime) } return time.Unix(0, startTime) } // RecordRangeFallback records range fallback. func (sc *StatementContext) RecordRangeFallback(rangeMaxSize int64) { // If range fallback happens, it means ether the query is unreasonable(for example, several long IN lists) or tidb_opt_range_max_size is too small // and the generated plan is probably suboptimal. In that case we don't put it into plan cache. if sc.UseCache { sc.SetSkipPlanCache(errors.Errorf("in-list is too long")) } if !sc.RangeFallback { sc.AppendWarning(errors.Errorf("Memory capacity of %v bytes for 'tidb_opt_range_max_size' exceeded when building ranges. 
Less accurate ranges such as full range are chosen", rangeMaxSize)) sc.RangeFallback = true } } // UseDynamicPartitionPrune indicates whether dynamic partition is used during the query func (sc *StatementContext) UseDynamicPartitionPrune() bool { return sc.UseDynamicPruneMode } // DetachMemDiskTracker detaches the memory and disk tracker from the sessionTracker. func (sc *StatementContext) DetachMemDiskTracker() { if sc == nil { return } if sc.MemTracker != nil { sc.MemTracker.Detach() } if sc.DiskTracker != nil { sc.DiskTracker.Detach() } } // SetStaleTSOProvider sets the stale TSO provider. func (sc *StatementContext) SetStaleTSOProvider(eval func() (uint64, error)) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() sc.StaleTSOProvider.value = nil sc.StaleTSOProvider.eval = eval } // GetStaleTSO returns the TSO for stale-read usage which calculate from PD's last response. func (sc *StatementContext) GetStaleTSO() (uint64, error) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() if sc.StaleTSOProvider.value != nil { return *sc.StaleTSOProvider.value, nil } if sc.StaleTSOProvider.eval == nil { return 0, nil } tso, err := sc.StaleTSOProvider.eval() if err != nil { return 0, err } sc.StaleTSOProvider.value = &tso return tso, nil } // AddSetVarHintRestore records the variables which are affected by SET_VAR hint. And restore them to the old value later. func (sc *StatementContext) AddSetVarHintRestore(name, val string) { if sc.SetVarHintRestore == nil { sc.SetVarHintRestore = make(map[string]string) } sc.SetVarHintRestore[name] = val } // CopTasksDetails collects some useful information of cop-tasks during execution. type CopTasksDetails struct { NumCopTasks int AvgProcessTime time.Duration P90ProcessTime time.Duration MaxProcessAddress string MaxProcessTime time.Duration AvgWaitTime time.Duration P90WaitTime time.Duration MaxWaitAddress string MaxWaitTime time.Duration MaxBackoffTime map[string]time.Duration MaxBackoffAddress map[string]string AvgBackoffTime map[string]time.Duration P90BackoffTime map[string]time.Duration TotBackoffTime map[string]time.Duration TotBackoffTimes map[string]int } // ToZapFields wraps the CopTasksDetails as zap.Fileds. func (d *CopTasksDetails) ToZapFields() (fields []zap.Field) { if d.NumCopTasks == 0 { return } fields = make([]zap.Field, 0, 10) fields = append(fields, zap.Int("num_cop_tasks", d.NumCopTasks)) fields = append(fields, zap.String("process_avg_time", strconv.FormatFloat(d.AvgProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_p90_time", strconv.FormatFloat(d.P90ProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_time", strconv.FormatFloat(d.MaxProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_addr", d.MaxProcessAddress)) fields = append(fields, zap.String("wait_avg_time", strconv.FormatFloat(d.AvgWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_p90_time", strconv.FormatFloat(d.P90WaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_time", strconv.FormatFloat(d.MaxWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_addr", d.MaxWaitAddress)) return fields } // GetUsedStatsInfo returns the map for recording the used stats during query. // If initIfNil is true, it will initialize it when this map is nil. 
func (sc *StatementContext) GetUsedStatsInfo(initIfNil bool) map[int64]*UsedStatsInfoForTable { if sc.usedStatsInfo == nil && initIfNil { sc.usedStatsInfo = make(map[int64]*UsedStatsInfoForTable) } return sc.usedStatsInfo } // RecordedStatsLoadStatusCnt returns the total number of recorded column/index stats status, which is not full loaded. func (sc *StatementContext) RecordedStatsLoadStatusCnt() (cnt int) { allStatus := sc.GetUsedStatsInfo(false) for _, status := range allStatus { if status == nil { continue } cnt += status.recordedColIdxCount() } return } // TypeCtxOrDefault returns the reference to the `TypeCtx` inside the statement context. // If the statement context is nil, it'll return a newly created default type context. // **don't** use this function if you can make sure the `sc` is not nil. We should limit the usage of this function as // little as possible. func (sc *StatementContext) TypeCtxOrDefault() types.Context { if sc != nil { return sc.typeCtx } return types.DefaultStmtNoWarningContext } // UsedStatsInfoForTable records stats that are used during query and their information. type UsedStatsInfoForTable struct { Name string TblInfo *model.TableInfo Version uint64 RealtimeCount int64 ModifyCount int64 ColumnStatsLoadStatus map[int64]string IndexStatsLoadStatus map[int64]string } // FormatForExplain format the content in the format expected to be printed in the execution plan. // case 1: if stats version is 0, print stats:pseudo. // case 2: if stats version is not 0, and there are column/index stats that are not full loaded, // print stats:partial, then print status of 3 column/index status at most. For the rest, only // the count will be printed, in the format like (more: 1 onlyCmsEvicted, 2 onlyHistRemained). func (s *UsedStatsInfoForTable) FormatForExplain() string { // statistics.PseudoVersion == 0 if s.Version == 0 { return "stats:pseudo" } var b strings.Builder if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) == 0 { return "" } b.WriteString("stats:partial") outputNumsLeft := 3 statusCnt := make(map[string]uint64, 1) var strs []string strs = append(strs, s.collectFromColOrIdxStatus(false, &outputNumsLeft, statusCnt)...) strs = append(strs, s.collectFromColOrIdxStatus(true, &outputNumsLeft, statusCnt)...) b.WriteString("[") b.WriteString(strings.Join(strs, ", ")) if len(statusCnt) > 0 { b.WriteString("...(more: ") keys := maps.Keys(statusCnt) slices.Sort(keys) var cntStrs []string for _, key := range keys { cntStrs = append(cntStrs, strconv.FormatUint(statusCnt[key], 10)+" "+key) } b.WriteString(strings.Join(cntStrs, ", ")) b.WriteString(")") } b.WriteString("]") return b.String() } // WriteToSlowLog format the content in the format expected to be printed to the slow log, then write to w. // The format is table name partition name:version[realtime row count;modify count][index load status][column load status]. func (s *UsedStatsInfoForTable) WriteToSlowLog(w io.Writer) { ver := "pseudo" // statistics.PseudoVersion == 0 if s.Version != 0 { ver = strconv.FormatUint(s.Version, 10) } fmt.Fprintf(w, "%s:%s[%d;%d]", s.Name, ver, s.RealtimeCount, s.ModifyCount) if ver == "pseudo" { return } if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) > 0 { fmt.Fprintf(w, "[%s][%s]", strings.Join(s.collectFromColOrIdxStatus(false, nil, nil), ","), strings.Join(s.collectFromColOrIdxStatus(true, nil, nil), ","), ) } } // collectFromColOrIdxStatus prints the status of column or index stats to a slice // of the string in the format of "col/idx name:status". 
// If outputNumsLeft is not nil, this function will output outputNumsLeft column/index // status at most, the rest will be counted in statusCnt, which is a map of status->count. func (s *UsedStatsInfoForTable) collectFromColOrIdxStatus( forColumn bool, outputNumsLeft *int, statusCnt map[string]uint64, ) []string { var status map[int64]string if forColumn { status = s.ColumnStatsLoadStatus } else { status = s.IndexStatsLoadStatus } keys := maps.Keys(status) slices.Sort(keys) strs := make([]string, 0, len(status)) for _, id := range keys { if outputNumsLeft == nil || *outputNumsLeft > 0 { var name string if s.TblInfo != nil { if forColumn { name = s.TblInfo.FindColumnNameByID(id) } else { name = s.TblInfo.FindIndexNameByID(id) } } if len(name) == 0 { name = "ID " + strconv.FormatInt(id, 10) } strs = append(strs, name+":"+status[id]) if outputNumsLeft != nil { *outputNumsLeft-- } } else if statusCnt != nil { statusCnt[status[id]] = statusCnt[status[id]] + 1 } } return strs } func (s *UsedStatsInfoForTable) recordedColIdxCount() int { return len(s.IndexStatsLoadStatus) + len(s.ColumnStatsLoadStatus) } // StatsLoadResult indicates result for StatsLoad type StatsLoadResult struct { Item model.TableItemID Error error } // HasError returns whether result has error func (r StatsLoadResult) HasError() bool { return r.Error != nil } // ErrorMsg returns StatsLoadResult err msg func (r StatsLoadResult) ErrorMsg() string { if r.Error == nil { return "" } b := bytes.NewBufferString("tableID:") b.WriteString(strconv.FormatInt(r.Item.TableID, 10)) b.WriteString(", id:") b.WriteString(strconv.FormatInt(r.Item.ID, 10)) b.WriteString(", isIndex:") b.WriteString(strconv.FormatBool(r.Item.IsIndex)) b.WriteString(", err:") b.WriteString(r.Error.Error()) return b.String() }
pkg/sessionctx/stmtctx/stmtctx.go
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.9977434873580933, 0.03946710005402565, 0.00016356153355445713, 0.0002680426696315408, 0.16162893176078796 ]
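The GetOrStoreStmtCache/GetOrEvaluateStmtCache API in the stmtctx.go excerpt above caches per-statement values keyed by StmtCacheKey. A minimal usage sketch follows; the helper name stmtNow and the time.Now() evaluator are hypothetical illustrations, not taken from the source, and only the cache methods and StmtNowTsCacheKey come from the excerpt.

// stmtNow is a hypothetical caller inside the stmtctx package: it computes
// "now" at most once per statement and reuses the cached value afterwards.
func stmtNow(sc *StatementContext) (time.Time, error) {
    v, err := sc.GetOrEvaluateStmtCache(StmtNowTsCacheKey, func() (interface{}, error) {
        // Evaluated only the first time this key is requested for the statement.
        return time.Now(), nil
    })
    if err != nil {
        return time.Time{}, err
    }
    return v.(time.Time), nil // subsequent calls return the cached value
}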
{ "id": 3, "code_window": [ "// TypeCtx returns the type context\n", "func (sc *StatementContext) TypeCtx() types.Context {\n", "\treturn sc.typeCtx\n", "}\n", "\n", "// ErrCtx returns the error context\n", "// TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime.\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\tctx := errctx.NewContext(sc)\n", "\n", "\tif sc.TypeFlags().IgnoreTruncateErr() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sc *StatementContext) initErrCtx() {\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 481 }
CREATE TABLE t (
    pk INT PRIMARY KEY AUTO_INCREMENT,
    x INT NULL,
    y INT NOT NULL DEFAULT 123,
    z DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
br/tests/lightning_default-columns/data/defcol.t-schema.sql
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00016704222070984542, 0.00016704222070984542, 0.00016704222070984542, 0.00016704222070984542, 0 ]
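The hunk above (id 3) replaces the ErrCtx builder shown earlier in stmtctx.go with an initErrCtx method, and the companion hunk (id 4, further below) adds the closing assignment. A rough sketch of the combined edit, assuming a cached errCtx field on StatementContext; that field is an assumption, while the initErrCtx signature, the truncate-level logic, and the final sc.errCtx = ctx line come from the recorded hunks and the excerpt.

// initErrCtx rebuilds the error context from the current type flags and
// stores it on the StatementContext (sketch; errCtx is a hypothetical field).
func (sc *StatementContext) initErrCtx() {
    ctx := errctx.NewContext(sc)

    if sc.TypeFlags().IgnoreTruncateErr() {
        ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore)
    } else if sc.TypeFlags().TruncateAsWarning() {
        ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn)
    }

    sc.errCtx = ctx
}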
{ "id": 3, "code_window": [ "// TypeCtx returns the type context\n", "func (sc *StatementContext) TypeCtx() types.Context {\n", "\treturn sc.typeCtx\n", "}\n", "\n", "// ErrCtx returns the error context\n", "// TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime.\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\tctx := errctx.NewContext(sc)\n", "\n", "\tif sc.TypeFlags().IgnoreTruncateErr() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sc *StatementContext) initErrCtx() {\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 481 }
# TestPlanCache
set tidb_enable_prepared_plan_cache=1;
set tidb_enable_ordered_result_mode=1;
drop table if exists t;
create table t (a int primary key, b int, c int, d int, key(b));
prepare s1 from 'select * from t where a > ? limit 10';
set @a = 10;
execute s1 using @a;
select @@last_plan_from_cache;
execute s1 using @a;
select @@last_plan_from_cache;
set tidb_enable_prepared_plan_cache=DEFAULT;
set tidb_enable_ordered_result_mode=DEFAULT;

# TestSQLBinding
set tidb_enable_ordered_result_mode=1;
set tidb_opt_limit_push_down_threshold=0;
drop table if exists t;
create table t (a int primary key, b int, c int, d int, key(b));
explain select * from t where a > 0 limit 1;
create session binding for select * from t where a>0 limit 1 using select * from t use index(b) where a>0 limit 1;
explain select * from t where a > 0 limit 1;
set tidb_enable_ordered_result_mode=DEFAULT;
set tidb_opt_limit_push_down_threshold=DEFAULT;

# TestClusteredIndex
set tidb_enable_ordered_result_mode=1;
set tidb_enable_clustered_index = 'ON';
drop table if exists t;
CREATE TABLE t (a int,b int,c int, PRIMARY KEY (a,b));
explain format=brief select * from t limit 10;
set tidb_enable_ordered_result_mode=DEFAULT;
set tidb_enable_clustered_index = DEFAULT;

# TestStableResultSwitch
show variables where variable_name like 'tidb_enable_ordered_result_mode';
tests/integrationtest/t/planner/core/rule_result_reorder.test
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.000686106039211154, 0.0002731957065407187, 0.0001685201277723536, 0.0001706694019958377, 0.00020645691256504506 ]
{ "id": 3, "code_window": [ "// TypeCtx returns the type context\n", "func (sc *StatementContext) TypeCtx() types.Context {\n", "\treturn sc.typeCtx\n", "}\n", "\n", "// ErrCtx returns the error context\n", "// TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime.\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\tctx := errctx.NewContext(sc)\n", "\n", "\tif sc.TypeFlags().IgnoreTruncateErr() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "func (sc *StatementContext) initErrCtx() {\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 481 }
load("@io_bazel_rules_go//go:def.bzl", "go_test") go_test( name = "simpletest_test", timeout = "short", srcs = [ "main_test.go", "simple_test.go", ], flaky = True, race = "on", shard_count = 11, deps = [ "//pkg/config", "//pkg/parser/auth", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/server", "//pkg/session", "//pkg/sessionctx", "//pkg/store/mockstore", "//pkg/testkit", "//pkg/util/dbterror/exeerrors", "//pkg/util/globalconn", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", "@io_opencensus_go//stats/view", "@org_uber_go_goleak//:goleak", ], )
pkg/executor/test/simpletest/BUILD.bazel
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00017303520871791989, 0.00017110162298195064, 0.00016924783994909376, 0.00017106170707847923, 0.0000014494295328404405 ]
{ "id": 4, "code_window": [ "\t} else if sc.TypeFlags().TruncateAsWarning() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "\tsc.errCtx = ctx\n", "}\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 491 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "stmtctx", srcs = ["stmtctx.go"], importpath = "github.com/pingcap/tidb/pkg/sessionctx/stmtctx", visibility = ["//visibility:public"], deps = [ "//pkg/domain/resourcegroup", "//pkg/errctx", "//pkg/parser", "//pkg/parser/ast", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/types", "//pkg/util/disk", "//pkg/util/execdetails", "//pkg/util/intest", "//pkg/util/linter/constructor", "//pkg/util/memory", "//pkg/util/nocopy", "//pkg/util/resourcegrouptag", "//pkg/util/topsql/stmtstats", "//pkg/util/tracing", "@com_github_pingcap_errors//:errors", "@com_github_tikv_client_go_v2//tikvrpc", "@com_github_tikv_client_go_v2//util", "@org_golang_x_exp//maps", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], ) go_test( name = "stmtctx_test", timeout = "short", srcs = [ "main_test.go", "stmtctx_test.go", ], embed = [":stmtctx"], flaky = True, shard_count = 11, deps = [ "//pkg/kv", "//pkg/sessionctx/variable", "//pkg/testkit", "//pkg/testkit/testsetup", "//pkg/types", "//pkg/util/execdetails", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//util", "@org_uber_go_goleak//:goleak", ], )
pkg/sessionctx/stmtctx/BUILD.bazel
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00024672283325344324, 0.00018827906751539558, 0.0001703283196548, 0.00017231234232895076, 0.000027677164325723425 ]
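If the error context is cached as sketched earlier, the ErrCtx accessor and its callers (for example HandleError in the stmtctx.go excerpt) can stay on the cheap path. The consumer-side sketch below is a hedged guess: the errCtx field and the SetTypeFlags refresh hook are assumptions about where the cache would live and be invalidated, not details recorded in the hunks.

// ErrCtx returns the cached error context (sketch; assumes initErrCtx has run).
func (sc *StatementContext) ErrCtx() errctx.Context {
    return sc.errCtx
}

// SetTypeFlags would presumably refresh the cache, since the truncate level
// inside the error context depends on the type flags (assumption only).
func (sc *StatementContext) SetTypeFlags(flags types.Flags) {
    sc.typeCtx = sc.typeCtx.WithFlags(flags)
    sc.initErrCtx()
}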
{ "id": 4, "code_window": [ "\t} else if sc.TypeFlags().TruncateAsWarning() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "\tsc.errCtx = ctx\n", "}\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 491 }
create schema dup_resolve;
br/tests/lightning_duplicate_resolution_replace_one_unique_key_multiple_conflicts_nonclustered_pk/data/dup_resolve-schema-create.sql
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00017050252063199878, 0.00017050252063199878, 0.00017050252063199878, 0.00017050252063199878, 0 ]
{ "id": 4, "code_window": [ "\t} else if sc.TypeFlags().TruncateAsWarning() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "\tsc.errCtx = ctx\n", "}\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 491 }
// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package autoid_test import ( "context" "fmt" "math" "testing" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/autoid" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/store/mockstore" ) func BenchmarkAllocator_Alloc(b *testing.B) { b.StopTimer() store, err := mockstore.NewMockStore() if err != nil { return } defer func() { err := store.Close() if err != nil { b.Fatal(err) } }() dbID := int64(1) tblID := int64(2) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) if err != nil { return err } err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) if err != nil { return err } return nil }) if err != nil { return } alloc := autoid.NewAllocator(mockRequirement{store}, 1, 2, false, autoid.RowIDAllocType) b.StartTimer() for i := 0; i < b.N; i++ { _, _, err := alloc.Alloc(ctx, 1, 1, 1) if err != nil { b.Fatal(err) } } } func BenchmarkAllocator_SequenceAlloc(b *testing.B) { b.StopTimer() store, err := mockstore.NewMockStore() if err != nil { return } defer func() { err := store.Close() if err != nil { b.Fatal(err) } }() var seq *model.SequenceInfo var sequenceBase int64 ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) if err != nil { return err } seq = &model.SequenceInfo{ Start: 1, Cycle: true, Cache: false, MinValue: -10, MaxValue: math.MaxInt64, Increment: 2, CacheValue: 2000000, } seqTable := &model.TableInfo{ ID: 1, Name: model.NewCIStr("seq"), Sequence: seq, } sequenceBase = seq.Start - 1 err = m.CreateSequenceAndSetSeqValue(1, seqTable, sequenceBase) return err }) if err != nil { return } alloc := autoid.NewSequenceAllocator(store, 1, 1, seq) b.StartTimer() for i := 0; i < b.N; i++ { _, _, _, err := alloc.AllocSeqCache() if err != nil { fmt.Println("err") } } } func BenchmarkAllocator_Seek(b *testing.B) { base := int64(21421948021) offset := int64(-351354365326) increment := int64(3) b.StartTimer() for i := 0; i < b.N; i++ { _, err := autoid.CalcSequenceBatchSize(base, 3, increment, offset, math.MinInt64, math.MaxInt64) if err != nil { b.Fatal(err) } } }
pkg/meta/autoid/bench_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.04725918918848038, 0.0035443748347461224, 0.00016720542043913156, 0.00017424093675799668, 0.012124324217438698 ]
{ "id": 4, "code_window": [ "\t} else if sc.TypeFlags().TruncateAsWarning() {\n", "\t\tctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "\tsc.errCtx = ctx\n", "}\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 491 }
// Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Note: All the tests in this file will be executed sequentially. package executor_test import ( "bytes" "context" "fmt" "math" "runtime/pprof" "strconv" "strings" "sync" "sync/atomic" "testing" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/pkg/config" ddltestutil "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/autoid" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/copr" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testutil" "github.com/pingcap/tidb/pkg/util/gcutil" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/testutils" "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/tikvrpc" ) func TestEarlyClose(t *testing.T) { var cluster testutils.Cluster store, dom := testkit.CreateMockStoreAndDomain(t, mockstore.WithClusterInspector(func(c testutils.Cluster) { mockstore.BootstrapWithSingleStore(c) cluster = c })) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create table earlyclose (id int primary key)") N := 100 // Insert N rows. var values []string for i := 0; i < N; i++ { values = append(values, fmt.Sprintf("(%d)", i)) } tk.MustExec("insert earlyclose values " + strings.Join(values, ",")) // Get table ID for split. is := dom.InfoSchema() tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("earlyclose")) require.NoError(t, err) tblID := tbl.Meta().ID // Split the table. tableStart := tablecodec.GenTableRecordPrefix(tblID) cluster.SplitKeys(tableStart, tableStart.PrefixNext(), N/2) ctx := context.Background() for i := 0; i < N/2; i++ { rss, err := tk.Session().Execute(ctx, "select * from earlyclose order by id") require.NoError(t, err) rs := rss[0] req := rs.NewChunk(nil) require.NoError(t, rs.Next(ctx, req)) require.NoError(t, rs.Close()) } // Goroutine should not leak when error happen. 
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/store/copr/handleTaskOnceError", `return(true)`)) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/store/copr/handleTaskOnceError")) }() rss, err := tk.Session().Execute(ctx, "select * from earlyclose") require.NoError(t, err) rs := rss[0] req := rs.NewChunk(nil) err = rs.Next(ctx, req) require.Error(t, err) require.NoError(t, rs.Close()) } type stats struct { } func (s stats) GetScope(_ string) variable.ScopeFlag { return variable.DefaultStatusVarScopeFlag } func (s stats) Stats(_ *variable.SessionVars) (map[string]interface{}, error) { m := make(map[string]interface{}) var a, b interface{} b = "123" m["test_interface_nil"] = a m["test_interface"] = b m["test_interface_slice"] = []interface{}{"a", "b", "c"} return m, nil } func TestShow(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") testSQL := `drop table if exists show_test` tk.MustExec(testSQL) testSQL = `create table SHOW_test (id int PRIMARY KEY AUTO_INCREMENT, c1 int comment "c1_comment", c2 int, c3 int default 1, c4 text, c5 boolean, key idx_wide_c4(c3, c4(10))) ENGINE=InnoDB AUTO_INCREMENT=28934 DEFAULT CHARSET=utf8 COMMENT "table_comment";` tk.MustExec(testSQL) testSQL = "show columns from show_test;" result := tk.MustQuery(testSQL) require.Len(t, result.Rows(), 6) testSQL = "show create table show_test;" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row := result.Rows()[0] // For issue https://github.com/pingcap/tidb/issues/1061 expectedRow := []interface{}{ "SHOW_test", "CREATE TABLE `SHOW_test` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `c1` int(11) DEFAULT NULL COMMENT 'c1_comment',\n `c2` int(11) DEFAULT NULL,\n `c3` int(11) DEFAULT '1',\n `c4` text DEFAULT NULL,\n `c5` tinyint(1) DEFAULT NULL,\n PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */,\n KEY `idx_wide_c4` (`c3`,`c4`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=28934 COMMENT='table_comment'"} for i, r := range row { require.Equal(t, expectedRow[i], r) } // For issue https://github.com/pingcap/tidb/issues/1918 testSQL = `create table ptest( a int primary key, b double NOT NULL DEFAULT 2.0, c varchar(10) NOT NULL, d time unique, e timestamp NULL, f timestamp );` tk.MustExec(testSQL) testSQL = "show create table ptest;" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "ptest", "CREATE TABLE `ptest` (\n `a` int(11) NOT NULL,\n `b` double NOT NULL DEFAULT '2',\n `c` varchar(10) NOT NULL,\n `d` time DEFAULT NULL,\n `e` timestamp NULL DEFAULT NULL,\n `f` timestamp NULL DEFAULT NULL,\n PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */,\n UNIQUE KEY `d` (`d`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"} for i, r := range row { require.Equal(t, expectedRow[i], r) } // Issue #4684. 
tk.MustExec("drop table if exists `t1`") testSQL = "create table `t1` (" + "`c1` tinyint unsigned default null," + "`c2` smallint unsigned default null," + "`c3` mediumint unsigned default null," + "`c4` int unsigned default null," + "`c5` bigint unsigned default null);" tk.MustExec(testSQL) testSQL = "show create table t1" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "t1", "CREATE TABLE `t1` (\n" + " `c1` tinyint(3) unsigned DEFAULT NULL,\n" + " `c2` smallint(5) unsigned DEFAULT NULL,\n" + " `c3` mediumint(8) unsigned DEFAULT NULL,\n" + " `c4` int(10) unsigned DEFAULT NULL,\n" + " `c5` bigint(20) unsigned DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"} for i, r := range row { require.Equal(t, expectedRow[i], r) } // Issue #7665 tk.MustExec("drop table if exists `decimalschema`") testSQL = "create table `decimalschema` (`c1` decimal);" tk.MustExec(testSQL) testSQL = "show create table decimalschema" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "decimalschema", "CREATE TABLE `decimalschema` (\n" + " `c1` decimal(10,0) DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"} for i, r := range row { require.Equal(t, expectedRow[i], r) } tk.MustExec("drop table if exists `decimalschema`") testSQL = "create table `decimalschema` (`c1` decimal(15));" tk.MustExec(testSQL) testSQL = "show create table decimalschema" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "decimalschema", "CREATE TABLE `decimalschema` (\n" + " `c1` decimal(15,0) DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"} for i, r := range row { require.Equal(t, expectedRow[i], r) } // test SHOW CREATE TABLE with invisible index tk.MustExec("drop table if exists t") tk.MustExec(`create table t ( a int, b int, c int UNIQUE KEY, d int UNIQUE KEY, index invisible_idx_b (b) invisible, index (d) invisible)`) expected := "t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` int(11) DEFAULT NULL,\n" + " `c` int(11) DEFAULT NULL,\n" + " `d` int(11) DEFAULT NULL,\n" + " KEY `invisible_idx_b` (`b`) /*!80000 INVISIBLE */,\n" + " KEY `d` (`d`) /*!80000 INVISIBLE */,\n" + " UNIQUE KEY `c` (`c`),\n" + " UNIQUE KEY `d_2` (`d`)\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" tk.MustQuery("show create table t").Check(testkit.Rows(expected)) tk.MustExec("drop table t") testSQL = "SHOW VARIABLES LIKE 'character_set_results';" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) // Test case for index type and comment tk.MustExec(`create table show_index (id int, c int, primary key (id), index cIdx using hash (c) comment "index_comment_for_cIdx");`) tk.MustExec(`create index idx1 on show_index (id) using hash;`) tk.MustExec(`create index idx2 on show_index (id) comment 'idx';`) tk.MustExec(`create index idx3 on show_index (id) using hash comment 'idx';`) tk.MustExec(`alter table show_index add index idx4 (id) using btree comment 'idx';`) tk.MustExec(`create index idx5 using hash on show_index (id) using btree comment 'idx';`) tk.MustExec(`create index idx6 using hash on show_index (id);`) tk.MustExec(`create index idx7 on show_index (id);`) tk.MustExec(`create index idx8 on show_index (id) visible;`) tk.MustExec(`create index idx9 on show_index (id) invisible;`) tk.MustExec(`create index expr_idx on show_index 
((id*2+1))`) testSQL = "SHOW index from show_index;" tk.MustQuery(testSQL).Check(testkit.RowsWithSep("|", "show_index|0|PRIMARY|1|id|A|0|<nil>|<nil>||BTREE| |YES|<nil>|YES", "show_index|1|cIdx|1|c|A|0|<nil>|<nil>|YES|HASH||index_comment_for_cIdx|YES|<nil>|NO", "show_index|1|idx1|1|id|A|0|<nil>|<nil>||HASH| |YES|<nil>|NO", "show_index|1|idx2|1|id|A|0|<nil>|<nil>||BTREE||idx|YES|<nil>|NO", "show_index|1|idx3|1|id|A|0|<nil>|<nil>||HASH||idx|YES|<nil>|NO", "show_index|1|idx4|1|id|A|0|<nil>|<nil>||BTREE||idx|YES|<nil>|NO", "show_index|1|idx5|1|id|A|0|<nil>|<nil>||BTREE||idx|YES|<nil>|NO", "show_index|1|idx6|1|id|A|0|<nil>|<nil>||HASH| |YES|<nil>|NO", "show_index|1|idx7|1|id|A|0|<nil>|<nil>||BTREE| |YES|<nil>|NO", "show_index|1|idx8|1|id|A|0|<nil>|<nil>||BTREE| |YES|<nil>|NO", "show_index|1|idx9|1|id|A|0|<nil>|<nil>||BTREE| |NO|<nil>|NO", "show_index|1|expr_idx|1|NULL|A|0|<nil>|<nil>||BTREE| |YES|`id` * 2 + 1|NO", )) // For show like with escape testSQL = `show tables like 'SHOW\_test'` result = tk.MustQuery(testSQL) rows := result.Rows() require.Len(t, rows, 1) require.Equal(t, []interface{}{"SHOW_test"}, rows[0]) var ss stats variable.RegisterStatistics(ss) testSQL = "show status like 'character_set_results';" result = tk.MustQuery(testSQL) require.NotNil(t, result.Rows()) tk.MustQuery("SHOW PROCEDURE STATUS WHERE Db='test'").Check(testkit.Rows()) tk.MustQuery("SHOW TRIGGERS WHERE `Trigger` ='test'").Check(testkit.Rows()) tk.MustQuery("SHOW PROCESSLIST;").Check(testkit.Rows(fmt.Sprintf("%d test Sleep 0 autocommit SHOW PROCESSLIST;", tk.Session().ShowProcess().ID))) tk.MustQuery("SHOW FULL PROCESSLIST;").Check(testkit.Rows(fmt.Sprintf("%d test Sleep 0 autocommit SHOW FULL PROCESSLIST;", tk.Session().ShowProcess().ID))) tk.MustQuery("SHOW EVENTS WHERE Db = 'test'").Check(testkit.Rows()) tk.MustQuery("SHOW PLUGINS").Check(testkit.Rows()) tk.MustQuery("SHOW PROFILES").Check(testkit.Rows()) // +-------------+--------------------+--------------+------------------+-------------------+ // | File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set | // +-------------+--------------------+--------------+------------------+-------------------+ // | tidb-binlog | 400668057259474944 | | | | // +-------------+--------------------+--------------+------------------+-------------------+ result = tk.MustQuery("SHOW MASTER STATUS") require.Len(t, result.Rows(), 1) row = result.Rows()[0] require.Len(t, row, 5) require.NotEqual(t, "0", row[1].(string)) tk.MustQuery("SHOW PRIVILEGES") // Test show create database testSQL = `create database show_test_DB` tk.MustExec(testSQL) testSQL = "show create database show_test_DB;" tk.MustQuery(testSQL).Check(testkit.RowsWithSep("|", "show_test_DB|CREATE DATABASE `show_test_DB` /*!40100 DEFAULT CHARACTER SET utf8mb4 */", )) testSQL = "show create database if not exists show_test_DB;" tk.MustQuery(testSQL).Check(testkit.RowsWithSep("|", "show_test_DB|CREATE DATABASE IF NOT EXISTS `show_test_DB` /*!40100 DEFAULT CHARACTER SET utf8mb4 */", )) tk.MustExec("use show_test_DB") result = tk.MustQuery("SHOW index from show_index from test where Column_name = 'c'") require.Len(t, result.Rows(), 1) // Test show full columns // for issue https://github.com/pingcap/tidb/issues/4224 tk.MustExec(`drop table if exists show_test_comment`) tk.MustExec(`create table show_test_comment (id int not null default 0 comment "show_test_comment_id")`) tk.MustQuery(`show full columns from show_test_comment`).Check(testkit.RowsWithSep("|", 
"id|int(11)|<nil>|NO||0||select,insert,update,references|show_test_comment_id", )) // Test show create table with AUTO_INCREMENT option // for issue https://github.com/pingcap/tidb/issues/3747 tk.MustExec(`drop table if exists show_auto_increment`) tk.MustExec(`create table show_auto_increment (id int key auto_increment) auto_increment=4`) tk.MustQuery(`show create table show_auto_increment`).Check(testkit.RowsWithSep("|", ""+ "show_auto_increment CREATE TABLE `show_auto_increment` (\n"+ " `id` int(11) NOT NULL AUTO_INCREMENT,\n"+ " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=4", )) // for issue https://github.com/pingcap/tidb/issues/4678 autoIDStep := autoid.GetStep() tk.MustExec("insert into show_auto_increment values(20)") autoID := autoIDStep + 21 tk.MustQuery(`show create table show_auto_increment`).Check(testkit.RowsWithSep("|", ""+ "show_auto_increment CREATE TABLE `show_auto_increment` (\n"+ " `id` int(11) NOT NULL AUTO_INCREMENT,\n"+ " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT="+strconv.Itoa(int(autoID)), )) tk.MustExec(`drop table show_auto_increment`) tk.MustExec(`create table show_auto_increment (id int primary key auto_increment)`) tk.MustQuery(`show create table show_auto_increment`).Check(testkit.RowsWithSep("|", ""+ "show_auto_increment CREATE TABLE `show_auto_increment` (\n"+ " `id` int(11) NOT NULL AUTO_INCREMENT,\n"+ " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", )) tk.MustExec("insert into show_auto_increment values(10)") autoID = autoIDStep + 11 tk.MustQuery(`show create table show_auto_increment`).Check(testkit.RowsWithSep("|", ""+ "show_auto_increment CREATE TABLE `show_auto_increment` (\n"+ " `id` int(11) NOT NULL AUTO_INCREMENT,\n"+ " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT="+strconv.Itoa(int(autoID)), )) // Test show table with column's comment contain escape character // for issue https://github.com/pingcap/tidb/issues/4411 tk.MustExec(`drop table if exists show_escape_character`) tk.MustExec(`create table show_escape_character(id int comment 'a\rb\nc\td\0ef')`) tk.MustQuery(`show create table show_escape_character`).Check(testkit.RowsWithSep("|", ""+ "show_escape_character CREATE TABLE `show_escape_character` (\n"+ " `id` int(11) DEFAULT NULL COMMENT 'a\\rb\\nc d\\0ef'\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", )) // for issue https://github.com/pingcap/tidb/issues/4424 tk.MustExec("drop table if exists show_test") testSQL = `create table show_test( a varchar(10) COMMENT 'a\nb\rc\td\0e' ) COMMENT='a\nb\rc\td\0e';` tk.MustExec(testSQL) testSQL = "show create table show_test;" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "show_test", "CREATE TABLE `show_test` (\n `a` varchar(10) DEFAULT NULL COMMENT 'a\\nb\\rc d\\0e'\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='a\\nb\\rc d\\0e'"} for i, r := range row { require.Equal(t, expectedRow[i], r) } // for issue https://github.com/pingcap/tidb/issues/4425 tk.MustExec("drop table if exists show_test") testSQL = `create table show_test( a varchar(10) DEFAULT 'a\nb\rc\td\0e' );` tk.MustExec(testSQL) testSQL = "show create table show_test;" result = 
tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "show_test", "CREATE TABLE `show_test` (\n `a` varchar(10) DEFAULT 'a\\nb\\rc d\\0e'\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"} for i, r := range row { require.Equal(t, expectedRow[i], r) } // for issue https://github.com/pingcap/tidb/issues/4426 tk.MustExec("drop table if exists show_test") testSQL = `create table show_test( a bit(1), b bit(32) DEFAULT 0b0, c bit(1) DEFAULT 0b1, d bit(10) DEFAULT 0b1010 );` tk.MustExec(testSQL) testSQL = "show create table show_test;" result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "show_test", "CREATE TABLE `show_test` (\n `a` bit(1) DEFAULT NULL,\n `b` bit(32) DEFAULT b'0',\n `c` bit(1) DEFAULT b'1',\n `d` bit(10) DEFAULT b'1010'\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"} for i, r := range row { require.Equal(t, expectedRow[i], r) } // for issue #4255 result = tk.MustQuery(`show function status like '%'`) result.Check(result.Rows()) result = tk.MustQuery(`show plugins like '%'`) result.Check(result.Rows()) // for issue #4740 testSQL = `drop table if exists t` tk.MustExec(testSQL) testSQL = `create table t (a int1, b int2, c int3, d int4, e int8)` tk.MustExec(testSQL) testSQL = `show create table t;` result = tk.MustQuery(testSQL) require.Len(t, result.Rows(), 1) row = result.Rows()[0] expectedRow = []interface{}{ "t", "CREATE TABLE `t` (\n" + " `a` tinyint(4) DEFAULT NULL,\n" + " `b` smallint(6) DEFAULT NULL,\n" + " `c` mediumint(9) DEFAULT NULL,\n" + " `d` int(11) DEFAULT NULL,\n" + " `e` bigint(20) DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", } for i, r := range row { require.Equal(t, expectedRow[i], r) } // Test get default collate for a specified charset. 
tk.MustExec(`drop table if exists t`) tk.MustExec(`create table t (a int) default charset=utf8mb4`) tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", "t CREATE TABLE `t` (\n"+ " `a` int(11) DEFAULT NULL\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", )) // Test range partition tk.MustExec(`drop table if exists t`) tk.MustExec(`CREATE TABLE t (a int) PARTITION BY RANGE(a) ( PARTITION p0 VALUES LESS THAN (10), PARTITION p1 VALUES LESS THAN (20), PARTITION p2 VALUES LESS THAN (MAXVALUE))`) tk.MustQuery("show create table t").Check(testkit.RowsWithSep("|", "t CREATE TABLE `t` (\n"+ " `a` int(11) DEFAULT NULL\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"+"\nPARTITION BY RANGE (`a`)\n(PARTITION `p0` VALUES LESS THAN (10),\n PARTITION `p1` VALUES LESS THAN (20),\n PARTITION `p2` VALUES LESS THAN (MAXVALUE))", )) tk.MustExec(`drop table if exists t`) tk.MustExecToErr(`CREATE TABLE t (x int, y char) PARTITION BY RANGE(y) ( PARTITION p0 VALUES LESS THAN (10), PARTITION p1 VALUES LESS THAN (20), PARTITION p2 VALUES LESS THAN (MAXVALUE))`) // Test range columns partition tk.MustExec(`drop table if exists t`) tk.MustExec(`CREATE TABLE t (a int, b int, c varchar(25), d int) PARTITION BY RANGE COLUMNS(a,d,c) ( PARTITION p0 VALUES LESS THAN (5,10,'ggg'), PARTITION p1 VALUES LESS THAN (10,20,'mmm'), PARTITION p2 VALUES LESS THAN (15,30,'sss'), PARTITION p3 VALUES LESS THAN (50,MAXVALUE,MAXVALUE))`) tk.MustQuery("show warnings").Check(testkit.Rows()) tk.MustQuery("show create table t").Check(testkit.RowsWithSep("|", "t CREATE TABLE `t` (\n"+ " `a` int(11) DEFAULT NULL,\n"+ " `b` int(11) DEFAULT NULL,\n"+ " `c` varchar(25) DEFAULT NULL,\n"+ " `d` int(11) DEFAULT NULL\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"+ "PARTITION BY RANGE COLUMNS(`a`,`d`,`c`)\n"+ "(PARTITION `p0` VALUES LESS THAN (5,10,'ggg'),\n"+ " PARTITION `p1` VALUES LESS THAN (10,20,'mmm'),\n"+ " PARTITION `p2` VALUES LESS THAN (15,30,'sss'),\n"+ " PARTITION `p3` VALUES LESS THAN (50,MAXVALUE,MAXVALUE))")) // Test hash partition tk.MustExec(`drop table if exists t`) tk.MustExec(`CREATE TABLE t (a int) PARTITION BY HASH(a) PARTITIONS 4`) tk.MustQuery("show create table t").Check(testkit.RowsWithSep("|", "t CREATE TABLE `t` (\n"+ " `a` int(11) DEFAULT NULL\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"+ "PARTITION BY HASH (`a`) PARTITIONS 4")) // Test show create table compression type. 
tk.MustExec(`drop table if exists t1`) tk.MustExec(`CREATE TABLE t1 (c1 INT) COMPRESSION="zlib";`) tk.MustQuery("show create table t1").Check(testkit.RowsWithSep("|", "t1 CREATE TABLE `t1` (\n"+ " `c1` int(11) DEFAULT NULL\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMPRESSION='zlib'", )) // Test show create table year type tk.MustExec(`drop table if exists t`) tk.MustExec(`create table t(y year unsigned signed zerofill zerofill, x int, primary key(y) nonclustered);`) tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", "t CREATE TABLE `t` (\n"+ " `y` year(4) NOT NULL,\n"+ " `x` int(11) DEFAULT NULL,\n"+ " PRIMARY KEY (`y`) /*T![clustered_index] NONCLUSTERED */\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) // Test show create table with zerofill flag tk.MustExec(`drop table if exists t`) tk.MustExec(`create table t(id int primary key, val tinyint(10) zerofill);`) tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", "t CREATE TABLE `t` (\n"+ " `id` int(11) NOT NULL,\n"+ " `val` tinyint(10) unsigned zerofill DEFAULT NULL,\n"+ " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */\n"+ ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) // Test show columns with different types of default value tk.MustExec(`drop table if exists t`) tk.MustExec(`create table t( c0 int default 1, c1 int default b'010', c2 bigint default x'A7', c3 bit(8) default b'00110001', c4 varchar(6) default b'00110001', c5 varchar(6) default '\'C6\'', c6 enum('s', 'm', 'l', 'xl') default 'xl', c7 set('a', 'b', 'c', 'd') default 'a,c,c', c8 datetime default current_timestamp on update current_timestamp, c9 year default '2014', c10 enum('2', '3', '4') default 2 );`) tk.MustQuery(`show columns from t`).Check(testkit.RowsWithSep("|", "c0|int(11)|YES||1|", "c1|int(11)|YES||2|", "c2|bigint(20)|YES||167|", "c3|bit(8)|YES||b'110001'|", "c4|varchar(6)|YES||1|", "c5|varchar(6)|YES||'C6'|", "c6|enum('s','m','l','xl')|YES||xl|", "c7|set('a','b','c','d')|YES||a,c|", "c8|datetime|YES||CURRENT_TIMESTAMP|DEFAULT_GENERATED on update CURRENT_TIMESTAMP", "c9|year(4)|YES||2014|", "c10|enum('2','3','4')|YES||3|", )) // Test if 'show [status|variables]' is sorted by Variable_name (#14542) sqls := []string{ "show global status;", "show session status;", "show global variables", "show session variables"} for _, sql := range sqls { res := tk.MustQuery(sql) require.NotNil(t, res) sorted := res.Sort() require.Equal(t, sorted, res) } } func TestShowStatsHealthy(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int)") tk.MustExec("create index idx on t(a)") tk.MustExec("analyze table t") tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 100")) tk.MustExec("insert into t values (1), (2)") do, _ := session.GetDomain(store) err := do.StatsHandle().DumpStatsDeltaToKV(true) require.NoError(t, err) tk.MustExec("analyze table t") tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 100")) tk.MustExec("insert into t values (3), (4), (5), (6), (7), (8), (9), (10)") err = do.StatsHandle().DumpStatsDeltaToKV(true) require.NoError(t, err) err = do.StatsHandle().Update(do.InfoSchema()) require.NoError(t, err) tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 0")) tk.MustExec("analyze table t") tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 100")) tk.MustExec("delete from t") err = 
do.StatsHandle().DumpStatsDeltaToKV(true) require.NoError(t, err) err = do.StatsHandle().Update(do.InfoSchema()) require.NoError(t, err) tk.MustQuery("show stats_healthy").Check(testkit.Rows("test t 0")) } // TestIndexDoubleReadClose checks that when a index double read returns before reading all the rows, the goroutine doesn't // leak. For testing distsql with multiple regions, we need to manually split a mock TiKV. func TestIndexDoubleReadClose(t *testing.T) { store := testkit.CreateMockStore(t) if _, ok := store.GetClient().(*copr.CopClient); !ok { // Make sure the store is tikv store. return } originSize := atomic.LoadInt32(&executor.LookupTableTaskChannelSize) atomic.StoreInt32(&executor.LookupTableTaskChannelSize, 1) tk := testkit.NewTestKit(t, store) tk.MustExec("set @@tidb_index_lookup_size = '10'") tk.MustExec("use test") tk.MustExec("create table dist (id int primary key, c_idx int, c_col int, index (c_idx))") // Insert 100 rows. var values []string for i := 0; i < 100; i++ { values = append(values, fmt.Sprintf("(%d, %d, %d)", i, i, i)) } tk.MustExec("insert dist values " + strings.Join(values, ",")) rs, err := tk.Exec("select * from dist where c_idx between 0 and 100") require.NoError(t, err) req := rs.NewChunk(nil) err = rs.Next(context.Background(), req) require.NoError(t, err) require.NoError(t, err) keyword := "pickAndExecTask" require.NoError(t, rs.Close()) require.Eventually(t, func() bool { return !checkGoroutineExists(keyword) }, time.Millisecond*100, time.Millisecond*10) atomic.StoreInt32(&executor.LookupTableTaskChannelSize, originSize) } // TestIndexMergeReaderClose checks that when a partial index worker failed to start, the goroutine doesn't // leak. func TestIndexMergeReaderClose(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int, b int)") tk.MustExec("create index idx1 on t(a)") tk.MustExec("create index idx2 on t(b)") require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/startPartialIndexWorkerErr", "return")) err := tk.QueryToErr("select /*+ USE_INDEX_MERGE(t, idx1, idx2) */ * from t where a > 10 or b < 100") require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/executor/startPartialIndexWorkerErr")) require.Error(t, err) require.Eventually(t, func() bool { return !checkGoroutineExists("fetchLoop") }, 5*time.Second, 100*time.Microsecond) require.Eventually(t, func() bool { return !checkGoroutineExists("fetchHandles") }, 5*time.Second, 100*time.Microsecond) require.Eventually(t, func() bool { return !checkGoroutineExists("waitPartialWorkersAndCloseFetchChan") }, 5*time.Second, 100*time.Microsecond) } func TestParallelHashAggClose(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec(`use test`) tk.MustExec(`drop table if exists t`) tk.MustExec("create table t(a int, b int)") tk.MustExec("insert into t values(1,1),(2,2)") // desc select sum(a) from (select cast(t.a as signed) as a, b from t) t group by b // HashAgg_8 | 2.40 | root | group by:t.b, funcs:sum(t.a) // └─Projection_9 | 3.00 | root | cast(test.t.a), test.t.b // └─TableReader_11 | 3.00 | root | data:TableFullScan_10 // └─TableFullScan_10 | 3.00 | cop[tikv] | table:t, keep order:fa$se, stats:pseudo | // Goroutine should not leak when error happen. 
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/aggregate/parallelHashAggError", `return(true)`)) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/executor/aggregate/parallelHashAggError")) }() ctx := context.Background() rss, err := tk.Session().Execute(ctx, "select sum(a) from (select cast(t.a as signed) as a, b from t) t group by b;") require.NoError(t, err) rs := rss[0] req := rs.NewChunk(nil) err = rs.Next(ctx, req) require.EqualError(t, err, "HashAggExec.parallelExec error") } func TestUnparallelHashAggClose(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec(`use test`) tk.MustExec(`drop table if exists t`) tk.MustExec("create table t(a int, b int)") tk.MustExec("insert into t values(1,1),(2,2)") // Goroutine should not leak when error happen. require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/aggregate/unparallelHashAggError", `return(true)`)) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/executor/aggregate/unparallelHashAggError")) }() ctx := context.Background() rss, err := tk.Session().Execute(ctx, "select sum(distinct a) from (select cast(t.a as signed) as a, b from t) t group by b;") require.NoError(t, err) rs := rss[0] req := rs.NewChunk(nil) err = rs.Next(ctx, req) require.EqualError(t, err, "HashAggExec.unparallelExec error") } func checkGoroutineExists(keyword string) bool { buf := new(bytes.Buffer) profile := pprof.Lookup("goroutine") err := profile.WriteTo(buf, 1) if err != nil { panic(err) } str := buf.String() return strings.Contains(str, keyword) } func TestAdminShowNextID(t *testing.T) { store := testkit.CreateMockStore(t) HelperTestAdminShowNextID(t, store, `admin show `) HelperTestAdminShowNextID(t, store, `show table `) } func HelperTestAdminShowNextID(t *testing.T, store kv.Storage, str string) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/meta/autoid/mockAutoIDChange", `return(true)`)) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/meta/autoid/mockAutoIDChange")) }() step := int64(10) autoIDStep := autoid.GetStep() autoid.SetStep(step) defer autoid.SetStep(autoIDStep) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t,tt") tk.MustExec("create table t(id int, c int)") // Start handle is 1. r := tk.MustQuery(str + " t next_row_id") r.Check(testkit.Rows("test t _tidb_rowid 1 _TIDB_ROWID")) // Row ID is step + 1. tk.MustExec("insert into t values(1, 1)") r = tk.MustQuery(str + " t next_row_id") r.Check(testkit.Rows("test t _tidb_rowid 11 _TIDB_ROWID")) // Row ID is original + step. for i := 0; i < int(step); i++ { tk.MustExec("insert into t values(10000, 1)") } r = tk.MustQuery(str + " t next_row_id") r.Check(testkit.Rows("test t _tidb_rowid 21 _TIDB_ROWID")) tk.MustExec("drop table t") // test for a table with the primary key tk.MustExec("create table tt(id int primary key auto_increment, c int)") // Start handle is 1. r = tk.MustQuery(str + " tt next_row_id") r.Check(testkit.Rows("test tt id 1 _TIDB_ROWID", "test tt id 1 AUTO_INCREMENT")) // After rebasing auto ID, row ID is 20 + step + 1. 
tk.MustExec("insert into tt values(20, 1)") r = tk.MustQuery(str + " tt next_row_id") r.Check(testkit.Rows("test tt id 31 _TIDB_ROWID", "test tt id 1 AUTO_INCREMENT")) // test for renaming the table tk.MustExec("drop database if exists test1") tk.MustExec("create database test1") tk.MustExec("rename table test.tt to test1.tt") tk.MustExec("use test1") r = tk.MustQuery(str + " tt next_row_id") r.Check(testkit.Rows("test1 tt id 31 _TIDB_ROWID", "test1 tt id 1 AUTO_INCREMENT")) tk.MustQuery(`select * from tt`).Sort().Check(testkit.Rows("20 1")) tk.MustExec("insert test1.tt values ()") r = tk.MustQuery(str + " tt next_row_id") r.Check(testkit.Rows("test1 tt id 31 _TIDB_ROWID", "test1 tt id 1 AUTO_INCREMENT")) tk.MustQuery(`select * from tt`).Sort().Check(testkit.Rows("20 1", "21 <nil>")) tk.MustExec("drop table tt") tk.MustExec("drop table if exists t;") tk.MustExec("create table t (a int auto_increment primary key nonclustered, b int);") tk.MustQuery("show table t next_row_id;").Check(testkit.Rows( "test1 t _tidb_rowid 1 _TIDB_ROWID", "test1 t _tidb_rowid 1 AUTO_INCREMENT")) tk.MustExec("set @@allow_auto_random_explicit_insert = true") // Test for a table with auto_random primary key. tk.MustExec("create table t3(id bigint primary key clustered auto_random(5), c int)") // Start handle is 1. r = tk.MustQuery(str + " t3 next_row_id") r.Check(testkit.Rows("test1 t3 id 1 AUTO_RANDOM")) // Insert some rows. tk.MustExec("insert into t3 (c) values (1), (2);") r = tk.MustQuery(str + " t3 next_row_id") r.Check(testkit.Rows("test1 t3 id 11 AUTO_RANDOM")) // Rebase. tk.MustExec("insert into t3 (id, c) values (103, 3);") r = tk.MustQuery(str + " t3 next_row_id") r.Check(testkit.Rows("test1 t3 id 114 AUTO_RANDOM")) // Test for a sequence. tk.MustExec("create sequence seq1 start 15 cache 57") r = tk.MustQuery(str + " seq1 next_row_id") r.Check(testkit.Rows("test1 seq1 _tidb_rowid 1 _TIDB_ROWID", "test1 seq1 15 SEQUENCE")) r = tk.MustQuery("select nextval(seq1)") r.Check(testkit.Rows("15")) r = tk.MustQuery(str + " seq1 next_row_id") r.Check(testkit.Rows("test1 seq1 _tidb_rowid 1 _TIDB_ROWID", "test1 seq1 72 SEQUENCE")) r = tk.MustQuery("select nextval(seq1)") r.Check(testkit.Rows("16")) r = tk.MustQuery(str + " seq1 next_row_id") r.Check(testkit.Rows("test1 seq1 _tidb_rowid 1 _TIDB_ROWID", "test1 seq1 72 SEQUENCE")) r = tk.MustQuery("select setval(seq1, 96)") r.Check(testkit.Rows("96")) r = tk.MustQuery(str + " seq1 next_row_id") r.Check(testkit.Rows("test1 seq1 _tidb_rowid 1 _TIDB_ROWID", "test1 seq1 97 SEQUENCE")) } func TestNoHistoryWhenDisableRetry(t *testing.T) { store := testkit.CreateMockStore(t) setTxnTk := testkit.NewTestKit(t, store) setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists history") tk.MustExec("create table history (a int)") tk.MustExec("set @@autocommit = 0") // retry_limit = 0 will not add history. 
tk.MustExec("set @@tidb_retry_limit = 0") tk.MustExec("insert history values (1)") require.Equal(t, 0, session.GetHistory(tk.Session()).Count()) // Disable auto_retry will add history for auto committed only tk.MustExec("set @@autocommit = 1") tk.MustExec("set @@tidb_retry_limit = 10") tk.MustExec("set @@tidb_disable_txn_auto_retry = 1") require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/session/keepHistory", `return(true)`)) tk.MustExec("insert history values (1)") require.Equal(t, 1, session.GetHistory(tk.Session()).Count()) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/session/keepHistory")) tk.MustExec("begin") tk.MustExec("insert history values (1)") require.Equal(t, 0, session.GetHistory(tk.Session()).Count()) tk.MustExec("commit") // Enable auto_retry will add history for both. tk.MustExec("set @@tidb_disable_txn_auto_retry = 0") require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/session/keepHistory", `return(true)`)) tk.MustExec("insert history values (1)") require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/session/keepHistory")) require.Equal(t, 1, session.GetHistory(tk.Session()).Count()) tk.MustExec("begin") tk.MustExec("insert history values (1)") require.Equal(t, 2, session.GetHistory(tk.Session()).Count()) tk.MustExec("commit") } func TestPrepareMaxParamCountCheck(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t (v int)") normalSQL, normalParams := generateBatchSQL(math.MaxUint16) _, err := tk.Exec(normalSQL, normalParams...) require.NoError(t, err) bigSQL, bigParams := generateBatchSQL(math.MaxUint16 + 2) err = tk.ExecToErr(bigSQL, bigParams...) require.Error(t, err) require.EqualError(t, err, "[executor:1390]Prepared statement contains too many placeholders") } func generateBatchSQL(paramCount int) (sql string, paramSlice []interface{}) { params := make([]interface{}, 0, paramCount) placeholders := make([]string, 0, paramCount) for i := 0; i < paramCount; i++ { params = append(params, i) placeholders = append(placeholders, "(?)") } return "insert into t values " + strings.Join(placeholders, ","), params } func TestCartesianProduct(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(c1 int)") plannercore.AllowCartesianProduct.Store(false) err := tk.ExecToErr("select * from t t1, t t2") require.True(t, plannercore.ErrCartesianProductUnsupported.Equal(err)) err = tk.ExecToErr("select * from t t1 left join t t2 on 1") require.True(t, plannercore.ErrCartesianProductUnsupported.Equal(err)) err = tk.ExecToErr("select * from t t1 right join t t2 on 1") require.True(t, plannercore.ErrCartesianProductUnsupported.Equal(err)) plannercore.AllowCartesianProduct.Store(true) } func TestBatchInsertDelete(t *testing.T) { store := testkit.CreateMockStore(t) originLimit := kv.TxnTotalSizeLimit.Load() defer func() { kv.TxnTotalSizeLimit.Store(originLimit) }() // Set the limitation to a small value, make it easier to reach the limitation. kv.TxnTotalSizeLimit.Store(6000) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists batch_insert") tk.MustExec("create table batch_insert (c int)") tk.MustExec("drop table if exists batch_insert_on_duplicate") tk.MustExec("create table batch_insert_on_duplicate (id int primary key, c int)") // Insert 10 rows. 
tk.MustExec("insert into batch_insert values (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)") r := tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("10")) // Insert 10 rows. tk.MustExec("insert into batch_insert (c) select * from batch_insert;") r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("20")) // Insert 20 rows. tk.MustExec("insert into batch_insert (c) select * from batch_insert;") r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("40")) // Insert 40 rows. tk.MustExec("insert into batch_insert (c) select * from batch_insert;") r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("80")) // Insert 80 rows. tk.MustExec("insert into batch_insert (c) select * from batch_insert;") r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("160")) tk.MustExec("insert into batch_insert (c) select * from batch_insert;") r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("320")) // for on duplicate key for i := 0; i < 320; i++ { tk.MustExec(fmt.Sprintf("insert into batch_insert_on_duplicate values(%d, %d);", i, i)) } r = tk.MustQuery("select count(*) from batch_insert_on_duplicate;") r.Check(testkit.Rows("320")) // This will meet txn too large error. _, err := tk.Exec("insert into batch_insert (c) select * from batch_insert;") require.Error(t, err) require.True(t, kv.ErrTxnTooLarge.Equal(err)) r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("320")) // Test tidb_batch_insert could not work if enable-batch-dml is disabled. tk.MustExec("set @@session.tidb_batch_insert=1;") tk.MustGetErrCode("insert into batch_insert (c) select * from batch_insert;", errno.ErrTxnTooLarge) tk.MustExec("set @@session.tidb_batch_insert=0;") // for on duplicate key tk.MustGetErrCode(`insert into batch_insert_on_duplicate select * from batch_insert_on_duplicate as tt on duplicate key update batch_insert_on_duplicate.id=batch_insert_on_duplicate.id+1000;`, errno.ErrTxnTooLarge) r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("320")) tk.MustExec("SET GLOBAL tidb_enable_batch_dml = 1") defer tk.MustExec("SET GLOBAL tidb_enable_batch_dml = 0") // Change to batch insert mode and batch size to 50. tk.MustExec("set @@session.tidb_batch_insert=1;") tk.MustExec("set @@session.tidb_dml_batch_size=50;") tk.MustExec("insert into batch_insert (c) select * from batch_insert;") r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("640")) // Enlarge the batch size to 150 which is larger than the txn limitation (100). // So the insert will meet error. tk.MustExec("set @@session.tidb_dml_batch_size=600;") _, err = tk.Exec("insert into batch_insert (c) select * from batch_insert;") require.Error(t, err) require.True(t, kv.ErrTxnTooLarge.Equal(err)) r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("640")) // Set it back to 50. tk.MustExec("set @@session.tidb_dml_batch_size=50;") // for on duplicate key tk.MustExec(`insert into batch_insert_on_duplicate select * from batch_insert_on_duplicate as tt on duplicate key update batch_insert_on_duplicate.id=batch_insert_on_duplicate.id+1000;`) r = tk.MustQuery("select count(*) from batch_insert_on_duplicate;") r.Check(testkit.Rows("320")) // Disable BachInsert mode in transition. 
tk.MustExec("begin;") tk.MustGetErrCode("insert into batch_insert (c) select * from batch_insert;", errno.ErrTxnTooLarge) tk.MustExec("rollback;") r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("640")) tk.MustExec("drop table if exists com_batch_insert") tk.MustExec("create table com_batch_insert (c int)") sql := "insert into com_batch_insert values " values := make([]string, 0, 200) for i := 0; i < 200; i++ { values = append(values, "(1)") } sql = sql + strings.Join(values, ",") tk.MustExec(sql) tk.MustQuery("select count(*) from com_batch_insert;").Check(testkit.Rows("200")) // Test case for batch delete. // This will meet txn too large error. tk.MustGetErrCode("delete from batch_insert;", errno.ErrTxnTooLarge) r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("640")) // Enable batch delete and set batch size to 50. tk.MustExec("set @@session.tidb_batch_delete=on;") tk.MustExec("set @@session.tidb_dml_batch_size=50;") tk.MustExec("delete from batch_insert;") // Make sure that all rows are gone. r = tk.MustQuery("select count(*) from batch_insert;") r.Check(testkit.Rows("0")) } type checkPrioClient struct { tikv.Client priority kvrpcpb.CommandPri mu struct { sync.RWMutex checkPrio bool } } func (c *checkPrioClient) setCheckPriority(priority kvrpcpb.CommandPri) { atomic.StoreInt32((*int32)(&c.priority), int32(priority)) } func (c *checkPrioClient) getCheckPriority() kvrpcpb.CommandPri { return (kvrpcpb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority))) } func (c *checkPrioClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { resp, err := c.Client.SendRequest(ctx, addr, req, timeout) c.mu.RLock() defer func() { c.mu.RUnlock() }() if c.mu.checkPrio { switch req.Type { case tikvrpc.CmdCop: if ctx.Value(c) != nil { if c.getCheckPriority() != req.Priority { return nil, errors.New("fail to set priority") } } } } return resp, err } func TestCoprocessorPriority(t *testing.T) { cli := &checkPrioClient{} store := testkit.CreateMockStore(t, mockstore.WithClientHijacker(func(c tikv.Client) tikv.Client { cli.Client = c return cli })) ctx := context.WithValue(context.Background(), cli, 42) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create table t (id int primary key)") tk.MustExec("create table t1 (id int, v int, unique index i_id (id))") defer tk.MustExec("drop table t") defer tk.MustExec("drop table t1") tk.MustExec("insert into t values (1)") // Insert some data to make sure plan build IndexLookup for t1. for i := 0; i < 10; i++ { tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i)) } cli.mu.Lock() cli.mu.checkPrio = true cli.mu.Unlock() cli.setCheckPriority(kvrpcpb.CommandPri_High) tk.MustQueryWithContext(ctx, "select id from t where id = 1") tk.MustQueryWithContext(ctx, "select * from t1 where id = 1") cli.setCheckPriority(kvrpcpb.CommandPri_Normal) tk.MustQueryWithContext(ctx, "select count(*) from t") tk.MustExecWithContext(ctx, "update t set id = 3") tk.MustExecWithContext(ctx, "delete from t") tk.MustExecWithContext(ctx, "insert into t select * from t limit 2") tk.MustExecWithContext(ctx, "delete from t") // Insert some data to make sure plan build IndexLookup for t. 
tk.MustExecWithContext(ctx, "insert into t values (1), (2)") defer config.RestoreFunc()() config.UpdateGlobal(func(conf *config.Config) { conf.Log.ExpensiveThreshold = 0 }) cli.setCheckPriority(kvrpcpb.CommandPri_High) tk.MustQueryWithContext(ctx, "select id from t where id = 1") tk.MustQueryWithContext(ctx, "select * from t1 where id = 1") tk.MustExecWithContext(ctx, "delete from t where id = 2") tk.MustExecWithContext(ctx, "update t set id = 2 where id = 1") cli.setCheckPriority(kvrpcpb.CommandPri_Low) tk.MustQueryWithContext(ctx, "select count(*) from t") tk.MustExecWithContext(ctx, "delete from t") tk.MustExecWithContext(ctx, "insert into t values (3)") // Test priority specified by SQL statement. cli.setCheckPriority(kvrpcpb.CommandPri_High) tk.MustQueryWithContext(ctx, "select HIGH_PRIORITY * from t") cli.setCheckPriority(kvrpcpb.CommandPri_Low) tk.MustQueryWithContext(ctx, "select LOW_PRIORITY id from t where id = 1") cli.setCheckPriority(kvrpcpb.CommandPri_High) tk.MustExecWithContext(ctx, "set tidb_force_priority = 'HIGH_PRIORITY'") tk.MustQueryWithContext(ctx, "select * from t").Check(testkit.Rows("3")) tk.MustExecWithContext(ctx, "update t set id = id + 1") tk.MustQueryWithContext(ctx, "select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1")) cli.setCheckPriority(kvrpcpb.CommandPri_Low) tk.MustExecWithContext(ctx, "set tidb_force_priority = 'LOW_PRIORITY'") tk.MustQueryWithContext(ctx, "select * from t").Check(testkit.Rows("4")) tk.MustExecWithContext(ctx, "update t set id = id + 1") tk.MustQueryWithContext(ctx, "select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1")) cli.setCheckPriority(kvrpcpb.CommandPri_Normal) tk.MustExecWithContext(ctx, "set tidb_force_priority = 'DELAYED'") tk.MustQueryWithContext(ctx, "select * from t").Check(testkit.Rows("5")) tk.MustExecWithContext(ctx, "update t set id = id + 1") tk.MustQueryWithContext(ctx, "select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1")) cli.setCheckPriority(kvrpcpb.CommandPri_Low) tk.MustExecWithContext(ctx, "set tidb_force_priority = 'NO_PRIORITY'") tk.MustQueryWithContext(ctx, "select * from t").Check(testkit.Rows("6")) tk.MustExecWithContext(ctx, "update t set id = id + 1") tk.MustQueryWithContext(ctx, "select v from t1 where id = 0 or id = 1").Check(testkit.Rows("0", "1")) cli.mu.Lock() cli.mu.checkPrio = false cli.mu.Unlock() } func TestAutoIncIDInRetry(t *testing.T) { store := testkit.CreateMockStore(t) setTxnTk := testkit.NewTestKit(t, store) setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t;") tk.MustExec("create table t (id int not null auto_increment primary key)") tk.MustExec("set @@tidb_disable_txn_auto_retry = 0") tk.MustExec("begin") tk.MustExec("insert into t values ()") tk.MustExec("insert into t values (),()") tk.MustExec("insert into t values ()") require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/session/mockCommitRetryForAutoIncID", `return(true)`)) tk.MustExec("commit") require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/session/mockCommitRetryForAutoIncID")) tk.MustExec("insert into t values ()") tk.MustQuery(`select * from t`).Check(testkit.Rows("1", "2", "3", "4", "5")) } func TestPessimisticConflictRetryAutoID(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t;") tk.MustExec("create table t (id int not null auto_increment unique key, idx 
int unique key, c int);") concurrency := 2 var wg sync.WaitGroup var err []error wg.Add(concurrency) err = make([]error, concurrency) for i := 0; i < concurrency; i++ { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("set tidb_txn_mode = 'pessimistic'") tk.MustExec("set autocommit = 1") go func(idx int) { for i := 0; i < 10; i++ { sql := fmt.Sprintf("insert into t(idx, c) values (1, %[1]d) on duplicate key update c = %[1]d", i) _, e := tk.Exec(sql) if e != nil { err[idx] = e wg.Done() return } } wg.Done() }(i) } wg.Wait() for _, e := range err { require.NoError(t, e) } } func TestInsertFromSelectConflictRetryAutoID(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t;") tk.MustExec("create table t (id int not null auto_increment unique key, idx int unique key, c int);") tk.MustExec("create table src (a int);") concurrency := 2 var wg sync.WaitGroup var err []error wgCount := concurrency + 1 wg.Add(wgCount) err = make([]error, concurrency) for i := 0; i < concurrency; i++ { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") go func(idx int) { for i := 0; i < 10; i++ { sql := fmt.Sprintf("insert into t(idx, c) select 1 as idx, 1 as c from src on duplicate key update c = %[1]d", i) _, e := tk.Exec(sql) if e != nil { err[idx] = e wg.Done() return } } wg.Done() }(i) } var insertErr error go func() { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") for i := 0; i < 10; i++ { _, e := tk.Exec("insert into src values (null);") if e != nil { insertErr = e wg.Done() return } } wg.Done() }() wg.Wait() for _, e := range err { require.NoError(t, e) } require.NoError(t, insertErr) } func TestAutoRandIDRetry(t *testing.T) { store := testkit.CreateMockStore(t) setTxnTk := testkit.NewTestKit(t, store) setTxnTk.MustExec("set global tidb_txn_mode=''") tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create database if not exists auto_random_retry") tk.MustExec("use auto_random_retry") tk.MustExec("drop table if exists t") tk.MustExec("create table t (id bigint auto_random(3) primary key clustered)") extractMaskedOrderedHandles := func() []int64 { handles, err := ddltestutil.ExtractAllTableHandles(tk.Session(), "auto_random_retry", "t") require.NoError(t, err) return testutil.MaskSortHandles(handles, 3, mysql.TypeLong) } tk.MustExec("set @@tidb_disable_txn_auto_retry = 0") tk.MustExec("set @@tidb_retry_limit = 10") tk.MustExec("begin") tk.MustExec("insert into t values ()") tk.MustExec("insert into t values (),()") tk.MustExec("insert into t values ()") session.ResetMockAutoRandIDRetryCount(5) fpName := "github.com/pingcap/tidb/pkg/session/mockCommitRetryForAutoRandID" require.NoError(t, failpoint.Enable(fpName, `return(true)`)) tk.MustExec("commit") require.NoError(t, failpoint.Disable(fpName)) tk.MustExec("insert into t values ()") maskedHandles := extractMaskedOrderedHandles() require.Equal(t, []int64{1, 2, 3, 4, 5}, maskedHandles) session.ResetMockAutoRandIDRetryCount(11) tk.MustExec("begin") tk.MustExec("insert into t values ()") require.NoError(t, failpoint.Enable(fpName, `return(true)`)) // Insertion failure will skip the 6 in retryInfo. 
tk.MustGetErrCode("commit", errno.ErrTxnRetryable) require.NoError(t, failpoint.Disable(fpName)) tk.MustExec("insert into t values ()") maskedHandles = extractMaskedOrderedHandles() require.Equal(t, []int64{1, 2, 3, 4, 5, 7}, maskedHandles) } func TestAutoRandRecoverTable(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("create database if not exists test_recover") tk.MustExec("use test_recover") tk.MustExec("drop table if exists t_recover_auto_rand") defer func(originGC bool) { if originGC { util.EmulatorGCEnable() } else { util.EmulatorGCDisable() } }(util.IsEmulatorGCEnable()) // Disable emulator GC. // Otherwise, emulator GC will delete table record as soon as possible after execute drop table ddl. util.EmulatorGCDisable() gcTimeFormat := "20060102-15:04:05 -0700 MST" timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat) safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '') ON DUPLICATE KEY UPDATE variable_value = '%[1]s'` // Set GC safe point. tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) err := gcutil.EnableGC(tk.Session()) require.NoError(t, err) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/meta/autoid/mockAutoIDChange", `return(true)`)) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/meta/autoid/mockAutoIDChange")) }() const autoRandIDStep = 5000 stp := autoid.GetStep() autoid.SetStep(autoRandIDStep) defer autoid.SetStep(stp) // Check rebase auto_random id. tk.MustExec("create table t_recover_auto_rand (a bigint auto_random(5) primary key clustered);") tk.MustExec("insert into t_recover_auto_rand values (),(),()") tk.MustExec("drop table t_recover_auto_rand") tk.MustExec("recover table t_recover_auto_rand") tk.MustExec("insert into t_recover_auto_rand values (),(),()") hs, err := ddltestutil.ExtractAllTableHandles(tk.Session(), "test_recover", "t_recover_auto_rand") require.NoError(t, err) ordered := testutil.MaskSortHandles(hs, 5, mysql.TypeLong) require.Equal(t, []int64{1, 2, 3, autoRandIDStep + 1, autoRandIDStep + 2, autoRandIDStep + 3}, ordered) } func TestOOMPanicInHashJoinWhenFetchBuildRows(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(c1 int, c2 int)") tk.MustExec("insert into t values(1,1),(2,2)") fpName := "github.com/pingcap/tidb/pkg/executor/errorFetchBuildSideRowsMockOOMPanic" require.NoError(t, failpoint.Enable(fpName, `panic("ERROR 1105 (HY000): Out Of Memory Quota![conn=1]")`)) defer func() { require.NoError(t, failpoint.Disable(fpName)) }() err := tk.QueryToErr("select * from t as t2 join t as t1 where t1.c1=t2.c1") require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn=1]") } func TestIssue18744(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec(`use test;`) tk.MustExec(`drop table if exists t, t1;`) tk.MustExec(`CREATE TABLE t ( id int(11) NOT NULL, a bigint(20) DEFAULT NULL, b char(20) DEFAULT NULL, c datetime DEFAULT NULL, d double DEFAULT NULL, e json DEFAULT NULL, f decimal(40,6) DEFAULT NULL, PRIMARY KEY (id), KEY a (a), KEY b (b), KEY c (c), KEY d (d), KEY f (f) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;`) tk.MustExec(`CREATE TABLE t1 ( id int(11) NOT NULL, a bigint(20) DEFAULT NULL, b char(20) DEFAULT NULL, c datetime DEFAULT NULL, d double DEFAULT 
NULL, e json DEFAULT NULL, f decimal(40,6) DEFAULT NULL, PRIMARY KEY (id), KEY a (a), KEY b (b), KEY c (c), KEY d (d), KEY f (f) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;`) tk.MustExec(`insert into t1(id) values(0),(1),(2);`) tk.MustExec(`insert into t values(0, 2010, "2010-01-01 01:01:00" , "2010-01-01 01:01:00" , 2010 , 2010 , 2010.000000);`) tk.MustExec(`insert into t values(1 , NULL , NULL , NULL , NULL , NULL , NULL);`) tk.MustExec(`insert into t values(2 , 2012 , "2012-01-01 01:01:00" , "2012-01-01 01:01:00" , 2012 , 2012 , 2012.000000);`) tk.MustExec(`set tidb_index_lookup_join_concurrency=1`) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/testIndexHashJoinOuterWorkerErr", "return")) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/executor/testIndexHashJoinOuterWorkerErr")) }() err := tk.QueryToErr(`select /*+ inl_hash_join(t2) */ t1.id, t2.id from t1 join t t2 on t1.a = t2.a order by t1.a ASC limit 1;`) require.EqualError(t, err, "mockIndexHashJoinOuterWorkerErr") } func TestAnalyzeNextRawErrorNoLeak(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t1") tk.MustExec("create table t1(id int, c varchar(32))") tk.MustExec("set @@session.tidb_analyze_version = 2") require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/distsql/mockNextRawError", `return(true)`)) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/distsql/mockNextRawError")) }() tk.MustGetErrMsg("analyze table t1", "mockNextRawError") }
pkg/executor/test/seqtest/seq_executor_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.017428116872906685, 0.0004494450113270432, 0.00016053601575549692, 0.00017133596702478826, 0.001660287962295115 ]
{ "id": 5, "code_window": [ "\n", "\treturn ctx\n", "}\n", "\n", "// TypeFlags returns the type flags\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "// ErrCtx returns the error context\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\treturn sc.errCtx\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 492 }
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stmtctx_test import ( "context" "encoding/json" "fmt" "math/rand" "reflect" "sort" "testing" "time" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/util" ) func TestCopTasksDetails(t *testing.T) { ctx := stmtctx.NewStmtCtx() backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"} for i := 0; i < 100; i++ { d := &execdetails.ExecDetails{ DetailsNeedP90: execdetails.DetailsNeedP90{ CalleeAddress: fmt.Sprintf("%v", i+1), BackoffSleep: make(map[string]time.Duration), BackoffTimes: make(map[string]int), TimeDetail: util.TimeDetail{ ProcessTime: time.Second * time.Duration(i+1), WaitTime: time.Millisecond * time.Duration(i+1), }, }, } for _, backoff := range backoffs { d.BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(i+1) d.BackoffTimes[backoff] = i + 1 } ctx.MergeExecDetails(d, nil) } d := ctx.CopTasksDetails() require.Equal(t, 100, d.NumCopTasks) require.Equal(t, time.Second*101/2, d.AvgProcessTime) require.Equal(t, time.Second*91, d.P90ProcessTime) require.Equal(t, time.Second*100, d.MaxProcessTime) require.Equal(t, "100", d.MaxProcessAddress) require.Equal(t, time.Millisecond*101/2, d.AvgWaitTime) require.Equal(t, time.Millisecond*91, d.P90WaitTime) require.Equal(t, time.Millisecond*100, d.MaxWaitTime) require.Equal(t, "100", d.MaxWaitAddress) fields := d.ToZapFields() require.Equal(t, 9, len(fields)) for _, backoff := range backoffs { require.Equal(t, "100", d.MaxBackoffAddress[backoff]) require.Equal(t, 100*time.Millisecond*100, d.MaxBackoffTime[backoff]) require.Equal(t, time.Millisecond*100*91, d.P90BackoffTime[backoff]) require.Equal(t, time.Millisecond*100*101/2, d.AvgBackoffTime[backoff]) require.Equal(t, 101*50, d.TotBackoffTimes[backoff]) require.Equal(t, 101*50*100*time.Millisecond, d.TotBackoffTime[backoff]) } } func TestStatementContextPushDownFLags(t *testing.T) { newStmtCtx := func(fn func(*stmtctx.StatementContext)) *stmtctx.StatementContext { sc := stmtctx.NewStmtCtx() fn(sc) return sc } testCases := []struct { in *stmtctx.StatementContext out uint64 }{ {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InInsertStmt = true }), 8}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InUpdateStmt = true }), 16}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InDeleteStmt = true }), 16}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InSelectStmt = true }), 32}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) }), 1}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) }), 66}, {newStmtCtx(func(sc *stmtctx.StatementContext) { 
sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) }), 128}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.DividedByZeroAsWarning = true }), 256}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InLoadDataStmt = true }), 1024}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InSelectStmt = true sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) }), 98}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.DividedByZeroAsWarning = true sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) }), 257}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InUpdateStmt = true sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) sc.InLoadDataStmt = true }), 1168}, } for _, tt := range testCases { got := tt.in.PushDownFlags() require.Equal(t, tt.out, got) } } func TestWeakConsistencyRead(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(id int primary key, c int, c1 int, unique index i(c))") execAndCheck := func(sql string, rows [][]interface{}, isolationLevel kv.IsoLevel) { ctx := context.WithValue(context.Background(), "CheckSelectRequestHook", func(req *kv.Request) { require.Equal(t, req.IsolationLevel, isolationLevel) }) rss, err := tk.Session().Execute(ctx, sql) require.Nil(t, err) for _, rs := range rss { rs.Close() } if rows != nil { tk.MustQuery(sql).Check(rows) } lastWeakConsistency := tk.Session().GetSessionVars().StmtCtx.WeakConsistency require.Equal(t, lastWeakConsistency, isolationLevel == kv.RC) } // strict execAndCheck("insert into t values(1, 1, 1)", nil, kv.SI) execAndCheck("select * from t", testkit.Rows("1 1 1"), kv.SI) tk.MustExec("prepare s from 'select * from t'") tk.MustExec("prepare u from 'update t set c1 = id + 1'") execAndCheck("execute s", testkit.Rows("1 1 1"), kv.SI) execAndCheck("execute u", nil, kv.SI) execAndCheck("admin check table t", nil, kv.SI) // weak tk.MustExec("set tidb_read_consistency = weak") execAndCheck("insert into t values(2, 2, 2)", nil, kv.SI) execAndCheck("select * from t", testkit.Rows("1 1 2", "2 2 2"), kv.RC) execAndCheck("execute s", testkit.Rows("1 1 2", "2 2 2"), kv.RC) execAndCheck("execute u", nil, kv.SI) // non-read-only queries should be strict execAndCheck("admin check table t", nil, kv.SI) execAndCheck("update t set c = c + 1 where id = 2", nil, kv.SI) execAndCheck("delete from t where id = 2", nil, kv.SI) // in-transaction queries should be strict tk.MustExec("begin") execAndCheck("select * from t", testkit.Rows("1 1 2"), kv.SI) execAndCheck("execute s", testkit.Rows("1 1 2"), kv.SI) tk.MustExec("rollback") } func TestMarshalSQLWarn(t *testing.T) { warns := []stmtctx.SQLWarn{ { Level: stmtctx.WarnLevelError, Err: errors.New("any error"), }, { Level: stmtctx.WarnLevelError, Err: errors.Trace(errors.New("any error")), }, { Level: stmtctx.WarnLevelWarning, Err: variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown"), }, { Level: stmtctx.WarnLevelWarning, Err: errors.Trace(variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown")), }, } store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) // First query can trigger loading global variables, which produces warnings. tk.MustQuery("select 1") tk.Session().GetSessionVars().StmtCtx.SetWarnings(warns) rows := tk.MustQuery("show warnings").Rows() require.Equal(t, len(warns), len(rows)) // The unmarshalled result doesn't need to be exactly the same with the original one. 
	// We only need that the results of `show warnings` are the same.
	bytes, err := json.Marshal(warns)
	require.NoError(t, err)
	var newWarns []stmtctx.SQLWarn
	err = json.Unmarshal(bytes, &newWarns)
	require.NoError(t, err)
	tk.Session().GetSessionVars().StmtCtx.SetWarnings(newWarns)
	tk.MustQuery("show warnings").Check(rows)
}

func TestApproxRuntimeInfo(t *testing.T) {
	var n = rand.Intn(19000) + 1000
	var valRange = rand.Int31n(10000) + 1000
	backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"}
	details := []*execdetails.ExecDetails{}
	for i := 0; i < n; i++ {
		d := &execdetails.ExecDetails{
			DetailsNeedP90: execdetails.DetailsNeedP90{
				CalleeAddress: fmt.Sprintf("%v", i+1),
				BackoffSleep:  make(map[string]time.Duration),
				BackoffTimes:  make(map[string]int),
				TimeDetail: util.TimeDetail{
					ProcessTime: time.Second * time.Duration(rand.Int31n(valRange)),
					WaitTime:    time.Millisecond * time.Duration(rand.Int31n(valRange)),
				},
			},
		}
		details = append(details, d)
		for _, backoff := range backoffs {
			d.BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(rand.Int31n(valRange))
			d.BackoffTimes[backoff] = rand.Intn(int(valRange))
		}
	}
	// Make CalleeAddress for each max value is deterministic.
	details[rand.Intn(n)].DetailsNeedP90.TimeDetail.ProcessTime = time.Second * time.Duration(valRange)
	details[rand.Intn(n)].DetailsNeedP90.TimeDetail.WaitTime = time.Millisecond * time.Duration(valRange)
	for _, backoff := range backoffs {
		details[rand.Intn(n)].BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(valRange)
	}
	ctx := stmtctx.NewStmtCtx()
	for i := 0; i < n; i++ {
		ctx.MergeExecDetails(details[i], nil)
	}
	d := ctx.CopTasksDetails()

	require.Equal(t, d.NumCopTasks, n)
	sort.Slice(details, func(i, j int) bool {
		return details[i].TimeDetail.ProcessTime.Nanoseconds() < details[j].TimeDetail.ProcessTime.Nanoseconds()
	})
	var timeSum time.Duration
	for _, detail := range details {
		timeSum += detail.TimeDetail.ProcessTime
	}
	require.Equal(t, d.AvgProcessTime, timeSum/time.Duration(n))
	require.InEpsilon(t, d.P90ProcessTime.Nanoseconds(), details[n*9/10].TimeDetail.ProcessTime.Nanoseconds(), 0.05)
	require.Equal(t, d.MaxProcessTime, details[n-1].TimeDetail.ProcessTime)
	require.Equal(t, d.MaxProcessAddress, details[n-1].CalleeAddress)

	sort.Slice(details, func(i, j int) bool {
		return details[i].TimeDetail.WaitTime.Nanoseconds() < details[j].TimeDetail.WaitTime.Nanoseconds()
	})
	timeSum = 0
	for _, detail := range details {
		timeSum += detail.TimeDetail.WaitTime
	}
	require.Equal(t, d.AvgWaitTime, timeSum/time.Duration(n))
	require.InEpsilon(t, d.P90WaitTime.Nanoseconds(), details[n*9/10].TimeDetail.WaitTime.Nanoseconds(), 0.05)
	require.Equal(t, d.MaxWaitTime, details[n-1].TimeDetail.WaitTime)
	require.Equal(t, d.MaxWaitAddress, details[n-1].CalleeAddress)

	fields := d.ToZapFields()
	require.Equal(t, 9, len(fields))
	for _, backoff := range backoffs {
		sort.Slice(details, func(i, j int) bool {
			return details[i].BackoffSleep[backoff].Nanoseconds() < details[j].BackoffSleep[backoff].Nanoseconds()
		})
		timeSum = 0
		var timesSum = 0
		for _, detail := range details {
			timeSum += detail.BackoffSleep[backoff]
			timesSum += detail.BackoffTimes[backoff]
		}
		require.Equal(t, d.MaxBackoffAddress[backoff], details[n-1].CalleeAddress)
		require.Equal(t, d.MaxBackoffTime[backoff], details[n-1].BackoffSleep[backoff])
		require.InEpsilon(t, d.P90BackoffTime[backoff], details[n*9/10].BackoffSleep[backoff], 0.1)
		require.Equal(t, d.AvgBackoffTime[backoff], timeSum/time.Duration(n))
		require.Equal(t, d.TotBackoffTimes[backoff], timesSum)
		require.Equal(t, d.TotBackoffTime[backoff], timeSum)
	}
}

func TestStmtHintsClone(t *testing.T) {
	hints := stmtctx.StmtHints{}
	value := reflect.ValueOf(&hints).Elem()
	for i := 0; i < value.NumField(); i++ {
		field := value.Field(i)
		switch field.Kind() {
		case reflect.Int, reflect.Int32, reflect.Int64:
			field.SetInt(1)
		case reflect.Uint, reflect.Uint32, reflect.Uint64:
			field.SetUint(1)
		case reflect.Uint8: // byte
			field.SetUint(1)
		case reflect.Bool:
			field.SetBool(true)
		case reflect.String:
			field.SetString("test")
		default:
		}
	}
	require.Equal(t, hints, *hints.Clone())
}

func TestNewStmtCtx(t *testing.T) {
	sc := stmtctx.NewStmtCtx()
	require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags())
	require.Same(t, time.UTC, sc.TimeZone())
	require.Same(t, time.UTC, sc.TimeZone())
	sc.AppendWarning(errors.New("err1"))
	warnings := sc.GetWarnings()
	require.Equal(t, 1, len(warnings))
	require.Equal(t, stmtctx.WarnLevelWarning, warnings[0].Level)
	require.Equal(t, "err1", warnings[0].Err.Error())

	tz := time.FixedZone("UTC+1", 2*60*60)
	sc = stmtctx.NewStmtCtxWithTimeZone(tz)
	require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags())
	require.Same(t, tz, sc.TimeZone())
	require.Same(t, tz, sc.TimeZone())
	sc.AppendWarning(errors.New("err2"))
	warnings = sc.GetWarnings()
	require.Equal(t, 1, len(warnings))
	require.Equal(t, stmtctx.WarnLevelWarning, warnings[0].Level)
	require.Equal(t, "err2", warnings[0].Err.Error())
}

func TestSetStmtCtxTimeZone(t *testing.T) {
	sc := stmtctx.NewStmtCtx()
	require.Same(t, time.UTC, sc.TimeZone())
	tz := time.FixedZone("UTC+1", 2*60*60)
	sc.SetTimeZone(tz)
	require.Same(t, tz, sc.TimeZone())
}

func TestSetStmtCtxTypeFlags(t *testing.T) {
	sc := stmtctx.NewStmtCtx()
	require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags())

	sc.SetTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck)
	require.Equal(t, types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, sc.TypeFlags())
	require.Equal(t, sc.TypeFlags(), sc.TypeFlags())

	sc.SetTypeFlags(types.FlagSkipASCIICheck | types.FlagSkipUTF8Check | types.FlagTruncateAsWarning)
	require.Equal(t, types.FlagSkipASCIICheck|types.FlagSkipUTF8Check|types.FlagTruncateAsWarning, sc.TypeFlags())
	require.Equal(t, sc.TypeFlags(), sc.TypeFlags())
}

func TestResetStmtCtx(t *testing.T) {
	sc := stmtctx.NewStmtCtx()
	require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags())

	tz := time.FixedZone("UTC+1", 2*60*60)
	sc.SetTimeZone(tz)
	sc.SetTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck)
	sc.AppendWarning(errors.New("err1"))
	sc.InRestrictedSQL = true
	sc.StmtType = "Insert"

	require.Same(t, tz, sc.TimeZone())
	require.Equal(t, types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, sc.TypeFlags())
	require.Equal(t, 1, len(sc.GetWarnings()))

	sc.Reset()
	require.Same(t, time.UTC, sc.TimeZone())
	require.Same(t, time.UTC, sc.TimeZone())
	require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags())
	require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags())
	require.False(t, sc.InRestrictedSQL)
	require.Empty(t, sc.StmtType)
	require.Equal(t, 0, len(sc.GetWarnings()))

	sc.AppendWarning(errors.New("err2"))
	warnings := sc.GetWarnings()
	require.Equal(t, 1, len(warnings))
	require.Equal(t, stmtctx.WarnLevelWarning, warnings[0].Level)
	require.Equal(t, "err2", warnings[0].Err.Error())
}

func TestStmtCtxID(t *testing.T) {
	sc := stmtctx.NewStmtCtx()
	currentID := sc.CtxID()

	cases := []struct {
		fn func() *stmtctx.StatementContext
	}{
		{func() *stmtctx.StatementContext { return stmtctx.NewStmtCtx() }},
		{func() *stmtctx.StatementContext { return stmtctx.NewStmtCtxWithTimeZone(time.Local) }},
		{func() *stmtctx.StatementContext {
			sc.Reset()
			return sc
		}},
	}

	for _, c := range cases {
		ctxID := c.fn().CtxID()
		require.Greater(t, ctxID, currentID)
		currentID = ctxID
	}
}

func BenchmarkErrCtx(b *testing.B) {
	sc := stmtctx.NewStmtCtx()
	for i := 0; i < b.N; i++ {
		sc.ErrCtx()
	}
}
pkg/sessionctx/stmtctx/stmtctx_test.go
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.985210120677948, 0.10811609029769897, 0.00016533401503693312, 0.00039306815597228706, 0.2818942070007324 ]
{ "id": 5, "code_window": [ "\n", "\treturn ctx\n", "}\n", "\n", "// TypeFlags returns the type flags\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "// ErrCtx returns the error context\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\treturn sc.errCtx\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 492 }
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package label

import (
	"testing"

	"github.com/pingcap/tidb/pkg/testkit/testsetup"
	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	testsetup.SetupForCommonTest()
	opts := []goleak.Option{
		goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
		goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
		goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	}
	goleak.VerifyTestMain(m, opts...)
}
pkg/ddl/label/main_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00017367888358421624, 0.0001686666946625337, 0.00016529082495253533, 0.00016784851322881877, 0.000003078557938351878 ]
{ "id": 5, "code_window": [ "\n", "\treturn ctx\n", "}\n", "\n", "// TypeFlags returns the type flags\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "// ErrCtx returns the error context\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\treturn sc.errCtx\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 492 }
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ddl_test import ( "fmt" "math" "strings" "testing" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testutil" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/testutils" ) func getTableMaxHandle(t *testing.T, d ddl.DDL, tbl table.Table, store kv.Storage) (kv.Handle, bool) { ver, err := store.CurrentVersion(kv.GlobalTxnScope) require.NoError(t, err) maxHandle, emptyTable, err := d.GetTableMaxHandle(ddl.NewJobContext(), ver.Ver, tbl.(table.PhysicalTable)) require.NoError(t, err) return maxHandle, emptyTable } func checkTableMaxHandle(t *testing.T, d ddl.DDL, tbl table.Table, store kv.Storage, expectedEmpty bool, expectedMaxHandle kv.Handle) { maxHandle, emptyHandle := getTableMaxHandle(t, d, tbl, store) require.Equal(t, expectedEmpty, emptyHandle) if expectedEmpty { require.True(t, emptyHandle) require.Nil(t, maxHandle) } else { require.False(t, emptyHandle) testutil.HandleEqual(t, expectedMaxHandle, maxHandle) } } func TestMultiRegionGetTableEndHandle(t *testing.T) { var cluster testutils.Cluster store := testkit.CreateMockStore(t, mockstore.WithClusterInspector(func(c testutils.Cluster) { mockstore.BootstrapWithSingleStore(c) cluster = c })) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create table t(a bigint PRIMARY KEY, b int)") var builder strings.Builder _, _ = fmt.Fprintf(&builder, "insert into t values ") for i := 0; i < 1000; i++ { _, _ = fmt.Fprintf(&builder, "(%v, %v),", i, i) } sql := builder.String() tk.MustExec(sql[:len(sql)-1]) // Get table ID for split. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) require.NoError(t, err) d := dom.DDL() // Split the table. tableStart := tablecodec.GenTableRecordPrefix(tbl.Meta().ID) cluster.SplitKeys(tableStart, tableStart.PrefixNext(), 100) checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(999)) tk.MustExec("insert into t values(10000, 1000)") checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(10000)) tk.MustExec("insert into t values(-1, 1000)") checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(10000)) } func TestGetTableEndHandle(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) // TestGetTableEndHandle test ddl.GetTableMaxHandle method, which will return the max row id of the table. tk := testkit.NewTestKit(t, store) tk.MustExec("use test") // PK is handle. 
tk.MustExec("create table t(a bigint PRIMARY KEY, b int)") is := dom.InfoSchema() d := dom.DDL() tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) require.NoError(t, err) // test empty table checkTableMaxHandle(t, d, tbl, store, true, nil) tk.MustExec("insert into t values(-1, 1)") checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(-1)) tk.MustExec("insert into t values(9223372036854775806, 1)") checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(9223372036854775806)) tk.MustExec("insert into t values(9223372036854775807, 1)") checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(9223372036854775807)) tk.MustExec("insert into t values(10, 1)") tk.MustExec("insert into t values(102149142, 1)") checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(9223372036854775807)) tk.MustExec("create table t1(a bigint PRIMARY KEY, b int)") var builder strings.Builder _, _ = fmt.Fprintf(&builder, "insert into t1 values ") for i := 0; i < 1000; i++ { _, _ = fmt.Fprintf(&builder, "(%v, %v),", i, i) } sql := builder.String() tk.MustExec(sql[:len(sql)-1]) is = dom.InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) require.NoError(t, err) checkTableMaxHandle(t, d, tbl, store, false, kv.IntHandle(999)) // Test PK is not handle tk.MustExec("create table t2(a varchar(255))") is = dom.InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t2")) require.NoError(t, err) checkTableMaxHandle(t, d, tbl, store, true, nil) builder.Reset() _, _ = fmt.Fprintf(&builder, "insert into t2 values ") for i := 0; i < 1000; i++ { _, _ = fmt.Fprintf(&builder, "(%v),", i) } sql = builder.String() tk.MustExec(sql[:len(sql)-1]) result := tk.MustQuery("select MAX(_tidb_rowid) from t2") maxHandle, emptyTable := getTableMaxHandle(t, d, tbl, store) result.Check(testkit.Rows(fmt.Sprintf("%v", maxHandle.IntValue()))) require.False(t, emptyTable) tk.MustExec("insert into t2 values(100000)") result = tk.MustQuery("select MAX(_tidb_rowid) from t2") maxHandle, emptyTable = getTableMaxHandle(t, d, tbl, store) result.Check(testkit.Rows(fmt.Sprintf("%v", maxHandle.IntValue()))) require.False(t, emptyTable) tk.MustExec(fmt.Sprintf("insert into t2 values(%v)", math.MaxInt64-1)) result = tk.MustQuery("select MAX(_tidb_rowid) from t2") maxHandle, emptyTable = getTableMaxHandle(t, d, tbl, store) result.Check(testkit.Rows(fmt.Sprintf("%v", maxHandle.IntValue()))) require.False(t, emptyTable) tk.MustExec(fmt.Sprintf("insert into t2 values(%v)", math.MaxInt64)) result = tk.MustQuery("select MAX(_tidb_rowid) from t2") maxHandle, emptyTable = getTableMaxHandle(t, d, tbl, store) result.Check(testkit.Rows(fmt.Sprintf("%v", maxHandle.IntValue()))) require.False(t, emptyTable) tk.MustExec("insert into t2 values(100)") result = tk.MustQuery("select MAX(_tidb_rowid) from t2") maxHandle, emptyTable = getTableMaxHandle(t, d, tbl, store) result.Check(testkit.Rows(fmt.Sprintf("%v", maxHandle.IntValue()))) require.False(t, emptyTable) } func TestMultiRegionGetTableEndCommonHandle(t *testing.T) { var cluster testutils.Cluster store := testkit.CreateMockStore(t, mockstore.WithClusterInspector(func(c testutils.Cluster) { mockstore.BootstrapWithSingleStore(c) cluster = c })) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn tk.MustExec("create table t(a varchar(20), b int, c float, d bigint, primary key (a, b, c))") var builder strings.Builder _, _ = fmt.Fprintf(&builder, 
"insert into t values ") for i := 0; i < 1000; i++ { _, _ = fmt.Fprintf(&builder, "('%v', %v, %v, %v),", i, i, i, i) } sql := builder.String() tk.MustExec(sql[:len(sql)-1]) // Get table ID for split. dom := domain.GetDomain(tk.Session()) is := dom.InfoSchema() tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) require.NoError(t, err) d := dom.DDL() // Split the table. tableStart := tablecodec.GenTableRecordPrefix(tbl.Meta().ID) cluster.SplitKeys(tableStart, tableStart.PrefixNext(), 100) checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "999", 999, 999)) tk.MustExec("insert into t values('a', 1, 1, 1)") checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "a", 1, 1)) tk.MustExec("insert into t values('0000', 1, 1, 1)") checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "a", 1, 1)) } func TestGetTableEndCommonHandle(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn tk.MustExec("create table t(a varchar(15), b bigint, c int, primary key (a, b))") tk.MustExec("create table t1(a varchar(15), b bigint, c int, primary key (a(2), b))") is := dom.InfoSchema() d := dom.DDL() tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) require.NoError(t, err) // test empty table checkTableMaxHandle(t, d, tbl, store, true, nil) tk.MustExec("insert into t values('abc', 1, 10)") checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "abc", 1)) tk.MustExec("insert into t values('abchzzzzzzzz', 1, 10)") checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "abchzzzzzzzz", 1)) tk.MustExec("insert into t values('a', 1, 10)") tk.MustExec("insert into t values('ab', 1, 10)") checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "abchzzzzzzzz", 1)) // Test MaxTableRowID with prefixed primary key. 
tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) require.NoError(t, err) d = dom.DDL() checkTableMaxHandle(t, d, tbl, store, true, nil) tk.MustExec("insert into t1 values('abccccc', 1, 10)") checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "ab", 1)) tk.MustExec("insert into t1 values('azzzz', 1, 10)") checkTableMaxHandle(t, d, tbl, store, false, testutil.MustNewCommonHandle(t, "az", 1)) } func TestCreateClusteredIndex(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn tk.MustExec("CREATE TABLE t1 (a int primary key, b int)") tk.MustExec("CREATE TABLE t2 (a varchar(255) primary key, b int)") tk.MustExec("CREATE TABLE t3 (a int, b int, c int, primary key (a, b))") tk.MustExec("CREATE TABLE t4 (a int, b int, c int)") is := domain.GetDomain(tk.Session()).InfoSchema() tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) require.NoError(t, err) require.True(t, tbl.Meta().PKIsHandle) require.False(t, tbl.Meta().IsCommonHandle) tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t2")) require.NoError(t, err) require.True(t, tbl.Meta().IsCommonHandle) tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t3")) require.NoError(t, err) require.True(t, tbl.Meta().IsCommonHandle) tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t4")) require.NoError(t, err) require.False(t, tbl.Meta().IsCommonHandle) tk.MustExec("CREATE TABLE t5 (a varchar(255) primary key nonclustered, b int)") tk.MustExec("CREATE TABLE t6 (a int, b int, c int, primary key (a, b) nonclustered)") is = domain.GetDomain(tk.Session()).InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t5")) require.NoError(t, err) require.False(t, tbl.Meta().IsCommonHandle) tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t6")) require.NoError(t, err) require.False(t, tbl.Meta().IsCommonHandle) tk.MustExec("CREATE TABLE t21 like t2") tk.MustExec("CREATE TABLE t31 like t3") is = domain.GetDomain(tk.Session()).InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t21")) require.NoError(t, err) require.True(t, tbl.Meta().IsCommonHandle) tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t31")) require.NoError(t, err) require.True(t, tbl.Meta().IsCommonHandle) tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly tk.MustExec("CREATE TABLE t7 (a varchar(255) primary key, b int)") is = domain.GetDomain(tk.Session()).InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t7")) require.NoError(t, err) require.False(t, tbl.Meta().IsCommonHandle) }
pkg/ddl/primary_key_handle_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.004280522465705872, 0.0003153795260004699, 0.000163882490596734, 0.00016745431639719754, 0.0007140175439417362 ]
{ "id": 5, "code_window": [ "\n", "\treturn ctx\n", "}\n", "\n", "// TypeFlags returns the type flags\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "// ErrCtx returns the error context\n", "func (sc *StatementContext) ErrCtx() errctx.Context {\n", "\treturn sc.errCtx\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "replace", "edit_start_line_idx": 492 }
// Copyright 2021 PingCAP, Inc. Licensed under Apache-2.0.

package build

import (
	"bytes"
	"fmt"
	"runtime"

	"github.com/pingcap/log"
	"github.com/pingcap/tidb/pkg/parser/mysql"
	"github.com/pingcap/tidb/pkg/util/israce"
	"github.com/pingcap/tidb/pkg/util/versioninfo"
	"go.uber.org/zap"
)

// Version information.
var (
	ReleaseVersion = getReleaseVersion()
	BuildTS        = versioninfo.TiDBBuildTS
	GitHash        = versioninfo.TiDBGitHash
	GitBranch      = versioninfo.TiDBGitBranch
	goVersion      = runtime.Version()
)

func getReleaseVersion() string {
	if mysql.TiDBReleaseVersion != "None" {
		return mysql.TiDBReleaseVersion
	}
	return "v7.0.0-master"
}

// AppName is a name of a built binary.
type AppName string

var (
	// BR is the name of BR binary.
	BR AppName = "Backup & Restore (BR)"
	// Lightning is the name of Lightning binary.
	Lightning AppName = "TiDB-Lightning"
)

// LogInfo logs version information.
func LogInfo(name AppName) {
	oldLevel := log.GetLevel()
	log.SetLevel(zap.InfoLevel)
	defer log.SetLevel(oldLevel)
	log.Info(fmt.Sprintf("Welcome to %s", name),
		zap.String("release-version", ReleaseVersion),
		zap.String("git-hash", GitHash),
		zap.String("git-branch", GitBranch),
		zap.String("go-version", goVersion),
		zap.String("utc-build-time", BuildTS),
		zap.Bool("race-enabled", israce.RaceEnabled))
}

// Info returns version information.
func Info() string {
	buf := bytes.Buffer{}
	fmt.Fprintf(&buf, "Release Version: %s\n", ReleaseVersion)
	fmt.Fprintf(&buf, "Git Commit Hash: %s\n", GitHash)
	fmt.Fprintf(&buf, "Git Branch: %s\n", GitBranch)
	fmt.Fprintf(&buf, "Go Version: %s\n", goVersion)
	fmt.Fprintf(&buf, "UTC Build Time: %s\n", BuildTS)
	fmt.Fprintf(&buf, "Race Enabled: %t", israce.RaceEnabled)
	return buf.String()
}
br/pkg/version/build/info.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00048720609629526734, 0.0002216650900663808, 0.00016683163994457573, 0.0001757201098371297, 0.00010915343591477722 ]
{ "id": 6, "code_window": [ "}\n", "\n", "// SetTypeFlags sets the type flags\n", "func (sc *StatementContext) SetTypeFlags(flags types.Flags) {\n", "\tsc.typeCtx = sc.typeCtx.WithFlags(flags)\n", "}\n", "\n", "// HandleTruncate ignores or returns the error based on the TypeContext inside.\n", "// TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect.\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 503 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stmtctx import ( "bytes" "encoding/json" "fmt" "io" "math" "slices" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain/resourcegroup" "github.com/pingcap/tidb/pkg/errctx" "github.com/pingcap/tidb/pkg/parser" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/linter/constructor" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/nocopy" "github.com/pingcap/tidb/pkg/util/resourcegrouptag" "github.com/pingcap/tidb/pkg/util/topsql/stmtstats" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/tikv/client-go/v2/tikvrpc" "github.com/tikv/client-go/v2/util" atomic2 "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/exp/maps" ) const ( // WarnLevelError represents level "Error" for 'SHOW WARNINGS' syntax. WarnLevelError = "Error" // WarnLevelWarning represents level "Warning" for 'SHOW WARNINGS' syntax. WarnLevelWarning = "Warning" // WarnLevelNote represents level "Note" for 'SHOW WARNINGS' syntax. WarnLevelNote = "Note" ) var taskIDAlloc uint64 // AllocateTaskID allocates a new unique ID for a statement execution func AllocateTaskID() uint64 { return atomic.AddUint64(&taskIDAlloc, 1) } // SQLWarn relates a sql warning and it's level. type SQLWarn struct { Level string Err error } type jsonSQLWarn struct { Level string `json:"level"` SQLErr *terror.Error `json:"err,omitempty"` Msg string `json:"msg,omitempty"` } // MarshalJSON implements the Marshaler.MarshalJSON interface. func (warn *SQLWarn) MarshalJSON() ([]byte, error) { w := &jsonSQLWarn{ Level: warn.Level, } e := errors.Cause(warn.Err) switch x := e.(type) { case *terror.Error: // Omit outter errors because only the most inner error matters. w.SQLErr = x default: w.Msg = e.Error() } return json.Marshal(w) } // UnmarshalJSON implements the Unmarshaler.UnmarshalJSON interface. func (warn *SQLWarn) UnmarshalJSON(data []byte) error { var w jsonSQLWarn if err := json.Unmarshal(data, &w); err != nil { return err } warn.Level = w.Level if w.SQLErr != nil { warn.Err = w.SQLErr } else { warn.Err = errors.New(w.Msg) } return nil } // ReferenceCount indicates the reference count of StmtCtx. type ReferenceCount int32 const ( // ReferenceCountIsFrozen indicates the current StmtCtx is resetting, it'll refuse all the access from other sessions. ReferenceCountIsFrozen int32 = -1 // ReferenceCountNoReference indicates the current StmtCtx is not accessed by other sessions. ReferenceCountNoReference int32 = 0 ) // TryIncrease tries to increase the reference count. 
// There is a small chance that TryIncrease returns true while TryFreeze and // UnFreeze are invoked successfully during the execution of TryIncrease. func (rf *ReferenceCount) TryIncrease() bool { refCnt := atomic.LoadInt32((*int32)(rf)) for ; refCnt != ReferenceCountIsFrozen && !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt+1); refCnt = atomic.LoadInt32((*int32)(rf)) { } return refCnt != ReferenceCountIsFrozen } // Decrease decreases the reference count. func (rf *ReferenceCount) Decrease() { for refCnt := atomic.LoadInt32((*int32)(rf)); !atomic.CompareAndSwapInt32((*int32)(rf), refCnt, refCnt-1); refCnt = atomic.LoadInt32((*int32)(rf)) { } } // TryFreeze tries to freeze the StmtCtx to frozen before resetting the old StmtCtx. func (rf *ReferenceCount) TryFreeze() bool { return atomic.LoadInt32((*int32)(rf)) == ReferenceCountNoReference && atomic.CompareAndSwapInt32((*int32)(rf), ReferenceCountNoReference, ReferenceCountIsFrozen) } // UnFreeze unfreeze the frozen StmtCtx thus the other session can access this StmtCtx. func (rf *ReferenceCount) UnFreeze() { atomic.StoreInt32((*int32)(rf), ReferenceCountNoReference) } var stmtCtxIDGenerator atomic.Uint64 // StatementContext contains variables for a statement. // It should be reset before executing a statement. type StatementContext struct { // NoCopy indicates that this struct cannot be copied because // copying this object will make the copied TypeCtx field to refer a wrong `AppendWarnings` func. _ nocopy.NoCopy _ constructor.Constructor `ctor:"NewStmtCtx,NewStmtCtxWithTimeZone,Reset"` ctxID uint64 // typeCtx is used to indicate how to make the type conversation. typeCtx types.Context // errCtx is used to indicate how to handle the errors errCtx errctx.Context // Set the following variables before execution StmtHints // IsDDLJobInQueue is used to mark whether the DDL job is put into the queue. // If IsDDLJobInQueue is true, it means the DDL job is in the queue of storage, and it can be handled by the DDL worker. IsDDLJobInQueue bool DDLJobID int64 InInsertStmt bool InUpdateStmt bool InDeleteStmt bool InSelectStmt bool InLoadDataStmt bool InExplainStmt bool InExplainAnalyzeStmt bool ExplainFormat string InCreateOrAlterStmt bool InSetSessionStatesStmt bool InPreparedPlanBuilding bool DupKeyAsWarning bool BadNullAsWarning bool DividedByZeroAsWarning bool ErrAutoincReadFailedAsWarning bool InShowWarning bool UseCache bool CacheType PlanCacheType BatchCheck bool InNullRejectCheck bool IgnoreNoPartition bool IgnoreExplainIDSuffix bool MultiSchemaInfo *model.MultiSchemaInfo // If the select statement was like 'select * from t as of timestamp ...' or in a stale read transaction // or is affected by the tidb_read_staleness session variable, then the statement will be makred as isStaleness // in stmtCtx IsStaleness bool InRestrictedSQL bool ViewDepth int32 // mu struct holds variables that change during execution. mu struct { sync.Mutex affectedRows uint64 foundRows uint64 /* following variables are ported from 'COPY_INFO' struct of MySQL server source, they are used to count rows for INSERT/REPLACE/UPDATE queries: If a row is inserted then the copied variable is incremented. If a row is updated by the INSERT ... ON DUPLICATE KEY UPDATE and the new data differs from the old one then the copied and the updated variables are incremented. The touched variable is incremented if a row was touched by the update part of the INSERT ... ON DUPLICATE KEY UPDATE no matter whether the row was actually changed or not. 
see https://github.com/mysql/mysql-server/blob/d2029238d6d9f648077664e4cdd611e231a6dc14/sql/sql_data_change.h#L60 for more details */ records uint64 deleted uint64 updated uint64 copied uint64 touched uint64 message string warnings []SQLWarn // extraWarnings record the extra warnings and are only used by the slow log only now. // If a warning is expected to be output only under some conditions (like in EXPLAIN or EXPLAIN VERBOSE) but it's // not under such conditions now, it is considered as an extra warning. // extraWarnings would not be printed through SHOW WARNINGS, but we want to always output them through the slow // log to help diagnostics, so we store them here separately. extraWarnings []SQLWarn execDetails execdetails.ExecDetails detailsSummary execdetails.P90Summary } // PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows). PrevAffectedRows int64 // PrevLastInsertID is the last insert ID of previous statement. PrevLastInsertID uint64 // LastInsertID is the auto-generated ID in the current statement. LastInsertID uint64 // InsertID is the given insert ID of an auto_increment column. InsertID uint64 BaseRowID int64 MaxRowID int64 // Copied from SessionVars.TimeZone. Priority mysql.PriorityEnum NotFillCache bool MemTracker *memory.Tracker DiskTracker *disk.Tracker // per statement resource group name // hint /* +ResourceGroup(name) */ can change the statement group name ResourceGroupName string RunawayChecker *resourcegroup.RunawayChecker IsTiFlash atomic2.Bool RuntimeStatsColl *execdetails.RuntimeStatsColl TableIDs []int64 IndexNames []string StmtType string OriginalSQL string digestMemo struct { sync.Once normalized string digest *parser.Digest } // BindSQL used to construct the key for plan cache. It records the binding used by the stmt. // If the binding is not used by the stmt, the value is empty BindSQL string // The several fields below are mainly for some diagnostic features, like stmt summary and slow query. // We cache the values here to avoid calculating them multiple times. // Note: // Avoid accessing these fields directly, use their Setter/Getter methods instead. // Other fields should be the zero value or be consistent with the plan field. // TODO: more clearly distinguish between the value is empty and the value has not been set planNormalized string planDigest *parser.Digest encodedPlan string planHint string planHintSet bool binaryPlan string // To avoid cycle import, we use interface{} for the following two fields. // flatPlan should be a *plannercore.FlatPhysicalPlan if it's not nil flatPlan interface{} // plan should be a plannercore.Plan if it's not nil plan interface{} Tables []TableEntry PointExec bool // for point update cached execution, Constant expression need to set "paramMarker" lockWaitStartTime int64 // LockWaitStartTime stores the pessimistic lock wait start time PessimisticLockWaited int32 LockKeysDuration int64 LockKeysCount int32 LockTableIDs map[int64]struct{} // table IDs need to be locked, empty for lock all tables TblInfo2UnionScan map[*model.TableInfo]bool TaskID uint64 // unique ID for an execution of a statement TaskMapBakTS uint64 // counter for // stmtCache is used to store some statement-related values. // add mutex to protect stmtCache concurrent access // https://github.com/pingcap/tidb/issues/36159 stmtCache struct { mu sync.Mutex data map[StmtCacheKey]interface{} } // Map to store all CTE storages of current SQL. // Will clean up at the end of the execution. 
CTEStorageMap interface{} SetVarHintRestore map[string]string // If the statement read from table cache, this flag is set. ReadFromTableCache bool // cache is used to reduce object allocation. cache struct { execdetails.RuntimeStatsColl MemTracker memory.Tracker DiskTracker disk.Tracker LogOnExceed [2]memory.LogOnExceed } // InVerboseExplain indicates the statement is "explain format='verbose' ...". InVerboseExplain bool // EnableOptimizeTrace indicates whether enable optimizer trace by 'trace plan statement' EnableOptimizeTrace bool // OptimizeTracer indicates the tracer for optimize OptimizeTracer *tracing.OptimizeTracer // EnableOptimizerCETrace indicate if cardinality estimation internal process needs to be traced. // CE Trace is currently a submodule of the optimizer trace and is controlled by a separated option. EnableOptimizerCETrace bool OptimizerCETrace []*tracing.CETraceRecord EnableOptimizerDebugTrace bool OptimizerDebugTrace interface{} // WaitLockLeaseTime is the duration of cached table read lease expiration time. WaitLockLeaseTime time.Duration // KvExecCounter is created from SessionVars.StmtStats to count the number of SQL // executions of the kv layer during the current execution of the statement. // Its life cycle is limited to this execution, and a new KvExecCounter is // always created during each statement execution. KvExecCounter *stmtstats.KvExecCounter // WeakConsistency is true when read consistency is weak and in a read statement and not in a transaction. WeakConsistency bool StatsLoad struct { // Timeout to wait for sync-load Timeout time.Duration // NeededItems stores the columns/indices whose stats are needed for planner. NeededItems []model.TableItemID // ResultCh to receive stats loading results ResultCh chan StatsLoadResult // LoadStartTime is to record the load start time to calculate latency LoadStartTime time.Time } // SysdateIsNow indicates whether sysdate() is an alias of now() in this statement SysdateIsNow bool // RCCheckTS indicates the current read-consistency read select statement will use `RCCheckTS` path. RCCheckTS bool // IsSQLRegistered uses to indicate whether the SQL has been registered for TopSQL. IsSQLRegistered atomic2.Bool // IsSQLAndPlanRegistered uses to indicate whether the SQL and plan has been registered for TopSQL. IsSQLAndPlanRegistered atomic2.Bool // IsReadOnly uses to indicate whether the SQL is read-only. IsReadOnly bool // usedStatsInfo records version of stats of each table used in the query. // It's a map of table physical id -> *UsedStatsInfoForTable usedStatsInfo map[int64]*UsedStatsInfoForTable // IsSyncStatsFailed indicates whether any failure happened during sync stats IsSyncStatsFailed bool // UseDynamicPruneMode indicates whether use UseDynamicPruneMode in query stmt UseDynamicPruneMode bool // ColRefFromPlan mark the column ref used by assignment in update statement. ColRefFromUpdatePlan []int64 // RangeFallback indicates that building complete ranges exceeds the memory limit so it falls back to less accurate ranges such as full range. RangeFallback bool // IsExplainAnalyzeDML is true if the statement is "explain analyze DML executors", before responding the explain // results to the client, the transaction should be committed first. See issue #37373 for more details. IsExplainAnalyzeDML bool // InHandleForeignKeyTrigger indicates currently are handling foreign key trigger. InHandleForeignKeyTrigger bool // ForeignKeyTriggerCtx is the contain information for foreign key cascade execution. 
ForeignKeyTriggerCtx struct { // The SavepointName is use to do rollback when handle foreign key cascade failed. SavepointName string HasFKCascades bool } // MPPQueryInfo stores some id and timestamp of current MPP query statement. MPPQueryInfo struct { QueryID atomic2.Uint64 QueryTS atomic2.Uint64 AllocatedMPPTaskID atomic2.Int64 AllocatedMPPGatherID atomic2.Uint64 } // TableStats stores the visited runtime table stats by table id during query TableStats map[int64]interface{} // useChunkAlloc indicates whether statement use chunk alloc useChunkAlloc bool // Check if TiFlash read engine is removed due to strict sql mode. TiFlashEngineRemovedDueToStrictSQLMode bool // StaleTSOProvider is used to provide stale timestamp oracle for read-only transactions. StaleTSOProvider struct { sync.Mutex value *uint64 eval func() (uint64, error) } } // NewStmtCtx creates a new statement context func NewStmtCtx() *StatementContext { return NewStmtCtxWithTimeZone(time.UTC) } // NewStmtCtxWithTimeZone creates a new StatementContext with the given timezone func NewStmtCtxWithTimeZone(tz *time.Location) *StatementContext { intest.AssertNotNil(tz) sc := &StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), } sc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc) return sc } // Reset resets a statement context func (sc *StatementContext) Reset() { *sc = StatementContext{ ctxID: stmtCtxIDGenerator.Add(1), typeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc), } } // CtxID returns the context id of the statement func (sc *StatementContext) CtxID() uint64 { return sc.ctxID } // TimeZone returns the timezone of the type context func (sc *StatementContext) TimeZone() *time.Location { intest.AssertNotNil(sc) if sc == nil { return time.UTC } return sc.typeCtx.Location() } // SetTimeZone sets the timezone func (sc *StatementContext) SetTimeZone(tz *time.Location) { intest.AssertNotNil(tz) sc.typeCtx = sc.typeCtx.WithLocation(tz) } // TypeCtx returns the type context func (sc *StatementContext) TypeCtx() types.Context { return sc.typeCtx } // ErrCtx returns the error context // TODO: add a cache to the `ErrCtx` if needed, though it's not a big burden to generate `ErrCtx` everytime. func (sc *StatementContext) ErrCtx() errctx.Context { ctx := errctx.NewContext(sc) if sc.TypeFlags().IgnoreTruncateErr() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelIgnore) } else if sc.TypeFlags().TruncateAsWarning() { ctx = ctx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn) } return ctx } // TypeFlags returns the type flags func (sc *StatementContext) TypeFlags() types.Flags { return sc.typeCtx.Flags() } // SetTypeFlags sets the type flags func (sc *StatementContext) SetTypeFlags(flags types.Flags) { sc.typeCtx = sc.typeCtx.WithFlags(flags) } // HandleTruncate ignores or returns the error based on the TypeContext inside. // TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect. 
func (sc *StatementContext) HandleTruncate(err error) error { return sc.typeCtx.HandleTruncate(err) } // HandleError handles the error based on `ErrCtx()` func (sc *StatementContext) HandleError(err error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleError(err) } // HandleErrorWithAlias handles the error based on `ErrCtx()` func (sc *StatementContext) HandleErrorWithAlias(internalErr, err, warnErr error) error { intest.AssertNotNil(sc) if sc == nil { return err } errCtx := sc.ErrCtx() return errCtx.HandleErrorWithAlias(internalErr, err, warnErr) } // StmtHints are SessionVars related sql hints. type StmtHints struct { // Hint Information MemQuotaQuery int64 MaxExecutionTime uint64 ReplicaRead byte AllowInSubqToJoinAndAgg bool NoIndexMergeHint bool StraightJoinOrder bool // EnableCascadesPlanner is use cascades planner for a single query only. EnableCascadesPlanner bool // ForceNthPlan indicates the PlanCounterTp number for finding physical plan. // -1 for disable. ForceNthPlan int64 ResourceGroup string // Hint flags HasAllowInSubqToJoinAndAggHint bool HasMemQuotaHint bool HasReplicaReadHint bool HasMaxExecutionTime bool HasEnableCascadesPlannerHint bool HasResourceGroup bool SetVars map[string]string // the original table hints OriginalTableHints []*ast.TableOptimizerHint } // TaskMapNeedBackUp indicates that whether we need to back up taskMap during physical optimizing. func (sh *StmtHints) TaskMapNeedBackUp() bool { return sh.ForceNthPlan != -1 } // Clone the StmtHints struct and returns the pointer of the new one. func (sh *StmtHints) Clone() *StmtHints { var ( vars map[string]string tableHints []*ast.TableOptimizerHint ) if len(sh.SetVars) > 0 { vars = make(map[string]string, len(sh.SetVars)) for k, v := range sh.SetVars { vars[k] = v } } if len(sh.OriginalTableHints) > 0 { tableHints = make([]*ast.TableOptimizerHint, len(sh.OriginalTableHints)) copy(tableHints, sh.OriginalTableHints) } return &StmtHints{ MemQuotaQuery: sh.MemQuotaQuery, MaxExecutionTime: sh.MaxExecutionTime, ReplicaRead: sh.ReplicaRead, AllowInSubqToJoinAndAgg: sh.AllowInSubqToJoinAndAgg, NoIndexMergeHint: sh.NoIndexMergeHint, StraightJoinOrder: sh.StraightJoinOrder, EnableCascadesPlanner: sh.EnableCascadesPlanner, ForceNthPlan: sh.ForceNthPlan, ResourceGroup: sh.ResourceGroup, HasAllowInSubqToJoinAndAggHint: sh.HasAllowInSubqToJoinAndAggHint, HasMemQuotaHint: sh.HasMemQuotaHint, HasReplicaReadHint: sh.HasReplicaReadHint, HasMaxExecutionTime: sh.HasMaxExecutionTime, HasEnableCascadesPlannerHint: sh.HasEnableCascadesPlannerHint, HasResourceGroup: sh.HasResourceGroup, SetVars: vars, OriginalTableHints: tableHints, } } // StmtCacheKey represents the key type in the StmtCache. type StmtCacheKey int const ( // StmtNowTsCacheKey is a variable for now/current_timestamp calculation/cache of one stmt. StmtNowTsCacheKey StmtCacheKey = iota // StmtSafeTSCacheKey is a variable for safeTS calculation/cache of one stmt. StmtSafeTSCacheKey // StmtExternalTSCacheKey is a variable for externalTS calculation/cache of one stmt. StmtExternalTSCacheKey ) // GetOrStoreStmtCache gets the cached value of the given key if it exists, otherwise stores the value. 
func (sc *StatementContext) GetOrStoreStmtCache(key StmtCacheKey, value interface{}) interface{} { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { sc.stmtCache.data[key] = value } return sc.stmtCache.data[key] } // GetOrEvaluateStmtCache gets the cached value of the given key if it exists, otherwise calculate the value. func (sc *StatementContext) GetOrEvaluateStmtCache(key StmtCacheKey, valueEvaluator func() (interface{}, error)) (interface{}, error) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() if sc.stmtCache.data == nil { sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } if _, ok := sc.stmtCache.data[key]; !ok { value, err := valueEvaluator() if err != nil { return nil, err } sc.stmtCache.data[key] = value } return sc.stmtCache.data[key], nil } // ResetInStmtCache resets the cache of given key. func (sc *StatementContext) ResetInStmtCache(key StmtCacheKey) { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() delete(sc.stmtCache.data, key) } // ResetStmtCache resets all cached values. func (sc *StatementContext) ResetStmtCache() { sc.stmtCache.mu.Lock() defer sc.stmtCache.mu.Unlock() sc.stmtCache.data = make(map[StmtCacheKey]interface{}) } // SQLDigest gets normalized and digest for provided sql. // it will cache result after first calling. func (sc *StatementContext) SQLDigest() (normalized string, sqlDigest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(sc.OriginalSQL) }) return sc.digestMemo.normalized, sc.digestMemo.digest } // InitSQLDigest sets the normalized and digest for sql. func (sc *StatementContext) InitSQLDigest(normalized string, digest *parser.Digest) { sc.digestMemo.Do(func() { sc.digestMemo.normalized, sc.digestMemo.digest = normalized, digest }) } // ResetSQLDigest sets the normalized and digest for sql anyway, **DO NOT USE THIS UNLESS YOU KNOW WHAT YOU ARE DOING NOW**. func (sc *StatementContext) ResetSQLDigest(s string) { sc.digestMemo.normalized, sc.digestMemo.digest = parser.NormalizeDigest(s) } // GetPlanDigest gets the normalized plan and plan digest. func (sc *StatementContext) GetPlanDigest() (normalized string, planDigest *parser.Digest) { return sc.planNormalized, sc.planDigest } // GetPlan gets the plan field of stmtctx func (sc *StatementContext) GetPlan() interface{} { return sc.plan } // SetPlan sets the plan field of stmtctx func (sc *StatementContext) SetPlan(plan interface{}) { sc.plan = plan } // GetFlatPlan gets the flatPlan field of stmtctx func (sc *StatementContext) GetFlatPlan() interface{} { return sc.flatPlan } // SetFlatPlan sets the flatPlan field of stmtctx func (sc *StatementContext) SetFlatPlan(flat interface{}) { sc.flatPlan = flat } // GetBinaryPlan gets the binaryPlan field of stmtctx func (sc *StatementContext) GetBinaryPlan() string { return sc.binaryPlan } // SetBinaryPlan sets the binaryPlan field of stmtctx func (sc *StatementContext) SetBinaryPlan(binaryPlan string) { sc.binaryPlan = binaryPlan } // GetResourceGroupTagger returns the implementation of tikvrpc.ResourceGroupTagger related to self. 
func (sc *StatementContext) GetResourceGroupTagger() tikvrpc.ResourceGroupTagger { normalized, digest := sc.SQLDigest() planDigest := sc.planDigest return func(req *tikvrpc.Request) { if req == nil { return } if len(normalized) == 0 { return } req.ResourceGroupTag = resourcegrouptag.EncodeResourceGroupTag(digest, planDigest, resourcegrouptag.GetResourceGroupLabelByKey(resourcegrouptag.GetFirstKeyFromRequest(req))) } } // SetUseChunkAlloc set use chunk alloc status func (sc *StatementContext) SetUseChunkAlloc() { sc.useChunkAlloc = true } // ClearUseChunkAlloc clear useChunkAlloc status func (sc *StatementContext) ClearUseChunkAlloc() { sc.useChunkAlloc = false } // GetUseChunkAllocStatus returns useChunkAlloc status func (sc *StatementContext) GetUseChunkAllocStatus() bool { return sc.useChunkAlloc } // SetPlanDigest sets the normalized plan and plan digest. func (sc *StatementContext) SetPlanDigest(normalized string, planDigest *parser.Digest) { if planDigest != nil { sc.planNormalized, sc.planDigest = normalized, planDigest } } // GetEncodedPlan gets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) GetEncodedPlan() string { return sc.encodedPlan } // SetEncodedPlan sets the encoded plan, it is used to avoid repeated encode. func (sc *StatementContext) SetEncodedPlan(encodedPlan string) { sc.encodedPlan = encodedPlan } // GetPlanHint gets the hint string generated from the plan. func (sc *StatementContext) GetPlanHint() (string, bool) { return sc.planHint, sc.planHintSet } // InitDiskTracker initializes the sc.DiskTracker, use cache to avoid allocation. func (sc *StatementContext) InitDiskTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.DiskTracker, label, bytesLimit, &sc.cache.LogOnExceed[0]) sc.DiskTracker = &sc.cache.DiskTracker } // InitMemTracker initializes the sc.MemTracker, use cache to avoid allocation. func (sc *StatementContext) InitMemTracker(label int, bytesLimit int64) { memory.InitTracker(&sc.cache.MemTracker, label, bytesLimit, &sc.cache.LogOnExceed[1]) sc.MemTracker = &sc.cache.MemTracker } // SetPlanHint sets the hint for the plan. func (sc *StatementContext) SetPlanHint(hint string) { sc.planHintSet = true sc.planHint = hint } // PlanCacheType is the flag of plan cache type PlanCacheType int const ( // DefaultNoCache no cache DefaultNoCache PlanCacheType = iota // SessionPrepared session prepared plan cache SessionPrepared // SessionNonPrepared session non-prepared plan cache SessionNonPrepared ) // SetSkipPlanCache sets to skip the plan cache and records the reason. func (sc *StatementContext) SetSkipPlanCache(reason error) { if !sc.UseCache { return // avoid unnecessary warnings } sc.UseCache = false switch sc.CacheType { case DefaultNoCache: sc.AppendWarning(errors.NewNoStackError("unknown cache type")) case SessionPrepared: sc.AppendWarning(errors.NewNoStackErrorf("skip prepared plan-cache: %s", reason.Error())) case SessionNonPrepared: if sc.InExplainStmt && sc.ExplainFormat == "plan_cache" { // use "plan_cache" rather than types.ExplainFormatPlanCache to avoid import cycle sc.AppendWarning(errors.NewNoStackErrorf("skip non-prepared plan-cache: %s", reason.Error())) } } } // TableEntry presents table in db. type TableEntry struct { DB string Table string } // AddAffectedRows adds affected rows. func (sc *StatementContext) AddAffectedRows(rows uint64) { if sc.InHandleForeignKeyTrigger { // For compatibility with MySQL, not add the affected row cause by the foreign key trigger. 
return } sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows += rows } // SetAffectedRows sets affected rows. func (sc *StatementContext) SetAffectedRows(rows uint64) { sc.mu.Lock() sc.mu.affectedRows = rows sc.mu.Unlock() } // AffectedRows gets affected rows. func (sc *StatementContext) AffectedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.affectedRows } // FoundRows gets found rows. func (sc *StatementContext) FoundRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.foundRows } // AddFoundRows adds found rows. func (sc *StatementContext) AddFoundRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.foundRows += rows } // RecordRows is used to generate info message func (sc *StatementContext) RecordRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.records } // AddRecordRows adds record rows. func (sc *StatementContext) AddRecordRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.records += rows } // DeletedRows is used to generate info message func (sc *StatementContext) DeletedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.deleted } // AddDeletedRows adds record rows. func (sc *StatementContext) AddDeletedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.deleted += rows } // UpdatedRows is used to generate info message func (sc *StatementContext) UpdatedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.updated } // AddUpdatedRows adds updated rows. func (sc *StatementContext) AddUpdatedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.updated += rows } // CopiedRows is used to generate info message func (sc *StatementContext) CopiedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.copied } // AddCopiedRows adds copied rows. func (sc *StatementContext) AddCopiedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.copied += rows } // TouchedRows is used to generate info message func (sc *StatementContext) TouchedRows() uint64 { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.touched } // AddTouchedRows adds touched rows. func (sc *StatementContext) AddTouchedRows(rows uint64) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.touched += rows } // GetMessage returns the extra message of the last executed command, if there is no message, it returns empty string func (sc *StatementContext) GetMessage() string { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.message } // SetMessage sets the info message generated by some commands func (sc *StatementContext) SetMessage(msg string) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.message = msg } // GetWarnings gets warnings. func (sc *StatementContext) GetWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.warnings } // TruncateWarnings truncates warnings begin from start and returns the truncated warnings. func (sc *StatementContext) TruncateWarnings(start int) []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() sz := len(sc.mu.warnings) - start if sz <= 0 { return nil } ret := make([]SQLWarn, sz) copy(ret, sc.mu.warnings[start:]) sc.mu.warnings = sc.mu.warnings[:start] return ret } // WarningCount gets warning count. func (sc *StatementContext) WarningCount() uint16 { if sc.InShowWarning { return 0 } sc.mu.Lock() defer sc.mu.Unlock() return uint16(len(sc.mu.warnings)) } // NumErrorWarnings gets warning and error count. 
func (sc *StatementContext) NumErrorWarnings() (ec uint16, wc int) { sc.mu.Lock() defer sc.mu.Unlock() for _, w := range sc.mu.warnings { if w.Level == WarnLevelError { ec++ } } wc = len(sc.mu.warnings) return } // SetWarnings sets warnings. func (sc *StatementContext) SetWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.warnings = warns } // AppendWarning appends a warning with level 'Warning'. func (sc *StatementContext) AppendWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendWarnings appends some warnings. func (sc *StatementContext) AppendWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, warns...) } } // AppendNote appends a warning with level 'Note'. func (sc *StatementContext) AppendNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelNote, warn}) } } // AppendError appends a warning with level 'Error'. func (sc *StatementContext) AppendError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.warnings) < math.MaxUint16 { sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelError, warn}) } } // GetExtraWarnings gets extra warnings. func (sc *StatementContext) GetExtraWarnings() []SQLWarn { sc.mu.Lock() defer sc.mu.Unlock() return sc.mu.extraWarnings } // SetExtraWarnings sets extra warnings. func (sc *StatementContext) SetExtraWarnings(warns []SQLWarn) { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.extraWarnings = warns } // AppendExtraWarning appends an extra warning with level 'Warning'. func (sc *StatementContext) AppendExtraWarning(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelWarning, warn}) } } // AppendExtraNote appends an extra warning with level 'Note'. func (sc *StatementContext) AppendExtraNote(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelNote, warn}) } } // AppendExtraError appends an extra warning with level 'Error'. func (sc *StatementContext) AppendExtraError(warn error) { sc.mu.Lock() defer sc.mu.Unlock() if len(sc.mu.extraWarnings) < math.MaxUint16 { sc.mu.extraWarnings = append(sc.mu.extraWarnings, SQLWarn{WarnLevelError, warn}) } } // resetMuForRetry resets the changed states of sc.mu during execution. func (sc *StatementContext) resetMuForRetry() { sc.mu.Lock() defer sc.mu.Unlock() sc.mu.affectedRows = 0 sc.mu.foundRows = 0 sc.mu.records = 0 sc.mu.deleted = 0 sc.mu.updated = 0 sc.mu.copied = 0 sc.mu.touched = 0 sc.mu.message = "" sc.mu.warnings = nil sc.mu.execDetails = execdetails.ExecDetails{} sc.mu.detailsSummary.Reset() } // ResetForRetry resets the changed states during execution. func (sc *StatementContext) ResetForRetry() { sc.resetMuForRetry() sc.MaxRowID = 0 sc.BaseRowID = 0 sc.TableIDs = sc.TableIDs[:0] sc.IndexNames = sc.IndexNames[:0] sc.TaskID = AllocateTaskID() } // MergeExecDetails merges a single region execution details into self, used to print // the information in slow query log. 
func (sc *StatementContext) MergeExecDetails(details *execdetails.ExecDetails, commitDetails *util.CommitDetails) { sc.mu.Lock() defer sc.mu.Unlock() if details != nil { sc.mu.execDetails.CopTime += details.CopTime sc.mu.execDetails.BackoffTime += details.BackoffTime sc.mu.execDetails.RequestCount++ sc.MergeScanDetail(details.ScanDetail) sc.MergeTimeDetail(details.TimeDetail) detail := &execdetails.DetailsNeedP90{ BackoffSleep: details.BackoffSleep, BackoffTimes: details.BackoffTimes, CalleeAddress: details.CalleeAddress, TimeDetail: details.TimeDetail, } sc.mu.detailsSummary.Merge(detail) } if commitDetails != nil { if sc.mu.execDetails.CommitDetail == nil { sc.mu.execDetails.CommitDetail = commitDetails } else { sc.mu.execDetails.CommitDetail.Merge(commitDetails) } } } // MergeScanDetail merges scan details into self. func (sc *StatementContext) MergeScanDetail(scanDetail *util.ScanDetail) { // Currently TiFlash cop task does not fill scanDetail, so need to skip it if scanDetail is nil if scanDetail == nil { return } if sc.mu.execDetails.ScanDetail == nil { sc.mu.execDetails.ScanDetail = &util.ScanDetail{} } sc.mu.execDetails.ScanDetail.Merge(scanDetail) } // MergeTimeDetail merges time details into self. func (sc *StatementContext) MergeTimeDetail(timeDetail util.TimeDetail) { sc.mu.execDetails.TimeDetail.ProcessTime += timeDetail.ProcessTime sc.mu.execDetails.TimeDetail.WaitTime += timeDetail.WaitTime } // MergeLockKeysExecDetails merges lock keys execution details into self. func (sc *StatementContext) MergeLockKeysExecDetails(lockKeys *util.LockKeysDetails) { sc.mu.Lock() defer sc.mu.Unlock() if sc.mu.execDetails.LockKeysDetail == nil { sc.mu.execDetails.LockKeysDetail = lockKeys } else { sc.mu.execDetails.LockKeysDetail.Merge(lockKeys) } } // GetExecDetails gets the execution details for the statement. func (sc *StatementContext) GetExecDetails() execdetails.ExecDetails { var details execdetails.ExecDetails sc.mu.Lock() defer sc.mu.Unlock() details = sc.mu.execDetails details.LockKeysDuration = time.Duration(atomic.LoadInt64(&sc.LockKeysDuration)) return details } // PushDownFlags converts StatementContext to tipb.SelectRequest.Flags. func (sc *StatementContext) PushDownFlags() uint64 { var flags uint64 if sc.InInsertStmt { flags |= model.FlagInInsertStmt } else if sc.InUpdateStmt || sc.InDeleteStmt { flags |= model.FlagInUpdateOrDeleteStmt } else if sc.InSelectStmt { flags |= model.FlagInSelectStmt } if sc.TypeFlags().IgnoreTruncateErr() { flags |= model.FlagIgnoreTruncate } else if sc.TypeFlags().TruncateAsWarning() { flags |= model.FlagTruncateAsWarning // TODO: remove this flag from TiKV. flags |= model.FlagOverflowAsWarning } if sc.TypeFlags().IgnoreZeroInDate() { flags |= model.FlagIgnoreZeroInDate } if sc.DividedByZeroAsWarning { flags |= model.FlagDividedByZeroAsWarning } if sc.InLoadDataStmt { flags |= model.FlagInLoadDataStmt } if sc.InRestrictedSQL { flags |= model.FlagInRestrictedSQL } return flags } // CopTasksDetails returns some useful information of cop-tasks during execution. 
func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { sc.mu.Lock() defer sc.mu.Unlock() n := sc.mu.detailsSummary.NumCopTasks d := &CopTasksDetails{ NumCopTasks: n, MaxBackoffTime: make(map[string]time.Duration), AvgBackoffTime: make(map[string]time.Duration), P90BackoffTime: make(map[string]time.Duration), TotBackoffTime: make(map[string]time.Duration), TotBackoffTimes: make(map[string]int), MaxBackoffAddress: make(map[string]string), } if n == 0 { return d } d.AvgProcessTime = sc.mu.execDetails.TimeDetail.ProcessTime / time.Duration(n) d.AvgWaitTime = sc.mu.execDetails.TimeDetail.WaitTime / time.Duration(n) d.P90ProcessTime = time.Duration((sc.mu.detailsSummary.ProcessTimePercentile.GetPercentile(0.9))) d.MaxProcessTime = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().D d.MaxProcessAddress = sc.mu.detailsSummary.ProcessTimePercentile.GetMax().Addr d.P90WaitTime = time.Duration((sc.mu.detailsSummary.WaitTimePercentile.GetPercentile(0.9))) d.MaxWaitTime = sc.mu.detailsSummary.WaitTimePercentile.GetMax().D d.MaxWaitAddress = sc.mu.detailsSummary.WaitTimePercentile.GetMax().Addr for backoff, items := range sc.mu.detailsSummary.BackoffInfo { if items == nil { continue } n := items.ReqTimes d.MaxBackoffAddress[backoff] = items.BackoffPercentile.GetMax().Addr d.MaxBackoffTime[backoff] = items.BackoffPercentile.GetMax().D d.P90BackoffTime[backoff] = time.Duration(items.BackoffPercentile.GetPercentile(0.9)) d.AvgBackoffTime[backoff] = items.TotBackoffTime / time.Duration(n) d.TotBackoffTime[backoff] = items.TotBackoffTime d.TotBackoffTimes[backoff] = items.TotBackoffTimes } return d } // InitFromPBFlagAndTz set the flag and timezone of StatementContext from a `tipb.SelectRequest.Flags` and `*time.Location`. func (sc *StatementContext) InitFromPBFlagAndTz(flags uint64, tz *time.Location) { sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 sc.SetTimeZone(tz) sc.SetTypeFlags(types.DefaultStmtFlags. WithIgnoreTruncateErr((flags & model.FlagIgnoreTruncate) > 0). WithTruncateAsWarning((flags & model.FlagTruncateAsWarning) > 0). WithIgnoreZeroInDate((flags & model.FlagIgnoreZeroInDate) > 0). WithAllowNegativeToUnsigned(!sc.InInsertStmt)) } // GetLockWaitStartTime returns the statement pessimistic lock wait start time func (sc *StatementContext) GetLockWaitStartTime() time.Time { startTime := atomic.LoadInt64(&sc.lockWaitStartTime) if startTime == 0 { startTime = time.Now().UnixNano() atomic.StoreInt64(&sc.lockWaitStartTime, startTime) } return time.Unix(0, startTime) } // RecordRangeFallback records range fallback. func (sc *StatementContext) RecordRangeFallback(rangeMaxSize int64) { // If range fallback happens, it means ether the query is unreasonable(for example, several long IN lists) or tidb_opt_range_max_size is too small // and the generated plan is probably suboptimal. In that case we don't put it into plan cache. if sc.UseCache { sc.SetSkipPlanCache(errors.Errorf("in-list is too long")) } if !sc.RangeFallback { sc.AppendWarning(errors.Errorf("Memory capacity of %v bytes for 'tidb_opt_range_max_size' exceeded when building ranges. 
Less accurate ranges such as full range are chosen", rangeMaxSize)) sc.RangeFallback = true } } // UseDynamicPartitionPrune indicates whether dynamic partition is used during the query func (sc *StatementContext) UseDynamicPartitionPrune() bool { return sc.UseDynamicPruneMode } // DetachMemDiskTracker detaches the memory and disk tracker from the sessionTracker. func (sc *StatementContext) DetachMemDiskTracker() { if sc == nil { return } if sc.MemTracker != nil { sc.MemTracker.Detach() } if sc.DiskTracker != nil { sc.DiskTracker.Detach() } } // SetStaleTSOProvider sets the stale TSO provider. func (sc *StatementContext) SetStaleTSOProvider(eval func() (uint64, error)) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() sc.StaleTSOProvider.value = nil sc.StaleTSOProvider.eval = eval } // GetStaleTSO returns the TSO for stale-read usage which calculate from PD's last response. func (sc *StatementContext) GetStaleTSO() (uint64, error) { sc.StaleTSOProvider.Lock() defer sc.StaleTSOProvider.Unlock() if sc.StaleTSOProvider.value != nil { return *sc.StaleTSOProvider.value, nil } if sc.StaleTSOProvider.eval == nil { return 0, nil } tso, err := sc.StaleTSOProvider.eval() if err != nil { return 0, err } sc.StaleTSOProvider.value = &tso return tso, nil } // AddSetVarHintRestore records the variables which are affected by SET_VAR hint. And restore them to the old value later. func (sc *StatementContext) AddSetVarHintRestore(name, val string) { if sc.SetVarHintRestore == nil { sc.SetVarHintRestore = make(map[string]string) } sc.SetVarHintRestore[name] = val } // CopTasksDetails collects some useful information of cop-tasks during execution. type CopTasksDetails struct { NumCopTasks int AvgProcessTime time.Duration P90ProcessTime time.Duration MaxProcessAddress string MaxProcessTime time.Duration AvgWaitTime time.Duration P90WaitTime time.Duration MaxWaitAddress string MaxWaitTime time.Duration MaxBackoffTime map[string]time.Duration MaxBackoffAddress map[string]string AvgBackoffTime map[string]time.Duration P90BackoffTime map[string]time.Duration TotBackoffTime map[string]time.Duration TotBackoffTimes map[string]int } // ToZapFields wraps the CopTasksDetails as zap.Fileds. func (d *CopTasksDetails) ToZapFields() (fields []zap.Field) { if d.NumCopTasks == 0 { return } fields = make([]zap.Field, 0, 10) fields = append(fields, zap.Int("num_cop_tasks", d.NumCopTasks)) fields = append(fields, zap.String("process_avg_time", strconv.FormatFloat(d.AvgProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_p90_time", strconv.FormatFloat(d.P90ProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_time", strconv.FormatFloat(d.MaxProcessTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("process_max_addr", d.MaxProcessAddress)) fields = append(fields, zap.String("wait_avg_time", strconv.FormatFloat(d.AvgWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_p90_time", strconv.FormatFloat(d.P90WaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_time", strconv.FormatFloat(d.MaxWaitTime.Seconds(), 'f', -1, 64)+"s")) fields = append(fields, zap.String("wait_max_addr", d.MaxWaitAddress)) return fields } // GetUsedStatsInfo returns the map for recording the used stats during query. // If initIfNil is true, it will initialize it when this map is nil. 
func (sc *StatementContext) GetUsedStatsInfo(initIfNil bool) map[int64]*UsedStatsInfoForTable { if sc.usedStatsInfo == nil && initIfNil { sc.usedStatsInfo = make(map[int64]*UsedStatsInfoForTable) } return sc.usedStatsInfo } // RecordedStatsLoadStatusCnt returns the total number of recorded column/index stats status, which is not full loaded. func (sc *StatementContext) RecordedStatsLoadStatusCnt() (cnt int) { allStatus := sc.GetUsedStatsInfo(false) for _, status := range allStatus { if status == nil { continue } cnt += status.recordedColIdxCount() } return } // TypeCtxOrDefault returns the reference to the `TypeCtx` inside the statement context. // If the statement context is nil, it'll return a newly created default type context. // **don't** use this function if you can make sure the `sc` is not nil. We should limit the usage of this function as // little as possible. func (sc *StatementContext) TypeCtxOrDefault() types.Context { if sc != nil { return sc.typeCtx } return types.DefaultStmtNoWarningContext } // UsedStatsInfoForTable records stats that are used during query and their information. type UsedStatsInfoForTable struct { Name string TblInfo *model.TableInfo Version uint64 RealtimeCount int64 ModifyCount int64 ColumnStatsLoadStatus map[int64]string IndexStatsLoadStatus map[int64]string } // FormatForExplain format the content in the format expected to be printed in the execution plan. // case 1: if stats version is 0, print stats:pseudo. // case 2: if stats version is not 0, and there are column/index stats that are not full loaded, // print stats:partial, then print status of 3 column/index status at most. For the rest, only // the count will be printed, in the format like (more: 1 onlyCmsEvicted, 2 onlyHistRemained). func (s *UsedStatsInfoForTable) FormatForExplain() string { // statistics.PseudoVersion == 0 if s.Version == 0 { return "stats:pseudo" } var b strings.Builder if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) == 0 { return "" } b.WriteString("stats:partial") outputNumsLeft := 3 statusCnt := make(map[string]uint64, 1) var strs []string strs = append(strs, s.collectFromColOrIdxStatus(false, &outputNumsLeft, statusCnt)...) strs = append(strs, s.collectFromColOrIdxStatus(true, &outputNumsLeft, statusCnt)...) b.WriteString("[") b.WriteString(strings.Join(strs, ", ")) if len(statusCnt) > 0 { b.WriteString("...(more: ") keys := maps.Keys(statusCnt) slices.Sort(keys) var cntStrs []string for _, key := range keys { cntStrs = append(cntStrs, strconv.FormatUint(statusCnt[key], 10)+" "+key) } b.WriteString(strings.Join(cntStrs, ", ")) b.WriteString(")") } b.WriteString("]") return b.String() } // WriteToSlowLog format the content in the format expected to be printed to the slow log, then write to w. // The format is table name partition name:version[realtime row count;modify count][index load status][column load status]. func (s *UsedStatsInfoForTable) WriteToSlowLog(w io.Writer) { ver := "pseudo" // statistics.PseudoVersion == 0 if s.Version != 0 { ver = strconv.FormatUint(s.Version, 10) } fmt.Fprintf(w, "%s:%s[%d;%d]", s.Name, ver, s.RealtimeCount, s.ModifyCount) if ver == "pseudo" { return } if len(s.ColumnStatsLoadStatus)+len(s.IndexStatsLoadStatus) > 0 { fmt.Fprintf(w, "[%s][%s]", strings.Join(s.collectFromColOrIdxStatus(false, nil, nil), ","), strings.Join(s.collectFromColOrIdxStatus(true, nil, nil), ","), ) } } // collectFromColOrIdxStatus prints the status of column or index stats to a slice // of the string in the format of "col/idx name:status". 
// If outputNumsLeft is not nil, this function will output outputNumsLeft column/index // status at most, the rest will be counted in statusCnt, which is a map of status->count. func (s *UsedStatsInfoForTable) collectFromColOrIdxStatus( forColumn bool, outputNumsLeft *int, statusCnt map[string]uint64, ) []string { var status map[int64]string if forColumn { status = s.ColumnStatsLoadStatus } else { status = s.IndexStatsLoadStatus } keys := maps.Keys(status) slices.Sort(keys) strs := make([]string, 0, len(status)) for _, id := range keys { if outputNumsLeft == nil || *outputNumsLeft > 0 { var name string if s.TblInfo != nil { if forColumn { name = s.TblInfo.FindColumnNameByID(id) } else { name = s.TblInfo.FindIndexNameByID(id) } } if len(name) == 0 { name = "ID " + strconv.FormatInt(id, 10) } strs = append(strs, name+":"+status[id]) if outputNumsLeft != nil { *outputNumsLeft-- } } else if statusCnt != nil { statusCnt[status[id]] = statusCnt[status[id]] + 1 } } return strs } func (s *UsedStatsInfoForTable) recordedColIdxCount() int { return len(s.IndexStatsLoadStatus) + len(s.ColumnStatsLoadStatus) } // StatsLoadResult indicates result for StatsLoad type StatsLoadResult struct { Item model.TableItemID Error error } // HasError returns whether result has error func (r StatsLoadResult) HasError() bool { return r.Error != nil } // ErrorMsg returns StatsLoadResult err msg func (r StatsLoadResult) ErrorMsg() string { if r.Error == nil { return "" } b := bytes.NewBufferString("tableID:") b.WriteString(strconv.FormatInt(r.Item.TableID, 10)) b.WriteString(", id:") b.WriteString(strconv.FormatInt(r.Item.ID, 10)) b.WriteString(", isIndex:") b.WriteString(strconv.FormatBool(r.Item.IsIndex)) b.WriteString(", err:") b.WriteString(r.Error.Error()) return b.String() }
pkg/sessionctx/stmtctx/stmtctx.go
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.9983134269714355, 0.0720970630645752, 0.0001632204803172499, 0.0006975044379942119, 0.18653543293476105 ]
{ "id": 6, "code_window": [ "}\n", "\n", "// SetTypeFlags sets the type flags\n", "func (sc *StatementContext) SetTypeFlags(flags types.Flags) {\n", "\tsc.typeCtx = sc.typeCtx.WithFlags(flags)\n", "}\n", "\n", "// HandleTruncate ignores or returns the error based on the TypeContext inside.\n", "// TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect.\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 503 }
create table bar2 (a int auto_increment, key (`a`));
br/tests/lightning_tool_135/data/tool_135.bar2-schema.sql
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00017481432587374002, 0.00017481432587374002, 0.00017481432587374002, 0.00017481432587374002, 0 ]
{ "id": 6, "code_window": [ "}\n", "\n", "// SetTypeFlags sets the type flags\n", "func (sc *StatementContext) SetTypeFlags(flags types.Flags) {\n", "\tsc.typeCtx = sc.typeCtx.WithFlags(flags)\n", "}\n", "\n", "// HandleTruncate ignores or returns the error based on the TypeContext inside.\n", "// TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect.\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 503 }
# TiDB Log-based Incremental Backup - Author(s): @kennytm - Discussion: N/A - Tracking Issue: [#29501](https://github.com/pingcap/tidb/issues/29501) ## Table of Contents * [Introduction](#introduction) * [Background](#background) * [Goals](#goals) + [Non-Goals for Version 1](#non-goals-for-version-1) * [User Scenarios / Story](#user-scenarios--story) * [Design](#design) + [BR](#br) - [Start task](#start-task) - [Check task status](#check-task-status) - [Stop task](#stop-task) - [TiKV](#tikv) + [Stop](#stop) + [Pause and Resume](#pause-and-resume) + [Output format](#output-format) + [Restore](#restore) - [Restore backups](#restore-backups) + [Merge (out of scope in v1)](#merge-out-of-scope-in-v1) - [Merge backups](#merge-backups) + [Error scenarios](#error-scenarios) - [Scaling out](#scaling-out) - [Crashing](#crashing) - [Network partition](#network-partition) + [Task management](#task-management) * [Alternatives & Rationale](#alternatives--rationale) * [Trivia](#trivia) ## Introduction <img src="imgs/log-backup-architecture.jpg" style="width:6.26772in;height:4.69444in" /> This document introduces a novel solution for backing up incremental transactional data within the TiKV server. ## Background The ability of commercial databases to handle emergencies and accidents is a basic requirement. When a disaster occurs, we must restore the database with minimal data loss and recovery time. Various factors such as exercise, recovery time, operability and operation and maintenance. ## Goals * Support 200T TiDB cluster backup (66 TiKVs) * Combine log-based backup and full backup to reach - RTO < 6h - RPO < 5 min * Support Point in Time Recovery * Log-based backup has nearly non impact on TiDB clusters. * Support cluster/table level backup * Support encrypt and compress data. ### Non-Goals for Version 1 * We do not support restoring into existing tables. - In version 1, we require a non-empty cluster or an empty table. * Restore requires a completely empty cluster to begin with, so that the Table IDs can match up and the StartTS/CommitTS are correct. * The initial "full restore" must retain the original table ID. * We can perform RewritePrefix/TS but it slows the process down. We do not expect normal public service until the restore is complete - The data may be in an inconsistent state, and the cluster is configured to "import mode". ## User Scenarios / Story * Users want to ensure business continuity - when the database fails and cannot be restored within a certain period of time. The user quickly deploys a database of the same version as the production database, and restores the backup data to the database to provide external services; * User services need to be able to restore the data at a certain point in the past to the new cluster to meet the business needs of judicial inspections or audits ## Detailed Design ### BR A new subcommand is added to BR to control incremental backup tasks. (Provide a TiDB HTTP API as well, see the spec.) #### Start task ```console $ br stream start [task_name] -u 10.0.0.1:2379 -s 's3://bucket/path' [--start-ts 123456789] ... Starting task <task_name> from ts=123456789... Store 1: ok (next_backup_ts =222222222). Store 2: ok (next_backup_ts =333333333). Store 3: ok (next_backup_ts =444444444). Store 8: ok (next_backup_ts =123456789). Started! ``` #### Check task status ```console $ br stream status task_name -u 10.0.0.1:2379 Checking status of <task_name>... Store 1: error, task completed or not found. Store 2: ok (next_backup_ts =900000000). 
Store 3: ok (next_backup_ts =911111111). Store 8: ok (next_backup_ts =922222222). ``` #### Stop task ```console $ br stream stop task_name -u 10.0.0.1:2379 Stopping task <task_name>... Store 1: error, task completed or not found. Store 2: ok (next_backup_ts =987654321). Store 3: ok (next_backup_ts =989898989). Store 8: ok (next_backup_ts =987987987). Stopped at --lastbackupts=987654321. $ br stream stop task_name -u 10.0.0.1:2379 Stopping task <task_name>... Store 1: error, task completed or not found. Store 2: error, task completed or not found. Store 3: error, task completed or not found. Store 8: error, task completed or not found. Failed to stop the task, maybe it is already stopped. Check the TiKV logs for details. ``` #### TiKV * We will reuse the CmdObserver<E> trait. We may either embed the logic entirely inside cdc:: Delegate, or create a new StreamBackupObserver. Prefering the latter for separation of concern. - We probably don't need to implement RoleObserver + RegionChangeObserver. Both are used to monitor region-not-found or leader-changed errors to propagate to downstream delegates. In backup streaming we simply stop writing to the buffers. - If possible we may reuse the resolved_ts crate to fetch the ResolvedTS instead of making an observer our own to reduce impact. * Support table filtering - We choose to enable table filtering during backup - The problem about table filtering is we only have a <Table ID → Table Name> map with txn in meta.go during log backup.(e.g. mDB1_Table20 -> []byte(tableInfo)), and some operations may change the map. For example, RENAME then TRUNCATE table. we may get such sequences. * time 1: <21, Test_A> * time 2: <21, Test_B>(RENAME) * time 3: <22, Test_B>(TRUNCATE) - To Solve this problem, we need TiKV to watch all meta key changes during a task. * BR starts a task * BR puts a table-filter into a task * BR calculates the key range from the given table-filter. and puts the key range space into PD/Etcd. * BR puts the task into PD/Etcd. * Every TiKV node watches the task and key range space in the task. * when the task changes key range. such as TRUNCATE. TiKV should be aware of this change and stop observing old ranges and start observing new key ranges. when the new key range starts. TiKV needs an incremental scan to get all related keys. * Meta Key * Every Task should watch the meta key range carefully. We had such meta keys for each table. * DB key * Table key * AutoTableID key(ignore. calculate later) * AutoRandom key(ignore.calculate later) * Sequence/Sequence cycle key(ignore for version 1) * Policy key (ignore for version 1) * DDL history key (ignore for not use) * BR will calculate meta key range at the beginning of the task. * Handle TiDB DDL * Create Table/Index within table-filter * update the task and calculate a new range. * Truncate Table within table-filter * update the task and calculate a new range. * Drop Table/Index * update the task. * Rename Table * rename table out of table-filter * ignore. let restore handle this situation. * rename table into table-filter. * ignore. since we didn’t backup this table at the beginning. * Memory control - Every TiKV node flush events independently. - Flush to local storage directly. * if backup to s3, we have a separate thread push data from local storage to s3 at every 5 minutes. - Need a flush control when the disk is full. <img src="imgs/log-backup-data-flow.jpg" style="width:6.26772in;height:4.69444in" /> * Rollback - The Rollbacks do the real delete in rocksdb according to the txn. 
- The Rollbacks only have the StartTS. - So we must apply rollback events at the end of one restore task. we must ensure that the prewrite happened before rollback. ### Stop Stopping a task causes all collected events not yet flushed to be lost. ### Pause and Resume Pausing a task will make it stop observing the TiKV changes, record the ResolvedTS of every involved region, and immediately flush the log files into external storage. Then it sleeps. Resuming a task is like re-subscribing to a CDC stream, like running br stream start «TaskName» --backupts «PrevRTS». All KVs between PrevRTS and current CommitTS will be scanned out and logged as a single batch. ### Output format - There are kinds of events we can send out: "BR-like" or "CDC-like". - "BR-like" generates events below the MVCC/Percolator abstraction, and is closer to BR's SST format with content like - `Put(cf="default", key="zt1\_r1«StartTS»", value="encoded\_value")` - `Put(cf="write", key="zt1\_r1«CommitTS»", value="P«StartTS»")` - `Delete(cf="default", key="zt1\_r1«StartTS»")` - `Put(cf="write", key="zt1\_r1«CommitTS»", value="D«StartTS»")` - "CDC-like" is above the MVCC abstraction and is closer to CDC's format with content like - `Prewrite(start\_ts=«StartTS», op=Put(key="t1\_r1", value="encoded\_value"))` - `Commit(start\_ts=«StartTS», commit\_ts=«CommitTS», key="t1\_r1")` - `Prewrite(start\_ts=«StartTS», op=Delete(key="t1\_r1"))` - `Commit(start\_ts=«StartTS», commit\_ts=«CommitTS», key="t1\_r1")` - We choose the BR-like form since it is compatible with RawKV and is easier to merge into a Full Backup. But there seems to be worry that if a lock\_cf lock is exposed in the middle it will break the SI guarantee? - The received events are appended to local log files per task (+ fsync), sharded by prefix of the key. - If the first byte of the key is "t" ("zt"), shard by the first 9 (11) bytes. - Otherwise ("m"), place it into its own file. - We do not distinguish by region (shall we?). - Content should be encoded as a stream of protobuf messages or other efficient binary format. - The current ResolvedTS is attached to the set of local log files. - Every 5 minutes or 128 MiB of total data (whichever sooner), we move the log files into external storage atomically. - May need to ensure we don't break a transaction into two files? (what if huge transactions?) - Files are renamed «Storage://Prefix»/«PhysicalTableID:%02x»/«RegionIDAndMinTSOfTheLog:016x»/«StoreID».log - The file should be generated for each region. If the region contains multi tables, split it by table. - The metadata would be the index of those files. - We may compress (producing .log.lz4) or encrypt the file (producing .log.lz4.aes256ctr) on demand (out of scope for now). - Additionally we include a metadata file «Storage://Prefix»/stream-metas/«???:016x».meta recording the initial SHA-256 checksum, encryption IV, number of entries, range of ResolvedTS/StartTS/CommitTS and range of keys of every file involved. ```protobuf enum FileType { Delete = 0; Put = 1; } message DataFileInfo { // SHA256 of the file. bytes sha_256 = 1; // Path of the file. string path = 2; int64 number_of_entries = 3; /// Below are extra information of the file, for better filtering files. // The min ts of the keys in the file. uint64 min_ts = 4; // The max ts of the keys in the file. uint64 max_ts = 5; // The resolved ts of the region when saving the file. uint64 resolved_ts = 6; // The region of the file. int64 region_id = 7; // The key range of the file. // Encoded. 
bytes start_key = 8; bytes end_key = 9; // The column family of the file. string cf = 10; // The operation type of the file. FileType type = 11; // Whether the data file contains meta keys(m prefixed keys) only. bool is_meta = 12; // The table ID of the file contains, when `is_meta` is true, would be ignored. int64 table_id = 13; // It may support encrypting at future. reserved "iv"; } ``` - Not embedding metadata into \*.log files, so the BR controller can dispatch the restore tasks without downloading tons of files. - Similar to backup meta, we may split each \*.meta file into multiple levels if it becomes too large. - **What happens if upload still fails after several retries?** - Pause the task and report an error which br stream status can see and Prometheus can receive. ### Restore #### Restore backups ```console $ br restore full -s 's3://bucket/snapshot-path' -u 10.0.5.1:2379 $ br stream restore -u 10.0.5.1:2379 \ -s 's3://bucket/path' \ [--startts 123456789] \ [--endts 987654321] \ [--checksum] \ [-f 'db3.*'] ``` 1. Restoring the full backup is unchanged, using the br restore command. 2. To restore the BR-like incremental backup, the restore API has to perform the following: 1. Reads the last «MRTS».meta file just before --endts to obtain the region distribution of the keys. 2. Perform batch-split so region distribution is the same as the end state (only do this if the existing number of regions &lt;&lt; number of regions in the archive). 3. "Lock" the scheduler, disallow region moving/splitting/merging. 4. For every «MRTS».meta file (in parallel), 1. Translates the -f table name filter into key range filter. 2. For every log file (in parallel), 1. If a log file's MaxCommitTS ≤ --startts or MinStartTS > --endts, skip the log file. 2. If a log file's key range is filtered out, skip the log file. 3. Gets all regions intersecting with the file's key range. 4. Tell the ***leader*** of every region to download and apply the log file, along with: 1. --startts and --endts 2. Key range filter 3. SHA-256 checksum (null if --checksum=false) 5. Done. 3. On the TiKV side, after receiving the restore-file command, 1. Download the file 2. If the SHA-256 checksum is provided, perform checksum comparison. 3. Iterate the KV pairs, keeping only those with StartTS or CommitTS between (--startts, --endts\], and key inside the given key range filter. 4. Append all the KV pairs into a BatchCommandsRequest. 5. Perform the batch commands and let Raft replicates this to all peers. 4. Determine whether to perform restore externally on BR, or internally on TiKV. ### Merge (out of scope in v1) #### Merge backups ```console $ br stream merge \ -s 's3://bucket/path/' \ [--read-snapshot-storage 's3://bucket/snapshot-path'] \ --write-snapshot-storage 's3://bucket/snapshot-path-new' \ [--startts 123456789] \ [--endts 987654321] ``` The "merge" operation performs an offline compaction. It applies the KV events on top of an existing snapshot, and produces a new set of SST files. ### Error scenarios #### Scaling out If BR (or TiDB API) directly sends the tasks to TiKV services (i.e. "push-based interface"), when we add new TiKV stores they will not know there is a backup task, causing information loss. 
I think this suggests that either - There has to be some kind of "BR-stream-master" (like DM-master or CDC-master) which maintains the task list and push it to any new members, or - The task list should be stored on etcd / PD and every TiKV uses [<u>Watch</u>](https://etcd.io/docs/v3.5/tutorials/how-to-watch-keys/) to pull new tasks. I very much prefer the stateless, pull-based approach, but afaik TiKV has never introduced any etcdv3 dependency before (TiDB o.t.o.h. uses etcdv3 extensively, esp in DDL sync). (Since TiKV itself is also a KV store, we could as well use RawKV in a non-watched keyspace to share the task list. But it seems CDC *is* TiKV's Watch 🤔) #### Crashing After a TiKV store has successfully uploaded content to external storage, it should report to the master / etcdv3 / S3 like "in task T, in store 8, in region 432, we have backed up until TS=123456789". In case of a crash, the new leader of region 432 should initialize itself from this progress report, and start scanning from TS=123456789 rather than TS=0 or Now(). This also means there is a communication cost of fetching the initial TS. Because we need to ensure everything in the TS range 123456789..Now() are intact, we have to extend the GC lifetime to include 123456789. Setting a GC safepoint has a global effect, however, whereas we only want to prevent GC from happening in this particular region. Therefore, we'd like to instead change the [<u>gc\_worker.rs</u> <u>implementation in TiKV</u>](https://github.com/tikv/tikv/blob/5552758d277b0ab2761deb88c9a82525ecac8980/src/server/gc_worker/gc_worker.rs#L224). A keyrange can indicate if it is ready to be GC'ed. If a leader has just been elected for &lt; 5 minutes, we indicate the region as not-GC-ready. #### Network partition Consider split-brain, where a region's peers are split into two groups (say <u>1</u>,2,3 / 3,4,<u>5</u>) and both sides have leader (<u>1</u>, <u>5</u>), and both leaders do back up (inconsistent) data to S3. Raft should be able to avoid this, the bridge node (3) being aware of both sides should prevent any modification (Put/Delete) on one side from taking place and thus the back up can never diverge. So this is equivalent to a normal partition where the minority side is considered dead (<u>1</u>,2,3 / <s>4,5</s>). ### Task management As stated above we need a place reachable from all TiKVs to store the task status. **Starting task** BR pushes these keys: - TaskInfo:(task\_name) → ( all info about the task ) TiKV, on initialization, scans for every KV in the keyspace TaskInfo: to learn what backup tasks have been scheduled. After it is done, TiKV watches the keyspace for any new tasks. When a new task is seen by TiKV, and the task's end\_ts &gt; current\_ts, it will do ```rust for region in self.regions { if self.is_leader(region.id) { put!( "NextBackupTS:({task.name}, {self.store_id}, {region_id})", task.start_ts, ); } } ``` Stop the task if end\_ts &lt;= current\_ts. **Stopping task** Delete the TaskInfo key. Watcher should be able to do cleanup. **Configurating, resuming and pausing task** Update the TaskInfo key. The KV API is equivalent to starting a task. Pausing changes the --start-ts to the last key's CommitTS, and set "paused" to true. Resuming restores "paused" to false. **Initialization, leader change and region split/merge** For every new region, we assume all keys between NextBackupTS:\* to the current TS is not yet backed up. So we initiate a full scan on the region for CommitTS between these two numbers. 
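A minimal sketch of the scan-then-watch pattern described above, written in Go against the etcd `clientv3` API purely for illustration — the actual consumer would be TiKV (in Rust), which as noted has no etcdv3 dependency today, and the `TaskInfo:` prefix and endpoint below are assumptions taken from this section rather than a fixed interface:

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Connect to PD's embedded etcd; the endpoint matches the earlier examples.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"10.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	const prefix = "TaskInfo:" // assumed key prefix for stream-backup tasks

	// On initialization: scan every existing task under the prefix.
	resp, err := cli.Get(ctx, prefix, clientv3.WithPrefix())
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		log.Printf("existing task %s: %s", kv.Key, kv.Value)
		// ...decode the task, start observing its key ranges,
		// and write the NextBackupTS:* keys for regions it leads...
	}

	// Afterwards: watch the prefix for tasks added, updated or removed,
	// starting just past the revision the scan observed so nothing is missed.
	wch := cli.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(resp.Header.Revision+1))
	for wresp := range wch {
		for _, ev := range wresp.Events {
			switch ev.Type {
			case clientv3.EventTypePut:
				log.Printf("task started/updated: %s", ev.Kv.Key)
			case clientv3.EventTypeDelete:
				log.Printf("task stopped: %s", ev.Kv.Key)
			}
		}
	}
}
```

Resuming the watch from `resp.Header.Revision+1` is what makes the pull-based approach stateless for new stores: a freshly added TiKV only needs the PD endpoints to discover every running task.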
**Flushing** For every region which this store is still a leader of, update NextBackupTS:\* to the ResolvedTS of the region. **Risks** - Still need to sort out all potential crash scenarios - Scanning TxnEntry after changing leaders, no matter when it happened, may reduce the cluster's performance. - Events such as "DeleteRange" and "IngestSST" are not handled. - Should we backup lockcf - How to deal with Rollback (esp across 5min point) - may lead to dangling default cf entries if the KVDelete is not handled - in some scenarios there may be a lot of rollbacks? - The Rollback key will carry start ts. For each key with start ts we need to ensure the delete happened after the put. ## Alternatives & Rationale **Scanning RaftDB** Since all modifications must go through Raft consensus, we can capture the changes on RaftDB instead of TiKVDB for the incremental changes. We can even physically copy the WAL file to minimize computation. Another advantage of scanning RaftDB is that, if we want to batch copy events between two timestamps (e.g. after changing leader), we don't need to scan the entire region key space, but just those entries indexed by TS. In fact, TiCDC also considered this approach. It was eventually abandoned, however, due to the difficulty of actually utilizing the raft log. Given the previous failure, we will not attempt to touch the raft log unless the TxnEntry scan proved to have a really bad effect on performance. - Scan the raft log directly. The current raftstore implementation needs to be adjusted, such as raft log gc. In addition, it is also necessary to ensure that the scan is performed during the leader’s lifetime, otherwise the complete data may not be seen. - Also need to pay attention to region epoch check, raft log commit != apply successfully - To support the interpretation of raftlog, I think I need to implement most of the logic of raftstore now, especially some operations such as split/merge/conf change. Due to the existence of these operations, the original total order raft log in a region has a sloping relationship between different regions. I think it is not easy to solve. This is also the reason why cdc did not use raftlog or wal at the time.
docs/design/2021-12-09-TiDB-log-based-incremental-backup.md
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.0002083148283418268, 0.00017065659631043673, 0.0001592817425262183, 0.00017058482626453042, 0.000006711204150633421 ]
{ "id": 6, "code_window": [ "}\n", "\n", "// SetTypeFlags sets the type flags\n", "func (sc *StatementContext) SetTypeFlags(flags types.Flags) {\n", "\tsc.typeCtx = sc.typeCtx.WithFlags(flags)\n", "}\n", "\n", "// HandleTruncate ignores or returns the error based on the TypeContext inside.\n", "// TODO: replace this function with `HandleError`, for `TruncatedError` they should have the same effect.\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tsc.initErrCtx()\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx.go", "type": "add", "edit_start_line_idx": 503 }
#!/bin/bash # # Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. # parameter 1: config file for sync_diff_inspector # parameter 2: max check times conf=$1 check_time=${2-10} LOG=$DUMPLING_OUTPUT_DIR/sync_diff_inspector.log # change output dir "./output" to "$DUMPLING_OUTPUT_DIR/output" DUMPLING_OUTPUT_DIR_REGEX=$(echo "$DUMPLING_OUTPUT_DIR/output" | sed -e 's/\//\\\//g') sed "s/.\/output/${DUMPLING_OUTPUT_DIR_REGEX}/g" $conf > $DUMPLING_OUTPUT_DIR/diff_config.toml conf=$DUMPLING_OUTPUT_DIR/diff_config.toml i=0 while [ $i -lt $check_time ] do bin/sync_diff_inspector --config=$conf >> $LOG 2>&1 ret=$? if [ "$ret" == 0 ]; then echo "check diff successfully" break fi ((i++)) echo "check diff failed $i-th time, retry later" sleep 2 done if [ $i -ge $check_time ]; then echo "check data failed, some data are different!!" # show \n and other blanks printf "$(cat $LOG)\n" exit 1 fi cd $PWD
dumpling/tests/_utils/check_sync_diff
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.00017549975018482655, 0.00017342607316095382, 0.0001688806078163907, 0.0001746619527693838, 0.000002651888735272223 ]
{ "id": 7, "code_window": [ "\t}\n", "}\n", "\n", "func BenchmarkErrCtx(b *testing.B) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "func TestErrCtx(t *testing.T) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\t// the default errCtx\n", "\terr := types.ErrTruncated\n", "\trequire.Error(t, sc.HandleError(err))\n", "\n", "\t// reset the types flags will re-initialize the error flag\n", "\tsc.SetTypeFlags(types.DefaultStmtFlags | types.FlagTruncateAsWarning)\n", "\trequire.NoError(t, sc.HandleError(err))\n", "}\n", "\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx_test.go", "type": "add", "edit_start_line_idx": 410 }
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stmtctx_test import ( "context" "encoding/json" "fmt" "math/rand" "reflect" "sort" "testing" "time" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/util" ) func TestCopTasksDetails(t *testing.T) { ctx := stmtctx.NewStmtCtx() backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"} for i := 0; i < 100; i++ { d := &execdetails.ExecDetails{ DetailsNeedP90: execdetails.DetailsNeedP90{ CalleeAddress: fmt.Sprintf("%v", i+1), BackoffSleep: make(map[string]time.Duration), BackoffTimes: make(map[string]int), TimeDetail: util.TimeDetail{ ProcessTime: time.Second * time.Duration(i+1), WaitTime: time.Millisecond * time.Duration(i+1), }, }, } for _, backoff := range backoffs { d.BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(i+1) d.BackoffTimes[backoff] = i + 1 } ctx.MergeExecDetails(d, nil) } d := ctx.CopTasksDetails() require.Equal(t, 100, d.NumCopTasks) require.Equal(t, time.Second*101/2, d.AvgProcessTime) require.Equal(t, time.Second*91, d.P90ProcessTime) require.Equal(t, time.Second*100, d.MaxProcessTime) require.Equal(t, "100", d.MaxProcessAddress) require.Equal(t, time.Millisecond*101/2, d.AvgWaitTime) require.Equal(t, time.Millisecond*91, d.P90WaitTime) require.Equal(t, time.Millisecond*100, d.MaxWaitTime) require.Equal(t, "100", d.MaxWaitAddress) fields := d.ToZapFields() require.Equal(t, 9, len(fields)) for _, backoff := range backoffs { require.Equal(t, "100", d.MaxBackoffAddress[backoff]) require.Equal(t, 100*time.Millisecond*100, d.MaxBackoffTime[backoff]) require.Equal(t, time.Millisecond*100*91, d.P90BackoffTime[backoff]) require.Equal(t, time.Millisecond*100*101/2, d.AvgBackoffTime[backoff]) require.Equal(t, 101*50, d.TotBackoffTimes[backoff]) require.Equal(t, 101*50*100*time.Millisecond, d.TotBackoffTime[backoff]) } } func TestStatementContextPushDownFLags(t *testing.T) { newStmtCtx := func(fn func(*stmtctx.StatementContext)) *stmtctx.StatementContext { sc := stmtctx.NewStmtCtx() fn(sc) return sc } testCases := []struct { in *stmtctx.StatementContext out uint64 }{ {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InInsertStmt = true }), 8}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InUpdateStmt = true }), 16}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InDeleteStmt = true }), 16}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InSelectStmt = true }), 32}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) }), 1}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) }), 66}, {newStmtCtx(func(sc *stmtctx.StatementContext) { 
sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) }), 128}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.DividedByZeroAsWarning = true }), 256}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InLoadDataStmt = true }), 1024}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InSelectStmt = true sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) }), 98}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.DividedByZeroAsWarning = true sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) }), 257}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InUpdateStmt = true sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) sc.InLoadDataStmt = true }), 1168}, } for _, tt := range testCases { got := tt.in.PushDownFlags() require.Equal(t, tt.out, got) } } func TestWeakConsistencyRead(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(id int primary key, c int, c1 int, unique index i(c))") execAndCheck := func(sql string, rows [][]interface{}, isolationLevel kv.IsoLevel) { ctx := context.WithValue(context.Background(), "CheckSelectRequestHook", func(req *kv.Request) { require.Equal(t, req.IsolationLevel, isolationLevel) }) rss, err := tk.Session().Execute(ctx, sql) require.Nil(t, err) for _, rs := range rss { rs.Close() } if rows != nil { tk.MustQuery(sql).Check(rows) } lastWeakConsistency := tk.Session().GetSessionVars().StmtCtx.WeakConsistency require.Equal(t, lastWeakConsistency, isolationLevel == kv.RC) } // strict execAndCheck("insert into t values(1, 1, 1)", nil, kv.SI) execAndCheck("select * from t", testkit.Rows("1 1 1"), kv.SI) tk.MustExec("prepare s from 'select * from t'") tk.MustExec("prepare u from 'update t set c1 = id + 1'") execAndCheck("execute s", testkit.Rows("1 1 1"), kv.SI) execAndCheck("execute u", nil, kv.SI) execAndCheck("admin check table t", nil, kv.SI) // weak tk.MustExec("set tidb_read_consistency = weak") execAndCheck("insert into t values(2, 2, 2)", nil, kv.SI) execAndCheck("select * from t", testkit.Rows("1 1 2", "2 2 2"), kv.RC) execAndCheck("execute s", testkit.Rows("1 1 2", "2 2 2"), kv.RC) execAndCheck("execute u", nil, kv.SI) // non-read-only queries should be strict execAndCheck("admin check table t", nil, kv.SI) execAndCheck("update t set c = c + 1 where id = 2", nil, kv.SI) execAndCheck("delete from t where id = 2", nil, kv.SI) // in-transaction queries should be strict tk.MustExec("begin") execAndCheck("select * from t", testkit.Rows("1 1 2"), kv.SI) execAndCheck("execute s", testkit.Rows("1 1 2"), kv.SI) tk.MustExec("rollback") } func TestMarshalSQLWarn(t *testing.T) { warns := []stmtctx.SQLWarn{ { Level: stmtctx.WarnLevelError, Err: errors.New("any error"), }, { Level: stmtctx.WarnLevelError, Err: errors.Trace(errors.New("any error")), }, { Level: stmtctx.WarnLevelWarning, Err: variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown"), }, { Level: stmtctx.WarnLevelWarning, Err: errors.Trace(variable.ErrUnknownSystemVar.GenWithStackByArgs("unknown")), }, } store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) // First query can trigger loading global variables, which produces warnings. tk.MustQuery("select 1") tk.Session().GetSessionVars().StmtCtx.SetWarnings(warns) rows := tk.MustQuery("show warnings").Rows() require.Equal(t, len(warns), len(rows)) // The unmarshalled result doesn't need to be exactly the same with the original one. 
// We only need that the results of `show warnings` are the same. bytes, err := json.Marshal(warns) require.NoError(t, err) var newWarns []stmtctx.SQLWarn err = json.Unmarshal(bytes, &newWarns) require.NoError(t, err) tk.Session().GetSessionVars().StmtCtx.SetWarnings(newWarns) tk.MustQuery("show warnings").Check(rows) } func TestApproxRuntimeInfo(t *testing.T) { var n = rand.Intn(19000) + 1000 var valRange = rand.Int31n(10000) + 1000 backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"} details := []*execdetails.ExecDetails{} for i := 0; i < n; i++ { d := &execdetails.ExecDetails{ DetailsNeedP90: execdetails.DetailsNeedP90{ CalleeAddress: fmt.Sprintf("%v", i+1), BackoffSleep: make(map[string]time.Duration), BackoffTimes: make(map[string]int), TimeDetail: util.TimeDetail{ ProcessTime: time.Second * time.Duration(rand.Int31n(valRange)), WaitTime: time.Millisecond * time.Duration(rand.Int31n(valRange)), }, }, } details = append(details, d) for _, backoff := range backoffs { d.BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(rand.Int31n(valRange)) d.BackoffTimes[backoff] = rand.Intn(int(valRange)) } } // Make CalleeAddress for each max value is deterministic. details[rand.Intn(n)].DetailsNeedP90.TimeDetail.ProcessTime = time.Second * time.Duration(valRange) details[rand.Intn(n)].DetailsNeedP90.TimeDetail.WaitTime = time.Millisecond * time.Duration(valRange) for _, backoff := range backoffs { details[rand.Intn(n)].BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(valRange) } ctx := stmtctx.NewStmtCtx() for i := 0; i < n; i++ { ctx.MergeExecDetails(details[i], nil) } d := ctx.CopTasksDetails() require.Equal(t, d.NumCopTasks, n) sort.Slice(details, func(i, j int) bool { return details[i].TimeDetail.ProcessTime.Nanoseconds() < details[j].TimeDetail.ProcessTime.Nanoseconds() }) var timeSum time.Duration for _, detail := range details { timeSum += detail.TimeDetail.ProcessTime } require.Equal(t, d.AvgProcessTime, timeSum/time.Duration(n)) require.InEpsilon(t, d.P90ProcessTime.Nanoseconds(), details[n*9/10].TimeDetail.ProcessTime.Nanoseconds(), 0.05) require.Equal(t, d.MaxProcessTime, details[n-1].TimeDetail.ProcessTime) require.Equal(t, d.MaxProcessAddress, details[n-1].CalleeAddress) sort.Slice(details, func(i, j int) bool { return details[i].TimeDetail.WaitTime.Nanoseconds() < details[j].TimeDetail.WaitTime.Nanoseconds() }) timeSum = 0 for _, detail := range details { timeSum += detail.TimeDetail.WaitTime } require.Equal(t, d.AvgWaitTime, timeSum/time.Duration(n)) require.InEpsilon(t, d.P90WaitTime.Nanoseconds(), details[n*9/10].TimeDetail.WaitTime.Nanoseconds(), 0.05) require.Equal(t, d.MaxWaitTime, details[n-1].TimeDetail.WaitTime) require.Equal(t, d.MaxWaitAddress, details[n-1].CalleeAddress) fields := d.ToZapFields() require.Equal(t, 9, len(fields)) for _, backoff := range backoffs { sort.Slice(details, func(i, j int) bool { return details[i].BackoffSleep[backoff].Nanoseconds() < details[j].BackoffSleep[backoff].Nanoseconds() }) timeSum = 0 var timesSum = 0 for _, detail := range details { timeSum += detail.BackoffSleep[backoff] timesSum += detail.BackoffTimes[backoff] } require.Equal(t, d.MaxBackoffAddress[backoff], details[n-1].CalleeAddress) require.Equal(t, d.MaxBackoffTime[backoff], details[n-1].BackoffSleep[backoff]) require.InEpsilon(t, d.P90BackoffTime[backoff], details[n*9/10].BackoffSleep[backoff], 0.1) require.Equal(t, d.AvgBackoffTime[backoff], timeSum/time.Duration(n)) require.Equal(t, d.TotBackoffTimes[backoff], timesSum) require.Equal(t, 
d.TotBackoffTime[backoff], timeSum) } } func TestStmtHintsClone(t *testing.T) { hints := stmtctx.StmtHints{} value := reflect.ValueOf(&hints).Elem() for i := 0; i < value.NumField(); i++ { field := value.Field(i) switch field.Kind() { case reflect.Int, reflect.Int32, reflect.Int64: field.SetInt(1) case reflect.Uint, reflect.Uint32, reflect.Uint64: field.SetUint(1) case reflect.Uint8: // byte field.SetUint(1) case reflect.Bool: field.SetBool(true) case reflect.String: field.SetString("test") default: } } require.Equal(t, hints, *hints.Clone()) } func TestNewStmtCtx(t *testing.T) { sc := stmtctx.NewStmtCtx() require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) require.Same(t, time.UTC, sc.TimeZone()) require.Same(t, time.UTC, sc.TimeZone()) sc.AppendWarning(errors.New("err1")) warnings := sc.GetWarnings() require.Equal(t, 1, len(warnings)) require.Equal(t, stmtctx.WarnLevelWarning, warnings[0].Level) require.Equal(t, "err1", warnings[0].Err.Error()) tz := time.FixedZone("UTC+1", 2*60*60) sc = stmtctx.NewStmtCtxWithTimeZone(tz) require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) require.Same(t, tz, sc.TimeZone()) require.Same(t, tz, sc.TimeZone()) sc.AppendWarning(errors.New("err2")) warnings = sc.GetWarnings() require.Equal(t, 1, len(warnings)) require.Equal(t, stmtctx.WarnLevelWarning, warnings[0].Level) require.Equal(t, "err2", warnings[0].Err.Error()) } func TestSetStmtCtxTimeZone(t *testing.T) { sc := stmtctx.NewStmtCtx() require.Same(t, time.UTC, sc.TimeZone()) tz := time.FixedZone("UTC+1", 2*60*60) sc.SetTimeZone(tz) require.Same(t, tz, sc.TimeZone()) } func TestSetStmtCtxTypeFlags(t *testing.T) { sc := stmtctx.NewStmtCtx() require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) sc.SetTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck) require.Equal(t, types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, sc.TypeFlags()) require.Equal(t, sc.TypeFlags(), sc.TypeFlags()) sc.SetTypeFlags(types.FlagSkipASCIICheck | types.FlagSkipUTF8Check | types.FlagTruncateAsWarning) require.Equal(t, types.FlagSkipASCIICheck|types.FlagSkipUTF8Check|types.FlagTruncateAsWarning, sc.TypeFlags()) require.Equal(t, sc.TypeFlags(), sc.TypeFlags()) } func TestResetStmtCtx(t *testing.T) { sc := stmtctx.NewStmtCtx() require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) tz := time.FixedZone("UTC+1", 2*60*60) sc.SetTimeZone(tz) sc.SetTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck) sc.AppendWarning(errors.New("err1")) sc.InRestrictedSQL = true sc.StmtType = "Insert" require.Same(t, tz, sc.TimeZone()) require.Equal(t, types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, sc.TypeFlags()) require.Equal(t, 1, len(sc.GetWarnings())) sc.Reset() require.Same(t, time.UTC, sc.TimeZone()) require.Same(t, time.UTC, sc.TimeZone()) require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) require.False(t, sc.InRestrictedSQL) require.Empty(t, sc.StmtType) require.Equal(t, 0, len(sc.GetWarnings())) sc.AppendWarning(errors.New("err2")) warnings := sc.GetWarnings() require.Equal(t, 1, len(warnings)) require.Equal(t, stmtctx.WarnLevelWarning, warnings[0].Level) require.Equal(t, "err2", warnings[0].Err.Error()) } func TestStmtCtxID(t *testing.T) { sc := stmtctx.NewStmtCtx() currentID := sc.CtxID() cases := []struct { fn func() *stmtctx.StatementContext }{ {func() *stmtctx.StatementContext { return stmtctx.NewStmtCtx() }}, {func() *stmtctx.StatementContext { return stmtctx.NewStmtCtxWithTimeZone(time.Local) }}, 
{func() *stmtctx.StatementContext { sc.Reset() return sc }}, } for _, c := range cases { ctxID := c.fn().CtxID() require.Greater(t, ctxID, currentID) currentID = ctxID } } func BenchmarkErrCtx(b *testing.B) { sc := stmtctx.NewStmtCtx() for i := 0; i < b.N; i++ { sc.ErrCtx() } }
pkg/sessionctx/stmtctx/stmtctx_test.go
1
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.9977460503578186, 0.3682895600795746, 0.00016775797121226788, 0.011388489976525307, 0.45439594984054565 ]
{ "id": 7, "code_window": [ "\t}\n", "}\n", "\n", "func BenchmarkErrCtx(b *testing.B) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "func TestErrCtx(t *testing.T) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\t// the default errCtx\n", "\terr := types.ErrTruncated\n", "\trequire.Error(t, sc.HandleError(err))\n", "\n", "\t// reset the types flags will re-initialize the error flag\n", "\tsc.SetTypeFlags(types.DefaultStmtFlags | types.FlagTruncateAsWarning)\n", "\trequire.NoError(t, sc.HandleError(err))\n", "}\n", "\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx_test.go", "type": "add", "edit_start_line_idx": 410 }
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. package utils import ( "github.com/pingcap/errors" "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "go.uber.org/zap" "golang.org/x/sync/errgroup" ) // WorkerPool contains a pool of workers. type WorkerPool struct { limit uint workers chan *Worker name string } // Worker identified by ID. type Worker struct { ID uint64 } type taskFunc func() type identifiedTaskFunc func(uint64) // NewWorkerPool returns a WorkPool. func NewWorkerPool(limit uint, name string) *WorkerPool { workers := make(chan *Worker, limit) for i := uint(0); i < limit; i++ { workers <- &Worker{ID: uint64(i + 1)} } return &WorkerPool{ limit: limit, workers: workers, name: name, } } // IdleCount counts how many idle workers in the pool. func (pool *WorkerPool) IdleCount() int { return len(pool.workers) } // Limit is the limit of the pool func (pool *WorkerPool) Limit() int { return int(pool.limit) } // Apply executes a task. func (pool *WorkerPool) Apply(fn taskFunc) { worker := pool.ApplyWorker() go func() { defer pool.RecycleWorker(worker) fn() }() } // ApplyWithID execute a task and provides it with the worker ID. func (pool *WorkerPool) ApplyWithID(fn identifiedTaskFunc) { worker := pool.ApplyWorker() go func() { defer pool.RecycleWorker(worker) fn(worker.ID) }() } // ApplyOnErrorGroup executes a task in an errorgroup. func (pool *WorkerPool) ApplyOnErrorGroup(eg *errgroup.Group, fn func() error) { worker := pool.ApplyWorker() eg.Go(func() error { defer pool.RecycleWorker(worker) return fn() }) } // ApplyWithIDInErrorGroup executes a task in an errorgroup and provides it with the worker ID. func (pool *WorkerPool) ApplyWithIDInErrorGroup(eg *errgroup.Group, fn func(id uint64) error) { worker := pool.ApplyWorker() eg.Go(func() error { defer pool.RecycleWorker(worker) return fn(worker.ID) }) } // ApplyWorker apply a worker. func (pool *WorkerPool) ApplyWorker() *Worker { var worker *Worker select { case worker = <-pool.workers: default: log.Debug("wait for workers", zap.String("pool", pool.name)) worker = <-pool.workers } return worker } // RecycleWorker recycle a worker. func (pool *WorkerPool) RecycleWorker(worker *Worker) { if worker == nil { panic("invalid restore worker") } pool.workers <- worker } // HasWorker checks if the pool has unallocated workers. func (pool *WorkerPool) HasWorker() bool { return pool.IdleCount() > 0 } // PanicToErr recovers when the execution get panicked, and set the error provided by the arg. // generally, this would be used with named return value and `defer`, like: // // func foo() (err error) { // defer utils.PanicToErr(&err) // return maybePanic() // } // // Before using this, there are some hints for reducing resource leakage or bugs: // - If any of clean work (by `defer`) relies on the error (say, when error happens, rollback some operations.), please // place `defer this` AFTER that. // - All resources allocated should be freed by the `defer` syntax, or when panicking, they may not be recycled. func PanicToErr(err *error) { item := recover() if item != nil { *err = errors.Annotatef(berrors.ErrUnknown, "panicked when executing, message: %v", item) log.Warn("PanicToErr: panicked, recovering and returning error", zap.StackSkip("stack", 1), logutil.ShortError(*err)) } }
br/pkg/utils/worker.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.004674375522881746, 0.0010855920845642686, 0.00017030883464030921, 0.000361804966814816, 0.0013101650401949883 ]
{ "id": 7, "code_window": [ "\t}\n", "}\n", "\n", "func BenchmarkErrCtx(b *testing.B) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "func TestErrCtx(t *testing.T) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\t// the default errCtx\n", "\terr := types.ErrTruncated\n", "\trequire.Error(t, sc.HandleError(err))\n", "\n", "\t// reset the types flags will re-initialize the error flag\n", "\tsc.SetTypeFlags(types.DefaultStmtFlags | types.FlagTruncateAsWarning)\n", "\trequire.NoError(t, sc.HandleError(err))\n", "}\n", "\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx_test.go", "type": "add", "edit_start_line_idx": 410 }
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package executor import ( "bufio" "context" "fmt" "io" "os" "path/filepath" "regexp" "runtime" "slices" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/privilege" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/plancodec" "go.uber.org/zap" ) type signalsKey struct{} // ParseSlowLogBatchSize is the batch size of slow-log lines for a worker to parse, exported for testing. var ParseSlowLogBatchSize = 64 // slowQueryRetriever is used to read slow log data. type slowQueryRetriever struct { table *model.TableInfo outputCols []*model.ColumnInfo initialized bool extractor *plannercore.SlowQueryExtractor files []logFile fileIdx int fileLine int checker *slowLogChecker columnValueFactoryMap map[string]slowQueryColumnValueFactory instanceFactory func([]types.Datum) taskList chan slowLogTask stats *slowQueryRuntimeStats memTracker *memory.Tracker lastFetchSize int64 cancel context.CancelFunc wg sync.WaitGroup } func (e *slowQueryRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { if !e.initialized { err := e.initialize(ctx, sctx) if err != nil { return nil, err } ctx, e.cancel = context.WithCancel(ctx) e.initializeAsyncParsing(ctx, sctx) } return e.dataForSlowLog(ctx) } func (e *slowQueryRetriever) initialize(ctx context.Context, sctx sessionctx.Context) error { var err error var hasProcessPriv bool if pm := privilege.GetPrivilegeManager(sctx); pm != nil { hasProcessPriv = pm.RequestVerification(sctx.GetSessionVars().ActiveRoles, "", "", "", mysql.ProcessPriv) } // initialize column value factories. e.columnValueFactoryMap = make(map[string]slowQueryColumnValueFactory, len(e.outputCols)) for idx, col := range e.outputCols { if col.Name.O == util.ClusterTableInstanceColumnName { e.instanceFactory, err = getInstanceColumnValueFactory(sctx, idx) if err != nil { return err } continue } factory, err := getColumnValueFactoryByName(col.Name.O, idx) if err != nil { return err } if factory == nil { panic(fmt.Sprintf("should never happen, should register new column %v into getColumnValueFactoryByName function", col.Name.O)) } e.columnValueFactoryMap[col.Name.O] = factory } // initialize checker. 
e.checker = &slowLogChecker{ hasProcessPriv: hasProcessPriv, user: sctx.GetSessionVars().User, } e.stats = &slowQueryRuntimeStats{} if e.extractor != nil { e.checker.enableTimeCheck = e.extractor.Enable for _, tr := range e.extractor.TimeRanges { startTime := types.NewTime(types.FromGoTime(tr.StartTime.In(sctx.GetSessionVars().Location())), mysql.TypeDatetime, types.MaxFsp) endTime := types.NewTime(types.FromGoTime(tr.EndTime.In(sctx.GetSessionVars().Location())), mysql.TypeDatetime, types.MaxFsp) timeRange := &timeRange{ startTime: startTime, endTime: endTime, } e.checker.timeRanges = append(e.checker.timeRanges, timeRange) } } else { e.extractor = &plannercore.SlowQueryExtractor{} } e.initialized = true e.files, err = e.getAllFiles(ctx, sctx, sctx.GetSessionVars().SlowQueryFile) if e.extractor.Desc { e.reverseLogFiles() } return err } func (e *slowQueryRetriever) reverseLogFiles() { for i := 0; i < len(e.files)/2; i++ { j := len(e.files) - i - 1 e.files[i], e.files[j] = e.files[j], e.files[i] } } func (e *slowQueryRetriever) close() error { for _, f := range e.files { err := f.file.Close() if err != nil { logutil.BgLogger().Error("close slow log file failed.", zap.Error(err)) } } if e.cancel != nil { e.cancel() } e.wg.Wait() return nil } type parsedSlowLog struct { rows [][]types.Datum err error } func (e *slowQueryRetriever) getNextFile() *os.File { if e.fileIdx >= len(e.files) { return nil } file := e.files[e.fileIdx].file e.fileIdx++ if e.stats != nil { stat, err := file.Stat() if err == nil { // ignore the err will be ok. e.stats.readFileSize += stat.Size() e.stats.readFileNum++ } } return file } func (e *slowQueryRetriever) getPreviousFile() *os.File { fileIdx := e.fileIdx // fileIdx refer to the next file which should be read // so we need to set fileIdx to fileIdx - 2 to get the previous file. fileIdx = fileIdx - 2 if fileIdx < 0 { return nil } file := e.files[fileIdx].file _, err := file.Seek(0, io.SeekStart) if err != nil { return nil } return file } func (e *slowQueryRetriever) parseDataForSlowLog(ctx context.Context, sctx sessionctx.Context) { defer e.wg.Done() file := e.getNextFile() if file == nil { close(e.taskList) return } reader := bufio.NewReader(file) e.parseSlowLog(ctx, sctx, reader, ParseSlowLogBatchSize) } func (e *slowQueryRetriever) dataForSlowLog(ctx context.Context) ([][]types.Datum, error) { var ( task slowLogTask ok bool ) e.memConsume(-e.lastFetchSize) e.lastFetchSize = 0 for { select { case task, ok = <-e.taskList: case <-ctx.Done(): return nil, ctx.Err() } if !ok { return nil, nil } result := <-task.resultCh rows, err := result.rows, result.err if err != nil { return nil, err } if len(rows) == 0 { continue } if e.instanceFactory != nil { for i := range rows { e.instanceFactory(rows[i]) } } e.lastFetchSize = calculateDatumsSize(rows) return rows, nil } } type slowLogChecker struct { // Below fields is used to check privilege. hasProcessPriv bool user *auth.UserIdentity // Below fields is used to check slow log time valid. 
enableTimeCheck bool timeRanges []*timeRange } type timeRange struct { startTime types.Time endTime types.Time } func (sc *slowLogChecker) hasPrivilege(userName string) bool { return sc.hasProcessPriv || sc.user == nil || userName == sc.user.Username } func (sc *slowLogChecker) isTimeValid(t types.Time) bool { for _, tr := range sc.timeRanges { if sc.enableTimeCheck && (t.Compare(tr.startTime) >= 0 && t.Compare(tr.endTime) <= 0) { return true } } return !sc.enableTimeCheck } func getOneLine(reader *bufio.Reader) ([]byte, error) { return util.ReadLine(reader, int(variable.MaxOfMaxAllowedPacket)) } type offset struct { offset int length int } type slowLogTask struct { resultCh chan parsedSlowLog } type slowLogBlock []string func (e *slowQueryRetriever) getBatchLog(ctx context.Context, reader *bufio.Reader, offset *offset, num int) ([][]string, error) { var line string log := make([]string, 0, num) var err error for i := 0; i < num; i++ { for { if isCtxDone(ctx) { return nil, ctx.Err() } e.fileLine++ lineByte, err := getOneLine(reader) if err != nil { if err == io.EOF { e.fileLine = 0 file := e.getNextFile() if file == nil { return [][]string{log}, nil } offset.length = len(log) reader.Reset(file) continue } return [][]string{log}, err } line = string(hack.String(lineByte)) log = append(log, line) if strings.HasSuffix(line, variable.SlowLogSQLSuffixStr) { if strings.HasPrefix(line, "use") || strings.HasPrefix(line, variable.SlowLogRowPrefixStr) { continue } break } } } return [][]string{log}, err } func (e *slowQueryRetriever) getBatchLogForReversedScan(ctx context.Context, reader *bufio.Reader, offset *offset, num int) ([][]string, error) { // reader maybe change when read previous file. inputReader := reader defer func() { file := e.getNextFile() if file != nil { inputReader.Reset(file) } }() var line string var logs []slowLogBlock var log []string var err error hasStartFlag := false scanPreviousFile := false for { if isCtxDone(ctx) { return nil, ctx.Err() } e.fileLine++ lineByte, err := getOneLine(reader) if err != nil { if err == io.EOF { if len(log) == 0 { decomposedSlowLogTasks := decomposeToSlowLogTasks(logs, num) offset.length = len(decomposedSlowLogTasks) return decomposedSlowLogTasks, nil } e.fileLine = 0 file := e.getPreviousFile() if file == nil { return decomposeToSlowLogTasks(logs, num), nil } reader = bufio.NewReader(file) scanPreviousFile = true continue } return nil, err } line = string(hack.String(lineByte)) if !hasStartFlag && strings.HasPrefix(line, variable.SlowLogStartPrefixStr) { hasStartFlag = true } if hasStartFlag { log = append(log, line) if strings.HasSuffix(line, variable.SlowLogSQLSuffixStr) { if strings.HasPrefix(line, "use") || strings.HasPrefix(line, variable.SlowLogRowPrefixStr) { continue } logs = append(logs, log) if scanPreviousFile { break } log = make([]string, 0, 8) hasStartFlag = false } } } return decomposeToSlowLogTasks(logs, num), err } func decomposeToSlowLogTasks(logs []slowLogBlock, num int) [][]string { if len(logs) == 0 { return nil } //In reversed scan, We should reverse the blocks. last := len(logs) - 1 for i := 0; i < len(logs)/2; i++ { logs[i], logs[last-i] = logs[last-i], logs[i] } decomposedSlowLogTasks := make([][]string, 0) log := make([]string, 0, num*len(logs[0])) for i := range logs { log = append(log, logs[i]...) 
if i > 0 && i%num == 0 { decomposedSlowLogTasks = append(decomposedSlowLogTasks, log) log = make([]string, 0, len(log)) } } if len(log) > 0 { decomposedSlowLogTasks = append(decomposedSlowLogTasks, log) } return decomposedSlowLogTasks } func (e *slowQueryRetriever) parseSlowLog(ctx context.Context, sctx sessionctx.Context, reader *bufio.Reader, logNum int) { defer close(e.taskList) offset := offset{offset: 0, length: 0} // To limit the num of go routine concurrent := sctx.GetSessionVars().Concurrency.DistSQLScanConcurrency() ch := make(chan int, concurrent) if e.stats != nil { e.stats.concurrent = concurrent } defer close(ch) for { startTime := time.Now() var logs [][]string var err error if !e.extractor.Desc { logs, err = e.getBatchLog(ctx, reader, &offset, logNum) } else { logs, err = e.getBatchLogForReversedScan(ctx, reader, &offset, logNum) } if err != nil { t := slowLogTask{} t.resultCh = make(chan parsedSlowLog, 1) select { case <-ctx.Done(): return case e.taskList <- t: } e.sendParsedSlowLogCh(t, parsedSlowLog{nil, err}) } if len(logs) == 0 || len(logs[0]) == 0 { break } if e.stats != nil { e.stats.readFile += time.Since(startTime) } failpoint.Inject("mockReadSlowLogSlow", func(val failpoint.Value) { if val.(bool) { signals := ctx.Value(signalsKey{}).([]chan int) signals[0] <- 1 <-signals[1] } }) for i := range logs { log := logs[i] t := slowLogTask{} t.resultCh = make(chan parsedSlowLog, 1) start := offset ch <- 1 select { case <-ctx.Done(): return case e.taskList <- t: } e.wg.Add(1) go func() { defer e.wg.Done() result, err := e.parseLog(ctx, sctx, log, start) e.sendParsedSlowLogCh(t, parsedSlowLog{result, err}) <-ch }() offset.offset = e.fileLine offset.length = 0 select { case <-ctx.Done(): return default: } } } } func (*slowQueryRetriever) sendParsedSlowLogCh(t slowLogTask, re parsedSlowLog) { select { case t.resultCh <- re: default: return } } func getLineIndex(offset offset, index int) int { var fileLine int if offset.length <= index { fileLine = index - offset.length + 1 } else { fileLine = offset.offset + index + 1 } return fileLine } // kvSplitRegex: it was just for split "field: value field: value..." var kvSplitRegex = regexp.MustCompile(`\w+: `) // splitByColon split a line like "field: value field: value..." 
func splitByColon(line string) (fields []string, values []string) { matches := kvSplitRegex.FindAllStringIndex(line, -1) fields = make([]string, 0, len(matches)) values = make([]string, 0, len(matches)) beg := 0 end := 0 for _, match := range matches { // trim ": " fields = append(fields, line[match[0]:match[1]-2]) end = match[0] if beg != 0 { // trim " " values = append(values, line[beg:end-1]) } beg = match[1] } if end != len(line) { // " " does not exist in the end values = append(values, line[beg:]) } return fields, values } func (e *slowQueryRetriever) parseLog(ctx context.Context, sctx sessionctx.Context, log []string, offset offset) (data [][]types.Datum, err error) { start := time.Now() logSize := calculateLogSize(log) defer e.memConsume(-logSize) defer func() { if r := recover(); r != nil { err = util.GetRecoverError(r) buf := make([]byte, 4096) stackSize := runtime.Stack(buf, false) buf = buf[:stackSize] logutil.BgLogger().Warn("slow query parse slow log panic", zap.Error(err), zap.String("stack", string(buf))) } if e.stats != nil { atomic.AddInt64(&e.stats.parseLog, int64(time.Since(start))) } }() e.memConsume(logSize) failpoint.Inject("errorMockParseSlowLogPanic", func(val failpoint.Value) { if val.(bool) { panic("panic test") } }) var row []types.Datum user := "" tz := sctx.GetSessionVars().Location() startFlag := false for index, line := range log { if isCtxDone(ctx) { return nil, ctx.Err() } fileLine := getLineIndex(offset, index) if !startFlag && strings.HasPrefix(line, variable.SlowLogStartPrefixStr) { row = make([]types.Datum, len(e.outputCols)) user = "" valid := e.setColumnValue(sctx, row, tz, variable.SlowLogTimeStr, line[len(variable.SlowLogStartPrefixStr):], e.checker, fileLine) if valid { startFlag = true } continue } if startFlag { if strings.HasPrefix(line, variable.SlowLogRowPrefixStr) { line = line[len(variable.SlowLogRowPrefixStr):] valid := true if strings.HasPrefix(line, variable.SlowLogPrevStmtPrefix) { valid = e.setColumnValue(sctx, row, tz, variable.SlowLogPrevStmt, line[len(variable.SlowLogPrevStmtPrefix):], e.checker, fileLine) } else if strings.HasPrefix(line, variable.SlowLogUserAndHostStr+variable.SlowLogSpaceMarkStr) { value := line[len(variable.SlowLogUserAndHostStr+variable.SlowLogSpaceMarkStr):] fields := strings.SplitN(value, "@", 2) if len(fields) < 2 { continue } user = parseUserOrHostValue(fields[0]) if e.checker != nil && !e.checker.hasPrivilege(user) { startFlag = false continue } valid = e.setColumnValue(sctx, row, tz, variable.SlowLogUserStr, user, e.checker, fileLine) if !valid { startFlag = false continue } host := parseUserOrHostValue(fields[1]) valid = e.setColumnValue(sctx, row, tz, variable.SlowLogHostStr, host, e.checker, fileLine) } else if strings.HasPrefix(line, variable.SlowLogCopBackoffPrefix) { valid = e.setColumnValue(sctx, row, tz, variable.SlowLogBackoffDetail, line, e.checker, fileLine) } else if strings.HasPrefix(line, variable.SlowLogWarnings) { line = line[len(variable.SlowLogWarnings+variable.SlowLogSpaceMarkStr):] valid = e.setColumnValue(sctx, row, tz, variable.SlowLogWarnings, line, e.checker, fileLine) } else { fields, values := splitByColon(line) for i := 0; i < len(fields); i++ { valid := e.setColumnValue(sctx, row, tz, fields[i], values[i], e.checker, fileLine) if !valid { startFlag = false break } } } if !valid { startFlag = false } } else if strings.HasSuffix(line, variable.SlowLogSQLSuffixStr) { if strings.HasPrefix(line, "use") { // `use DB` statements in the slow log is used to keep it be compatible with 
MySQL, // since we already get the current DB from the `# DB` field, we can ignore it here, // please see https://github.com/pingcap/tidb/issues/17846 for more details. continue } if e.checker != nil && !e.checker.hasPrivilege(user) { startFlag = false continue } // Get the sql string, and mark the start flag to false. _ = e.setColumnValue(sctx, row, tz, variable.SlowLogQuerySQLStr, string(hack.Slice(line)), e.checker, fileLine) e.setDefaultValue(row) e.memConsume(types.EstimatedMemUsage(row, 1)) data = append(data, row) startFlag = false } else { startFlag = false } } } return data, nil } func (e *slowQueryRetriever) setColumnValue(sctx sessionctx.Context, row []types.Datum, tz *time.Location, field, value string, checker *slowLogChecker, lineNum int) bool { factory := e.columnValueFactoryMap[field] if factory == nil { // Fix issue 34320, when slow log time is not in the output columns, the time filter condition is mistakenly discard. if field == variable.SlowLogTimeStr && checker != nil { t, err := ParseTime(value) if err != nil { err = fmt.Errorf("Parse slow log at line %v, failed field is %v, failed value is %v, error is %v", lineNum, field, value, err) sctx.GetSessionVars().StmtCtx.AppendWarning(err) return false } timeValue := types.NewTime(types.FromGoTime(t), mysql.TypeTimestamp, types.MaxFsp) return checker.isTimeValid(timeValue) } return true } valid, err := factory(row, value, tz, checker) if err != nil { err = fmt.Errorf("Parse slow log at line %v, failed field is %v, failed value is %v, error is %v", lineNum, field, value, err) sctx.GetSessionVars().StmtCtx.AppendWarning(err) return true } return valid } func (e *slowQueryRetriever) setDefaultValue(row []types.Datum) { for i := range row { if !row[i].IsNull() { continue } row[i] = table.GetZeroValue(e.outputCols[i]) } } type slowQueryColumnValueFactory func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (valid bool, err error) func parseUserOrHostValue(value string) string { // the new User&Host format: root[root] @ localhost [127.0.0.1] tmp := strings.Split(value, "[") return strings.TrimSpace(tmp[0]) } func getColumnValueFactoryByName(colName string, columnIdx int) (slowQueryColumnValueFactory, error) { switch colName { case variable.SlowLogTimeStr: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (bool, error) { t, err := ParseTime(value) if err != nil { return false, err } timeValue := types.NewTime(types.FromGoTime(t.In(tz)), mysql.TypeTimestamp, types.MaxFsp) if checker != nil { valid := checker.isTimeValid(timeValue) if !valid { return valid, nil } } row[columnIdx] = types.NewTimeDatum(timeValue) return true, nil }, nil case variable.SlowLogBackoffDetail: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (bool, error) { backoffDetail := row[columnIdx].GetString() if len(backoffDetail) > 0 { backoffDetail += " " } backoffDetail += value row[columnIdx] = types.NewStringDatum(backoffDetail) return true, nil }, nil case variable.SlowLogPlan: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (bool, error) { plan := parsePlan(value) row[columnIdx] = types.NewStringDatum(plan) return true, nil }, nil case variable.SlowLogBinaryPlan: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (bool, error) { if strings.HasPrefix(value, variable.SlowLogBinaryPlanPrefix) { value = value[len(variable.SlowLogBinaryPlanPrefix) : 
len(value)-len(variable.SlowLogPlanSuffix)] } row[columnIdx] = types.NewStringDatum(value) return true, nil }, nil case variable.SlowLogConnIDStr, variable.SlowLogExecRetryCount, variable.SlowLogPreprocSubQueriesStr, execdetails.WriteKeysStr, execdetails.WriteSizeStr, execdetails.PrewriteRegionStr, execdetails.TxnRetryStr, execdetails.RequestCountStr, execdetails.TotalKeysStr, execdetails.ProcessKeysStr, execdetails.RocksdbDeleteSkippedCountStr, execdetails.RocksdbKeySkippedCountStr, execdetails.RocksdbBlockCacheHitCountStr, execdetails.RocksdbBlockReadCountStr, variable.SlowLogTxnStartTSStr, execdetails.RocksdbBlockReadByteStr: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (valid bool, err error) { v, err := strconv.ParseUint(value, 10, 64) if err != nil { return false, err } row[columnIdx] = types.NewUintDatum(v) return true, nil }, nil case variable.SlowLogExecRetryTime, variable.SlowLogQueryTimeStr, variable.SlowLogParseTimeStr, variable.SlowLogCompileTimeStr, variable.SlowLogRewriteTimeStr, variable.SlowLogPreProcSubQueryTimeStr, variable.SlowLogOptimizeTimeStr, variable.SlowLogWaitTSTimeStr, execdetails.PreWriteTimeStr, execdetails.WaitPrewriteBinlogTimeStr, execdetails.CommitTimeStr, execdetails.GetCommitTSTimeStr, execdetails.CommitBackoffTimeStr, execdetails.ResolveLockTimeStr, execdetails.LocalLatchWaitTimeStr, execdetails.CopTimeStr, execdetails.ProcessTimeStr, execdetails.WaitTimeStr, execdetails.BackoffTimeStr, execdetails.LockKeysTimeStr, variable.SlowLogCopProcAvg, variable.SlowLogCopProcP90, variable.SlowLogCopProcMax, variable.SlowLogCopWaitAvg, variable.SlowLogCopWaitP90, variable.SlowLogCopWaitMax, variable.SlowLogKVTotal, variable.SlowLogPDTotal, variable.SlowLogBackoffTotal, variable.SlowLogWriteSQLRespTotal, variable.SlowLogRRU, variable.SlowLogWRU, variable.SlowLogWaitRUDuration: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (valid bool, err error) { v, err := strconv.ParseFloat(value, 64) if err != nil { return false, err } row[columnIdx] = types.NewFloat64Datum(v) return true, nil }, nil case variable.SlowLogUserStr, variable.SlowLogHostStr, execdetails.BackoffTypesStr, variable.SlowLogDBStr, variable.SlowLogIndexNamesStr, variable.SlowLogDigestStr, variable.SlowLogStatsInfoStr, variable.SlowLogCopProcAddr, variable.SlowLogCopWaitAddr, variable.SlowLogPlanDigest, variable.SlowLogPrevStmt, variable.SlowLogQuerySQLStr, variable.SlowLogWarnings, variable.SlowLogSessAliasStr, variable.SlowLogResourceGroup: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (valid bool, err error) { row[columnIdx] = types.NewStringDatum(value) return true, nil }, nil case variable.SlowLogMemMax, variable.SlowLogDiskMax, variable.SlowLogResultRows: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (valid bool, err error) { v, err := strconv.ParseInt(value, 10, 64) if err != nil { return false, err } row[columnIdx] = types.NewIntDatum(v) return true, nil }, nil case variable.SlowLogPrepared, variable.SlowLogSucc, variable.SlowLogPlanFromCache, variable.SlowLogPlanFromBinding, variable.SlowLogIsInternalStr, variable.SlowLogIsExplicitTxn, variable.SlowLogIsWriteCacheTable, variable.SlowLogHasMoreResults: return func(row []types.Datum, value string, tz *time.Location, checker *slowLogChecker) (valid bool, err error) { v, err := strconv.ParseBool(value) if err != nil { return false, err } row[columnIdx] = types.NewDatum(v) return 
true, nil }, nil } return nil, nil } func getInstanceColumnValueFactory(sctx sessionctx.Context, columnIdx int) (func(row []types.Datum), error) { instanceAddr, err := infoschema.GetInstanceAddr(sctx) if err != nil { return nil, err } return func(row []types.Datum) { row[columnIdx] = types.NewStringDatum(instanceAddr) }, nil } func parsePlan(planString string) string { if len(planString) <= len(variable.SlowLogPlanPrefix)+len(variable.SlowLogPlanSuffix) { return planString } planString = planString[len(variable.SlowLogPlanPrefix) : len(planString)-len(variable.SlowLogPlanSuffix)] decodePlanString, err := plancodec.DecodePlan(planString) if err == nil { planString = decodePlanString } else { logutil.BgLogger().Error("decode plan in slow log failed", zap.String("plan", planString), zap.Error(err)) } return planString } // ParseTime exports for testing. func ParseTime(s string) (time.Time, error) { t, err := time.Parse(logutil.SlowLogTimeFormat, s) if err != nil { // This is for compatibility. t, err = time.Parse(logutil.OldSlowLogTimeFormat, s) if err != nil { err = errors.Errorf("string \"%v\" doesn't has a prefix that matches format \"%v\", err: %v", s, logutil.SlowLogTimeFormat, err) } } return t, err } type logFile struct { file *os.File // The opened file handle start, end time.Time // The start/end time of the log file } // getAllFiles is used to get all slow-log needed to parse, it is exported for test. func (e *slowQueryRetriever) getAllFiles(ctx context.Context, sctx sessionctx.Context, logFilePath string) ([]logFile, error) { totalFileNum := 0 if e.stats != nil { startTime := time.Now() defer func() { e.stats.initialize = time.Since(startTime) e.stats.totalFileNum = totalFileNum }() } if e.extractor == nil || !e.extractor.Enable { totalFileNum = 1 //nolint: gosec file, err := os.Open(logFilePath) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } return []logFile{{file: file}}, nil } var logFiles []logFile logDir := filepath.Dir(logFilePath) ext := filepath.Ext(logFilePath) prefix := logFilePath[:len(logFilePath)-len(ext)] handleErr := func(err error) error { // Ignore the error and append warning for usability. if err != io.EOF { sctx.GetSessionVars().StmtCtx.AppendWarning(err) } return nil } files, err := os.ReadDir(logDir) if err != nil { return nil, err } walkFn := func(path string, info os.DirEntry) error { if info.IsDir() { return nil } // All rotated log files have the same prefix with the original file. if !strings.HasPrefix(path, prefix) { return nil } if isCtxDone(ctx) { return ctx.Err() } totalFileNum++ file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm) if err != nil { return handleErr(err) } skip := false defer func() { if !skip { terror.Log(file.Close()) } }() // Get the file start time. fileStartTime, err := e.getFileStartTime(ctx, file) if err != nil { return handleErr(err) } start := types.NewTime(types.FromGoTime(fileStartTime), mysql.TypeDatetime, types.MaxFsp) notInAllTimeRanges := true for _, tr := range e.checker.timeRanges { if start.Compare(tr.endTime) <= 0 { notInAllTimeRanges = false break } } if notInAllTimeRanges { return nil } // Get the file end time. 
fileEndTime, err := e.getFileEndTime(ctx, file) if err != nil { return handleErr(err) } end := types.NewTime(types.FromGoTime(fileEndTime), mysql.TypeDatetime, types.MaxFsp) inTimeRanges := false for _, tr := range e.checker.timeRanges { if !(start.Compare(tr.endTime) > 0 || end.Compare(tr.startTime) < 0) { inTimeRanges = true break } } if !inTimeRanges { return nil } _, err = file.Seek(0, io.SeekStart) if err != nil { return handleErr(err) } logFiles = append(logFiles, logFile{ file: file, start: fileStartTime, end: fileEndTime, }) skip = true return nil } for _, file := range files { err := walkFn(filepath.Join(logDir, file.Name()), file) if err != nil { return nil, err } } // Sort by start time slices.SortFunc(logFiles, func(i, j logFile) int { return i.start.Compare(j.start) }) return logFiles, err } func (*slowQueryRetriever) getFileStartTime(ctx context.Context, file *os.File) (time.Time, error) { var t time.Time _, err := file.Seek(0, io.SeekStart) if err != nil { return t, err } reader := bufio.NewReader(file) maxNum := 128 for { lineByte, err := getOneLine(reader) if err != nil { return t, err } line := string(lineByte) if strings.HasPrefix(line, variable.SlowLogStartPrefixStr) { return ParseTime(line[len(variable.SlowLogStartPrefixStr):]) } maxNum-- if maxNum <= 0 { break } if isCtxDone(ctx) { return t, ctx.Err() } } return t, errors.Errorf("malform slow query file %v", file.Name()) } func (e *slowQueryRetriever) getRuntimeStats() execdetails.RuntimeStats { return e.stats } type slowQueryRuntimeStats struct { totalFileNum int readFileNum int readFile time.Duration initialize time.Duration readFileSize int64 parseLog int64 concurrent int } // String implements the RuntimeStats interface. func (s *slowQueryRuntimeStats) String() string { return fmt.Sprintf("initialize: %s, read_file: %s, parse_log: {time:%s, concurrency:%v}, total_file: %v, read_file: %v, read_size: %s", execdetails.FormatDuration(s.initialize), execdetails.FormatDuration(s.readFile), execdetails.FormatDuration(time.Duration(s.parseLog)), s.concurrent, s.totalFileNum, s.readFileNum, memory.FormatBytes(s.readFileSize)) } // Merge implements the RuntimeStats interface. func (s *slowQueryRuntimeStats) Merge(rs execdetails.RuntimeStats) { tmp, ok := rs.(*slowQueryRuntimeStats) if !ok { return } s.totalFileNum += tmp.totalFileNum s.readFileNum += tmp.readFileNum s.readFile += tmp.readFile s.initialize += tmp.initialize s.readFileSize += tmp.readFileSize s.parseLog += tmp.parseLog } // Clone implements the RuntimeStats interface. func (s *slowQueryRuntimeStats) Clone() execdetails.RuntimeStats { newRs := *s return &newRs } // Tp implements the RuntimeStats interface. 
func (*slowQueryRuntimeStats) Tp() int { return execdetails.TpSlowQueryRuntimeStat } func (*slowQueryRetriever) getFileEndTime(ctx context.Context, file *os.File) (time.Time, error) { var t time.Time var tried int stat, err := file.Stat() if err != nil { return t, err } endCursor := stat.Size() maxLineNum := 128 for { lines, readBytes, err := readLastLines(ctx, file, endCursor) if err != nil { return t, err } // read out the file if readBytes == 0 { break } endCursor -= int64(readBytes) for i := len(lines) - 1; i >= 0; i-- { if strings.HasPrefix(lines[i], variable.SlowLogStartPrefixStr) { return ParseTime(lines[i][len(variable.SlowLogStartPrefixStr):]) } } tried += len(lines) if tried >= maxLineNum { break } if isCtxDone(ctx) { return t, ctx.Err() } } return t, errors.Errorf("invalid slow query file %v", file.Name()) } const maxReadCacheSize = 1024 * 1024 * 64 // Read lines from the end of a file // endCursor initial value should be the filesize func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) { var lines []byte var firstNonNewlinePos int var cursor = endCursor var size int64 = 2048 for { // stop if we are at the beginning // check it in the start to avoid read beyond the size if cursor <= 0 { break } if size < maxReadCacheSize { size = size * 2 } if cursor < size { size = cursor } cursor -= size _, err := file.Seek(cursor, io.SeekStart) if err != nil { return nil, 0, err } chars := make([]byte, size) _, err = file.Read(chars) if err != nil { return nil, 0, err } lines = append(chars, lines...) // nozero // find first '\n' or '\r' for i := 0; i < len(chars); i++ { // reach the line end // the first newline may be in the line end at the first round if i >= len(lines)-1 { break } if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 { firstNonNewlinePos = i + 1 break } } if firstNonNewlinePos > 0 { break } if isCtxDone(ctx) { return nil, 0, ctx.Err() } } finalStr := string(lines[firstNonNewlinePos:]) return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil } func (e *slowQueryRetriever) initializeAsyncParsing(ctx context.Context, sctx sessionctx.Context) { e.taskList = make(chan slowLogTask, 1) e.wg.Add(1) go e.parseDataForSlowLog(ctx, sctx) } func calculateLogSize(log []string) int64 { size := 0 for _, line := range log { size += len(line) } return int64(size) } func calculateDatumsSize(rows [][]types.Datum) int64 { size := int64(0) for _, row := range rows { size += types.EstimatedMemUsage(row, 1) } return size } func (e *slowQueryRetriever) memConsume(bytes int64) { if e.memTracker != nil { e.memTracker.Consume(bytes) } }
pkg/executor/slow_query.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.9695265293121338, 0.022681433707475662, 0.00016876070003490895, 0.0003076072607655078, 0.1296892762184143 ]
{ "id": 7, "code_window": [ "\t}\n", "}\n", "\n", "func BenchmarkErrCtx(b *testing.B) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "func TestErrCtx(t *testing.T) {\n", "\tsc := stmtctx.NewStmtCtx()\n", "\t// the default errCtx\n", "\terr := types.ErrTruncated\n", "\trequire.Error(t, sc.HandleError(err))\n", "\n", "\t// reset the types flags will re-initialize the error flag\n", "\tsc.SetTypeFlags(types.DefaultStmtFlags | types.FlagTruncateAsWarning)\n", "\trequire.NoError(t, sc.HandleError(err))\n", "}\n", "\n" ], "file_path": "pkg/sessionctx/stmtctx/stmtctx_test.go", "type": "add", "edit_start_line_idx": 410 }
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package watcher

import (
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestWatcher(t *testing.T) {
	var (
		oldFilePath, newFilePath string
		oldFileName              = "mysql-bin.000001"
		newFileName              = "mysql-bin.000002"
		wg                       sync.WaitGroup
	)

	// create dir
	dir := t.TempDir()

	// join the path
	oldFilePath = filepath.Join(dir, oldFileName)
	newFilePath = filepath.Join(dir, newFileName)

	// create watcher
	w := NewWatcher()

	// watch directory
	err := w.Add(dir)
	require.NoError(t, err)

	// start watcher
	err = w.Start(10 * time.Millisecond)
	require.NoError(t, err)
	defer w.Close()

	// create file
	f, err := os.Create(oldFilePath)
	require.NoError(t, err)
	f.Close()

	// watch for create
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertEvent(w, oldFilePath, Create, t)
	}()
	wg.Wait()

	// watch for write
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertEvent(w, oldFilePath, Modify, t)
	}()
	f, err = os.OpenFile(oldFilePath, os.O_WRONLY, 0766)
	require.NoError(t, err)
	f.Write([]byte("meaningless content"))
	f.Close()
	wg.Wait()

	// watch for chmod
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertEvent(w, oldFilePath, Chmod, t)
	}()
	err = os.Chmod(oldFilePath, 0777)
	require.NoError(t, err)
	wg.Wait()

	// watch for rename
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertEvent(w, oldFilePath, Rename, t)
	}()
	err = os.Rename(oldFilePath, newFilePath)
	require.NoError(t, err)
	wg.Wait()

	// watch for remove
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertEvent(w, newFilePath, Remove, t)
	}()
	err = os.Remove(newFilePath)
	require.NoError(t, err)
	wg.Wait()

	// watch for create again
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertEvent(w, oldFilePath, Create, t)
	}()
	// create file again
	f, err = os.Create(oldFilePath)
	require.NoError(t, err)
	f.Close()
	wg.Wait()

	// create another dir
	dir2 := t.TempDir()
	oldFilePath2 := filepath.Join(dir2, oldFileName)

	// add another directory for watching
	err = w.Add(dir2)
	require.NoError(t, err)

	// watch for move (rename to another directory)
	wg.Add(1)
	go func() {
		defer wg.Done()
		assertEvent(w, oldFilePath, Move, t)
	}()
	err = os.Rename(oldFilePath, oldFilePath2)
	require.NoError(t, err)
	wg.Wait()
}

func assertEvent(w *Watcher, path string, op Op, t *testing.T) {
	for {
		select {
		case ev := <-w.Events:
			if ev.IsDirEvent() {
				continue // skip event for directory
			}
			require.True(t, ev.HasOps(op))
			require.Equal(t, path, ev.Path)
			return
		case err2 := <-w.Errors:
			t.Fatal(err2)
			return
		}
	}
}
pkg/util/watcher/watcher_test.go
0
https://github.com/pingcap/tidb/commit/e61ee664f5e6a57facbdc760be3ba55327dfd2fd
[ 0.001491919974796474, 0.0002799346693791449, 0.00016826756473165005, 0.00018637000175658613, 0.0003072753606829792 ]