hunk (dict) | file (string, length 0-11.8M) | file_path (string, length 2-234) | label (int64, 0-1) | commit_url (string, length 74-103) | dependency_score (sequence, length 5) |
---|---|---|---|---|---|
{
"id": 0,
"code_window": [
"\t})\n",
"\n",
"\t// Create a dummy server stream to pass to ConnectInboundStream.\n",
"\tserverStream, _ /* clientStream */, cleanup, err := createDummyStream()\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\tdefer cleanup()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tserverStream, _ /* clientStream */, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 249
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9977005124092102,
0.16598866879940033,
0.0001650733029237017,
0.001823894795961678,
0.3439469635486603
] |
{
"id": 0,
"code_window": [
"\t})\n",
"\n",
"\t// Create a dummy server stream to pass to ConnectInboundStream.\n",
"\tserverStream, _ /* clientStream */, cleanup, err := createDummyStream()\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\tdefer cleanup()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tserverStream, _ /* clientStream */, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 249
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { connect } from "react-redux";
import { RouteComponentProps, withRouter } from "react-router-dom";
import { AppState } from "src/store";
import { actions as localStorageActions } from "src/store/localStorage";
import {
TransactionInsightsViewDispatchProps,
TransactionInsightsViewStateProps,
} from "./transactionInsights";
import {
StatementInsightsViewDispatchProps,
StatementInsightsViewStateProps,
} from "./statementInsights";
import { WorkloadInsightEventFilters } from "../types";
import {
WorkloadInsightsViewProps,
WorkloadInsightsRootControl,
} from "./workloadInsightRootControl";
import { SortSetting } from "src/sortedtable";
import {
actions as statementInsights,
selectColumns,
selectStatementInsights,
selectStatementInsightsError,
} from "src/store/insights/statementInsights";
import {
actions as transactionInsights,
selectTransactionInsights,
selectTransactionInsightsError,
selectFilters,
selectSortSetting,
} from "src/store/insights/transactionInsights";
import { Dispatch } from "redux";
import { TimeScale } from "../../timeScaleDropdown";
import { actions as sqlStatsActions } from "../../store/sqlStats";
const transactionMapStateToProps = (
state: AppState,
_props: RouteComponentProps,
): TransactionInsightsViewStateProps => ({
transactions: selectTransactionInsights(state),
transactionsError: selectTransactionInsightsError(state),
filters: selectFilters(state),
sortSetting: selectSortSetting(state),
});
const statementMapStateToProps = (
state: AppState,
_props: RouteComponentProps,
): StatementInsightsViewStateProps => ({
statements: selectStatementInsights(state),
statementsError: selectStatementInsightsError(state),
filters: selectFilters(state),
sortSetting: selectSortSetting(state),
selectedColumnNames: selectColumns(state),
});
const TransactionDispatchProps = (
dispatch: Dispatch,
): TransactionInsightsViewDispatchProps => ({
onFiltersChange: (filters: WorkloadInsightEventFilters) =>
dispatch(
localStorageActions.update({
key: "filters/InsightsPage",
value: filters,
}),
),
onSortChange: (ss: SortSetting) =>
dispatch(
localStorageActions.update({
key: "sortSetting/InsightsPage",
value: ss,
}),
),
setTimeScale: (ts: TimeScale) => {
dispatch(
sqlStatsActions.updateTimeScale({
ts: ts,
}),
);
},
refreshTransactionInsights: () => {
dispatch(transactionInsights.refresh());
},
});
const StatementDispatchProps = (
dispatch: Dispatch,
): StatementInsightsViewDispatchProps => ({
onFiltersChange: (filters: WorkloadInsightEventFilters) =>
dispatch(
localStorageActions.update({
key: "filters/InsightsPage",
value: filters,
}),
),
onSortChange: (ss: SortSetting) =>
dispatch(
localStorageActions.update({
key: "sortSetting/InsightsPage",
value: ss,
}),
),
// We use `null` when the value was never set and it will show all columns.
// If the user modifies the selection and no columns are selected,
// the function will save the value as a blank space, otherwise
// it gets saved as a comma-separated list of the selected columns.
onColumnsChange: (value: string[]) =>
dispatch(
localStorageActions.update({
key: "showColumns/StatementInsightsPage",
value: value.length === 0 ? " " : value.join(","),
}),
),
setTimeScale: (ts: TimeScale) => {
dispatch(
sqlStatsActions.updateTimeScale({
ts: ts,
}),
);
},
refreshStatementInsights: () => {
dispatch(statementInsights.refresh());
},
});
type StateProps = {
transactionInsightsViewStateProps: TransactionInsightsViewStateProps;
statementInsightsViewStateProps: StatementInsightsViewStateProps;
};
type DispatchProps = {
transactionInsightsViewDispatchProps: TransactionInsightsViewDispatchProps;
statementInsightsViewDispatchProps: StatementInsightsViewDispatchProps;
};
export const WorkloadInsightsPageConnected = withRouter(
connect<
StateProps,
DispatchProps,
RouteComponentProps,
WorkloadInsightsViewProps
>(
(state: AppState, props: RouteComponentProps) => ({
transactionInsightsViewStateProps: transactionMapStateToProps(
state,
props,
),
statementInsightsViewStateProps: statementMapStateToProps(state, props),
}),
dispatch => ({
transactionInsightsViewDispatchProps: TransactionDispatchProps(dispatch),
statementInsightsViewDispatchProps: StatementDispatchProps(dispatch),
}),
(stateProps, dispatchProps) => ({
transactionInsightsViewProps: {
...stateProps.transactionInsightsViewStateProps,
...dispatchProps.transactionInsightsViewDispatchProps,
},
statementInsightsViewProps: {
...stateProps.statementInsightsViewStateProps,
...dispatchProps.statementInsightsViewDispatchProps,
},
}),
)(WorkloadInsightsRootControl),
);
| pkg/ui/workspaces/cluster-ui/src/insights/workloadInsights/workloadInsightsPageConnected.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0001792924158507958,
0.00017114308138843626,
0.00016473885625600815,
0.00017018917424138635,
0.0000040236409404315054
] |
{
"id": 0,
"code_window": [
"\t})\n",
"\n",
"\t// Create a dummy server stream to pass to ConnectInboundStream.\n",
"\tserverStream, _ /* clientStream */, cleanup, err := createDummyStream()\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\tdefer cleanup()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tserverStream, _ /* clientStream */, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 249
} | #! /usr/bin/env expect -f
source [file join [file dirname $argv0] common.tcl]
start_server $argv
spawn $argv sql
eexpect "defaultdb>"
start_test "Test that a multi-line entry can be recalled escaped."
send "select 'foo\r"
eexpect " ->"
send "bar';\r"
eexpect "1 row"
eexpect "defaultdb>"
# Send up-arrow.
send "\033\[A"
eexpect "select 'foo"
eexpect " -> bar';"
send "\r"
eexpect "1 row"
eexpect "defaultdb>"
start_test "Test that Ctrl+C after the first line merely cancels the statement and presents the prompt."
send "\r"
eexpect defaultdb>
send "select\r"
eexpect " ->"
interrupt
eexpect "defaultdb>"
end_test
start_test "Test that a dangling table creation can be committed, and that other non-DDL, non-DML statements can be issued in the same txn. (#15283)"
send "create database if not exists t;"
send "drop table if exists t.blih;"
send "create table if not exists t.kv(k int primary key, v int);\r"
eexpect "CREATE TABLE"
eexpect "defaultdb>"
send "begin; create table t.blih(x INT REFERENCES t.kv(k));\r\r"
eexpect "CREATE TABLE"
eexpect "defaultdb"
eexpect OPEN
send "show all cluster settings;\r"
eexpect "rows"
eexpect "defaultdb"
eexpect OPEN
send "commit;\r"
eexpect COMMIT
eexpect "defaultdb>"
end_test
send "quit\r"
eexpect eof
# Pending the bubbletea changes here: https://github.com/charmbracelet/bubbletea/issues/404
# # we force TERM to xterm, otherwise we can't
# # test bracketed paste below.
# set env(TERM) xterm
#
# spawn $argv sql
# eexpect "defaultdb>"
#
# start_test "Test that a multi-line bracketed paste is handled properly."
# send "\033\[200~"
# send "\\set display_format csv\r\n"
# send "values (1,'a'), (2,'b'), (3,'c');\r\n"
# send "\033\[201~\r\n"
# eexpect "1,a"
# eexpect "2,b"
# eexpect "3,c"
# eexpect "defaultdb>"
# end_test
#
# send_eof
# eexpect eof
stop_server $argv
| pkg/cli/interactive_tests/test_multiline_statements.tcl | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017452103202231228,
0.00017028156435117126,
0.0001653463114053011,
0.0001702755398582667,
0.0000023064635570335668
] |
{
"id": 0,
"code_window": [
"\t})\n",
"\n",
"\t// Create a dummy server stream to pass to ConnectInboundStream.\n",
"\tserverStream, _ /* clientStream */, cleanup, err := createDummyStream()\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\tdefer cleanup()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tserverStream, _ /* clientStream */, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 249
} | - Feature Name: Insights into Constraint Conformance
- Status: completed
- Start Date: 2019-06-19
- Authors: Andrei Matei
- RFC PR: [#38309](https://github.com/cockroachdb/cockroach/pull/38309)
- Cockroach Issue: [#14113](https://github.com/cockroachdb/cockroach/issues/14113)
[#19644](https://github.com/cockroachdb/cockroach/issues/19644)
[#31597](https://github.com/cockroachdb/cockroach/issues/31597)
[#26757](https://github.com/cockroachdb/cockroach/issues/26757)
[#31551](https://github.com/cockroachdb/cockroach/issues/31551)
# Summary
The set of features described here aim to provide admins with visibility into
aspects of replication and replica placement. In particular admins will be able
to access a report that details which replication constraints are violated.
# Motivation
As of this writing, CRDB administrators have poor insight into data placement in
relation to the constraints they have set and in relation with other
considerations - specifically replication factor and replica diversity. This can
lead to real problems: it’s unclear when a whole locality can be turned down
without losing quorum for any ranges.
As a result of this work, admins will also be able to script actions to be taken
when partitioning, or another zone config change, has been “fully applied”.
# Guide-level explanation
## Background
Reminder for a few CRDB concepts:
**Replication zone configs and constraints:** key spans can be grouped into
*“replication zones” and properties can be set for each zone through a “zone
*config”. Among others, a zone config specifies the replication factor for the
*data in the zone (i.e. the replication factor of all the ranges in the zone), a
*list of constraints, and a list of lease preference policies.
**Replication Constraints:** used to restrict the set of nodes/stores that can
*hold a replica for the respective ranges. Constraints can say that only nodes
*marked with certain localities or attributes are to be used or that certain
*nodes/stores have to be avoided. Constraints can also be specified at the level
*of a replica, not only a range; for example, constraints can be set to require
*that some ranges have one replica in locality A, one in B and one in C.
**Lease preferences:** Lease preferences are expressed just like constraints, except
they only affect the choice of a leaseholder. For example, one can say that the
leaseholder needs to always be in a specific locality. Unlike constraints, not
being able to satisfy a lease preference doesn’t stop a lease acquisition.
**Replica diversity:** Given a set of constraints (or no constraints), CRDB
tries to split replicas evenly between localities/sublocalities that match the
constraint. The implementation computes a "diversity score" between a pair of
replicas as the inverse of the length of the common prefix of localities of the
two respective stores. It then computes a range's diversity score as the
average diversity score across all replica pairs. The allocator considers a
range under-diversified if there is another store that could replace an
existing replica and result in a higher range diversity score.
## Constraint conformance
Currently the only insight an administrator has into the health of replication
is via the Admin UI which displays a counter for under-replicated ranges and one
for unavailable ranges, and timeseries for over-replicated/under-replicated/unavailable.
There’s nothing about constraints, lease preferences or diversity. Also there’s
no information on over-replicated ranges - although hopefully those will go away
once we get atomic group membership changes. Also, the way in which these
counters are computed is defective: the counters are incoherent and, for
example, if all the nodes holding a partition go away, the ranges in that
partition are not counted (see
https://github.com/cockroachdb/cockroach/issues/19644).
Besides the general need for an administrator to know that everything is
copacetic, there are some specific cases where the lack of information is
particularly unfortunate:
1. Testing fault tolerance: A scenario people ran into was testing CRDB’s
resiliency to networking failures. A cluster is configured with 3 Availability
Zones (AZs) and the data is constrained to require a replica in every AZ (or
even without constraints, the implicit diversity requirement would drive towards
the same effect). One would think that an AZ can go away at any time and the
cluster should survive. But that’s only colloquially true. After an AZ goes away
and subsequently recovers, another AZ cannot disappear right away without
availability loss; the 2nd AZ has to stay up until constraint conformance is
re-established. Otherwise, the 2nd AZ likely has 2 replicas for the ranges that
were migrated away during the first outage.
As things stand, it’s entirely unclear how one is supposed to know when it’s
safe to bring down the 2nd AZ. (Of course, here we’re talking about disaster
testing, not the use of node decommissioning.)
1. Partitioning a table and running performance tests that assume the partitioning
is in place. For example, say I want to partition my data between Romania and
Bulgaria and test that my Bulgarian clients indeed start getting local
latencies. Well, partitioning is obviously not instantaneous and so it’s
entirely unclear how long I’m supposed to wait until I can conduct my test.
Same with lease preferences.
## Proposal
We’ll be introducing three ways for admins to observe different aspects of
replication state: a cluster-wide constraint conformance report visible in the
AdminUI, new jobs for tracking the progress of constraint changes, and two
crdb_internal virtual tables that can be queried for similar information to the
report - one table would list zone-level constraint violations, another would
list the violations on a more granular per-object basis.
For providing some insights into the diversity of ranges in different zone
configs, there's also going to be a crdb_internal virtual table that lists the
localities whose unavailability would result in range unavailability (e.g. "if
locality US goes away, half of the ranges in the default zone would lose
quorum").
### crdb_internal.replication_report(cached bool (default true))
We'll introduce the `crdb_internal.replication_report()` function, which
returns a report about constraint violations and critical localities as a
(single) JSON record.
```
{
// The time when this report was generated.
generated_at timestamp,
// All the cluster's zones are listed.
zones [
{
// zone_name and config_sql correspond to what `show zone configuration`
// returns.
zone_name string,
config_sql string,
// Each constraint that is violated is present here.
violations [
{
// enum: under replication, over replication, replica placement,
// lease preference
violation_type string,
// spec is the part of the zone config that is violated.
constraint string,
// A human readable message describing the violation.
// For example, for a violation_type="under replication", the message
// might be "64MB/1TB of data only (1/20000 ranges) only have 2 replica(s).
// 128MB/1TB of data only (2/20000 ranges) only have 1 replica(s)."
message string,
// the number of ranges in violation of this constraint.
ranges int,
// the sum of the sizes of all ranges in violation (in MB).
data_size_mb int,
// the average rate (in ranges/s and MiB/s) by which this quantity
// changed since some previous time the report was generated. A negative rate
// means things are improving (i.e. amount of data in violation is going down).
// NULL if no previous data is available to compute the velocity.
change_rate_ranges_per_sec int,
change_rate_mb_per_sec int,
}
]
// The localities that are critical for the current zone.
critical_localities [
{
locality string,
// the number of ranges for which this locality is critical.
ranges int,
// the sum of the sizes of all critical ranges (in MB).
data_size_mb int,
change_rate_ranges_per_sec int,
change_rate_mb_per_sec int,
}
]
}
]
}
```
The report is generated periodically (every minute) by a process described
below and stored in the database. Through the optional function argument, one
can ask for the cached report to be returned, or for a fresh report to be
generated.
Notes about violations:
1. All zones are present in the `zones` array, but only violated constraints are
present in the `violations` array.
2. Per-replica replication constraints (i.e. the constraints of the form `2
replicas: region:us, 1 replica: region:eu`) are split into their
constituents for the purposes of this report. In this example, the report
will list violations separately for the 2 us replicas vs the 1 eu replica.
Also, the per-replica constraints are considered "minimums", not exact
numbers. I.e. if a range has 2 replicas in eu, it is not in violation of the
eu constraint. Of course, it might be in violation of other constraints.
3. Per-replica constraint conformance doesn't care about dead nodes. For
example, if a constraint specifies that 2 replicas must be placed in the US
and, according to a range descriptor, the range has two replicas in the US
but one of the nodes is unresponsive/dead/decommissioned, the constraint will
be considered satisfied. Dead nodes will promptly trigger under-replication
violations, though (see next bullet).
The idea here is that, if a node dies, we'd rather not instantly generate a
ton of violations for the purpose of this report.
4. The notion of a node being alive or dead will be more nuanced in this
reporting code than it is in the allocator. The allocator only concerns
itself with nodes that have been dead for 5 minutes (`cluster setting
server.time_until_store_dead`) - that’s when it starts moving replicas away.
Nodes that died more recently are as good as live ones. But not so for this
report; we can’t claim that everything is fine for 5 minutes after a node’s
death. For the purposes of reporting under-replication, constraint
conformance (and implicitly lease preference conformance), a node will be
considered dead a few seconds after it failed to ping its liveness record.
Replicas on dead nodes are discarded; expired leases by themselves don't
trigger any violation (as specified elsewhere).
The message describing the violation due to dead nodes will have information
about what ranges are being rebalanced by the allocator and what ranges are
not yet receiving that treatment.
5. Violations of inherited constraints are reported only at the level of the
zone hierarchy where the constraint is introduced; they are not reported at
the lower levels.
6. The `message` field will generally contain the info from the `ranges` and
`data_size_mb` fields, but in an unstructured way. In the example message in
the JSON ("64MB/1TB of data only (1/20000 ranges) only have 2 replica(s).
128MB/1TB of data only (2/20000 ranges) only have 1 replica(s)."), the
message has more detailed information than the numerical fields; the
numerical fields count all ranges in violation of the respective constraint
without any further bucketing.
7. Since this report is possibly stale, the zone information presented can
differ from what `show all zone configurations` returns - it might not
include newly-created zones or newly applied configurations. That's a
feature as it allows the consumer to reason about whether a recent change to
zone configs had been picked up or not.
#### Critical localities
The `critical_localities` field exposes information about what localities would
cause data unavailability if the respective localities were to become
unavailable - a measure related to replica diversity.
For every zone, we'll count how many ranges would lose quorum by the disappearance
of any single locality. For the purposes of this report, we consider both more
narrow and more wide localities. E.g. if a node has
`--locality=country=us,region=east`, then we report for both localities `c=us`
and `c=us,r=east`. For the purposes of this report, we also consider a node id
as the last component of the node's locality. So, if, say, node 5 has
`--locality=country=us,region=east`, then for its ranges we actually consider
all of `c=us`, `c=us,r=east` and `c=us,r=east,n=5` as localities to report for.
If a locality is not critical for a range, it does not appear under that zone.
If a locality is critical for some data, that data also counts for the
criticality of any wider locality (e.g. if `us/east` is critical for 1MB of
data, `us` will be critical for (at least) that 1MB).
This report strictly covers critical localities; it does not answer all
questions one might have about the data's risk. For one, there's no way to
answer questions about the risk stemming from combinations of localities (e.g.
if I'd like my data to survive the loss of one region plus a node in another
region, or of (any) two regions, this report does not help).
This report is not directly related to the notion of diversity considered by
the allocator - which tries to maximize diversity. We don't report on whether
the allocator is expected to take any action.
### Data collection
A background process will compute the report once per minute or so (see the
[Detailed design section](#detailed-design)). The "velocity information" in the
report is generated by looking at the raw numbers in the previous version of
the report and computing the delta.
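To make that delta computation concrete, here is a minimal Go sketch; the `snapshot` type and its field names are illustrative stand-ins for the stored report entries, not the actual protos:
```go
package main

import (
	"fmt"
	"time"
)

// snapshot is a simplified stand-in for one stored version of the report's
// per-violation counters.
type snapshot struct {
	generatedAt time.Time
	ranges      int     // ranges currently in violation
	dataSizeMB  float64 // total size of those ranges, in MB
}

// changeRates computes change_rate_ranges_per_sec and change_rate_mb_per_sec
// from two snapshots. Negative rates mean the violation is shrinking.
func changeRates(prev, cur snapshot) (rangesPerSec, mbPerSec float64) {
	dt := cur.generatedAt.Sub(prev.generatedAt).Seconds()
	if dt <= 0 {
		return 0, 0 // no usable previous data; the report would emit NULL
	}
	return float64(cur.ranges-prev.ranges) / dt, (cur.dataSizeMB - prev.dataSizeMB) / dt
}

func main() {
	prev := snapshot{generatedAt: time.Now().Add(-time.Minute), ranges: 20, dataSizeMB: 1280}
	cur := snapshot{generatedAt: time.Now(), ranges: 14, dataSizeMB: 896}
	fmt.Println(changeRates(prev, cur)) // both rates negative: things are improving
}
```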
### AdminUI conformance report
We’ll add a page in the AdminUI showing the contents of the report. I'm
imagining that in the future we can allow one to drill down into the different
zones by expanding a zone into the databases/tables/indexes/partitions
contained in the zone. This is future work though.
The already existing range counters on the Cluster Overview page -
under-replicated/over-replicated/unavailable ranges - will change to be backed
by this implementation. This will be beneficial; the current implementation,
based on different nodes reporting their counts at different time, is
inconsistent and blind to failures of individual nodes to report. We'll also
show an asterisk linking to the conformance report if there are any constraint
violations.
The timeseries for the under-replicated/over-replicated/unavailable ranges will
also be backed by the process generating this report. The range 1 leaseholder
node is the one computing these values and so it will be one writing them to
the timeseries, but we need to do it in such a way as to not double count when
the lease changes hands. I'm not sure how to assure that exactly. Maybe the
node writing the counts will itself write 0 for all the other nodes.
## New jobs
We’re also going to have some data-moving operations roughly represented as
jobs: repartitioning and altering zone config properties. The idea here is to be
able to track the progress of an operation that was explicitly triggered through
a SQL command as opposed to observing the state of the cluster as a whole.
For example, when a table is repartitioned, we’ll look at how much data of that
table is now placed incorrectly (i.e. it violates a constraint) and we’ll
consider the job complete once all the table’s data is conformant with all the
constraints. Note that the initial partitioning of a table does not create any
data movement (as all the partitions inherit the same zone config) and so we
won’t create a job for it.
Altering a zone config (for a table, partition or index) will similarly cause a
job to be created. The job will be considered completed once all the ranges
corresponding to the table/index/partition are in conformance with all the
constraints. That is, the first time when all ranges in the affected zone are
found to be in compliance, we’ll declare success.
Note that the association of range movement with a job, and thus the computation
of the job’s progress, is fairly loose. The exact replication effects being
enacted by a particular partition or zone change will be immaterial to the
progress status of the respective job; the only thing will be considered for
computing the progress is the conformance of the ranges affected by the change
with all the constraints (not just the constraints being modified) - including
pre-existing constraints for the respective zone and constraints inherited from
parent zones. Doing something more exact seems hard. However this can lead to
funny effects like a progress going backwards if the conformance worsens (i.e.
number of non-conformant ranges increases for whatever reason) while a job is
“running”.
The updating of the progress report of these jobs will be done through the
periodic background process that also updates the cluster conformance report.
The completion percentage is based on how many ranges were found to be non-conformant
with the change the job is tracking when the job is created.
The jobs are not cancelable by users. However a job is considered completed if
the particular partitioning or zone config alter it's tracking is superseded by
another operation (i.e. if the partitioning is changed again in case of
partitioning jobs or if the same zone config is changed again). Removing a
table partitioning or a zone config similarly marks all ongoing jobs tracking
the respective object as complete and creates a new job (tracking the table or
the parent zone config, respectively).
## Detailed design
The data powering the virtual tables and the jobs progress will be produced by
a process running on the leaseholder for range 1. Every minute, this process
will scan all of the meta2 range descriptors together with all the zone configs
(using a consistent scan slightly in the past), as well as collect lease
information (see below). For each descriptor, it’ll use logic factored out of
the allocator for deciding what constraints the range is violating (if any).
For each constraint, it’ll aggregate the (sum of sizes of) ranges violating
that constraint. Same for lease preference and replication factor. Ranges with
no active lease are considered to be conformant with any lease preference.
There will also be a way to generate the data on demand.
Critical localities are determined as follows: for each range we consider every
locality that contains a replica (so, every prefix of the --locality for all
the stores involved plus the node id). If that locality contains half or more
of the replicas, it is considered critical. A count of critical ranges /
locality is maintained.
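As an illustration of that check, a rough Go sketch follows; the replica and locality representation is simplified and the names are made up for the example, not taken from the allocator code:
```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// criticalLocalities returns, for a single range, every locality (each prefix
// of a store's --locality, with the node id appended as a final tier) that
// holds half or more of the range's replicas.
func criticalLocalities(replicaLocalities [][]string) []string {
	counts := make(map[string]int)
	for _, tiers := range replicaLocalities {
		// Count every prefix: c=us, then c=us,r=east, then c=us,r=east,n=1.
		for i := 1; i <= len(tiers); i++ {
			counts[strings.Join(tiers[:i], ",")]++
		}
	}
	var critical []string
	for loc, n := range counts {
		if 2*n >= len(replicaLocalities) { // half or more of the replicas
			critical = append(critical, loc)
		}
	}
	sort.Strings(critical)
	return critical
}

func main() {
	// A 3-replica range: two replicas in us-east, one in eu-west.
	fmt.Println(criticalLocalities([][]string{
		{"c=us", "r=east", "n=1"},
		{"c=us", "r=east", "n=2"},
		{"c=eu", "r=west", "n=3"},
	}))
	// Prints [c=us c=us,r=east]: losing either locality loses quorum.
}
```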
Since we're scanning meta2, the respective node has an opportunity to update
its range descriptor cache. Which suggests that perhaps all nodes should do
this.
The resulting report will be saved in storage under a regular (versioned) key.
We'll store it in proto form (as opposed to the JSON in which it is returned by
the SQL function). The velocity information is not stored; it is computed on
demand by reading past versions of the report, finding the one more than a
minute away (so, not necessarily the most recent if that one is very recent)
and computing the delta.
### Collecting lease and size information
The current leaseholder is not reflected in the range descriptor (i.e. in
meta2). So, in order to report on leaseholder preference conformance, the node
generating the report needs to collect this information in another way.
Similarly for range size info.
We'll add a new RPC - `Internal.RangesInfo` - asking a node to return all the leases for its replicas
(including info on ranges with no lease). The node generating the report will
ask all other nodes for information on the leases and sizes of all its replicas.
The report generator will join that information with meta2.
This will be a streaming RPC returning information in range key order, so that
the aggregator node doesn't have to hold too much range information in memory
at a time - instead the aggregator can do paginated (i.e. limit) scans over the
meta2 index and stream info from a merger of all the `RangeInfo` responses.
Since lease information is gathered distinctly from range information, the two
might not be consistent: ranges could have appeared and disappeared in between.
To keep it simple, we'll consider the meta2 the source of truth. Among the
information we get for a range, the most recent lease is considered. For ranges
for which the info we get from all the replicas disagrees with meta2, we'll
consider the range to be without a lease and have size 0.
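A small Go sketch of that reconciliation rule; the types, the generation field, and the use of a lease sequence number as "most recent" are simplifications for illustration, not the real descriptor and lease protos:
```go
package main

import "fmt"

// replicaReport is a simplified stand-in for one replica's answer in a
// RangesInfoResponse for a given range.
type replicaReport struct {
	descriptorGen int // generation of the descriptor the replica knows about
	leaseSeq      int // 0 means "no lease known"
	sizeMB        float64
}

// reconcile merges the per-replica reports for one range with its meta2
// descriptor (identified here by its generation). Reports that disagree with
// meta2 are ignored; if none agree, the range is treated as having no lease
// and size 0.
func reconcile(meta2Gen int, reports []replicaReport) (leaseSeq int, sizeMB float64) {
	for _, r := range reports {
		if r.descriptorGen != meta2Gen {
			continue // stale or divergent view of the range
		}
		if r.leaseSeq > leaseSeq {
			leaseSeq = r.leaseSeq // keep the most recent lease seen
		}
		sizeMB = r.sizeMB
	}
	return leaseSeq, sizeMB
}

func main() {
	fmt.Println(reconcile(7, []replicaReport{
		{descriptorGen: 6, leaseSeq: 3, sizeMB: 64}, // stale view, ignored
		{descriptorGen: 7, leaseSeq: 4, sizeMB: 60},
		{descriptorGen: 7, leaseSeq: 5, sizeMB: 61},
	})) // 5 61
}
```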
The implementation of the view `crdb_internal.ranges` will also change to take
advantage of this new RPC. Currently, it's a view on top of the virtual table
`crdb_internal.ranges_no_leases` executing an
`crdb_internal.lease_holder(start_key)` (i.e. a `LeaseInfo` request) for every
range. That's too expensive. Once we moved to the new mechanism, we can also
deprecate `crdb_internal.ranges_no_leases`.
As an incidental benefit of collecting lease information this way, the node
collecting all this information can update its leaseholder cache. Which
suggests that perhaps all nodes should exchange this info with each other.
When generating the report, RPCs to all the nodes will be issued in parallel.
Timeouts will be used. The fact that info on each range is reported by all its
replicas makes us able to tolerate some RPC failures.
Service definition:
```
service Internal {
rpc RangesInfo(empty) returns (RangesInfoResponse) {}
}
message RangesInfoResponse {
repeated message ranges {
range_descriptor;
lease;
float size_mb;
}
}
```
### Notes
The notion of a node being alive or dead will be more nuanced in this reporting
code than it is in the allocator. The allocator only concerns itself with nodes
that have been dead for 5 minutes (`cluster setting server.time_until_store_dead`)
- that’s when it starts moving replicas away. Nodes that died more recently are
as good as live ones. But not so for this report; we can’t claim that everything
is fine for 5 minutes after a node’s death. For the purposes of reporting
under-replication, constraint conformance (and implicitly lease preference
conformance), a node will be considered dead a few seconds after it failed to
ping its liveness record. Replicas on dead nodes are discarded; expired leases
by themselves don't trigger any violation (as specified elsewhere).
When sub-zones are involved and constraints are inherited from parent to child,
violations of inherited constraints will be counted towards the parent zone, not
the child zone.
## Rationale and alternatives
For the presentation of the report, a natural alternative is to present it
through system table(s) or virtual table(s). That would be a more usual way of
presenting it to SQL clients than a function returning a JSON record, and it
would probably allow easier filtering. There's also not much precedent in CRDB
for returning JSON from system SQL interfaces. But unfortunately a tabular
interface does not seem to be very convenient for this case: how do you expose
the timestamp when the report is taken? How do you expose the versions of the
zone configs used? Even if you find a way to expose them, how does one query
across them consistently? There are obviously different options available, but
none of them look very clean. So I think the time is ripe to create this JSON
precedent. The computation of the "velocity" fields is also simpler with the
report being one big struct. Otherwise it wasn't entirely clear how one would
store and access the historical information required.
Note that CRDB already has some JSON processing capabilities built-in, so it is
feasible to retrieve only parts of the report (between the
`jsonb_array_elements` and the JSON path operators).
Another way to get this report would be akin to what we do now for counting
unavailable ranges: have each node report the counts for the ranges that it's
responsible for (i.e. it is the leaseholder or, if there's no lease, it's the
smallest node among the replicas). That would save the need to read meta2; each
node can use the in-memory replicas structure. The downside is that dealing with
failures of a node to report is tricky and also, since each node would report at
a different time, the view across sub-reports would be inconsistent, possibly
leading to nonsensical counts.
An alternative for collecting the lease information through an RPC is to have
nodes gossip their info. This would have the advantage that all nodes can keep
their cache up to date. But the quantities of data involved might be too
large. Or yet another option is to have all the nodes periodically write
all their lease info into the database, and have the report generator node read
it from there instead of requesting it through RPCs. This sounds better for
scalability (to higher number of nodes) but also sounds more difficult to
implement. We'll start with the RPCs and keep this for later.
For reporting on diversity, instead of (or perhaps in addition to) the critical
localities proposed above, another idea is to report, on a per-"locality-level"
basis, the amount of data that would lose quorum if different numbers of
instances of that level were to become unavailable. For example, in a two-level
hierarchy of localities (say, country+region) plus an implicit third level
which is the node, we'd report something like: if one country is lost, this
much data is lost. If 2 countries are lost - this much. Similar for one region,
two regions, ... n regions. Similar for up to n nodes.
Comparing with the critical regions proposal as alternatives (although they
don't necessarily need to be either or), the advantages would be:
- you get some information on the criticality of combinations of localities.
Namely, you get info an combinations of values on the same level in the
hierarchy. You don't, however get info on combinations across levels: e.g. you
still can't tell if you can survive the failure of any one dc + any other one
node.
And the disadvantages:
- no locality names are presented. You can see that there is at least one
country that's critical, or perhaps (at least one) a pair of countries, but
you can't tell which one.
- with locality hierarchies, it's unclear if lower level should be counted
across higher levels. For example, if there's region and zone, and in the us
region there are no critical zones, but in the eu region there are some,
should we just say "there are critical zones", or should we say "there are
critical zones in eu"?
- it's also unclear how exactly to deal with jagged localities - where the keys
of the levels don't match between nodes. And the even more pathological case
where even the same locality key appears on different levels (one node has
`country=us, dc=dc1`, another one has just `dc=dc2`). One proposal is to not
report anything in such cases, but that has downsides - adding a node with no
`--locality` would make the whole report disappear.
## Out of scope
1. More user actions could create the types of rebalancing-related jobs that
we've discussed here: for example adding nodes in a new locality which would
trigger rebalancing for the purposes of diversity. That's left for the future.
2. More granular reporting at the level of a database/table/index/partition
instead of simply at the zone level.
3. Timeseries of rebalancing progress per constraint violation. This RFC
proposes reporting one number per constraint violation - the average rate of
improvement over the past minute, but no way to get historical information
about these rates. Part of the difficulty in actually recording timeseries
is that it's unclear how our timeseries platform would work for a dynamic
set of timeseries (i.e. constraint violations come and go).
## Unresolved questions
1. Should the SQL statements that now create jobs also become synchronous (i.e.
only return to the client once the corresponding job is done)? A la schema
changes. If not, should they return the id of the job they've created?
| docs/RFCS/20190619_constraint_conformance_report.md | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017576820391695946,
0.00016959357890300453,
0.00016434740973636508,
0.00016899607726372778,
0.0000027052587938669603
] |
{
"id": 1,
"code_window": [
"\tdefer cleanup()\n",
"\n",
"\t_, _, _, err = reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n",
"\tif !testutils.IsError(err, \"came too late\") {\n",
"\t\tt.Fatalf(\"expected %q, got: %v\", \"came too late\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t_, _, _, err := reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 255
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.012781479395925999,
0.0028389114886522293,
0.0001661214482737705,
0.0002979482233058661,
0.004266821779310703
] |
{
"id": 1,
"code_window": [
"\tdefer cleanup()\n",
"\n",
"\t_, _, _, err = reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n",
"\tif !testutils.IsError(err, \"came too late\") {\n",
"\t\tt.Fatalf(\"expected %q, got: %v\", \"came too late\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t_, _, _, err := reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 255
} | # Send a basic SELECT with a 0-valued decimal.
send
Parse {"Query": "SELECT $1::decimal"}
Bind {"ParameterFormatCodes": [1], "Parameters": [{"binary":"0000000000000000"}]}
Execute
Sync
----
until
ReadyForQuery
----
{"Type":"ParseComplete"}
{"Type":"BindComplete"}
{"Type":"DataRow","Values":[{"text":"0"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"I"}
# Regression for #38139.
# Use a decimal whose Ndigits are 8192 (math.MaxInt16/4+1). That
# value will overflow an int16 to negative when multiplied by 4
# (pgwirebase.PGDecDigits). See https://play.golang.org/p/ULMTLNzhpA0
# for how this string was generated.
send
Bind {"ParameterFormatCodes": [1], "Parameters": [{"binary":"20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}]}
Execute
Sync
----
until
ReadyForQuery
----
{"Type":"BindComplete"}
{"Type":"DataRow","Values":[{"text":"0"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"I"}
# [0, 1, 0, 1, 0, 0, 0, 0, 0, 1] = binary 1E+4
# [0, 1, 0, 1, 0, 0, 0, 0, 0, 10] = binary 10E+4
send
Parse {"Name": "s1", "Query": "SELECT 10000::decimal, $1::decimal, $2::decimal"}
Bind {"DestinationPortal": "p1", "PreparedStatement": "s1", "ParameterFormatCodes": [1], "Parameters": [{"binary":"00010001000000000001"}, {"binary":"0001000100000000000a"}]}
Execute {"Portal": "p1"}
Sync
----
# CockroachDB intentionally uses exponents for decimals like 1E+4, as
# opposed to Postgres, which returns 10000.
until crdb_only
ReadyForQuery
----
{"Type":"ParseComplete"}
{"Type":"BindComplete"}
{"Type":"DataRow","Values":[{"text":"10000"},{"text":"1E+4"},{"text":"1.0E+5"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"I"}
until noncrdb_only
ReadyForQuery
----
{"Type":"ParseComplete"}
{"Type":"BindComplete"}
{"Type":"DataRow","Values":[{"text":"10000"},{"text":"10000"},{"text":"100000"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"I"}
# ResultFormatCodes [1] = FormatBinary
# [0, 1, 0, 1, 0, 0, 0, 0, 0, 1] = binary 1E+4
# [0, 1, 0, 1, 0, 0, 0, 0, 0, 10] = binary 10E+4
send
Parse {"Name": "s2", "Query": "SELECT 10000::decimal, $1::decimal, $2::decimal"}
Bind {"DestinationPortal": "p2", "PreparedStatement": "s2", "ParameterFormatCodes": [1], "Parameters": [{"binary":"00010001000000000001"}, {"binary":"0001000100000000000a"}], "ResultFormatCodes": [1,1, 1]}
Execute {"Portal": "p2"}
Sync
----
until
ReadyForQuery
----
{"Type":"ParseComplete"}
{"Type":"BindComplete"}
{"Type":"DataRow","Values":[{"binary":"00010001000000000001"},{"binary":"00010001000000000001"},{"binary":"0001000100000000000a"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"I"}
| pkg/sql/pgwire/testdata/pgtest/decimal | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.014181056059896946,
0.0017291407566517591,
0.00016822120232973248,
0.0001727501949062571,
0.0044024172239005566
] |
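The Bind parameters in the decimal test above use Postgres's binary NUMERIC wire format: four big-endian int16 header fields (ndigits, weight, sign, dscale) followed by ndigits base-10000 digits. The following standalone Go sketch decodes the two hex strings used above; it is illustrative only, and the decodeNumeric helper is not part of the test harness.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// decodeNumeric prints the header fields and base-10000 digits of a binary
// NUMERIC datum such as the Bind parameters above.
func decodeNumeric(hexStr string) {
	b, err := hex.DecodeString(hexStr)
	if err != nil {
		panic(err)
	}
	ndigits := int(binary.BigEndian.Uint16(b[0:2]))  // number of base-10000 digits
	weight := int16(binary.BigEndian.Uint16(b[2:4])) // weight of the first digit
	sign := binary.BigEndian.Uint16(b[4:6])          // 0x0000 = positive, 0x4000 = negative
	dscale := binary.BigEndian.Uint16(b[6:8])        // display scale
	fmt.Printf("ndigits=%d weight=%d sign=%#04x dscale=%d digits=", ndigits, weight, sign, dscale)
	for i := 0; i < ndigits; i++ {
		fmt.Printf("%d ", binary.BigEndian.Uint16(b[8+2*i:10+2*i]))
	}
	fmt.Println()
}

func main() {
	decodeNumeric("00010001000000000001") // one digit (1) at weight 1 -> 1 * 10000^1 = 1E+4
	decodeNumeric("0001000100000000000a") // one digit (10) at weight 1 -> 10 * 10000^1 = 1.0E+5
}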
{
"id": 1,
"code_window": [
"\tdefer cleanup()\n",
"\n",
"\t_, _, _, err = reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n",
"\tif !testutils.IsError(err, \"came too late\") {\n",
"\t\tt.Fatalf(\"expected %q, got: %v\", \"came too late\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t_, _, _, err := reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 255
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package fuzzystrmatch
import (
"strings"
"unicode"
)
// The soundex code consists of four characters.
const soundexLen = 4
// ABCDEFGHIJKLMNOPQRSTUVWXYZ
const soundexTable = "01230120022455012623010202"
func isAlpha(r rune) bool {
return (r >= 'a' && r <= 'z') ||
(r >= 'A' && r <= 'Z')
}
func soundexCode(r rune) byte {
letter := byte(unicode.ToUpper(r))
if letter >= 'A' && letter <= 'Z' {
return soundexTable[int(letter-'A')]
}
return 0x0
}
func soundex(source string) string {
// Skip leading non-alphabetic characters
source = strings.TrimLeftFunc(source, func(r rune) bool {
return !isAlpha(r)
})
code := make([]byte, soundexLen)
// No string left
if len(source) == 0 {
return string(code)
}
runes := []rune(source)
if unicode.IsUpper(runes[0]) || unicode.IsLower(runes[0]) {
// Convert the first character to upper case.
code[0] = byte(unicode.ToUpper(runes[0]))
}
j := 1
for i := 1; i < len(runes) && j < soundexLen; i++ {
if !isAlpha(runes[i]) {
continue
}
if soundexCode(runes[i]) != soundexCode(runes[i-1]) {
c := soundexCode(runes[i])
if c != '0' {
code[j] = c
j++
}
}
}
// Fill with 0's at the end
for j < soundexLen {
code[j] = '0'
j++
}
return string(code)
}
// Soundex converts source to its Soundex code.
func Soundex(source string) string {
code := soundex(source)
resCode := make([]byte, 0)
for _, b := range []byte(code) {
if b != 0x0 {
resCode = append(resCode, b)
}
}
return string(resCode)
}
// Difference converts source and target to their Soundex codes
// and then reports the number of matching code positions.
func Difference(source, target string) int {
sourceCode := soundex(source)
targetCode := soundex(target)
diff := 0
for i := 0; i < soundexLen; i++ {
if sourceCode[i] == targetCode[i] {
diff++
}
}
return diff
}
| pkg/util/fuzzystrmatch/soundex.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0002854735648725182,
0.00018538115546107292,
0.0001673306105658412,
0.00017514120554551482,
0.0000335764852934517
] |
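A usage sketch for the fuzzystrmatch helpers above. The expected outputs follow the classic Soundex coding (both names map to R163) and are assumptions for illustration, not values copied from the repository's tests.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/fuzzystrmatch"
)

func main() {
	// Both names collapse to the same four-character code.
	fmt.Println(fuzzystrmatch.Soundex("Robert")) // R163
	fmt.Println(fuzzystrmatch.Soundex("Rupert")) // R163
	// Difference counts how many of the four code positions match.
	fmt.Println(fuzzystrmatch.Difference("Robert", "Rupert")) // 4
}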
{
"id": 1,
"code_window": [
"\tdefer cleanup()\n",
"\n",
"\t_, _, _, err = reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n",
"\tif !testutils.IsError(err, \"came too late\") {\n",
"\t\tt.Fatalf(\"expected %q, got: %v\", \"came too late\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t_, _, _, err := reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 255
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"bytes"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/cockroachdb/cockroach/pkg/sql/sem/builtins"
"github.com/cockroachdb/cockroach/pkg/sql/sem/builtins/builtinsregistry"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
"github.com/golang-commonmark/markdown"
"github.com/spf13/cobra"
)
func init() {
cmds = append(cmds, &cobra.Command{
Use: "functions <output-dir>",
Short: "generate markdown documentation of functions and operators",
RunE: func(cmd *cobra.Command, args []string) error {
outDir := filepath.Join("docs", "generated", "sql")
if len(args) > 0 {
outDir = args[0]
}
if stat, err := os.Stat(outDir); err != nil {
return err
} else if !stat.IsDir() {
return errors.Errorf("%q is not a directory", outDir)
}
if err := os.WriteFile(
filepath.Join(outDir, "functions.md"), generateFunctions(builtins.AllBuiltinNames(), true), 0644,
); err != nil {
return err
}
if err := os.WriteFile(
filepath.Join(outDir, "aggregates.md"), generateFunctions(builtins.AllAggregateBuiltinNames(), false), 0644,
); err != nil {
return err
}
if err := os.WriteFile(
filepath.Join(outDir, "window_functions.md"), generateFunctions(builtins.AllWindowBuiltinNames(), false), 0644,
); err != nil {
return err
}
return os.WriteFile(
filepath.Join(outDir, "operators.md"), generateOperators(), 0644,
)
},
})
}
type operation struct {
left string
right string
ret string
op string
}
func (o operation) String() string {
if o.right == "" {
return fmt.Sprintf("<code>%s</code>%s", o.op, linkTypeName(o.left))
}
return fmt.Sprintf("%s <code>%s</code> %s", linkTypeName(o.left), o.op, linkTypeName(o.right))
}
type operations []operation
func (p operations) Len() int { return len(p) }
func (p operations) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p operations) Less(i, j int) bool {
if p[i].right != "" && p[j].right == "" {
return false
}
if p[i].right == "" && p[j].right != "" {
return true
}
if p[i].left != p[j].left {
return p[i].left < p[j].left
}
if p[i].right != p[j].right {
return p[i].right < p[j].right
}
return p[i].ret < p[j].ret
}
func generateOperators() []byte {
ops := make(map[string]operations)
for optyp, overloads := range tree.UnaryOps {
op := optyp.String()
_ = overloads.ForEachUnaryOp(func(v *tree.UnaryOp) error {
ops[op] = append(ops[op], operation{
left: v.Typ.String(),
ret: v.ReturnType.String(),
op: op,
})
return nil
})
}
for optyp, overloads := range tree.BinOps {
op := optyp.String()
_ = overloads.ForEachBinOp(func(v *tree.BinOp) error {
left := v.LeftType.String()
right := v.RightType.String()
ops[op] = append(ops[op], operation{
left: left,
right: right,
ret: v.ReturnType.String(),
op: op,
})
return nil
})
}
for optyp, overloads := range tree.CmpOps {
op := optyp.String()
_ = overloads.ForEachCmpOp(func(v *tree.CmpOp) error {
left := v.LeftType.String()
right := v.RightType.String()
ops[op] = append(ops[op], operation{
left: left,
right: right,
ret: "bool",
op: op,
})
return nil
})
}
var opstrs []string
for k, v := range ops {
sort.Sort(v)
opstrs = append(opstrs, k)
}
sort.Strings(opstrs)
b := new(bytes.Buffer)
seen := map[string]bool{}
for _, op := range opstrs {
fmt.Fprintf(b, "<table><thead>\n")
fmt.Fprintf(b, "<tr><td><code>%s</code></td><td>Return</td></tr>\n", op)
fmt.Fprintf(b, "</thead><tbody>\n")
for _, v := range ops[op] {
s := fmt.Sprintf("<tr><td>%s</td><td>%s</td></tr>\n", v.String(), linkTypeName(v.ret))
if seen[s] {
continue
}
seen[s] = true
b.WriteString(s)
}
fmt.Fprintf(b, "</tbody></table>")
fmt.Fprintln(b)
}
return b.Bytes()
}
// TODO(mjibson): use the exported value from sql/parser/pg_builtins.go.
const notUsableInfo = "Not usable; exposed only for compatibility with PostgreSQL."
func generateFunctions(from []string, categorize bool) []byte {
functions := make(map[string][]string)
seen := make(map[string]struct{})
md := markdown.New(markdown.XHTMLOutput(true), markdown.Nofollow(true))
for _, name := range from {
// NB: funcs can appear more than once i.e. upper/lowercase variants for
// faster lookups, so normalize to lowercase and de-dupe using a set.
name = strings.ToLower(name)
if _, ok := seen[name]; ok {
continue
}
seen[name] = struct{}{}
props, fns := builtinsregistry.GetBuiltinProperties(name)
if !props.ShouldDocument() {
continue
}
for _, fn := range fns {
if fn.Info == notUsableInfo {
continue
}
// We generate docs for both aggregates and window functions in separate
// files, so we want to omit them when processing all builtins.
if categorize && (props.Class == tree.AggregateClass || props.Class == tree.WindowClass) {
continue
}
args := fn.Types.String()
retType := fn.InferReturnTypeFromInputArgTypes(fn.Types.Types())
ret := retType.String()
cat := props.Category
if cat == "" {
cat = strings.ToUpper(ret)
}
if !categorize {
cat = ""
}
extra := ""
if fn.Info != "" {
// Render the info field to HTML upfront, because Markdown
// won't do it automatically in a table context.
// Boo Markdown, bad Markdown.
// TODO(knz): Do not use Markdown.
info := md.RenderToString([]byte(fn.Info))
extra = fmt.Sprintf("<span class=\"funcdesc\">%s</span>", info)
}
s := fmt.Sprintf("<tr><td><a name=\"%s\"></a><code>%s(%s) → %s</code></td><td>%s</td><td>%s</td></tr>",
name,
name,
linkArguments(args),
linkArguments(ret),
extra,
fn.Volatility.TitleString(),
)
functions[cat] = append(functions[cat], s)
}
}
var cats []string
for k, v := range functions {
sort.Strings(v)
cats = append(cats, k)
}
sort.Strings(cats)
// HACK: swap "Compatibility" to be last.
	// TODO(dt): Break up the generated list to be one _include per category, to allow
// manually written copy on some sections.
for i, cat := range cats {
if cat == "Compatibility" {
cats = append(append(cats[:i], cats[i+1:]...), "Compatibility")
break
}
}
b := new(bytes.Buffer)
for _, cat := range cats {
if categorize {
fmt.Fprintf(b, "### %s functions\n\n", cat)
}
b.WriteString("<table>\n<thead><tr><th>Function → Returns</th><th>Description</th><th>Volatility</th></tr></thead>\n")
b.WriteString("<tbody>\n")
b.WriteString(strings.Join(functions[cat], "\n"))
b.WriteString("</tbody>\n</table>\n\n")
}
return b.Bytes()
}
var linkRE = regexp.MustCompile(`([a-z]+)([\.\[\]]*)$`)
func linkArguments(t string) string {
sp := strings.Split(t, ", ")
for i, s := range sp {
sp[i] = linkRE.ReplaceAllStringFunc(s, func(s string) string {
match := linkRE.FindStringSubmatch(s)
s = linkTypeName(match[1])
return s + match[2]
})
}
return strings.Join(sp, ", ")
}
func linkTypeName(s string) string {
s = strings.TrimSuffix(s, "{}")
s = strings.TrimSuffix(s, "{*}")
name := s
switch s {
case "timestamptz":
s = "timestamp"
case "collatedstring":
s = "collate"
}
s = strings.TrimSuffix(s, "[]")
s = strings.TrimSuffix(s, "*")
switch s {
case "int", "decimal", "float", "bool", "date", "timestamp", "interval", "string", "bytes",
"inet", "uuid", "collate", "time":
s = fmt.Sprintf("<a href=\"%s.html\">%s</a>", s, name)
}
return s
}
| pkg/cmd/docgen/funcs.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.018548768013715744,
0.001554464572109282,
0.00016459502512589097,
0.00017493903578724712,
0.003998581785708666
] |
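To make the docgen linking logic above concrete, here is a hypothetical snippet, assumed to live in the same package main as the generator, showing how type names are rewritten into documentation links.

// exampleLinks is a hypothetical helper in the same package as the generator
// above; the expected outputs follow from linkRE and the switch in linkTypeName.
func exampleLinks() {
	fmt.Println(linkTypeName("decimal[]"))
	// <a href="decimal.html">decimal[]</a>
	fmt.Println(linkArguments("int, timestamptz"))
	// <a href="int.html">int</a>, <a href="timestamp.html">timestamptz</a>
}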
{
"id": 2,
"code_window": [
"\t\t\tflowID := execinfrapb.FlowID{UUID: uuid.MakeV4()}\n",
"\t\t\tstreamID := execinfrapb.StreamID(1)\n",
"\n",
"\t\t\tserverStream, clientStream, cleanup, err := createDummyStream()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tconnectProducer := func() {\n",
"\t\t\t\t// Simulate a producer connecting to the server. This should be called\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tserverStream, clientStream, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 297
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9967218041419983,
0.16610760986804962,
0.00017844204558059573,
0.001814328134059906,
0.3435305655002594
] |
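The hunks above all stem from one refactor: createDummyStream stops returning an error, takes the test handle instead, and utils_test.go gains io and testing imports. Below is a sketch of what the updated helper plausibly looks like; the body mirrors the version shown above with error returns replaced by t.Fatal, and the actual commit may differ in details such as how the new io import is used.

func createDummyStream(
	t *testing.T,
) (
	serverStream execinfrapb.DistSQL_FlowStreamServer,
	clientStream execinfrapb.DistSQL_FlowStreamClient,
	cleanup func(),
) {
	stopper := stop.NewStopper()
	ctx := context.Background()
	clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
	storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(
		ctx, clock, stopper, execinfra.StaticSQLInstanceID,
	)
	if err != nil {
		t.Fatal(err)
	}
	rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
	conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
		rpc.DefaultClass).Connect(ctx)
	if err != nil {
		t.Fatal(err)
	}
	client := execinfrapb.NewDistSQLClient(conn)
	clientStream, err = client.FlowStream(ctx)
	if err != nil {
		t.Fatal(err)
	}
	streamNotification := <-mockServer.InboundStreams
	serverStream = streamNotification.Stream
	cleanup = func() {
		close(streamNotification.Donec)
		stopper.Stop(ctx)
	}
	return serverStream, clientStream, cleanup
}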
{
"id": 2,
"code_window": [
"\t\t\tflowID := execinfrapb.FlowID{UUID: uuid.MakeV4()}\n",
"\t\t\tstreamID := execinfrapb.StreamID(1)\n",
"\n",
"\t\t\tserverStream, clientStream, cleanup, err := createDummyStream()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tconnectProducer := func() {\n",
"\t\t\t\t// Simulate a producer connecting to the server. This should be called\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tserverStream, clientStream, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 297
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// FirstNodeID is the NodeID assigned to the node bootstrapping a new cluster.
const FirstNodeID = roachpb.NodeID(1)
// FirstStoreID is the StoreID assigned to the first store on the node with ID
// FirstNodeID.
const FirstStoreID = roachpb.StoreID(1)
// InitEngine writes a new store ident to the underlying engine. To
// ensure that no crufty data already exists in the engine, it scans
// the engine contents before writing the new store ident. The engine
// should be completely empty save for a cluster version, which must
// already have been persisted to it. Returns an error if this is not
// the case.
func InitEngine(ctx context.Context, eng storage.Engine, ident roachpb.StoreIdent) error {
exIdent, err := ReadStoreIdent(ctx, eng)
if err == nil {
return errors.Errorf("engine %s is already initialized with ident %s", eng, exIdent.String())
}
if !errors.HasType(err, (*NotBootstrappedError)(nil)) {
return err
}
if err := checkCanInitializeEngine(ctx, eng); err != nil {
return errors.Wrap(err, "while trying to initialize engine")
}
batch := eng.NewBatch()
if err := storage.MVCCPutProto(
ctx,
batch,
nil,
keys.StoreIdentKey(),
hlc.Timestamp{},
hlc.ClockTimestamp{},
nil,
&ident,
); err != nil {
batch.Close()
return err
}
if err := batch.Commit(true /* sync */); err != nil {
return errors.Wrap(err, "persisting engine initialization data")
}
return nil
}
// WriteInitialClusterData writes initialization data to an engine. It creates
// system ranges (filling in meta1 and meta2) and the default zone config.
//
// Args:
// eng: the engine to which data is to be written.
// initialValues: an optional list of k/v to be written as well after each
//   value's checksum is initialized.
// bootstrapVersion: the version at which the cluster is bootstrapped.
// numStores: the number of stores this node will have.
// splits: an optional list of split points. Range addressing will be created
//   for all the splits. The list needs to be sorted.
// nowNanos: the timestamp at which to write the initial engine data.
func WriteInitialClusterData(
ctx context.Context,
eng storage.Engine,
initialValues []roachpb.KeyValue,
bootstrapVersion roachpb.Version,
numStores int,
splits []roachpb.RKey,
nowNanos int64,
knobs StoreTestingKnobs,
) error {
// Bootstrap version information. We'll add the "bootstrap version" to the
// list of initialValues, so that we don't have to handle it specially
// (particularly since we don't want to manually figure out which range it
// falls into).
bootstrapVal := roachpb.Value{}
if err := bootstrapVal.SetProto(&bootstrapVersion); err != nil {
return err
}
initialValues = append(initialValues,
roachpb.KeyValue{Key: keys.BootstrapVersionKey, Value: bootstrapVal})
// Initialize various sequence generators.
var nodeIDVal, storeIDVal, rangeIDVal, livenessVal roachpb.Value
nodeIDVal.SetInt(int64(FirstNodeID))
// The caller will initialize the stores with ids FirstStoreID, ..., FirstStoreID+numStores-1.
storeIDVal.SetInt(int64(FirstStoreID) + int64(numStores) - 1)
// The last range has id = len(splits) + 1
rangeIDVal.SetInt(int64(len(splits) + 1))
// We're the first node in the cluster, let's seed our liveness record.
// It's crucial that we do to maintain the invariant that there's always a
// liveness record for a given node. We'll do something similar through the
// join RPC when adding new nodes to an already bootstrapped cluster [1].
//
// We start off at epoch=0; when nodes heartbeat their liveness records for
// the first time it'll get incremented to epoch=1 [2].
//
// [1]: See `(*NodeLiveness).CreateLivenessRecord` and usages for where that happens.
// [2]: See `(*NodeLiveness).Start` for where that happens.
livenessRecord := livenesspb.Liveness{NodeID: FirstNodeID, Epoch: 0}
if err := livenessVal.SetProto(&livenessRecord); err != nil {
return err
}
initialValues = append(initialValues,
roachpb.KeyValue{Key: keys.NodeIDGenerator, Value: nodeIDVal},
roachpb.KeyValue{Key: keys.StoreIDGenerator, Value: storeIDVal},
roachpb.KeyValue{Key: keys.RangeIDGenerator, Value: rangeIDVal},
roachpb.KeyValue{Key: keys.NodeLivenessKey(FirstNodeID), Value: livenessVal})
// firstRangeMS is going to accumulate the stats for the first range, as we
// write the meta records for all the other ranges.
firstRangeMS := &enginepb.MVCCStats{}
// filter initial values for a given descriptor, returning only the ones that
// pertain to the respective range.
filterInitialValues := func(desc *roachpb.RangeDescriptor) []roachpb.KeyValue {
var r []roachpb.KeyValue
for _, kv := range initialValues {
if desc.ContainsKey(roachpb.RKey(kv.Key)) {
r = append(r, kv)
}
}
return r
}
initialReplicaVersion := bootstrapVersion
if knobs.InitialReplicaVersionOverride != nil {
initialReplicaVersion = *knobs.InitialReplicaVersionOverride
}
// We iterate through the ranges backwards, since they all need to contribute
// to the stats of the first range (i.e. because they all write meta2 records
// in the first range), and so we want to create the first range last so that
// the stats we compute for it are correct.
startKey := roachpb.RKeyMax
for i := len(splits) - 1; i >= -1; i-- {
endKey := startKey
rangeID := roachpb.RangeID(i + 2) // RangeIDs are 1-based.
if i >= 0 {
startKey = splits[i]
} else {
startKey = roachpb.RKeyMin
}
desc := &roachpb.RangeDescriptor{
RangeID: rangeID,
StartKey: startKey,
EndKey: endKey,
NextReplicaID: 2,
}
const firstReplicaID = 1
replicas := []roachpb.ReplicaDescriptor{
{
NodeID: FirstNodeID,
StoreID: FirstStoreID,
ReplicaID: firstReplicaID,
},
}
desc.SetReplicas(roachpb.MakeReplicaSet(replicas))
if err := desc.Validate(); err != nil {
return err
}
rangeInitialValues := filterInitialValues(desc)
log.VEventf(
ctx, 2, "creating range %d [%s, %s). Initial values: %d",
desc.RangeID, desc.StartKey, desc.EndKey, len(rangeInitialValues))
batch := eng.NewBatch()
defer batch.Close()
now := hlc.Timestamp{
WallTime: nowNanos,
Logical: 0,
}
// NOTE: We don't do stats computations in any of the puts below. Instead,
// we write everything and then compute the stats over the whole range.
// If requested, write an MVCC range tombstone at the bottom of the
// keyspace, for performance and correctness testing.
if knobs.GlobalMVCCRangeTombstone {
if err := writeGlobalMVCCRangeTombstone(ctx, batch, desc, now.Prev()); err != nil {
return err
}
}
// Range descriptor.
if err := storage.MVCCPutProto(
ctx, batch, nil /* ms */, keys.RangeDescriptorKey(desc.StartKey),
now, hlc.ClockTimestamp{}, nil /* txn */, desc,
); err != nil {
return err
}
// Replica GC timestamp.
if err := storage.MVCCPutProto(
ctx, batch, nil /* ms */, keys.RangeLastReplicaGCTimestampKey(desc.RangeID),
hlc.Timestamp{}, hlc.ClockTimestamp{}, nil /* txn */, &now,
); err != nil {
return err
}
// Range addressing for meta2.
meta2Key := keys.RangeMetaKey(endKey)
if err := storage.MVCCPutProto(
ctx, batch, firstRangeMS, meta2Key.AsRawKey(),
now, hlc.ClockTimestamp{}, nil /* txn */, desc,
); err != nil {
return err
}
// The first range gets some special treatment.
if startKey.Equal(roachpb.RKeyMin) {
// Range addressing for meta1.
meta1Key := keys.RangeMetaKey(keys.RangeMetaKey(roachpb.RKeyMax))
if err := storage.MVCCPutProto(
ctx, batch, nil /* ms */, meta1Key.AsRawKey(), now, hlc.ClockTimestamp{}, nil /* txn */, desc,
); err != nil {
return err
}
}
// Now add all passed-in default entries.
for _, kv := range rangeInitialValues {
// Initialize the checksums.
kv.Value.InitChecksum(kv.Key)
if err := storage.MVCCPut(
ctx, batch, nil /* ms */, kv.Key, now, hlc.ClockTimestamp{}, kv.Value, nil, /* txn */
); err != nil {
return err
}
}
if err := stateloader.WriteInitialRangeState(
ctx, batch, *desc, firstReplicaID, initialReplicaVersion); err != nil {
return err
}
computedStats, err := rditer.ComputeStatsForRange(desc, batch, now.WallTime)
if err != nil {
return err
}
sl := stateloader.Make(rangeID)
if err := sl.SetMVCCStats(ctx, batch, &computedStats); err != nil {
return err
}
if err := batch.Commit(true /* sync */); err != nil {
return err
}
}
return nil
}
// writeGlobalMVCCRangeTombstone writes an MVCC range tombstone across the
// entire table data keyspace of the range. This is used to test that storage
// operations are correct and performant in the presence of range tombstones. An
// MVCC range tombstone below all other data should in principle not affect
// anything at all.
func writeGlobalMVCCRangeTombstone(
ctx context.Context, w storage.Writer, desc *roachpb.RangeDescriptor, ts hlc.Timestamp,
) error {
rangeKey := storage.MVCCRangeKey{
StartKey: desc.StartKey.AsRawKey(),
EndKey: desc.EndKey.AsRawKey(),
Timestamp: ts,
}
if rangeKey.EndKey.Compare(keys.TableDataMin) <= 0 {
return nil
}
if rangeKey.StartKey.Compare(keys.TableDataMin) < 0 {
rangeKey.StartKey = keys.TableDataMin
}
if err := w.PutMVCCRangeKey(rangeKey, storage.MVCCValue{}); err != nil {
return err
}
log.Warningf(ctx, "wrote global MVCC range tombstone %s", rangeKey)
return nil
}
| pkg/kv/kvserver/store_init.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017844188550952822,
0.00016875886649359018,
0.00016202201368287206,
0.00016921039787121117,
0.0000035229923014412634
] |
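A small illustrative sketch of the range numbering described in WriteInitialClusterData above: N split points produce N+1 ranges, and RangeIDs are 1-based, so the last range gets ID len(splits)+1. The split keys below are arbitrary examples.

package main

import "fmt"

func main() {
	splits := []string{"b", "c"}
	bounds := append(append([]string{"/Min"}, splits...), "/Max")
	for i := 0; i < len(bounds)-1; i++ {
		// RangeIDs are 1-based; the last one is len(splits)+1.
		fmt.Printf("range %d: [%s, %s)\n", i+1, bounds[i], bounds[i+1])
	}
}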
{
"id": 2,
"code_window": [
"\t\t\tflowID := execinfrapb.FlowID{UUID: uuid.MakeV4()}\n",
"\t\t\tstreamID := execinfrapb.StreamID(1)\n",
"\n",
"\t\t\tserverStream, clientStream, cleanup, err := createDummyStream()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tconnectProducer := func() {\n",
"\t\t\t\t// Simulate a producer connecting to the server. This should be called\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tserverStream, clientStream, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 297
} | sort_clause ::=
'ORDER' 'BY' a_expr 'ASC' 'NULLS' 'FIRST' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr 'ASC' 'NULLS' 'LAST' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr 'ASC' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr 'DESC' 'NULLS' 'FIRST' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr 'DESC' 'NULLS' 'LAST' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr 'DESC' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr 'NULLS' 'FIRST' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr 'NULLS' 'LAST' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' a_expr ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' 'PRIMARY' 'KEY' table_name 'ASC' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' 'PRIMARY' 'KEY' table_name 'DESC' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' 'PRIMARY' 'KEY' table_name ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' 'INDEX' table_name '@' index_name 'ASC' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' 'INDEX' table_name '@' index_name 'DESC' ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| 'ORDER' 'BY' 'INDEX' table_name '@' index_name ( ( ',' ( a_expr ( 'ASC' | 'DESC' | ) ( 'NULLS' 'FIRST' | 'NULLS' 'LAST' | ) | 'PRIMARY' 'KEY' table_name ( 'ASC' | 'DESC' | ) | 'INDEX' table_name '@' index_name ( 'ASC' | 'DESC' | ) ) ) )*
| docs/generated/sql/bnf/sort_clause.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0001658449473325163,
0.00016572076128795743,
0.00016559656069148332,
0.00016572076128795743,
1.241933205164969e-7
] |
{
"id": 2,
"code_window": [
"\t\t\tflowID := execinfrapb.FlowID{UUID: uuid.MakeV4()}\n",
"\t\t\tstreamID := execinfrapb.StreamID(1)\n",
"\n",
"\t\t\tserverStream, clientStream, cleanup, err := createDummyStream()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tconnectProducer := func() {\n",
"\t\t\t\t// Simulate a producer connecting to the server. This should be called\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tserverStream, clientStream, cleanup := createDummyStream(t)\n"
],
"file_path": "pkg/sql/flowinfra/flow_registry_test.go",
"type": "replace",
"edit_start_line_idx": 297
} | # LogicTest: 5node-default-configs
statement ok
CREATE TABLE t (k INT, v INT)
statement ok
INSERT INTO t VALUES (1, 10), (2, 20), (3, 30)
statement ok
ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[3], 1)
statement ok
CREATE TABLE xy (x INT PRIMARY KEY, y INT)
statement ok
INSERT INTO xy VALUES (2, 200), (3, 300), (4, 400)
statement ok
ALTER TABLE t SPLIT AT VALUES (3), (4)
statement ok
ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[1], 2), (ARRAY[2], 3), (ARRAY[3], 4)
# Test that LEFT SEMI hash join outputs batches only with the columns from the
# left side.
query II rowsort
SELECT * FROM t WHERE EXISTS(SELECT * FROM xy WHERE x=t.k)
----
2 20
3 30
# Regression test for #39303.
statement ok
CREATE TABLE small (a INT PRIMARY KEY, b INT)
statement ok
CREATE TABLE large (c INT, d INT)
statement ok
INSERT INTO small SELECT x, 3*x FROM
generate_series(1, 10) AS a(x)
statement ok
INSERT INTO large SELECT 2*x, 4*x FROM
generate_series(1, 10) AS a(x)
statement ok
ALTER TABLE small SPLIT AT SELECT a FROM small
statement ok
ALTER TABLE small EXPERIMENTAL_RELOCATE SELECT ARRAY[mod(i, 3) + 1], i FROM generate_series(1, 10) AS g(i)
statement ok
ALTER TABLE large SPLIT AT SELECT 2*i FROM generate_series(1, 10) AS g(i)
statement ok
ALTER TABLE large EXPERIMENTAL_RELOCATE SELECT ARRAY[mod(i, 3) + 1], 2*i FROM generate_series(1, 10) as g(i)
# Test that RIGHT OUTER hash join correctly sets probeRowUnmatched on
# subsequent batches.
query II rowsort
SELECT small.b, large.d FROM large RIGHT HASH JOIN small ON small.b = large.c AND large.d < 30 ORDER BY 1 LIMIT 5
----
3 NULL
6 12
9 NULL
12 24
15 NULL
query T
SELECT feature_name FROM crdb_internal.feature_usage WHERE feature_name='sql.exec.query.is-distributed' AND usage_count > 0
----
sql.exec.query.is-distributed
# Regression test for an error of seeking to /Min key for a Get request issued
# for the reverse scan (#83618). Placement of the ranges here doesn't matter.
statement ok
CREATE TABLE a (id TEXT PRIMARY KEY);
CREATE TABLE b (
id TEXT PRIMARY KEY,
a_id TEXT,
status INT,
INDEX b_a_id (a_id ASC),
INDEX b_status_idx (status ASC)
);
SELECT a.id FROM a
LEFT JOIN b AS b2 ON (a.id = b2.a_id AND b2.status = 2)
WHERE (a.id IN ('3f90e30a-c87a-4017-b9a0-8f964b91c4af', '3adaf3da-0368-461a-8437-ee448724b78d', 'd0c13b06-5368-4522-8126-105b0a9513cd'))
ORDER BY id DESC
LIMIT 2;
# Some tests with OID types in the equality columns.
statement ok
CREATE TABLE t86075 (k INT PRIMARY KEY, c REGPROCEDURE, a REGPROCEDURE[]);
INSERT INTO t86075 VALUES (1, 1, ARRAY[1]), (2, 2, ARRAY[2]), (3, 3, ARRAY[3]);
CREATE TABLE t86075_2 (k INT PRIMARY KEY, c REGPROCEDURE, a REGPROCEDURE[]);
INSERT INTO t86075_2 VALUES (1, 1, ARRAY[1]), (2, 2, ARRAY[2]), (3, 3, ARRAY[3]);
statement ok
ALTER TABLE t86075 SPLIT AT VALUES (2), (3)
statement ok
ALTER TABLE t86075 SCATTER
statement ok
ALTER TABLE t86075_2 SPLIT AT VALUES (2), (3)
statement ok
ALTER TABLE t86075_2 SCATTER
query I rowsort
SELECT t1.k FROM t86075 AS t1, t86075_2 AS t2 WHERE t1.c = t2.c
----
1
2
3
query I rowsort
SELECT t1.k FROM t86075 AS t1, t86075_2 AS t2 WHERE t1.a = t2.a
----
1
2
3
| pkg/sql/logictest/testdata/logic_test/hash_join_dist | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0013405003119260073,
0.0002610842348076403,
0.00016454875003546476,
0.00017068533634301275,
0.00031162117375060916
] |
{
"id": 3,
"code_window": [
"\n",
"package flowinfra\n",
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql/execinfra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"io\"\n",
"\t\"testing\"\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 14
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
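	// Capture the server-side stream from the mock server's inbound-stream
	// notification. The server-side RPC handler stays blocked until the returned
	// cleanup function closes Donec.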
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9918135404586792,
0.14430689811706543,
0.0001639928377699107,
0.0005148999043740332,
0.3460375666618347
] |
{
"id": 3,
"code_window": [
"\n",
"package flowinfra\n",
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql/execinfra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"io\"\n",
"\t\"testing\"\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 14
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package constraint
import (
"strconv"
"strings"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/partition"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
// ParseConstraint parses a constraint in the format of Constraint.String, e.g:
//
// "/1/2/3: [/1 - /2]".
func ParseConstraint(evalCtx *eval.Context, str string) Constraint {
s := strings.SplitN(str, ": ", 2)
if len(s) != 2 {
panic(errors.AssertionFailedf("invalid constraint format: %s", str))
}
var cols []opt.OrderingColumn
for _, v := range parseIntPath(s[0]) {
cols = append(cols, opt.OrderingColumn(v))
}
var c Constraint
c.Columns.Init(cols)
c.Spans = parseSpans(evalCtx, s[1])
return c
}
// parseSpans parses a list of spans with integer values like:
//
// "[/1 - /2] [/5 - /6]".
func parseSpans(evalCtx *eval.Context, str string) Spans {
if str == "" || str == "contradiction" {
return Spans{}
}
if str == "unconstrained" {
s := Spans{}
s.InitSingleSpan(&UnconstrainedSpan)
return s
}
s := strings.Split(str, " ")
// Each span has three pieces.
if len(s)%3 != 0 {
panic(errors.AssertionFailedf("invalid span format: %s", str))
}
var result Spans
for i := 0; i < len(s)/3; i++ {
sp := ParseSpan(evalCtx, strings.Join(s[i*3:i*3+3], " "))
result.Append(&sp)
}
return result
}
// ParseSpan parses a span in the format of Span.String, e.g: [/1 - /2].
// If no types are passed in, the type is inferred as being an int if possible;
// otherwise a string. If any types are specified, they must be specified for
// every datum.
func ParseSpan(evalCtx *eval.Context, str string, typs ...types.Family) Span {
if len(str) < len("[ - ]") {
panic(str)
}
boundary := map[byte]SpanBoundary{
'[': IncludeBoundary,
']': IncludeBoundary,
'(': ExcludeBoundary,
')': ExcludeBoundary,
}
s, e := str[0], str[len(str)-1]
if (s != '[' && s != '(') || (e != ']' && e != ')') {
panic(str)
}
keys := strings.Split(str[1:len(str)-1], " - ")
if len(keys) != 2 {
panic(str)
}
// Retrieve the values of the longest key.
longestKey := tree.ParsePath(keys[0])
endDatums := tree.ParsePath(keys[1])
if len(longestKey) < len(endDatums) {
longestKey = endDatums
}
if len(longestKey) > 0 && len(typs) == 0 {
// Infer the datum types and populate typs accordingly.
typs = tree.InferTypes(longestKey)
}
var sp Span
startVals := partition.ParseDatumPath(evalCtx, keys[0], typs)
endVals := partition.ParseDatumPath(evalCtx, keys[1], typs)
sp.Init(
MakeCompositeKey(startVals...), boundary[s],
MakeCompositeKey(endVals...), boundary[e],
)
return sp
}
// parseIntPath parses a string like "/1/2/3" into a list of integers.
func parseIntPath(str string) []int {
var res []int
for _, valStr := range tree.ParsePath(str) {
val, err := strconv.Atoi(valStr)
if err != nil {
panic(err)
}
res = append(res, val)
}
return res
}
| pkg/sql/opt/constraint/testutils.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.002847256138920784,
0.00052133813733235,
0.00016697647515684366,
0.00017057618242688477,
0.0008147012558765709
] |
{
"id": 3,
"code_window": [
"\n",
"package flowinfra\n",
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql/execinfra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"io\"\n",
"\t\"testing\"\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 14
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { Pick } from "src/util/pick";
import { RouteComponentProps, withRouter } from "react-router-dom";
import { connect } from "react-redux";
import { AdminUIState } from "src/redux/state";
import { LocalSetting } from "src/redux/localsettings";
import { CachedDataReducerState, refreshSessions } from "src/redux/apiReducers";
import { createSelector } from "reselect";
import {
SessionsResponseMessage,
StatementsResponseMessage,
} from "src/util/api";
import {
defaultFilters,
Filters,
SessionsPage,
} from "@cockroachlabs/cluster-ui";
import {
terminateQueryAction,
terminateSessionAction,
} from "src/redux/sessions/sessionsSagas";
type SessionsState = Pick<AdminUIState, "cachedData", "sessions">;
export const selectData = createSelector(
(state: AdminUIState) => state.cachedData.statements,
(state: CachedDataReducerState<StatementsResponseMessage>) => {
if (!state.data || state.inFlight || !state.valid) return null;
return state.data;
},
);
export const selectSessions = createSelector(
(state: SessionsState) => state.cachedData.sessions,
(_state: SessionsState, props: RouteComponentProps) => props,
(
state: CachedDataReducerState<SessionsResponseMessage>,
_: RouteComponentProps<any>,
) => {
if (!state.data) {
return null;
}
return state.data.sessions.map(session => {
return { session };
});
},
);
export const selectAppName = createSelector(
(state: SessionsState) => state.cachedData.sessions,
(_state: SessionsState, props: RouteComponentProps) => props,
(
state: CachedDataReducerState<SessionsResponseMessage>,
_: RouteComponentProps<any>,
) => {
if (!state.data) {
return null;
}
return state.data.internal_app_name_prefix;
},
);
export const sortSettingLocalSetting = new LocalSetting(
"sortSetting/SessionsPage",
(state: AdminUIState) => state.localSettings,
{ ascending: false, columnTitle: "statementAge" },
);
export const sessionColumnsLocalSetting = new LocalSetting(
"showColumns/SessionsPage",
(state: AdminUIState) => state.localSettings,
null,
);
export const filtersLocalSetting = new LocalSetting<AdminUIState, Filters>(
"filters/SessionsPage",
(state: AdminUIState) => state.localSettings,
defaultFilters,
);
const SessionsPageConnected = withRouter(
connect(
(state: AdminUIState, props: RouteComponentProps) => ({
columns: sessionColumnsLocalSetting.selectorToArray(state),
internalAppNamePrefix: selectAppName(state, props),
filters: filtersLocalSetting.selector(state),
sessions: selectSessions(state, props),
sessionsError: state.cachedData.sessions.lastError,
sortSetting: sortSettingLocalSetting.selector(state),
}),
{
refreshSessions,
cancelSession: terminateSessionAction,
cancelQuery: terminateQueryAction,
onSortingChange: (
_tableName: string,
columnName: string,
ascending: boolean,
) =>
sortSettingLocalSetting.set({
ascending: ascending,
columnTitle: columnName,
}),
onColumnsChange: (value: string[]) =>
sessionColumnsLocalSetting.set(
value.length === 0 ? " " : value.join(","),
),
onFilterChange: (filters: Filters) => filtersLocalSetting.set(filters),
},
)(SessionsPage),
);
export default SessionsPageConnected;
| pkg/ui/workspaces/db-console/src/views/sessions/sessionsPage.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0001758173166308552,
0.00017170801584143192,
0.0001674527011346072,
0.00017146326717920601,
0.0000025383303636772325
] |
{
"id": 3,
"code_window": [
"\n",
"package flowinfra\n",
"\n",
"import (\n",
"\t\"context\"\n",
"\t\"time\"\n",
"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/sql/execinfra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"io\"\n",
"\t\"testing\"\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 14
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package xform
import (
"context"
"math"
"math/rand"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/distribution"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/ordering"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/util/intsets"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/redact"
)
// Coster is used by the optimizer to assign a cost to a candidate expression
// that can provide a set of required physical properties. If a candidate
// expression has a lower cost than any other expression in the memo group, then
// it becomes the new best expression for the group.
//
// The set of costing formulas maintained by the coster for the set of all
// operators constitutes the "cost model". A given cost model can be designed to
// maximize any optimization goal, such as:
//
// 1. Max aggregate cluster throughput (txns/sec across cluster)
// 2. Min transaction latency (time to commit txns)
// 3. Min latency to first row (time to get first row of txns)
// 4. Min memory usage
// 5. Some weighted combination of #1 - #4
//
// The cost model in this file targets #1 as the optimization goal. However,
// note that #2 is implicitly important to that goal, since overall cluster
// throughput will suffer if there are lots of pending transactions waiting on
// I/O.
//
// Coster is an interface so that different costing algorithms can be used by
// the optimizer. For example, the OptSteps command uses a custom coster that
// assigns infinite costs to some expressions in order to prevent them from
// being part of the lowest cost tree (for debugging purposes).
type Coster interface {
// ComputeCost returns the estimated cost of executing the candidate
// expression. The optimizer does not expect the cost to correspond to any
// real-world metric, but does expect costs to be comparable to one another,
// as well as summable.
ComputeCost(candidate memo.RelExpr, required *physical.Required) memo.Cost
}
// coster encapsulates the default cost model for the optimizer. The coster
// assigns an estimated cost to each expression in the memo so that the
// optimizer can choose the lowest cost expression tree. The estimated cost is
// a best-effort approximation of the actual cost of execution, based on table
// and index statistics that are propagated throughout the logical expression
// tree.
type coster struct {
ctx context.Context
evalCtx *eval.Context
mem *memo.Memo
// locality gives the location of the current node as a set of user-defined
// key/value pairs, ordered from most inclusive to least inclusive. If there
// are no tiers, then the node's location is not known. Example:
//
// [region=us,dc=east]
//
locality roachpb.Locality
// perturbation indicates how much to randomly perturb the cost. It is used
// to generate alternative plans for testing. For example, if perturbation is
// 0.5, and the estimated cost of an expression is c, the cost returned by
// ComputeCost will be in the range [c - 0.5 * c, c + 0.5 * c).
perturbation float64
// rng is used for deterministic perturbation.
rng *rand.Rand
}
var _ Coster = &coster{}
// MakeDefaultCoster creates an instance of the default coster.
func MakeDefaultCoster(ctx context.Context, evalCtx *eval.Context, mem *memo.Memo) Coster {
return &coster{
ctx: ctx,
evalCtx: evalCtx,
mem: mem,
locality: evalCtx.Locality,
}
}
const (
// These costs have been copied from the Postgres optimizer:
// https://github.com/postgres/postgres/blob/master/src/include/optimizer/cost.h
// TODO(rytaft): "How Good are Query Optimizers, Really?" says that the
// PostgreSQL ratio between CPU and I/O is probably unrealistic in modern
// systems since much of the data can be cached in memory. Consider
// increasing the cpuCostFactor to account for this.
cpuCostFactor = 0.01
seqIOCostFactor = 1
randIOCostFactor = 4
// TODO(justin): make this more sophisticated.
// lookupJoinRetrieveRowCost is the cost to retrieve a single row during a
// lookup join.
// See https://github.com/cockroachdb/cockroach/pull/35561 for the initial
// justification for this constant.
lookupJoinRetrieveRowCost = 2 * seqIOCostFactor
// virtualScanTableDescriptorFetchCost is the cost to retrieve the table
// descriptors when performing a virtual table scan.
virtualScanTableDescriptorFetchCost = 25 * randIOCostFactor
// Input rows to a join are processed in batches of this size.
// See joinreader.go.
joinReaderBatchSize = 100.0
// latencyCostFactor represents the throughput impact of doing scans on an
// index that may be remotely located in a different locality. If latencies
// are higher, then overall cluster throughput will suffer somewhat, as there
// will be more queries in memory blocking on I/O. The impact on throughput
// is expected to be relatively low, so latencyCostFactor is set to a small
// value. However, even a low value will cause the optimizer to prefer
// indexes that are likely to be geographically closer, if they are otherwise
// the same cost to access.
// TODO(andyk): Need to do analysis to figure out right value and/or to come
// up with better way to incorporate latency into the coster.
latencyCostFactor = cpuCostFactor
// hugeCost is used with expressions we want to avoid; these are expressions
// that "violate" a hint like forcing a specific index or join algorithm.
// If the final expression has this cost or larger, it means that there was no
// plan that could satisfy the hints.
hugeCost memo.Cost = 1e100
// fullScanRowCountPenalty adds a penalty to full table scans. This is especially
// useful for empty or very small tables, where we would get plans that are
// surprising to users (like full scans instead of point lookups).
fullScanRowCountPenalty = 10
// unboundedMaxCardinalityScanCostPenalty adds a penalty to scans with
// unbounded maximum cardinality. This helps prevent surprising plans for very
// small tables or for when stats are stale. For full table scans, this
// penalty is added on top of the fullScanRowCountPenalty.
unboundedMaxCardinalityScanCostPenalty = 10
// largeMaxCardinalityScanCostPenalty is the maximum penalty to add to scans
// with a bounded maximum cardinality exceeding the row count estimate. This
// helps prevent surprising plans for very small tables or for when stats are
// stale.
largeMaxCardinalityScanCostPenalty = unboundedMaxCardinalityScanCostPenalty / 2
// LargeDistributeCost is the cost to use for Distribute operations when a
// session mode is set to error out on access of rows from remote regions.
LargeDistributeCost = hugeCost / 100
// preferLookupJoinFactor is a scale factor for the cost of a lookup join when
// we have a hint for preferring a lookup join.
preferLookupJoinFactor = 1e-6
// noSpillRowCount represents the maximum number of rows that should have no
// buffering cost because we expect they will never need to be spilled to
// disk. Since 64MB is the default work mem limit, 64 rows will not cause a
// disk spill unless the rows are at least 1 MB on average.
noSpillRowCount = 64
// spillRowCount represents the minimum number of rows that we expect will
// always need to be spilled to disk. Since 64MB is the default work mem
// limit, 6400000 rows with an average of at least 10 bytes per row will cause
// a disk spill.
spillRowCount = 6400000
// spillCostFactor is the cost of spilling to disk. We use seqIOCostFactor to
// model the cost of spilling to disk, because although there will be some
// random I/O required to insert rows into a sorted structure, the inherent
// batching in the LSM tree should amortize the cost.
spillCostFactor = seqIOCostFactor
)
// fnCost maps some functions to an execution cost. Currently this list
// contains only st_* functions, including some we don't have implemented
// yet. Although function costs differ based on the overload (due to
// arguments), here we are using the minimum from similar functions based on
// postgres' pg_proc table. The following query can be used to generate this table:
//
// SELECT proname, min(procost) FROM pg_proc WHERE proname LIKE 'st\_%' AND procost > 1 GROUP BY proname ORDER BY proname
//
// TODO(mjibson): Add costs directly to overloads. When that is done, we should
// also add a test that ensures those costs match postgres.
var fnCost = map[string]memo.Cost{
"st_3dclosestpoint": 1000 * cpuCostFactor,
"st_3ddfullywithin": 10000 * cpuCostFactor,
"st_3ddistance": 1000 * cpuCostFactor,
"st_3ddwithin": 10000 * cpuCostFactor,
"st_3dintersects": 10000 * cpuCostFactor,
"st_3dlength": 100 * cpuCostFactor,
"st_3dlongestline": 1000 * cpuCostFactor,
"st_3dmakebox": 100 * cpuCostFactor,
"st_3dmaxdistance": 1000 * cpuCostFactor,
"st_3dperimeter": 100 * cpuCostFactor,
"st_3dshortestline": 1000 * cpuCostFactor,
"st_addmeasure": 1000 * cpuCostFactor,
"st_addpoint": 100 * cpuCostFactor,
"st_affine": 100 * cpuCostFactor,
"st_angle": 100 * cpuCostFactor,
"st_area": 100 * cpuCostFactor,
"st_area2d": 100 * cpuCostFactor,
"st_asbinary": 100 * cpuCostFactor,
"st_asencodedpolyline": 100 * cpuCostFactor,
"st_asewkb": 100 * cpuCostFactor,
"st_asewkt": 100 * cpuCostFactor,
"st_asgeojson": 100 * cpuCostFactor,
"st_asgml": 100 * cpuCostFactor,
"st_ashexewkb": 100 * cpuCostFactor,
"st_askml": 100 * cpuCostFactor,
"st_aslatlontext": 100 * cpuCostFactor,
"st_assvg": 100 * cpuCostFactor,
"st_astext": 100 * cpuCostFactor,
"st_astwkb": 1000 * cpuCostFactor,
"st_asx3d": 100 * cpuCostFactor,
"st_azimuth": 100 * cpuCostFactor,
"st_bdmpolyfromtext": 100 * cpuCostFactor,
"st_bdpolyfromtext": 100 * cpuCostFactor,
"st_boundary": 1000 * cpuCostFactor,
"st_boundingdiagonal": 100 * cpuCostFactor,
"st_box2dfromgeohash": 1000 * cpuCostFactor,
"st_buffer": 100 * cpuCostFactor,
"st_buildarea": 10000 * cpuCostFactor,
"st_centroid": 100 * cpuCostFactor,
"st_chaikinsmoothing": 10000 * cpuCostFactor,
"st_cleangeometry": 10000 * cpuCostFactor,
"st_clipbybox2d": 10000 * cpuCostFactor,
"st_closestpoint": 1000 * cpuCostFactor,
"st_closestpointofapproach": 10000 * cpuCostFactor,
"st_clusterdbscan": 10000 * cpuCostFactor,
"st_clusterintersecting": 10000 * cpuCostFactor,
"st_clusterkmeans": 10000 * cpuCostFactor,
"st_clusterwithin": 10000 * cpuCostFactor,
"st_collectionextract": 100 * cpuCostFactor,
"st_collectionhomogenize": 100 * cpuCostFactor,
"st_concavehull": 10000 * cpuCostFactor,
"st_contains": 10000 * cpuCostFactor,
"st_containsproperly": 10000 * cpuCostFactor,
"st_convexhull": 10000 * cpuCostFactor,
"st_coorddim": 100 * cpuCostFactor,
"st_coveredby": 100 * cpuCostFactor,
"st_covers": 100 * cpuCostFactor,
"st_cpawithin": 10000 * cpuCostFactor,
"st_createtopogeo": 100 * cpuCostFactor,
"st_crosses": 10000 * cpuCostFactor,
"st_curvetoline": 10000 * cpuCostFactor,
"st_delaunaytriangles": 10000 * cpuCostFactor,
"st_dfullywithin": 10000 * cpuCostFactor,
"st_difference": 10000 * cpuCostFactor,
"st_dimension": 100 * cpuCostFactor,
"st_disjoint": 10000 * cpuCostFactor,
"st_distance": 100 * cpuCostFactor,
"st_distancecpa": 10000 * cpuCostFactor,
"st_distancesphere": 100 * cpuCostFactor,
"st_distancespheroid": 1000 * cpuCostFactor,
"st_dump": 1000 * cpuCostFactor,
"st_dumppoints": 100 * cpuCostFactor,
"st_dumprings": 1000 * cpuCostFactor,
"st_dwithin": 100 * cpuCostFactor,
"st_endpoint": 100 * cpuCostFactor,
"st_envelope": 100 * cpuCostFactor,
"st_equals": 10000 * cpuCostFactor,
"st_expand": 100 * cpuCostFactor,
"st_exteriorring": 100 * cpuCostFactor,
"st_filterbym": 1000 * cpuCostFactor,
"st_findextent": 100 * cpuCostFactor,
"st_flipcoordinates": 1000 * cpuCostFactor,
"st_force2d": 100 * cpuCostFactor,
"st_force3d": 100 * cpuCostFactor,
"st_force3dm": 100 * cpuCostFactor,
"st_force3dz": 100 * cpuCostFactor,
"st_force4d": 100 * cpuCostFactor,
"st_forcecollection": 100 * cpuCostFactor,
"st_forcecurve": 1000 * cpuCostFactor,
"st_forcepolygonccw": 100 * cpuCostFactor,
"st_forcepolygoncw": 1000 * cpuCostFactor,
"st_forcerhr": 1000 * cpuCostFactor,
"st_forcesfs": 1000 * cpuCostFactor,
"st_frechetdistance": 10000 * cpuCostFactor,
"st_generatepoints": 10000 * cpuCostFactor,
"st_geogfromtext": 100 * cpuCostFactor,
"st_geogfromwkb": 100 * cpuCostFactor,
"st_geographyfromtext": 100 * cpuCostFactor,
"st_geohash": 1000 * cpuCostFactor,
"st_geomcollfromtext": 100 * cpuCostFactor,
"st_geomcollfromwkb": 100 * cpuCostFactor,
"st_geometricmedian": 10000 * cpuCostFactor,
"st_geometryfromtext": 1000 * cpuCostFactor,
"st_geometryn": 100 * cpuCostFactor,
"st_geometrytype": 100 * cpuCostFactor,
"st_geomfromewkb": 100 * cpuCostFactor,
"st_geomfromewkt": 100 * cpuCostFactor,
"st_geomfromgeohash": 1000 * cpuCostFactor,
"st_geomfromgeojson": 1000 * cpuCostFactor,
"st_geomfromgml": 100 * cpuCostFactor,
"st_geomfromkml": 1000 * cpuCostFactor,
"st_geomfromtext": 1000 * cpuCostFactor,
"st_geomfromtwkb": 100 * cpuCostFactor,
"st_geomfromwkb": 100 * cpuCostFactor,
"st_gmltosql": 100 * cpuCostFactor,
"st_hasarc": 100 * cpuCostFactor,
"st_hausdorffdistance": 10000 * cpuCostFactor,
"st_inittopogeo": 100 * cpuCostFactor,
"st_interiorringn": 100 * cpuCostFactor,
"st_interpolatepoint": 1000 * cpuCostFactor,
"st_intersection": 100 * cpuCostFactor,
"st_intersects": 100 * cpuCostFactor,
"st_isclosed": 100 * cpuCostFactor,
"st_iscollection": 1000 * cpuCostFactor,
"st_isempty": 100 * cpuCostFactor,
"st_ispolygonccw": 100 * cpuCostFactor,
"st_ispolygoncw": 100 * cpuCostFactor,
"st_isring": 1000 * cpuCostFactor,
"st_issimple": 1000 * cpuCostFactor,
"st_isvalid": 100 * cpuCostFactor,
"st_isvaliddetail": 10000 * cpuCostFactor,
"st_isvalidreason": 100 * cpuCostFactor,
"st_isvalidtrajectory": 10000 * cpuCostFactor,
"st_length": 100 * cpuCostFactor,
"st_length2d": 100 * cpuCostFactor,
"st_length2dspheroid": 1000 * cpuCostFactor,
"st_lengthspheroid": 1000 * cpuCostFactor,
"st_linecrossingdirection": 10000 * cpuCostFactor,
"st_linefromencodedpolyline": 1000 * cpuCostFactor,
"st_linefrommultipoint": 100 * cpuCostFactor,
"st_linefromtext": 100 * cpuCostFactor,
"st_linefromwkb": 100 * cpuCostFactor,
"st_lineinterpolatepoint": 1000 * cpuCostFactor,
"st_lineinterpolatepoints": 1000 * cpuCostFactor,
"st_linelocatepoint": 1000 * cpuCostFactor,
"st_linemerge": 10000 * cpuCostFactor,
"st_linestringfromwkb": 100 * cpuCostFactor,
"st_linesubstring": 1000 * cpuCostFactor,
"st_linetocurve": 10000 * cpuCostFactor,
"st_locatealong": 1000 * cpuCostFactor,
"st_locatebetween": 1000 * cpuCostFactor,
"st_locatebetweenelevations": 1000 * cpuCostFactor,
"st_longestline": 100 * cpuCostFactor,
"st_makeenvelope": 100 * cpuCostFactor,
"st_makeline": 100 * cpuCostFactor,
"st_makepoint": 100 * cpuCostFactor,
"st_makepointm": 100 * cpuCostFactor,
"st_makepolygon": 100 * cpuCostFactor,
"st_makevalid": 10000 * cpuCostFactor,
"st_maxdistance": 100 * cpuCostFactor,
"st_memsize": 100 * cpuCostFactor,
"st_minimumboundingcircle": 10000 * cpuCostFactor,
"st_minimumboundingradius": 10000 * cpuCostFactor,
"st_minimumclearance": 10000 * cpuCostFactor,
"st_minimumclearanceline": 10000 * cpuCostFactor,
"st_mlinefromtext": 100 * cpuCostFactor,
"st_mlinefromwkb": 100 * cpuCostFactor,
"st_mpointfromtext": 100 * cpuCostFactor,
"st_mpointfromwkb": 100 * cpuCostFactor,
"st_mpolyfromtext": 100 * cpuCostFactor,
"st_mpolyfromwkb": 100 * cpuCostFactor,
"st_multi": 100 * cpuCostFactor,
"st_multilinefromwkb": 100 * cpuCostFactor,
"st_multilinestringfromtext": 100 * cpuCostFactor,
"st_multipointfromtext": 100 * cpuCostFactor,
"st_multipointfromwkb": 100 * cpuCostFactor,
"st_multipolyfromwkb": 100 * cpuCostFactor,
"st_multipolygonfromtext": 100 * cpuCostFactor,
"st_node": 10000 * cpuCostFactor,
"st_normalize": 100 * cpuCostFactor,
"st_npoints": 100 * cpuCostFactor,
"st_nrings": 100 * cpuCostFactor,
"st_numgeometries": 100 * cpuCostFactor,
"st_numinteriorring": 100 * cpuCostFactor,
"st_numinteriorrings": 100 * cpuCostFactor,
"st_numpatches": 100 * cpuCostFactor,
"st_numpoints": 100 * cpuCostFactor,
"st_offsetcurve": 10000 * cpuCostFactor,
"st_orderingequals": 10000 * cpuCostFactor,
"st_orientedenvelope": 10000 * cpuCostFactor,
"st_overlaps": 10000 * cpuCostFactor,
"st_patchn": 100 * cpuCostFactor,
"st_perimeter": 100 * cpuCostFactor,
"st_perimeter2d": 100 * cpuCostFactor,
"st_point": 100 * cpuCostFactor,
"st_pointfromgeohash": 1000 * cpuCostFactor,
"st_pointfromtext": 100 * cpuCostFactor,
"st_pointfromwkb": 100 * cpuCostFactor,
"st_pointinsidecircle": 1000 * cpuCostFactor,
"st_pointn": 100 * cpuCostFactor,
"st_pointonsurface": 1000 * cpuCostFactor,
"st_points": 1000 * cpuCostFactor,
"st_polyfromtext": 100 * cpuCostFactor,
"st_polyfromwkb": 100 * cpuCostFactor,
"st_polygon": 100 * cpuCostFactor,
"st_polygonfromtext": 100 * cpuCostFactor,
"st_polygonfromwkb": 100 * cpuCostFactor,
"st_polygonize": 10000 * cpuCostFactor,
"st_project": 1000 * cpuCostFactor,
"st_quantizecoordinates": 1000 * cpuCostFactor,
"st_relate": 10000 * cpuCostFactor,
"st_relatematch": 1000 * cpuCostFactor,
"st_removepoint": 100 * cpuCostFactor,
"st_removerepeatedpoints": 1000 * cpuCostFactor,
"st_reverse": 1000 * cpuCostFactor,
"st_rotate": 100 * cpuCostFactor,
"st_rotatex": 100 * cpuCostFactor,
"st_rotatey": 100 * cpuCostFactor,
"st_rotatez": 100 * cpuCostFactor,
"st_scale": 100 * cpuCostFactor,
"st_segmentize": 1000 * cpuCostFactor,
"st_seteffectivearea": 1000 * cpuCostFactor,
"st_setpoint": 100 * cpuCostFactor,
"st_setsrid": 100 * cpuCostFactor,
"st_sharedpaths": 10000 * cpuCostFactor,
"st_shortestline": 1000 * cpuCostFactor,
"st_simplify": 100 * cpuCostFactor,
"st_simplifypreservetopology": 10000 * cpuCostFactor,
"st_simplifyvw": 10000 * cpuCostFactor,
"st_snap": 10000 * cpuCostFactor,
"st_snaptogrid": 100 * cpuCostFactor,
"st_split": 10000 * cpuCostFactor,
"st_srid": 100 * cpuCostFactor,
"st_startpoint": 100 * cpuCostFactor,
"st_subdivide": 10000 * cpuCostFactor,
"st_summary": 100 * cpuCostFactor,
"st_swapordinates": 100 * cpuCostFactor,
"st_symdifference": 10000 * cpuCostFactor,
"st_symmetricdifference": 10000 * cpuCostFactor,
"st_tileenvelope": 100 * cpuCostFactor,
"st_touches": 10000 * cpuCostFactor,
"st_transform": 100 * cpuCostFactor,
"st_translate": 100 * cpuCostFactor,
"st_transscale": 100 * cpuCostFactor,
"st_unaryunion": 10000 * cpuCostFactor,
"st_union": 10000 * cpuCostFactor,
"st_voronoilines": 100 * cpuCostFactor,
"st_voronoipolygons": 100 * cpuCostFactor,
"st_within": 10000 * cpuCostFactor,
"st_wkbtosql": 100 * cpuCostFactor,
"st_wkttosql": 1000 * cpuCostFactor,
}
// Init initializes a new coster structure with the given memo.
func (c *coster) Init(
ctx context.Context, evalCtx *eval.Context, mem *memo.Memo, perturbation float64, rng *rand.Rand,
) {
// This initialization pattern ensures that fields are not unwittingly
// reused. Field reuse must be explicit.
*c = coster{
ctx: ctx,
evalCtx: evalCtx,
mem: mem,
locality: evalCtx.Locality,
perturbation: perturbation,
rng: rng,
}
}
// ComputeCost calculates the estimated cost of the top-level operator in a
// candidate best expression, based on its logical properties and those of its
// children.
//
// Note: each custom function to compute the cost of an operator calculates
// the cost based on Big-O estimated complexity. Most constant factors are
// ignored for now.
func (c *coster) ComputeCost(candidate memo.RelExpr, required *physical.Required) memo.Cost {
var cost memo.Cost
switch candidate.Op() {
case opt.TopKOp:
cost = c.computeTopKCost(candidate.(*memo.TopKExpr), required)
case opt.SortOp:
cost = c.computeSortCost(candidate.(*memo.SortExpr), required)
case opt.DistributeOp:
cost = c.computeDistributeCost(candidate.(*memo.DistributeExpr), required)
case opt.ScanOp:
cost = c.computeScanCost(candidate.(*memo.ScanExpr), required)
case opt.SelectOp:
cost = c.computeSelectCost(candidate.(*memo.SelectExpr), required)
case opt.ProjectOp:
cost = c.computeProjectCost(candidate.(*memo.ProjectExpr))
case opt.InvertedFilterOp:
cost = c.computeInvertedFilterCost(candidate.(*memo.InvertedFilterExpr))
case opt.ValuesOp:
cost = c.computeValuesCost(candidate.(*memo.ValuesExpr))
case opt.InnerJoinOp, opt.LeftJoinOp, opt.RightJoinOp, opt.FullJoinOp,
opt.SemiJoinOp, opt.AntiJoinOp, opt.InnerJoinApplyOp, opt.LeftJoinApplyOp,
opt.SemiJoinApplyOp, opt.AntiJoinApplyOp:
// All join ops use hash join by default.
cost = c.computeHashJoinCost(candidate)
case opt.MergeJoinOp:
cost = c.computeMergeJoinCost(candidate.(*memo.MergeJoinExpr))
case opt.IndexJoinOp:
cost = c.computeIndexJoinCost(candidate.(*memo.IndexJoinExpr), required)
case opt.LookupJoinOp:
cost = c.computeLookupJoinCost(candidate.(*memo.LookupJoinExpr), required)
case opt.InvertedJoinOp:
cost = c.computeInvertedJoinCost(candidate.(*memo.InvertedJoinExpr), required)
case opt.ZigzagJoinOp:
cost = c.computeZigzagJoinCost(candidate.(*memo.ZigzagJoinExpr))
case opt.UnionOp, opt.IntersectOp, opt.ExceptOp,
opt.UnionAllOp, opt.IntersectAllOp, opt.ExceptAllOp, opt.LocalityOptimizedSearchOp:
cost = c.computeSetCost(candidate)
case opt.GroupByOp, opt.ScalarGroupByOp, opt.DistinctOnOp, opt.EnsureDistinctOnOp,
opt.UpsertDistinctOnOp, opt.EnsureUpsertDistinctOnOp:
cost = c.computeGroupingCost(candidate, required)
case opt.LimitOp:
cost = c.computeLimitCost(candidate.(*memo.LimitExpr))
case opt.OffsetOp:
cost = c.computeOffsetCost(candidate.(*memo.OffsetExpr))
case opt.OrdinalityOp:
cost = c.computeOrdinalityCost(candidate.(*memo.OrdinalityExpr))
case opt.ProjectSetOp:
cost = c.computeProjectSetCost(candidate.(*memo.ProjectSetExpr))
case opt.ExplainOp:
// Technically, the cost of an Explain operation is independent of the cost
// of the underlying plan. However, we want to explain the plan we would get
// without EXPLAIN, i.e. the lowest cost plan. So do nothing special to get
// default behavior.
}
// Add a one-time cost for any operator, meant to reflect the cost of setting
// up execution for the operator. This makes plans with fewer operators
// preferable, all else being equal.
cost += cpuCostFactor
// Add a one-time cost for any operator with unbounded cardinality. This
// ensures we prefer plans that push limits as far down the tree as possible,
// all else being equal.
if candidate.Relational().Cardinality.IsUnbounded() {
cost += cpuCostFactor
}
if !cost.Less(memo.MaxCost) {
// Optsteps uses MaxCost to suppress nodes in the memo. When a node with
// MaxCost is added to the memo, it can lead to an obscure crash with an
// unknown node. We'd rather detect this early.
panic(errors.AssertionFailedf("node %s with MaxCost added to the memo", redact.Safe(candidate.Op())))
}
if c.perturbation != 0 {
// Don't perturb the cost if we are forcing an index.
if cost < hugeCost {
// Get a random value in the range [-1.0, 1.0)
var multiplier float64
if c.rng == nil {
multiplier = 2*rand.Float64() - 1
} else {
multiplier = 2*c.rng.Float64() - 1
}
// If perturbation is p, and the estimated cost of an expression is c,
// the new cost is in the range [max(0, c - pc), c + pc). For example,
// if p=1.5, the new cost is in the range [0, c + 1.5 * c).
cost += cost * memo.Cost(c.perturbation*multiplier)
// The cost must always be >= 0.
if cost < 0 {
cost = 0
}
}
}
return cost
}
func (c *coster) computeTopKCost(topk *memo.TopKExpr, required *physical.Required) memo.Cost {
rel := topk.Relational()
outputRowCount := rel.Statistics().RowCount
inputRowCount := topk.Input.Relational().Statistics().RowCount
if !required.Ordering.Any() {
// When there is a partial ordering of the input rows' sort columns, we may
// be able to reduce the number of input rows needed to find the top K rows.
inputRowCount = topKInputLimitHint(c.mem, topk, inputRowCount, outputRowCount, float64(topk.K))
}
// Add the cost of sorting.
// Start with a cost of storing each row; TopK sort only stores K rows in a
// max heap.
cost := memo.Cost(cpuCostFactor * float64(rel.OutputCols.Len()) * outputRowCount)
// Add buffering cost for the output rows.
cost += c.rowBufferCost(outputRowCount)
// In the worst case, there are O(N*log(K)) comparisons to compare each row in
// the input to the top of the max heap and sift the max heap if each row
// compared is in the top K found so far.
cost += c.rowCmpCost(len(topk.Ordering.Columns)) * memo.Cost((1+math.Log2(math.Max(outputRowCount, 1)))*inputRowCount)
// TODO(harding): Add the CPU cost of emitting the K output rows. This should
// be done in conjunction with computeSortCost.
return cost
}
func (c *coster) computeSortCost(sort *memo.SortExpr, required *physical.Required) memo.Cost {
// We calculate the cost of a (potentially) segmented sort.
//
// In a non-segmented sort, we have a single segment to sort according to
// required.Ordering.Columns.
//
// In a segmented sort, rows are split into segments according to
// InputOrdering.Columns; each segment is sorted according to the remaining
// columns from required.Ordering.Columns.
numKeyCols := len(required.Ordering.Columns)
numPreorderedCols := len(sort.InputOrdering.Columns)
rel := sort.Relational()
stats := rel.Statistics()
numSegments := c.countSegments(sort)
// Start with a cost of storing each row; this takes the total number of
// columns into account so that a sort on fewer columns is preferred (e.g.
// sort before projecting a new column).
cost := memo.Cost(cpuCostFactor * float64(rel.OutputCols.Len()) * stats.RowCount)
if !sort.InputOrdering.Any() {
// Add the cost for finding the segments: each row is compared to the
// previous row on the preordered columns. Most of these comparisons will
// yield equality, so we don't use rowCmpCost(): we expect to have to
// compare all preordered columns.
cost += cpuCostFactor * memo.Cost(numPreorderedCols) * memo.Cost(stats.RowCount)
}
// Add the cost to sort the segments. On average, each row is involved in
// O(log(segmentSize)) comparisons.
numCmpOpsPerRow := float64(1)
if segmentSize := stats.RowCount / numSegments; segmentSize > 1 {
numCmpOpsPerRow += math.Log2(segmentSize)
// Add a cost for buffering rows that takes into account increased memory
// pressure and the possibility of spilling to disk.
cost += memo.Cost(numSegments) * c.rowBufferCost(segmentSize)
}
cost += c.rowCmpCost(numKeyCols-numPreorderedCols) * memo.Cost(numCmpOpsPerRow*stats.RowCount)
// TODO(harding): Add the CPU cost of emitting the output rows. This should be
// done in conjunction with computeTopKCost.
return cost
}
func (c *coster) computeDistributeCost(
distribute *memo.DistributeExpr, required *physical.Required,
) memo.Cost {
if distribute.NoOpDistribution() {
// If the distribution will be elided, the cost is zero.
return memo.Cost(0)
}
if c.evalCtx != nil && c.evalCtx.SessionData().EnforceHomeRegion && c.evalCtx.Planner.IsANSIDML() {
return LargeDistributeCost
}
// TODO(rytaft): Compute a real cost here. Currently we just add a tiny cost
// as a placeholder.
return cpuCostFactor
}
func (c *coster) computeScanCost(scan *memo.ScanExpr, required *physical.Required) memo.Cost {
if scan.Flags.ForceIndex && scan.Flags.Index != scan.Index || scan.Flags.ForceZigzag {
// If we are forcing an index, any other index has a very high cost. In
// practice, this will only happen when this is a primary index scan.
return hugeCost
}
isUnfiltered := scan.IsUnfiltered(c.mem.Metadata())
if scan.Flags.NoFullScan {
// Normally a full scan of a partial index would be allowed with the
// NO_FULL_SCAN hint (isUnfiltered is false for partial indexes), but if the
// user has explicitly forced the partial index *and* used NO_FULL_SCAN, we
// disallow the full index scan.
if isUnfiltered || (scan.Flags.ForceIndex && scan.IsFullIndexScan(c.mem.Metadata())) {
return hugeCost
}
}
stats := scan.Relational().Statistics()
rowCount := stats.RowCount
if isUnfiltered && c.evalCtx != nil && c.evalCtx.SessionData().DisallowFullTableScans {
isLarge := !stats.Available || rowCount > c.evalCtx.SessionData().LargeFullScanRows
if isLarge {
return hugeCost
}
}
// Add the IO cost of retrieving and the CPU cost of emitting the rows. The
// row cost depends on the size of the columns scanned.
perRowCost := c.rowScanCost(scan.Table, scan.Index, scan.Cols)
numSpans := 1
if scan.Constraint != nil {
numSpans = scan.Constraint.Spans.Count()
} else if scan.InvertedConstraint != nil {
numSpans = len(scan.InvertedConstraint)
}
baseCost := memo.Cost(numSpans * randIOCostFactor)
// If this is a virtual scan, add the cost of fetching table descriptors.
if c.mem.Metadata().Table(scan.Table).IsVirtualTable() {
baseCost += virtualScanTableDescriptorFetchCost
}
// Performing a reverse scan is more expensive than a forward scan, but it's
// still preferable to sorting the output of a forward scan. To ensure we
// choose a reverse scan over a sort, add the reverse scan cost before we
// alter the row count for unbounded scan penalties below. This cost must also
// be added before adjusting the row count for the limit hint.
if ordering.ScanIsReverse(scan, &required.Ordering) {
if rowCount > 1 {
// Need to do binary search to seek to the previous row.
perRowCost += memo.Cost(math.Log2(rowCount)) * cpuCostFactor
}
}
// Add a penalty to full table scans. All else being equal, we prefer a
// constrained scan. Adding a few rows worth of cost helps prevent surprising
// plans for very small tables.
if isUnfiltered {
rowCount += fullScanRowCountPenalty
// For tables with multiple partitions, add the cost of visiting each
// partition.
// TODO(rytaft): In the future we should take latency into account here.
index := c.mem.Metadata().Table(scan.Table).Index(scan.Index)
if partitionCount := index.PartitionCount(); partitionCount > 1 {
// Subtract 1 since we already accounted for the first partition when
// counting spans.
baseCost += memo.Cost(partitionCount-1) * randIOCostFactor
}
}
// Add a penalty if the cardinality exceeds the row count estimate. Adding a
// few rows worth of cost helps prevent surprising plans for very small tables
// or for when stats are stale.
//
// Note: we add this to the baseCost rather than the rowCount, so that the
// number of index columns does not have an outsized effect on the cost of
// the scan. See issue #68556.
baseCost += c.largeCardinalityCostPenalty(scan.Relational().Cardinality, rowCount)
if required.LimitHint != 0 {
rowCount = math.Min(rowCount, required.LimitHint)
}
cost := baseCost + memo.Cost(rowCount)*(seqIOCostFactor+perRowCost)
// If this scan is locality optimized, divide the cost by 3 in order to make
// the total cost of the two scans in the locality optimized plan less than
// the cost of the single scan in the non-locality optimized plan.
// TODO(rytaft): This is hacky. We should really be making this determination
// based on the latency between regions.
if scan.LocalityOptimized {
cost /= 3
}
return cost
}
func (c *coster) computeSelectCost(sel *memo.SelectExpr, required *physical.Required) memo.Cost {
// Typically the filter has to be evaluated on each input row.
inputRowCount := sel.Input.Relational().Statistics().RowCount
// If there is a LimitHint, n, it is expected that the filter will only be
// evaluated on the number of rows required to produce n rows.
if required.LimitHint != 0 {
selectivity := sel.Relational().Statistics().Selectivity.AsFloat()
inputRowCount = math.Min(inputRowCount, required.LimitHint/selectivity)
}
filterSetup, filterPerRow := c.computeFiltersCost(sel.Filters, intsets.Fast{})
cost := memo.Cost(inputRowCount) * filterPerRow
cost += filterSetup
return cost
}
func (c *coster) computeProjectCost(prj *memo.ProjectExpr) memo.Cost {
// Each synthesized column causes an expression to be evaluated on each row.
rowCount := prj.Relational().Statistics().RowCount
synthesizedColCount := len(prj.Projections)
cost := memo.Cost(rowCount) * memo.Cost(synthesizedColCount) * cpuCostFactor
// Add the CPU cost of emitting the rows.
cost += memo.Cost(rowCount) * cpuCostFactor
return cost
}
func (c *coster) computeInvertedFilterCost(invFilter *memo.InvertedFilterExpr) memo.Cost {
// The filter has to be evaluated on each input row.
inputRowCount := invFilter.Input.Relational().Statistics().RowCount
cost := memo.Cost(inputRowCount) * cpuCostFactor
return cost
}
func (c *coster) computeValuesCost(values *memo.ValuesExpr) memo.Cost {
return memo.Cost(values.Relational().Statistics().RowCount) * cpuCostFactor
}
func (c *coster) computeHashJoinCost(join memo.RelExpr) memo.Cost {
if join.Private().(*memo.JoinPrivate).Flags.Has(memo.DisallowHashJoinStoreRight) {
return hugeCost
}
leftRowCount := join.Child(0).(memo.RelExpr).Relational().Statistics().RowCount
rightRowCount := join.Child(1).(memo.RelExpr).Relational().Statistics().RowCount
if (join.Op() == opt.SemiJoinOp || join.Op() == opt.AntiJoinOp) && leftRowCount < rightRowCount {
// If we have a semi or an anti join, during the execbuilding we choose
// the relation with smaller cardinality to be on the right side, so we
// need to swap row counts accordingly.
// TODO(raduberinde): we might also need to look at memo.JoinFlags when
// choosing a side.
leftRowCount, rightRowCount = rightRowCount, leftRowCount
}
// A hash join must process every row from both tables once.
//
// We add some factors to account for the hashtable build and lookups. The
// right side is the one stored in the hashtable, so we use a larger factor
// for that side. This ensures that a join with the smaller right side is
// preferred to the symmetric join.
cost := memo.Cost(1.25*leftRowCount+1.75*rightRowCount) * cpuCostFactor
// Add a cost for buffering rows that takes into account increased memory
// pressure and the possibility of spilling to disk.
cost += c.rowBufferCost(rightRowCount)
// Compute filter cost. Fetch the indices of the filters that will be used in
// the join, since they will not add to the cost and should be skipped.
on := join.Child(2).(*memo.FiltersExpr)
leftCols := join.Child(0).(memo.RelExpr).Relational().OutputCols
rightCols := join.Child(1).(memo.RelExpr).Relational().OutputCols
filtersToSkip := memo.ExtractJoinConditionFilterOrds(leftCols, rightCols, *on, false /* inequality */)
filterSetup, filterPerRow := c.computeFiltersCost(*on, filtersToSkip)
cost += filterSetup
// Add the CPU cost of emitting the rows.
rowsProcessed, ok := c.mem.RowsProcessed(join)
if !ok {
// This can happen as part of testing. In this case just return the number
// of rows.
rowsProcessed = join.Relational().Statistics().RowCount
}
cost += memo.Cost(rowsProcessed) * filterPerRow
return cost
}
func (c *coster) computeMergeJoinCost(join *memo.MergeJoinExpr) memo.Cost {
if join.MergeJoinPrivate.Flags.Has(memo.DisallowMergeJoin) {
return hugeCost
}
leftRowCount := join.Left.Relational().Statistics().RowCount
rightRowCount := join.Right.Relational().Statistics().RowCount
if (join.Op() == opt.SemiJoinOp || join.Op() == opt.AntiJoinOp) && leftRowCount < rightRowCount {
// If we have a semi or an anti join, during the execbuilding we choose
// the relation with smaller cardinality to be on the right side, so we
// need to swap row counts accordingly.
// TODO(raduberinde): we might also need to look at memo.JoinFlags when
// choosing a side.
leftRowCount, rightRowCount = rightRowCount, leftRowCount
}
// The vectorized merge join in some cases buffers rows from the right side
// whereas the left side is processed in a streaming fashion. To account for
// this difference, we multiply both row counts so that a join with the
// smaller right side is preferred to the symmetric join.
cost := memo.Cost(0.9*leftRowCount+1.1*rightRowCount) * cpuCostFactor
filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.Fast{})
cost += filterSetup
// Add the CPU cost of emitting the rows.
rowsProcessed, ok := c.mem.RowsProcessed(join)
if !ok {
// We shouldn't ever get here. Since we don't allow the memo
// to be optimized twice, the coster should never be used after
// logPropsBuilder.clear() is called.
panic(errors.AssertionFailedf("could not get rows processed for merge join"))
}
cost += memo.Cost(rowsProcessed) * filterPerRow
return cost
}
func (c *coster) computeIndexJoinCost(
join *memo.IndexJoinExpr, required *physical.Required,
) memo.Cost {
return c.computeIndexLookupJoinCost(
join,
required,
true, /* lookupColsAreTableKey */
memo.TrueFilter,
join.Cols,
join.Table,
cat.PrimaryIndex,
memo.JoinFlags(0),
false, /* localityOptimized */
)
}
func (c *coster) computeLookupJoinCost(
join *memo.LookupJoinExpr, required *physical.Required,
) memo.Cost {
if join.LookupJoinPrivate.Flags.Has(memo.DisallowLookupJoinIntoRight) {
return hugeCost
}
cost := c.computeIndexLookupJoinCost(
join,
required,
join.LookupColsAreTableKey,
join.On,
join.Cols,
join.Table,
join.Index,
join.Flags,
join.LocalityOptimized,
)
if c.evalCtx != nil && c.evalCtx.SessionData().EnforceHomeRegion && c.evalCtx.Planner.IsANSIDML() {
provided := distribution.BuildLookupJoinLookupTableDistribution(c.ctx, c.evalCtx, join)
if provided.Any() || len(provided.Regions) != 1 {
cost += LargeDistributeCost
}
var localDist physical.Distribution
localDist.FromLocality(c.evalCtx.Locality)
if !localDist.Equals(provided) {
cost += LargeDistributeCost
}
}
return cost
}
func (c *coster) computeIndexLookupJoinCost(
join memo.RelExpr,
required *physical.Required,
lookupColsAreTableKey bool,
on memo.FiltersExpr,
cols opt.ColSet,
table opt.TableID,
index cat.IndexOrdinal,
flags memo.JoinFlags,
localityOptimized bool,
) memo.Cost {
input := join.Child(0).(memo.RelExpr)
lookupCount := input.Relational().Statistics().RowCount
// Take into account that the "internal" row count is higher, according to
// the selectivities of the conditions. In particular, we need to ignore
// left-over conditions that are not selective.
// For example:
// ab JOIN xy ON a=x AND x=10
// becomes (during normalization):
// ab JOIN xy ON a=x AND a=10 AND x=10
// which can become a lookup join with left-over condition x=10 which doesn't
// actually filter anything.
rowsProcessed, ok := c.mem.RowsProcessed(join)
if !ok {
// We shouldn't ever get here. Since we don't allow the memo
// to be optimized twice, the coster should never be used after
// logPropsBuilder.clear() is called.
panic(errors.AssertionFailedf("could not get rows processed for lookup join"))
}
// Lookup joins can return early if enough rows have been found. An otherwise
// expensive lookup join might have a lower cost if its limit hint estimates
// that most rows will not be needed.
if required.LimitHint != 0 && lookupCount > 0 {
outputRows := join.Relational().Statistics().RowCount
unlimitedLookupCount := lookupCount
lookupCount = lookupJoinInputLimitHint(unlimitedLookupCount, outputRows, required.LimitHint)
// We scale the number of rows processed by the same factor (we are
// calculating the average number of rows processed per lookup and
// multiplying by the new lookup count).
rowsProcessed = (rowsProcessed / unlimitedLookupCount) * lookupCount
}
perLookupCost := indexLookupJoinPerLookupCost(join)
if !lookupColsAreTableKey {
// If the lookup columns don't form a key, execution will have to limit
// KV batches which prevents running requests to multiple nodes in parallel.
// An experiment on a 4 node cluster with a table with 100k rows split into
// 100 ranges showed that a "non-parallel" lookup join is about 5 times
// slower.
// TODO(drewk): this no longer applies now that the streamer work is used.
perLookupCost += 4 * randIOCostFactor
}
if c.mem.Metadata().Table(table).IsVirtualTable() {
// It's expensive to perform a lookup join into a virtual table because
// we need to fetch the table descriptors on each lookup.
perLookupCost += virtualScanTableDescriptorFetchCost
}
cost := memo.Cost(lookupCount) * perLookupCost
filterSetup, filterPerRow := c.computeFiltersCost(on, intsets.Fast{})
cost += filterSetup
// Each lookup might retrieve many rows; add the IO cost of retrieving the
// rows (relevant when we expect many resulting rows per lookup) and the CPU
// cost of emitting the rows.
// TODO(harding): Add the cost of reading all columns in the lookup table when
// we cost rows by column size.
lookupCols := cols.Difference(input.Relational().OutputCols)
perRowCost := lookupJoinRetrieveRowCost + filterPerRow +
c.rowScanCost(table, index, lookupCols)
cost += memo.Cost(rowsProcessed) * perRowCost
if flags.Has(memo.PreferLookupJoinIntoRight) {
// If we prefer a lookup join, make the cost much smaller.
cost *= preferLookupJoinFactor
}
// If this lookup join is locality optimized, divide the cost by 2.5 in order to make
// the total cost of the two lookup joins in the locality optimized plan less than
// the cost of the single lookup join in the non-locality optimized plan.
// TODO(rytaft): This is hacky. We should really be making this determination
// based on the latency between regions.
if localityOptimized {
cost /= 2.5
}
return cost
}
func (c *coster) computeInvertedJoinCost(
join *memo.InvertedJoinExpr, required *physical.Required,
) memo.Cost {
if join.InvertedJoinPrivate.Flags.Has(memo.DisallowInvertedJoinIntoRight) {
return hugeCost
}
lookupCount := join.Input.Relational().Statistics().RowCount
// Take into account that the "internal" row count is higher, according to
// the selectivities of the conditions. In particular, we need to ignore
// the conditions that don't affect the number of rows processed.
// A contrived example, where gid is a SERIAL PK:
// nyc_census_blocks c JOIN nyc_neighborhoods n ON
// ST_Intersects(c.geom, n.geom) AND c.gid < n.gid
// which can become a lookup join with left-over condition c.gid <
// n.gid.
rowsProcessed, ok := c.mem.RowsProcessed(join)
if !ok {
// We shouldn't ever get here. Since we don't allow the memo
// to be optimized twice, the coster should never be used after
// logPropsBuilder.clear() is called.
panic(errors.AssertionFailedf("could not get rows processed for inverted join"))
}
// Lookup joins can return early if enough rows have been found. An otherwise
// expensive lookup join might have a lower cost if its limit hint estimates
// that most rows will not be needed.
if required.LimitHint != 0 && lookupCount > 0 {
outputRows := join.Relational().Statistics().RowCount
unlimitedLookupCount := lookupCount
lookupCount = lookupJoinInputLimitHint(unlimitedLookupCount, outputRows, required.LimitHint)
// We scale the number of rows processed by the same factor (we are
// calculating the average number of rows processed per lookup and
// multiplying by the new lookup count).
rowsProcessed = (rowsProcessed / unlimitedLookupCount) * lookupCount
}
// The rows in the (left) input are used to probe into the (right) table.
// Since the matching rows in the table may not all be in the same range, this
// counts as random I/O.
perLookupCost := memo.Cost(randIOCostFactor)
// Since inverted indexes can't form a key, execution will have to
// limit KV batches which prevents running requests to multiple nodes
// in parallel. An experiment on a 4 node cluster with a table with
// 100k rows split into 100 ranges showed that a "non-parallel" lookup
// join is about 5 times slower.
perLookupCost *= 5
cost := memo.Cost(lookupCount) * perLookupCost
filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.Fast{})
cost += filterSetup
// Each lookup might retrieve many rows; add the IO cost of retrieving the
// rows (relevant when we expect many resulting rows per lookup) and the CPU
// cost of emitting the rows.
lookupCols := join.Cols.Difference(join.Input.Relational().OutputCols)
perRowCost := lookupJoinRetrieveRowCost + filterPerRow +
c.rowScanCost(join.Table, join.Index, lookupCols)
cost += memo.Cost(rowsProcessed) * perRowCost
if c.evalCtx != nil && c.evalCtx.SessionData().EnforceHomeRegion && c.evalCtx.Planner.IsANSIDML() {
provided := distribution.BuildInvertedJoinLookupTableDistribution(c.ctx, c.evalCtx, join)
if provided.Any() || len(provided.Regions) != 1 {
cost += LargeDistributeCost
}
var localDist physical.Distribution
localDist.FromLocality(c.evalCtx.Locality)
if !localDist.Equals(provided) {
cost += LargeDistributeCost
}
}
return cost
}
// computeExprCost calculates the per-row cost of the expression.
// It finds every embedded spatial function and adds its cost.
func (c *coster) computeExprCost(expr opt.Expr) memo.Cost {
perRowCost := memo.Cost(0)
if expr.Op() == opt.FunctionOp {
// We are ok with the zero value here for functions not in the map.
function := expr.(*memo.FunctionExpr)
perRowCost += fnCost[function.Name]
}
// recurse into the children of the current expression
for i := 0; i < expr.ChildCount(); i++ {
perRowCost += c.computeExprCost(expr.Child(i))
}
return perRowCost
}
// computeFiltersCost returns the setup and per-row cost of executing
// a filter. Callers of this function should add setupCost and multiply
// perRowCost by the number of rows expected to be filtered.
//
// filtersToSkip identifies the indices of filters that should be skipped,
// because they do not add to the cost. This can happen when a condition still
// exists in the filters even though it is handled by the join.
func (c *coster) computeFiltersCost(
filters memo.FiltersExpr, filtersToSkip intsets.Fast,
) (setupCost, perRowCost memo.Cost) {
// Add a base perRowCost so that callers do not need to have their own
// base per-row cost.
perRowCost += cpuCostFactor
for i := range filters {
if filtersToSkip.Contains(i) {
continue
}
f := &filters[i]
perRowCost += c.computeExprCost(f.Condition)
// Add a constant "setup" cost per ON condition to account for the fact that
// the rowsProcessed estimate alone cannot effectively discriminate between
// plans when RowCount is too small.
setupCost += cpuCostFactor
}
return setupCost, perRowCost
}
func (c *coster) computeZigzagJoinCost(join *memo.ZigzagJoinExpr) memo.Cost {
rowCount := join.Relational().Statistics().RowCount
// Assume the upper bound on scan cost to be the sum of the cost of scanning
// the two constituent indexes. To determine which columns are returned from
// each scan, intersect the output column set join.Cols with each side's
// IndexColumns. Columns present in both indexes are projected from the left
// side only.
md := c.mem.Metadata()
leftCols := md.TableMeta(join.LeftTable).IndexColumns(join.LeftIndex)
leftCols.IntersectionWith(join.Cols)
rightCols := md.TableMeta(join.RightTable).IndexColumns(join.RightIndex)
rightCols.IntersectionWith(join.Cols)
rightCols.DifferenceWith(leftCols)
scanCost := c.rowScanCost(join.LeftTable, join.LeftIndex, leftCols)
scanCost += c.rowScanCost(join.RightTable, join.RightIndex, rightCols)
filterSetup, filterPerRow := c.computeFiltersCost(join.On, intsets.Fast{})
// It is much more expensive to do a seek in zigzag join vs. lookup join
// because zigzag join starts a new scan for every seek via
// `Fetcher.StartScan`. Instead of using `seqIOCostFactor`, bump seek costs to
// be similar to lookup join, though more fine-tuning is needed.
// TODO(msirek): Refine zigzag join costs and try out changes to execution to
// do a point lookup for a match in the other index before
// starting a new scan. Lookup join and inverted join add a
// cost of 5 * randIOCostFactor per row to account for not
// running non-key lookups in parallel. This may be applicable
// here too.
// Explore dynamically detecting selection of a bad zigzag join
// during execution and switching to merge join on-the-fly.
// Seek costs should be at least as expensive as lookup join.
// See `indexLookupJoinPerLookupCost` and `computeIndexLookupJoinCost`.
// Increased zigzag join costs mean that accurate selectivity estimation is
// needed to ensure this index access path can be picked.
seekCost := memo.Cost(randIOCostFactor + lookupJoinRetrieveRowCost)
// Double the cost of emitting rows as well as the cost of seeking rows,
// given two indexes will be accessed.
cost := memo.Cost(rowCount) * (2*(cpuCostFactor+seekCost) + scanCost + filterPerRow)
cost += filterSetup
// Add a penalty if the cardinality exceeds the row count estimate. Adding a
// few rows worth of cost helps prevent surprising plans for very small tables
// or for when stats are stale. This is also needed to ensure parity with the
// cost of scans.
//
// Note: we add this directly to the cost rather than the rowCount, so that
// the number of index columns does not have an outsized effect on the cost of
// the zigzag join. See issue #68556.
cost += c.largeCardinalityCostPenalty(join.Relational().Cardinality, rowCount)
return cost
}
// isStreamingSetOperator returns true if the relation is a streaming set operator.
func isStreamingSetOperator(relation memo.RelExpr) bool {
if opt.IsSetOp(relation) {
return !relation.Private().(*memo.SetPrivate).Ordering.Any()
}
return false
}
func (c *coster) computeSetCost(set memo.RelExpr) memo.Cost {
// Add the CPU cost of emitting the rows.
outputRowCount := set.Relational().Statistics().RowCount
cost := memo.Cost(outputRowCount) * cpuCostFactor
// A set operation must process every row from both tables once. UnionAll and
// LocalityOptimizedSearch can avoid any extra computation, but all other set
// operations must perform a hash table lookup or update for each input row.
//
// The exception is if this is a streaming set operation, in which case there
// is no need to build a hash table. We can detect that this is a streaming
// operation by checking whether the ordering is defined in the set private
// (see isStreamingSetOperator).
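	//
	// For illustration: with left and right inputs of L and R rows producing O
	// output rows, UnionAll costs only the O*cpuCostFactor added above, while a
	// hash Union additionally pays (L+R)*cpuCostFactor for the hash table work
	// plus rowBufferCost(O) for buffering the distinct output rows.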
if set.Op() != opt.UnionAllOp && set.Op() != opt.LocalityOptimizedSearchOp &&
!isStreamingSetOperator(set) {
leftRowCount := set.Child(0).(memo.RelExpr).Relational().Statistics().RowCount
rightRowCount := set.Child(1).(memo.RelExpr).Relational().Statistics().RowCount
cost += memo.Cost(leftRowCount+rightRowCount) * cpuCostFactor
// Add a cost for buffering rows that takes into account increased memory
// pressure and the possibility of spilling to disk.
switch set.Op() {
case opt.UnionOp:
// Hash Union is implemented as UnionAll followed by Hash Distinct.
cost += c.rowBufferCost(outputRowCount)
case opt.IntersectOp, opt.ExceptOp:
// Hash Intersect and Except are implemented as Hash Distinct on each
// input followed by a Hash Join that builds the hash table from the right
// input.
cost += c.rowBufferCost(leftRowCount) + 2*c.rowBufferCost(rightRowCount)
case opt.IntersectAllOp, opt.ExceptAllOp:
// Hash IntersectAll and ExceptAll are implemented as a Hash Join that
// builds the hash table from the right input.
cost += c.rowBufferCost(rightRowCount)
default:
panic(errors.AssertionFailedf("unhandled operator %s", set.Op()))
}
}
return cost
}
func (c *coster) computeGroupingCost(grouping memo.RelExpr, required *physical.Required) memo.Cost {
// Start with some extra fixed overhead, since the grouping operators have
// setup overhead that is greater than other operators like Project. This
// can matter for rules like ReplaceMaxWithLimit.
cost := memo.Cost(cpuCostFactor)
// Add the CPU cost of emitting the rows.
outputRowCount := grouping.Relational().Statistics().RowCount
cost += memo.Cost(outputRowCount) * cpuCostFactor
private := grouping.Private().(*memo.GroupingPrivate)
groupingColCount := private.GroupingCols.Len()
aggsCount := grouping.Child(1).ChildCount()
// Normally, a grouping expression must process each input row once.
inputRowCount := grouping.Child(0).(memo.RelExpr).Relational().Statistics().RowCount
// If this is a streaming GroupBy with a limit hint, l, we only need to
// process enough input rows to output l rows.
streamingType := private.GroupingOrderType(&required.Ordering)
if (streamingType != memo.NoStreaming) && grouping.Op() == opt.GroupByOp && required.LimitHint > 0 {
inputRowCount = streamingGroupByInputLimitHint(inputRowCount, outputRowCount, required.LimitHint)
outputRowCount = math.Min(outputRowCount, required.LimitHint)
}
// Cost per row depends on the number of grouping columns and the number of
// aggregates.
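	// For illustration, 2 grouping columns and 3 aggregates over N input rows
	// add N * (3+2) * cpuCostFactor here.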
cost += memo.Cost(inputRowCount) * memo.Cost(aggsCount+groupingColCount) * cpuCostFactor
// Add a cost that reflects the use of a hash table - unless we are doing a
// streaming aggregation.
//
// The cost is chosen so that it's always less than the cost to sort the
// input.
if groupingColCount > 0 && streamingType != memo.Streaming {
// Add the cost to build the hash table.
cost += memo.Cost(inputRowCount) * cpuCostFactor
// Add a cost for buffering rows that takes into account increased memory
// pressure and the possibility of spilling to disk.
cost += c.rowBufferCost(outputRowCount)
}
return cost
}
func (c *coster) computeLimitCost(limit *memo.LimitExpr) memo.Cost {
// Add the CPU cost of emitting the rows.
cost := memo.Cost(limit.Relational().Statistics().RowCount) * cpuCostFactor
return cost
}
func (c *coster) computeOffsetCost(offset *memo.OffsetExpr) memo.Cost {
// Add the CPU cost of emitting the rows.
cost := memo.Cost(offset.Relational().Statistics().RowCount) * cpuCostFactor
return cost
}
func (c *coster) computeOrdinalityCost(ord *memo.OrdinalityExpr) memo.Cost {
// Add the CPU cost of emitting the rows.
cost := memo.Cost(ord.Relational().Statistics().RowCount) * cpuCostFactor
return cost
}
func (c *coster) computeProjectSetCost(projectSet *memo.ProjectSetExpr) memo.Cost {
// Add the CPU cost of emitting the rows.
cost := memo.Cost(projectSet.Relational().Statistics().RowCount) * cpuCostFactor
return cost
}
// getOrderingColStats returns the column statistic for the columns in the
// OrderingChoice oc. The OrderingChoice should be a member of expr. We include
// the Memo as an argument so that functions that call this function can be used
// both inside and outside the coster.
func getOrderingColStats(
mem *memo.Memo, expr memo.RelExpr, oc props.OrderingChoice,
) *props.ColumnStatistic {
if oc.Any() {
return nil
}
stats := expr.Relational().Statistics()
orderedCols := oc.ColSet()
orderedStats, ok := stats.ColStats.Lookup(orderedCols)
if !ok {
orderedStats, ok = mem.RequestColStat(expr, orderedCols)
if !ok {
// I don't think we can ever get here. Since we don't allow the memo
// to be optimized twice, the coster should never be used after
// logPropsBuilder.clear() is called.
panic(errors.AssertionFailedf("could not request the stats for ColSet %v", orderedCols))
}
}
return orderedStats
}
// countSegments calculates the number of segments that will be used to execute
// the sort. If no input ordering is provided, there's only one segment.
func (c *coster) countSegments(sort *memo.SortExpr) float64 {
orderedStats := getOrderingColStats(c.mem, sort, sort.InputOrdering)
if orderedStats == nil {
return 1
}
return orderedStats.DistinctCount
}
// rowCmpCost is the CPU cost to compare a pair of rows, which depends on the
// number of columns in the sort key.
func (c *coster) rowCmpCost(numKeyCols int) memo.Cost {
// Sorting involves comparisons on the key columns, but the cost isn't
// directly proportional: we only compare the second column if the rows are
// equal on the first column; and so on. We also account for a fixed
// "non-comparison" cost related to processing the
// row. The formula is:
//
// cpuCostFactor * [ 1 + Sum eqProb^(i-1) with i=1 to numKeyCols ]
//
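	// For illustration, with numKeyCols = 3 this evaluates to
	// cpuCostFactor * (1 + 1 + 0.1 + 0.01) = 2.11 * cpuCostFactor.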
const eqProb = 0.1
cost := cpuCostFactor
for i, c := 0, cpuCostFactor; i < numKeyCols; i, c = i+1, c*eqProb {
// c is cpuCostFactor * eqProb^i.
cost += c
}
// There is a fixed "non-comparison" cost and a comparison cost proportional
// to the key columns. Note that the cost has to be high enough so that a
// sort is almost always more expensive than a reverse scan or an index scan.
return memo.Cost(cost)
}
// rowScanCost is the CPU cost to scan one row, which depends on the average
// size of the columns in the index and the average size of the columns we are
// scanning.
func (c *coster) rowScanCost(tabID opt.TableID, idxOrd int, scannedCols opt.ColSet) memo.Cost {
md := c.mem.Metadata()
tab := md.Table(tabID)
idx := tab.Index(idxOrd)
numCols := idx.ColumnCount()
// Remove any system columns from numCols.
for i := 0; i < idx.ColumnCount(); i++ {
if idx.Column(i).Kind() == cat.System {
numCols--
}
}
// Adjust cost based on how well the current locality matches the index's
// zone constraints.
var costFactor memo.Cost = cpuCostFactor
if !tab.IsVirtualTable() && len(c.locality.Tiers) != 0 {
// If 0% of locality tiers have matching constraints, then add additional
// cost. If 100% of locality tiers have matching constraints, then add no
// additional cost. Anything in between is proportional to the number of
// matches.
adjustment := 1.0 - localityMatchScore(idx.Zone(), c.locality)
costFactor += latencyCostFactor * memo.Cost(adjustment)
}
	// The number of columns in the index matters because more columns mean
// more data to scan. The number of columns we actually return also matters
// because that is the amount of data that we could potentially transfer over
// the network.
if c.evalCtx != nil && c.evalCtx.SessionData().CostScansWithDefaultColSize {
numScannedCols := scannedCols.Len()
return memo.Cost(numCols+numScannedCols) * costFactor
}
var cost memo.Cost
for i := 0; i < idx.ColumnCount(); i++ {
colID := tabID.ColumnID(idx.Column(i).Ordinal())
isScannedCol := scannedCols.Contains(colID)
isSystemCol := idx.Column(i).Kind() == cat.System
if isSystemCol && !isScannedCol {
continue
}
avgSize := c.mem.RequestColAvgSize(tabID, colID)
// Scanned columns are double-counted due to the cost of transferring data
// over the network.
var networkCostFactor memo.Cost = 1
if isScannedCol && !isSystemCol {
networkCostFactor = 2
}
// Divide the column size by the default column size (4 bytes), so that by
// default the cost of plans involving tables that use the default AvgSize
// (e.g., if the stat is not available) is the same as if
// CostScansWithDefaultColSize were true.
cost += memo.Cost(float64(avgSize)/4) * costFactor * networkCostFactor
}
return cost
}
// rowBufferCost adds a cost for buffering rows according to a ramp function:
//
// cost
// factor
//
// | spillRowCount
// spillCostFactor _| ___________ _ _ _
// | /
// | /
// | /
// 0 _| _ _ _________/______________________ row
// | count
// noSpillRowCount
//
// This function models the fact that operators that buffer rows become more
// expensive the more rows they need to buffer, since eventually they will need
// to spill to disk. The exact number of rows that cause spilling to disk varies
// depending on a number of factors that we don't model here. Therefore, we use
// a ramp function rather than a step function to account for the uncertainty
// and avoid sudden surprising plan changes due to a small change in stats.
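//
// For illustration: a row count halfway between noSpillRowCount and
// spillRowCount yields fraction = 0.5, i.e. a cost of
// rowCount * spillCostFactor * 0.5; at or above spillRowCount the full
// rowCount * spillCostFactor is charged.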
func (c *coster) rowBufferCost(rowCount float64) memo.Cost {
if rowCount <= noSpillRowCount {
return 0
}
var fraction memo.Cost
if rowCount >= spillRowCount {
fraction = 1
} else {
fraction = memo.Cost(rowCount-noSpillRowCount) / (spillRowCount - noSpillRowCount)
}
return memo.Cost(rowCount) * spillCostFactor * fraction
}
// largeCardinalityCostPenalty returns a penalty that should be added to the
// cost of scans. It is non-zero for expressions with unbounded maximum
// cardinality or with maximum cardinality exceeding the row count estimate.
// Adding a few rows worth of cost helps prevent surprising plans for very small
// tables or for when stats are stale.
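//
// For illustration: a scan estimated at 10 rows with a maximum cardinality of
// 1000 adds min(990, largeMaxCardinalityScanCostPenalty), while an unbounded
// maximum cardinality always adds unboundedMaxCardinalityScanCostPenalty.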
func (c *coster) largeCardinalityCostPenalty(
cardinality props.Cardinality, rowCount float64,
) memo.Cost {
if cardinality.IsUnbounded() {
return unboundedMaxCardinalityScanCostPenalty
}
if maxCard := float64(cardinality.Max); maxCard > rowCount {
penalty := maxCard - rowCount
if penalty > largeMaxCardinalityScanCostPenalty {
penalty = largeMaxCardinalityScanCostPenalty
}
return memo.Cost(penalty)
}
return 0
}
// localityMatchScore returns a number from 0.0 to 1.0 that describes how well
// the current node's locality matches the given zone constraints and
// leaseholder preferences, with 0.0 indicating 0% and 1.0 indicating 100%. This
// is the basic algorithm:
//
// t = total # of locality tiers
//
// Match each locality tier against the constraint set, and compute a value
// for each tier:
//
// 0 = key not present in constraint set or key matches prohibited
// constraint, but value doesn't match
// +1 = key matches required constraint, and value does match
// -1 = otherwise
//
// m = length of longest locality prefix that ends in a +1 value and doesn't
// contain a -1 value.
//
// Compute "m" for both the ReplicaConstraints constraints set, as well as for
// the LeasePreferences constraints set:
//
// constraint-score = m / t
// lease-pref-score = m / t
//
//	if there are no lease preferences, then final-score = constraint-score
// else final-score = (constraint-score * 2 + lease-pref-score) / 3
//
// Here are some scoring examples:
//
// Locality = region=us,dc=east
// 0.0 = [] // No constraints to match
// 0.0 = [+region=eu,+dc=uk] // None of the tiers match
// 0.0 = [+region=eu,+dc=east] // 2nd tier matches, but 1st tier doesn't
// 0.0 = [-region=us,+dc=east] // 1st tier matches PROHIBITED constraint
// 0.0 = [-region=eu] // 1st tier PROHIBITED and non-matching
// 0.5 = [+region=us] // 1st tier matches
// 0.5 = [+region=us,-dc=east] // 1st tier matches, 2nd tier PROHIBITED
// 0.5 = [+region=us,+dc=west] // 1st tier matches, but 2nd tier doesn't
// 1.0 = [+region=us,+dc=east] // Both tiers match
// 1.0 = [+dc=east] // 2nd tier matches, no constraints for 1st
// 1.0 = [+region=us,+dc=east,+rack=1,-ssd] // Extra constraints ignored
//
// Note that constraints need not be specified in any particular order, so all
// constraints are scanned when matching each locality tier. In cases where
// there are multiple replica constraint groups (i.e. where a subset of replicas
// can have different constraints than another subset), the minimum constraint
// score among the groups is used.
//
// While matching leaseholder preferences are considered in the final score,
// leaseholder preferences are not guaranteed, so its score is weighted at half
// of the replica constraint score, in order to reflect the possibility that the
// leaseholder has moved from the preferred location.
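//
// For illustration: with locality region=us,dc=east, replica constraints
// [+region=us] (constraint-score 0.5), and a first lease preference of
// [+region=us,+dc=east] (lease-pref-score 1.0), the final score is
// (0.5*2 + 1.0) / 3 = 2/3.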
func localityMatchScore(zone cat.Zone, locality roachpb.Locality) float64 {
// Fast path: if there are no constraints or leaseholder preferences, then
// locality can't match.
if zone.ReplicaConstraintsCount() == 0 && zone.LeasePreferenceCount() == 0 {
return 0.0
}
// matchTier matches a tier to a set of constraints and returns:
//
// 0 = key not present in constraint set or key only matches prohibited
// constraints where value doesn't match
// +1 = key matches any required constraint key + value
// -1 = otherwise
//
matchTier := func(tier roachpb.Tier, set cat.ConstraintSet) int {
foundNoMatch := false
for j, n := 0, set.ConstraintCount(); j < n; j++ {
con := set.Constraint(j)
if con.GetKey() != tier.Key {
// Ignore constraints that don't have matching key.
continue
}
if con.GetValue() == tier.Value {
if !con.IsRequired() {
// Matching prohibited constraint, so result is -1.
return -1
}
// Matching required constraint, so result is +1.
return +1
}
if con.IsRequired() {
// Remember that non-matching required constraint was found.
foundNoMatch = true
}
}
if foundNoMatch {
// At least one non-matching required constraint was found, and no
// matching constraints.
return -1
}
// Key not present in constraint set, or key only matches prohibited
// constraints where value doesn't match.
return 0
}
// matchConstraints returns the number of tiers that match the given
// constraint set ("m" in algorithm described above).
matchConstraints := func(set cat.ConstraintSet) int {
matchCount := 0
for i, tier := range locality.Tiers {
switch matchTier(tier, set) {
case +1:
matchCount = i + 1
case -1:
return matchCount
}
}
return matchCount
}
// Score any replica constraints.
var constraintScore float64
if zone.ReplicaConstraintsCount() != 0 {
// Iterate over the replica constraints and determine the minimum value
// returned by matchConstraints for any replica. For example:
//
// 3: [+region=us,+dc=east]
// 2: [+region=us]
//
// For the [region=us,dc=east] locality, the result is min(2, 1).
minCount := intsets.MaxInt
for i := 0; i < zone.ReplicaConstraintsCount(); i++ {
matchCount := matchConstraints(zone.ReplicaConstraints(i))
if matchCount < minCount {
minCount = matchCount
}
}
constraintScore = float64(minCount) / float64(len(locality.Tiers))
}
// If there are no lease preferences, then use replica constraint score.
if zone.LeasePreferenceCount() == 0 {
return constraintScore
}
// Score the first lease preference, if one is available. Ignore subsequent
// lease preferences, since they only apply in edge cases.
matchCount := matchConstraints(zone.LeasePreference(0))
leaseScore := float64(matchCount) / float64(len(locality.Tiers))
// Weight the constraintScore twice as much as the lease score.
return (constraintScore*2 + leaseScore) / 3
}
// streamingGroupByInputLimitHint calculates an appropriate limit hint for the input
// to a streaming GroupBy expression.
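//
// For illustration: with 1000 input rows, 100 output groups, and a limit hint
// of 10, the input limit hint is 10 * 1000 / 100 = 100 rows.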
func streamingGroupByInputLimitHint(
inputRowCount, outputRowCount, outputLimitHint float64,
) float64 {
if outputRowCount == 0 {
return 0
}
// Estimate the number of input rows needed to output LimitHint rows.
inputLimitHint := outputLimitHint * inputRowCount / outputRowCount
return math.Min(inputRowCount, inputLimitHint)
}
// lookupJoinInputLimitHint calculates an appropriate limit hint for the input
// to a lookup join.
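//
// For illustration: with 1000 input rows, 500 output rows, and a limit hint of
// 10, the raw estimate is 10 * 1000 / 500 = 20 lookups, which is then rounded
// up to a whole multiple of joinReaderBatchSize (for example, 100 if the batch
// size were 100) and capped at the input row count.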
func lookupJoinInputLimitHint(inputRowCount, outputRowCount, outputLimitHint float64) float64 {
if outputRowCount == 0 {
return 0
}
// Estimate the number of lookups needed to output LimitHint rows.
expectedLookupCount := outputLimitHint * inputRowCount / outputRowCount
// Round up to the nearest multiple of a batch.
expectedLookupCount = math.Ceil(expectedLookupCount/joinReaderBatchSize) * joinReaderBatchSize
return math.Min(inputRowCount, expectedLookupCount)
}
// topKInputLimitHint calculates an appropriate limit hint for the input
// to a Top K expression when the input is partially sorted.
func topKInputLimitHint(
mem *memo.Memo, topk *memo.TopKExpr, inputRowCount, outputRowCount, K float64,
) float64 {
if outputRowCount == 0 {
return 0
}
orderedStats := getOrderingColStats(mem, topk, topk.PartialOrdering)
if orderedStats == nil {
return inputRowCount
}
// In order to find the top K rows of a partially sorted input, we estimate
// the number of rows we'll need to ingest by rounding up the nearest multiple
// of the number of rows per distinct values to K. For example, let's say we
// have 2000 input rows, 100 distinct values, and a K of 10. If we assume that
// each distinct value is found in the same number of input rows, each
// distinct value has 2000/100 = 20 rowsPerDistinctVal. Processing the rows
	// for one distinct value is sufficient to find the top K = 10 rows. If K were
// 50 instead, we would need to process more distinct values to find the top
// K, so we need to multiply the rowsPerDistinctVal by the minimum number of
// distinct values to process, which we can find by dividing K by the rows per
// distinct values and rounding up, or ceil(50/20) = 3. So if K is 50, we need
// to process approximately 3 * 20 = 60 rows to find the top 50 rows.
rowsPerDistinctVal := inputRowCount / orderedStats.DistinctCount
expectedRows := math.Ceil(K/rowsPerDistinctVal) * rowsPerDistinctVal
return math.Min(inputRowCount, expectedRows)
}
// indexLookupJoinPerLookupCost accounts for the cost of performing lookups for
// a single input row. It accounts for the random IOs incurred for each span
// (multiple spans mean multiple IOs). It also accounts for the extra CPU cost
// of the lookupExpr, if there is one.
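//
// For illustration: a lookup expression such as x = y AND z IN (1, 2, 3)
// produces three spans, so the per-lookup cost is randIOCostFactor for the
// first span, plus 2*randIOCostFactor for the two extra spans, plus
// cpuCostFactor * 2 * 1.1 for the two lookup conditions.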
func indexLookupJoinPerLookupCost(join memo.RelExpr) memo.Cost {
// The rows in the (left) input are used to probe into the (right) table.
// Since the matching rows in the table may not all be in the same range,
// this counts as random I/O.
cost := memo.Cost(randIOCostFactor)
lookupJoin, ok := join.(*memo.LookupJoinExpr)
if ok && len(lookupJoin.LookupExpr) > 0 {
numSpans := 1
var getNumSpans func(opt.ScalarExpr)
getNumSpans = func(expr opt.ScalarExpr) {
// The lookup expression will have been validated by isCanonicalFilter in
// lookupjoin/constraint_builder.go to only contain a subset of possible
// filter condition types.
switch t := expr.(type) {
case *memo.RangeExpr:
getNumSpans(t.And)
case *memo.AndExpr:
getNumSpans(t.Left)
getNumSpans(t.Right)
case *memo.InExpr:
in := t.Right.(*memo.TupleExpr)
numSpans *= len(in.Elems)
default:
// Equalities and inequalities do not change the number of spans.
}
}
		for i := range lookupJoin.LookupExpr {
			getNumSpans(lookupJoin.LookupExpr[i].Condition)
		}
		if numSpans == 0 {
			panic(errors.AssertionFailedf("lookup expr has contradiction"))
		}
if numSpans > 1 {
// Account for the random IO incurred by looking up the extra spans.
cost += memo.Cost(randIOCostFactor * (numSpans - 1))
}
// 1.1 is a fudge factor that pushes some plans over the edge when choosing
// between a partial index vs full index plus lookup expr in the
// regional_by_row.
// TODO(treilly): do some empirical analysis and model this better
cost += cpuCostFactor * memo.Cost(len(lookupJoin.LookupExpr)) * 1.1
}
return cost
}
| pkg/sql/opt/xform/coster.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.004528558347374201,
0.00019966153195127845,
0.00016408089140895754,
0.0001707587216515094,
0.0003275452181696892
] |
{
"id": 4,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/stop\"\n",
")\n",
"\n",
"// createDummyStream creates the server and client side of a FlowStream stream.\n",
"// This can be use by tests to pretend that then have received a FlowStream RPC.\n",
"// The stream can be used to send messages (ConsumerSignal's) on it (within a\n",
"// gRPC window limit since nobody's reading from the stream), for example\n",
"// Handshake messages.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// This can be use by tests to pretend that they have received a FlowStream RPC.\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
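//
// Typical usage in a test looks something like:
//
//	serverStream, clientStream, cleanup, err := createDummyStream()
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer cleanup()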
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9948956370353699,
0.1441493183374405,
0.00017509542522020638,
0.0003732314216904342,
0.34733644127845764
] |
{
"id": 4,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/stop\"\n",
")\n",
"\n",
"// createDummyStream creates the server and client side of a FlowStream stream.\n",
"// This can be use by tests to pretend that then have received a FlowStream RPC.\n",
"// The stream can be used to send messages (ConsumerSignal's) on it (within a\n",
"// gRPC window limit since nobody's reading from the stream), for example\n",
"// Handshake messages.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// This can be use by tests to pretend that they have received a FlowStream RPC.\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import "sort"
type stringSet map[string]struct{}
func (ss stringSet) add(s string) { ss[s] = struct{}{} }
func (ss stringSet) removeAll(other stringSet) {
for s := range other {
delete(ss, s)
}
}
func (ss stringSet) addAll(other stringSet) {
for s := range other {
ss.add(s)
}
}
func (ss stringSet) ordered() []string {
list := make([]string, 0, len(ss))
for s := range ss {
list = append(list, s)
}
sort.Strings(list)
return list
}
func (ss stringSet) contains(name string) bool {
_, exists := ss[name]
return exists
}
func (ss stringSet) intersection(other stringSet) stringSet {
intersection := stringSet{}
for s := range ss {
if other.contains(s) {
intersection.add(s)
}
}
return intersection
}
| pkg/sql/sem/tree/evalgen/string_set.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00019920553313568234,
0.00017517882224638015,
0.00016647014126647264,
0.00017017166828736663,
0.000011128899132017978
] |
{
"id": 4,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/stop\"\n",
")\n",
"\n",
"// createDummyStream creates the server and client side of a FlowStream stream.\n",
"// This can be use by tests to pretend that then have received a FlowStream RPC.\n",
"// The stream can be used to send messages (ConsumerSignal's) on it (within a\n",
"// gRPC window limit since nobody's reading from the stream), for example\n",
"// Handshake messages.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// This can be use by tests to pretend that they have received a FlowStream RPC.\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 25
} | zip
----
debug zip --concurrency=1 --cpu-profile-duration=0s /dev/null
[cluster] establishing RPC connection to ...
[cluster] retrieving the node status to get the SQL address... done
[cluster] using SQL address: ...
[cluster] creating output file /dev/null... done
[cluster] requesting data for debug/events... received response... converting to JSON... writing binary output: debug/events.json... done
[cluster] requesting data for debug/rangelog... received response... converting to JSON... writing binary output: debug/rangelog.json... done
[cluster] requesting data for debug/settings... received response... converting to JSON... writing binary output: debug/settings.json... done
[cluster] requesting data for debug/reports/problemranges... received response... converting to JSON... writing binary output: debug/reports/problemranges.json... done
[cluster] retrieving SQL data for "".crdb_internal.create_function_statements... writing output: debug/crdb_internal.create_function_statements.txt... done
[cluster] retrieving SQL data for "".crdb_internal.create_schema_statements... writing output: debug/crdb_internal.create_schema_statements.txt... done
[cluster] retrieving SQL data for "".crdb_internal.create_statements... writing output: debug/crdb_internal.create_statements.txt... done
[cluster] retrieving SQL data for "".crdb_internal.create_type_statements... writing output: debug/crdb_internal.create_type_statements.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_contention_events... writing output: debug/crdb_internal.cluster_contention_events.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_database_privileges... writing output: debug/crdb_internal.cluster_database_privileges.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_distsql_flows... writing output: debug/crdb_internal.cluster_distsql_flows.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_execution_insights... writing output: debug/crdb_internal.cluster_execution_insights.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_locks... writing output: debug/crdb_internal.cluster_locks.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_queries... writing output: debug/crdb_internal.cluster_queries.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_sessions... writing output: debug/crdb_internal.cluster_sessions.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_settings... writing output: debug/crdb_internal.cluster_settings.txt... done
[cluster] retrieving SQL data for crdb_internal.cluster_transactions... writing output: debug/crdb_internal.cluster_transactions.txt... done
[cluster] retrieving SQL data for crdb_internal.default_privileges... writing output: debug/crdb_internal.default_privileges.txt... done
[cluster] retrieving SQL data for crdb_internal.index_usage_statistics... writing output: debug/crdb_internal.index_usage_statistics.txt... done
[cluster] retrieving SQL data for crdb_internal.invalid_objects... writing output: debug/crdb_internal.invalid_objects.txt... done
[cluster] retrieving SQL data for crdb_internal.jobs... writing output: debug/crdb_internal.jobs.txt... done
[cluster] retrieving SQL data for crdb_internal.kv_node_liveness... writing output: debug/crdb_internal.kv_node_liveness.txt... done
[cluster] retrieving SQL data for crdb_internal.kv_node_status... writing output: debug/crdb_internal.kv_node_status.txt... done
[cluster] retrieving SQL data for crdb_internal.kv_store_status... writing output: debug/crdb_internal.kv_store_status.txt... done
[cluster] retrieving SQL data for crdb_internal.partitions... writing output: debug/crdb_internal.partitions.txt... done
[cluster] retrieving SQL data for crdb_internal.regions... writing output: debug/crdb_internal.regions.txt... done
[cluster] retrieving SQL data for crdb_internal.schema_changes... writing output: debug/crdb_internal.schema_changes.txt... done
[cluster] retrieving SQL data for crdb_internal.super_regions... writing output: debug/crdb_internal.super_regions.txt... done
[cluster] retrieving SQL data for crdb_internal.table_indexes... writing output: debug/crdb_internal.table_indexes.txt... done
[cluster] retrieving SQL data for crdb_internal.transaction_contention_events... writing output: debug/crdb_internal.transaction_contention_events.txt... done
[cluster] retrieving SQL data for crdb_internal.zones... writing output: debug/crdb_internal.zones.txt... done
[cluster] retrieving SQL data for system.database_role_settings... writing output: debug/system.database_role_settings.txt... done
[cluster] retrieving SQL data for system.descriptor... writing output: debug/system.descriptor.txt... done
[cluster] retrieving SQL data for system.eventlog... writing output: debug/system.eventlog.txt... done
[cluster] retrieving SQL data for system.external_connections... writing output: debug/system.external_connections.txt... done
[cluster] retrieving SQL data for system.jobs... writing output: debug/system.jobs.txt... done
[cluster] retrieving SQL data for system.lease... writing output: debug/system.lease.txt... done
[cluster] retrieving SQL data for system.locations... writing output: debug/system.locations.txt... done
[cluster] retrieving SQL data for system.migrations... writing output: debug/system.migrations.txt... done
[cluster] retrieving SQL data for system.namespace... writing output: debug/system.namespace.txt... done
[cluster] retrieving SQL data for system.privileges... writing output: debug/system.privileges.txt... done
[cluster] retrieving SQL data for system.protected_ts_meta... writing output: debug/system.protected_ts_meta.txt... done
[cluster] retrieving SQL data for system.protected_ts_records... writing output: debug/system.protected_ts_records.txt... done
[cluster] retrieving SQL data for system.rangelog... writing output: debug/system.rangelog.txt... done
[cluster] retrieving SQL data for system.replication_constraint_stats... writing output: debug/system.replication_constraint_stats.txt... done
[cluster] retrieving SQL data for system.replication_critical_localities... writing output: debug/system.replication_critical_localities.txt... done
[cluster] retrieving SQL data for system.replication_stats... writing output: debug/system.replication_stats.txt... done
[cluster] retrieving SQL data for system.reports_meta... writing output: debug/system.reports_meta.txt... done
[cluster] retrieving SQL data for system.role_id_seq... writing output: debug/system.role_id_seq.txt... done
[cluster] retrieving SQL data for system.role_members... writing output: debug/system.role_members.txt... done
[cluster] retrieving SQL data for system.role_options... writing output: debug/system.role_options.txt... done
[cluster] retrieving SQL data for system.scheduled_jobs... writing output: debug/system.scheduled_jobs.txt... done
[cluster] retrieving SQL data for system.settings... writing output: debug/system.settings.txt... done
[cluster] retrieving SQL data for system.span_configurations... writing output: debug/system.span_configurations.txt... done
[cluster] retrieving SQL data for system.sql_instances... writing output: debug/system.sql_instances.txt... done
[cluster] retrieving SQL data for system.sqlliveness... writing output: debug/system.sqlliveness.txt... done
[cluster] retrieving SQL data for system.statement_diagnostics... writing output: debug/system.statement_diagnostics.txt... done
[cluster] retrieving SQL data for system.statement_diagnostics_requests... writing output: debug/system.statement_diagnostics_requests.txt... done
[cluster] retrieving SQL data for system.table_statistics... writing output: debug/system.table_statistics.txt... done
[cluster] retrieving SQL data for system.tenant_settings... writing output: debug/system.tenant_settings.txt... done
[cluster] retrieving SQL data for system.tenant_usage... writing output: debug/system.tenant_usage.txt... done
[cluster] retrieving SQL data for system.tenants... writing output: debug/system.tenants.txt... done
[cluster] requesting nodes... received response... converting to JSON... writing binary output: debug/nodes.json... done
[cluster] requesting liveness... received response... converting to JSON... writing binary output: debug/liveness.json... done
[cluster] requesting tenant ranges... received response...
[cluster] requesting tenant ranges: last request failed: rpc error: ...
[cluster] requesting tenant ranges: creating error output: debug/tenant_ranges.err.txt... done
[node 1] node status... converting to JSON... writing binary output: debug/nodes/1/status.json... done
[node 1] using SQL connection URL: postgresql://...
[node 1] retrieving SQL data for crdb_internal.active_range_feeds... writing output: debug/nodes/1/crdb_internal.active_range_feeds.txt... done
[node 1] retrieving SQL data for crdb_internal.feature_usage... writing output: debug/nodes/1/crdb_internal.feature_usage.txt... done
[node 1] retrieving SQL data for crdb_internal.gossip_alerts... writing output: debug/nodes/1/crdb_internal.gossip_alerts.txt... done
[node 1] retrieving SQL data for crdb_internal.gossip_liveness... writing output: debug/nodes/1/crdb_internal.gossip_liveness.txt... done
[node 1] retrieving SQL data for crdb_internal.gossip_network... writing output: debug/nodes/1/crdb_internal.gossip_network.txt... done
[node 1] retrieving SQL data for crdb_internal.gossip_nodes... writing output: debug/nodes/1/crdb_internal.gossip_nodes.txt... done
[node 1] retrieving SQL data for crdb_internal.leases... writing output: debug/nodes/1/crdb_internal.leases.txt... done
[node 1] retrieving SQL data for crdb_internal.node_build_info... writing output: debug/nodes/1/crdb_internal.node_build_info.txt... done
[node 1] retrieving SQL data for crdb_internal.node_contention_events... writing output: debug/nodes/1/crdb_internal.node_contention_events.txt... done
[node 1] retrieving SQL data for crdb_internal.node_distsql_flows... writing output: debug/nodes/1/crdb_internal.node_distsql_flows.txt... done
[node 1] retrieving SQL data for crdb_internal.node_execution_insights... writing output: debug/nodes/1/crdb_internal.node_execution_insights.txt... done
[node 1] retrieving SQL data for crdb_internal.node_inflight_trace_spans... writing output: debug/nodes/1/crdb_internal.node_inflight_trace_spans.txt... done
[node 1] retrieving SQL data for crdb_internal.node_metrics... writing output: debug/nodes/1/crdb_internal.node_metrics.txt... done
[node 1] retrieving SQL data for crdb_internal.node_queries... writing output: debug/nodes/1/crdb_internal.node_queries.txt... done
[node 1] retrieving SQL data for crdb_internal.node_runtime_info... writing output: debug/nodes/1/crdb_internal.node_runtime_info.txt... done
[node 1] retrieving SQL data for crdb_internal.node_sessions... writing output: debug/nodes/1/crdb_internal.node_sessions.txt... done
[node 1] retrieving SQL data for crdb_internal.node_statement_statistics... writing output: debug/nodes/1/crdb_internal.node_statement_statistics.txt... done
[node 1] retrieving SQL data for crdb_internal.node_transaction_statistics... writing output: debug/nodes/1/crdb_internal.node_transaction_statistics.txt... done
[node 1] retrieving SQL data for crdb_internal.node_transactions... writing output: debug/nodes/1/crdb_internal.node_transactions.txt... done
[node 1] retrieving SQL data for crdb_internal.node_txn_stats... writing output: debug/nodes/1/crdb_internal.node_txn_stats.txt... done
[node 1] requesting data for debug/nodes/1/details... received response... converting to JSON... writing binary output: debug/nodes/1/details.json... done
[node 1] requesting data for debug/nodes/1/gossip... received response... converting to JSON... writing binary output: debug/nodes/1/gossip.json... done
[node 1] requesting data for debug/nodes/1/enginestats... received response... converting to JSON... writing binary output: debug/nodes/1/enginestats.json... done
[node 1] requesting stacks... received response... writing binary output: debug/nodes/1/stacks.txt... done
[node 1] requesting stacks with labels... received response... writing binary output: debug/nodes/1/stacks_with_labels.txt... done
[node 1] requesting heap profile... received response... writing binary output: debug/nodes/1/heap.pprof... done
[node 1] requesting heap file list... received response...
[node 1] requesting heap file list: last request failed: rpc error: ...
[node 1] requesting heap file list: creating error output: debug/nodes/1/heapprof.err.txt... done
[node 1] requesting goroutine dump list... received response...
[node 1] requesting goroutine dump list: last request failed: rpc error: ...
[node 1] requesting goroutine dump list: creating error output: debug/nodes/1/goroutines.err.txt... done
[node 1] requesting log file ...
[node 1] 1 log file ...
[node 1] [log file ...
[node 1] requesting ranges... received response... done
[node 1] 54 ranges found
[node 1] writing range 1... converting to JSON... writing binary output: debug/nodes/1/ranges/1.json... done
[node 1] writing range 2... converting to JSON... writing binary output: debug/nodes/1/ranges/2.json... done
[node 1] writing range 3... converting to JSON... writing binary output: debug/nodes/1/ranges/3.json... done
[node 1] writing range 4... converting to JSON... writing binary output: debug/nodes/1/ranges/4.json... done
[node 1] writing range 5... converting to JSON... writing binary output: debug/nodes/1/ranges/5.json... done
[node 1] writing range 6... converting to JSON... writing binary output: debug/nodes/1/ranges/6.json... done
[node 1] writing range 7... converting to JSON... writing binary output: debug/nodes/1/ranges/7.json... done
[node 1] writing range 8... converting to JSON... writing binary output: debug/nodes/1/ranges/8.json... done
[node 1] writing range 9... converting to JSON... writing binary output: debug/nodes/1/ranges/9.json... done
[node 1] writing range 10... converting to JSON... writing binary output: debug/nodes/1/ranges/10.json... done
[node 1] writing range 11... converting to JSON... writing binary output: debug/nodes/1/ranges/11.json... done
[node 1] writing range 12... converting to JSON... writing binary output: debug/nodes/1/ranges/12.json... done
[node 1] writing range 13... converting to JSON... writing binary output: debug/nodes/1/ranges/13.json... done
[node 1] writing range 14... converting to JSON... writing binary output: debug/nodes/1/ranges/14.json... done
[node 1] writing range 15... converting to JSON... writing binary output: debug/nodes/1/ranges/15.json... done
[node 1] writing range 16... converting to JSON... writing binary output: debug/nodes/1/ranges/16.json... done
[node 1] writing range 17... converting to JSON... writing binary output: debug/nodes/1/ranges/17.json... done
[node 1] writing range 18... converting to JSON... writing binary output: debug/nodes/1/ranges/18.json... done
[node 1] writing range 19... converting to JSON... writing binary output: debug/nodes/1/ranges/19.json... done
[node 1] writing range 20... converting to JSON... writing binary output: debug/nodes/1/ranges/20.json... done
[node 1] writing range 21... converting to JSON... writing binary output: debug/nodes/1/ranges/21.json... done
[node 1] writing range 22... converting to JSON... writing binary output: debug/nodes/1/ranges/22.json... done
[node 1] writing range 23... converting to JSON... writing binary output: debug/nodes/1/ranges/23.json... done
[node 1] writing range 24... converting to JSON... writing binary output: debug/nodes/1/ranges/24.json... done
[node 1] writing range 25... converting to JSON... writing binary output: debug/nodes/1/ranges/25.json... done
[node 1] writing range 26... converting to JSON... writing binary output: debug/nodes/1/ranges/26.json... done
[node 1] writing range 27... converting to JSON... writing binary output: debug/nodes/1/ranges/27.json... done
[node 1] writing range 28... converting to JSON... writing binary output: debug/nodes/1/ranges/28.json... done
[node 1] writing range 29... converting to JSON... writing binary output: debug/nodes/1/ranges/29.json... done
[node 1] writing range 30... converting to JSON... writing binary output: debug/nodes/1/ranges/30.json... done
[node 1] writing range 31... converting to JSON... writing binary output: debug/nodes/1/ranges/31.json... done
[node 1] writing range 32... converting to JSON... writing binary output: debug/nodes/1/ranges/32.json... done
[node 1] writing range 33... converting to JSON... writing binary output: debug/nodes/1/ranges/33.json... done
[node 1] writing range 34... converting to JSON... writing binary output: debug/nodes/1/ranges/34.json... done
[node 1] writing range 35... converting to JSON... writing binary output: debug/nodes/1/ranges/35.json... done
[node 1] writing range 36... converting to JSON... writing binary output: debug/nodes/1/ranges/36.json... done
[node 1] writing range 37... converting to JSON... writing binary output: debug/nodes/1/ranges/37.json... done
[node 1] writing range 38... converting to JSON... writing binary output: debug/nodes/1/ranges/38.json... done
[node 1] writing range 39... converting to JSON... writing binary output: debug/nodes/1/ranges/39.json... done
[node 1] writing range 40... converting to JSON... writing binary output: debug/nodes/1/ranges/40.json... done
[node 1] writing range 41... converting to JSON... writing binary output: debug/nodes/1/ranges/41.json... done
[node 1] writing range 42... converting to JSON... writing binary output: debug/nodes/1/ranges/42.json... done
[node 1] writing range 43... converting to JSON... writing binary output: debug/nodes/1/ranges/43.json... done
[node 1] writing range 44... converting to JSON... writing binary output: debug/nodes/1/ranges/44.json... done
[node 1] writing range 45... converting to JSON... writing binary output: debug/nodes/1/ranges/45.json... done
[node 1] writing range 46... converting to JSON... writing binary output: debug/nodes/1/ranges/46.json... done
[node 1] writing range 47... converting to JSON... writing binary output: debug/nodes/1/ranges/47.json... done
[node 1] writing range 48... converting to JSON... writing binary output: debug/nodes/1/ranges/48.json... done
[node 1] writing range 49... converting to JSON... writing binary output: debug/nodes/1/ranges/49.json... done
[node 1] writing range 50... converting to JSON... writing binary output: debug/nodes/1/ranges/50.json... done
[node 1] writing range 51... converting to JSON... writing binary output: debug/nodes/1/ranges/51.json... done
[node 1] writing range 52... converting to JSON... writing binary output: debug/nodes/1/ranges/52.json... done
[node 1] writing range 53... converting to JSON... writing binary output: debug/nodes/1/ranges/53.json... done
[node 1] writing range 54... converting to JSON... writing binary output: debug/nodes/1/ranges/54.json... done
[node 2] node status... converting to JSON... writing binary output: debug/nodes/2/status.json... done
[node 2] using SQL connection URL: postgresql://...
[node 2] retrieving SQL data for crdb_internal.active_range_feeds... writing output: debug/nodes/2/crdb_internal.active_range_feeds.txt...
[node 2] retrieving SQL data for crdb_internal.active_range_feeds: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.active_range_feeds: creating error output: debug/nodes/2/crdb_internal.active_range_feeds.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.feature_usage... writing output: debug/nodes/2/crdb_internal.feature_usage.txt...
[node 2] retrieving SQL data for crdb_internal.feature_usage: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.feature_usage: creating error output: debug/nodes/2/crdb_internal.feature_usage.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.gossip_alerts... writing output: debug/nodes/2/crdb_internal.gossip_alerts.txt...
[node 2] retrieving SQL data for crdb_internal.gossip_alerts: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.gossip_alerts: creating error output: debug/nodes/2/crdb_internal.gossip_alerts.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.gossip_liveness... writing output: debug/nodes/2/crdb_internal.gossip_liveness.txt...
[node 2] retrieving SQL data for crdb_internal.gossip_liveness: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.gossip_liveness: creating error output: debug/nodes/2/crdb_internal.gossip_liveness.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.gossip_network... writing output: debug/nodes/2/crdb_internal.gossip_network.txt...
[node 2] retrieving SQL data for crdb_internal.gossip_network: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.gossip_network: creating error output: debug/nodes/2/crdb_internal.gossip_network.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.gossip_nodes... writing output: debug/nodes/2/crdb_internal.gossip_nodes.txt...
[node 2] retrieving SQL data for crdb_internal.gossip_nodes: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.gossip_nodes: creating error output: debug/nodes/2/crdb_internal.gossip_nodes.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.leases... writing output: debug/nodes/2/crdb_internal.leases.txt...
[node 2] retrieving SQL data for crdb_internal.leases: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.leases: creating error output: debug/nodes/2/crdb_internal.leases.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_build_info... writing output: debug/nodes/2/crdb_internal.node_build_info.txt...
[node 2] retrieving SQL data for crdb_internal.node_build_info: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_build_info: creating error output: debug/nodes/2/crdb_internal.node_build_info.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_contention_events... writing output: debug/nodes/2/crdb_internal.node_contention_events.txt...
[node 2] retrieving SQL data for crdb_internal.node_contention_events: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_contention_events: creating error output: debug/nodes/2/crdb_internal.node_contention_events.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_distsql_flows... writing output: debug/nodes/2/crdb_internal.node_distsql_flows.txt...
[node 2] retrieving SQL data for crdb_internal.node_distsql_flows: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_distsql_flows: creating error output: debug/nodes/2/crdb_internal.node_distsql_flows.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_execution_insights... writing output: debug/nodes/2/crdb_internal.node_execution_insights.txt...
[node 2] retrieving SQL data for crdb_internal.node_execution_insights: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_execution_insights: creating error output: debug/nodes/2/crdb_internal.node_execution_insights.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_inflight_trace_spans... writing output: debug/nodes/2/crdb_internal.node_inflight_trace_spans.txt...
[node 2] retrieving SQL data for crdb_internal.node_inflight_trace_spans: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_inflight_trace_spans: creating error output: debug/nodes/2/crdb_internal.node_inflight_trace_spans.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_metrics... writing output: debug/nodes/2/crdb_internal.node_metrics.txt...
[node 2] retrieving SQL data for crdb_internal.node_metrics: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_metrics: creating error output: debug/nodes/2/crdb_internal.node_metrics.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_queries... writing output: debug/nodes/2/crdb_internal.node_queries.txt...
[node 2] retrieving SQL data for crdb_internal.node_queries: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_queries: creating error output: debug/nodes/2/crdb_internal.node_queries.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_runtime_info... writing output: debug/nodes/2/crdb_internal.node_runtime_info.txt...
[node 2] retrieving SQL data for crdb_internal.node_runtime_info: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_runtime_info: creating error output: debug/nodes/2/crdb_internal.node_runtime_info.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_sessions... writing output: debug/nodes/2/crdb_internal.node_sessions.txt...
[node 2] retrieving SQL data for crdb_internal.node_sessions: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_sessions: creating error output: debug/nodes/2/crdb_internal.node_sessions.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_statement_statistics... writing output: debug/nodes/2/crdb_internal.node_statement_statistics.txt...
[node 2] retrieving SQL data for crdb_internal.node_statement_statistics: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_statement_statistics: creating error output: debug/nodes/2/crdb_internal.node_statement_statistics.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_transaction_statistics... writing output: debug/nodes/2/crdb_internal.node_transaction_statistics.txt...
[node 2] retrieving SQL data for crdb_internal.node_transaction_statistics: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_transaction_statistics: creating error output: debug/nodes/2/crdb_internal.node_transaction_statistics.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_transactions... writing output: debug/nodes/2/crdb_internal.node_transactions.txt...
[node 2] retrieving SQL data for crdb_internal.node_transactions: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_transactions: creating error output: debug/nodes/2/crdb_internal.node_transactions.txt.err.txt... done
[node 2] retrieving SQL data for crdb_internal.node_txn_stats... writing output: debug/nodes/2/crdb_internal.node_txn_stats.txt...
[node 2] retrieving SQL data for crdb_internal.node_txn_stats: last request failed: dial tcp ...
[node 2] retrieving SQL data for crdb_internal.node_txn_stats: creating error output: debug/nodes/2/crdb_internal.node_txn_stats.txt.err.txt... done
[node 2] requesting data for debug/nodes/2/details... received response...
[node 2] requesting data for debug/nodes/2/details: last request failed: rpc error: ...
[node 2] requesting data for debug/nodes/2/details: creating error output: debug/nodes/2/details.json.err.txt... done
[node 2] requesting data for debug/nodes/2/gossip... received response...
[node 2] requesting data for debug/nodes/2/gossip: last request failed: rpc error: ...
[node 2] requesting data for debug/nodes/2/gossip: creating error output: debug/nodes/2/gossip.json.err.txt... done
[node 2] requesting data for debug/nodes/2/enginestats... received response...
[node 2] requesting data for debug/nodes/2/enginestats: last request failed: rpc error: ...
[node 2] requesting data for debug/nodes/2/enginestats: creating error output: debug/nodes/2/enginestats.json.err.txt... done
[node 2] requesting stacks... received response...
[node 2] requesting stacks: last request failed: rpc error: ...
[node 2] requesting stacks: creating error output: debug/nodes/2/stacks.txt.err.txt... done
[node 2] requesting stacks with labels... received response...
[node 2] requesting stacks with labels: last request failed: rpc error: ...
[node 2] requesting stacks with labels: creating error output: debug/nodes/2/stacks_with_labels.txt.err.txt... done
[node 2] requesting heap profile... received response...
[node 2] requesting heap profile: last request failed: rpc error: ...
[node 2] requesting heap profile: creating error output: debug/nodes/2/heap.pprof.err.txt... done
[node 2] requesting heap file list... received response...
[node 2] requesting heap file list: last request failed: rpc error: ...
[node 2] requesting heap file list: creating error output: debug/nodes/2/heapprof.err.txt... done
[node 2] requesting goroutine dump list... received response...
[node 2] requesting goroutine dump list: last request failed: rpc error: ...
[node 2] requesting goroutine dump list: creating error output: debug/nodes/2/goroutines.err.txt... done
[node 2] requesting log file ...
[node 2] requesting log file ...
[node 2] requesting log file ...
[node 2] requesting ranges... received response...
[node 2] requesting ranges: last request failed: rpc error: ...
[node 2] requesting ranges: creating error output: debug/nodes/2/ranges.err.txt... done
[node 3] node status... converting to JSON... writing binary output: debug/nodes/3/status.json... done
[node 3] using SQL connection URL: postgresql://...
[node 3] retrieving SQL data for crdb_internal.active_range_feeds... writing output: debug/nodes/3/crdb_internal.active_range_feeds.txt... done
[node 3] retrieving SQL data for crdb_internal.feature_usage... writing output: debug/nodes/3/crdb_internal.feature_usage.txt... done
[node 3] retrieving SQL data for crdb_internal.gossip_alerts... writing output: debug/nodes/3/crdb_internal.gossip_alerts.txt... done
[node 3] retrieving SQL data for crdb_internal.gossip_liveness... writing output: debug/nodes/3/crdb_internal.gossip_liveness.txt... done
[node 3] retrieving SQL data for crdb_internal.gossip_network... writing output: debug/nodes/3/crdb_internal.gossip_network.txt... done
[node 3] retrieving SQL data for crdb_internal.gossip_nodes... writing output: debug/nodes/3/crdb_internal.gossip_nodes.txt... done
[node 3] retrieving SQL data for crdb_internal.leases... writing output: debug/nodes/3/crdb_internal.leases.txt... done
[node 3] retrieving SQL data for crdb_internal.node_build_info... writing output: debug/nodes/3/crdb_internal.node_build_info.txt... done
[node 3] retrieving SQL data for crdb_internal.node_contention_events... writing output: debug/nodes/3/crdb_internal.node_contention_events.txt... done
[node 3] retrieving SQL data for crdb_internal.node_distsql_flows... writing output: debug/nodes/3/crdb_internal.node_distsql_flows.txt... done
[node 3] retrieving SQL data for crdb_internal.node_execution_insights... writing output: debug/nodes/3/crdb_internal.node_execution_insights.txt... done
[node 3] retrieving SQL data for crdb_internal.node_inflight_trace_spans... writing output: debug/nodes/3/crdb_internal.node_inflight_trace_spans.txt... done
[node 3] retrieving SQL data for crdb_internal.node_metrics... writing output: debug/nodes/3/crdb_internal.node_metrics.txt... done
[node 3] retrieving SQL data for crdb_internal.node_queries... writing output: debug/nodes/3/crdb_internal.node_queries.txt... done
[node 3] retrieving SQL data for crdb_internal.node_runtime_info... writing output: debug/nodes/3/crdb_internal.node_runtime_info.txt... done
[node 3] retrieving SQL data for crdb_internal.node_sessions... writing output: debug/nodes/3/crdb_internal.node_sessions.txt... done
[node 3] retrieving SQL data for crdb_internal.node_statement_statistics... writing output: debug/nodes/3/crdb_internal.node_statement_statistics.txt... done
[node 3] retrieving SQL data for crdb_internal.node_transaction_statistics... writing output: debug/nodes/3/crdb_internal.node_transaction_statistics.txt... done
[node 3] retrieving SQL data for crdb_internal.node_transactions... writing output: debug/nodes/3/crdb_internal.node_transactions.txt... done
[node 3] retrieving SQL data for crdb_internal.node_txn_stats... writing output: debug/nodes/3/crdb_internal.node_txn_stats.txt... done
[node 3] requesting data for debug/nodes/3/details... received response... converting to JSON... writing binary output: debug/nodes/3/details.json... done
[node 3] requesting data for debug/nodes/3/gossip... received response... converting to JSON... writing binary output: debug/nodes/3/gossip.json... done
[node 3] requesting data for debug/nodes/3/enginestats... received response... converting to JSON... writing binary output: debug/nodes/3/enginestats.json... done
[node 3] requesting stacks... received response... writing binary output: debug/nodes/3/stacks.txt... done
[node 3] requesting stacks with labels... received response... writing binary output: debug/nodes/3/stacks_with_labels.txt... done
[node 3] requesting heap profile... received response... writing binary output: debug/nodes/3/heap.pprof... done
[node 3] requesting heap file list... received response...
[node 3] requesting heap file list: last request failed: rpc error: ...
[node 3] requesting heap file list: creating error output: debug/nodes/3/heapprof.err.txt... done
[node 3] requesting goroutine dump list... received response...
[node 3] requesting goroutine dump list: last request failed: rpc error: ...
[node 3] requesting goroutine dump list: creating error output: debug/nodes/3/goroutines.err.txt... done
[node 3] requesting log file ...
[node 3] 1 log file ...
[node 3] [log file ...
[node 3] requesting ranges... received response... done
[node 3] 54 ranges found
[node 3] writing range 1... converting to JSON... writing binary output: debug/nodes/3/ranges/1.json... done
[node 3] writing range 2... converting to JSON... writing binary output: debug/nodes/3/ranges/2.json... done
[node 3] writing range 3... converting to JSON... writing binary output: debug/nodes/3/ranges/3.json... done
[node 3] writing range 4... converting to JSON... writing binary output: debug/nodes/3/ranges/4.json... done
[node 3] writing range 5... converting to JSON... writing binary output: debug/nodes/3/ranges/5.json... done
[node 3] writing range 6... converting to JSON... writing binary output: debug/nodes/3/ranges/6.json... done
[node 3] writing range 7... converting to JSON... writing binary output: debug/nodes/3/ranges/7.json... done
[node 3] writing range 8... converting to JSON... writing binary output: debug/nodes/3/ranges/8.json... done
[node 3] writing range 9... converting to JSON... writing binary output: debug/nodes/3/ranges/9.json... done
[node 3] writing range 10... converting to JSON... writing binary output: debug/nodes/3/ranges/10.json... done
[node 3] writing range 11... converting to JSON... writing binary output: debug/nodes/3/ranges/11.json... done
[node 3] writing range 12... converting to JSON... writing binary output: debug/nodes/3/ranges/12.json... done
[node 3] writing range 13... converting to JSON... writing binary output: debug/nodes/3/ranges/13.json... done
[node 3] writing range 14... converting to JSON... writing binary output: debug/nodes/3/ranges/14.json... done
[node 3] writing range 15... converting to JSON... writing binary output: debug/nodes/3/ranges/15.json... done
[node 3] writing range 16... converting to JSON... writing binary output: debug/nodes/3/ranges/16.json... done
[node 3] writing range 17... converting to JSON... writing binary output: debug/nodes/3/ranges/17.json... done
[node 3] writing range 18... converting to JSON... writing binary output: debug/nodes/3/ranges/18.json... done
[node 3] writing range 19... converting to JSON... writing binary output: debug/nodes/3/ranges/19.json... done
[node 3] writing range 20... converting to JSON... writing binary output: debug/nodes/3/ranges/20.json... done
[node 3] writing range 21... converting to JSON... writing binary output: debug/nodes/3/ranges/21.json... done
[node 3] writing range 22... converting to JSON... writing binary output: debug/nodes/3/ranges/22.json... done
[node 3] writing range 23... converting to JSON... writing binary output: debug/nodes/3/ranges/23.json... done
[node 3] writing range 24... converting to JSON... writing binary output: debug/nodes/3/ranges/24.json... done
[node 3] writing range 25... converting to JSON... writing binary output: debug/nodes/3/ranges/25.json... done
[node 3] writing range 26... converting to JSON... writing binary output: debug/nodes/3/ranges/26.json... done
[node 3] writing range 27... converting to JSON... writing binary output: debug/nodes/3/ranges/27.json... done
[node 3] writing range 28... converting to JSON... writing binary output: debug/nodes/3/ranges/28.json... done
[node 3] writing range 29... converting to JSON... writing binary output: debug/nodes/3/ranges/29.json... done
[node 3] writing range 30... converting to JSON... writing binary output: debug/nodes/3/ranges/30.json... done
[node 3] writing range 31... converting to JSON... writing binary output: debug/nodes/3/ranges/31.json... done
[node 3] writing range 32... converting to JSON... writing binary output: debug/nodes/3/ranges/32.json... done
[node 3] writing range 33... converting to JSON... writing binary output: debug/nodes/3/ranges/33.json... done
[node 3] writing range 34... converting to JSON... writing binary output: debug/nodes/3/ranges/34.json... done
[node 3] writing range 35... converting to JSON... writing binary output: debug/nodes/3/ranges/35.json... done
[node 3] writing range 36... converting to JSON... writing binary output: debug/nodes/3/ranges/36.json... done
[node 3] writing range 37... converting to JSON... writing binary output: debug/nodes/3/ranges/37.json... done
[node 3] writing range 38... converting to JSON... writing binary output: debug/nodes/3/ranges/38.json... done
[node 3] writing range 39... converting to JSON... writing binary output: debug/nodes/3/ranges/39.json... done
[node 3] writing range 40... converting to JSON... writing binary output: debug/nodes/3/ranges/40.json... done
[node 3] writing range 41... converting to JSON... writing binary output: debug/nodes/3/ranges/41.json... done
[node 3] writing range 42... converting to JSON... writing binary output: debug/nodes/3/ranges/42.json... done
[node 3] writing range 43... converting to JSON... writing binary output: debug/nodes/3/ranges/43.json... done
[node 3] writing range 44... converting to JSON... writing binary output: debug/nodes/3/ranges/44.json... done
[node 3] writing range 45... converting to JSON... writing binary output: debug/nodes/3/ranges/45.json... done
[node 3] writing range 46... converting to JSON... writing binary output: debug/nodes/3/ranges/46.json... done
[node 3] writing range 47... converting to JSON... writing binary output: debug/nodes/3/ranges/47.json... done
[node 3] writing range 48... converting to JSON... writing binary output: debug/nodes/3/ranges/48.json... done
[node 3] writing range 49... converting to JSON... writing binary output: debug/nodes/3/ranges/49.json... done
[node 3] writing range 50... converting to JSON... writing binary output: debug/nodes/3/ranges/50.json... done
[node 3] writing range 51... converting to JSON... writing binary output: debug/nodes/3/ranges/51.json... done
[node 3] writing range 52... converting to JSON... writing binary output: debug/nodes/3/ranges/52.json... done
[node 3] writing range 53... converting to JSON... writing binary output: debug/nodes/3/ranges/53.json... done
[node 3] writing range 54... converting to JSON... writing binary output: debug/nodes/3/ranges/54.json... done
[cluster] pprof summary script... writing binary output: debug/pprof-summary.sh... done
[cluster] hot range summary script... writing binary output: debug/hot-ranges.sh... done
[cluster] tenant hot range summary script... writing binary output: debug/hot-ranges-tenant.sh... done
| pkg/cli/testdata/zip/partial1 | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00021403402206487954,
0.00016940051864366978,
0.00016348871577065438,
0.0001682148576946929,
0.000007954395186970942
] |
{
"id": 4,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/util/stop\"\n",
")\n",
"\n",
"// createDummyStream creates the server and client side of a FlowStream stream.\n",
"// This can be use by tests to pretend that then have received a FlowStream RPC.\n",
"// The stream can be used to send messages (ConsumerSignal's) on it (within a\n",
"// gRPC window limit since nobody's reading from the stream), for example\n",
"// Handshake messages.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// This can be use by tests to pretend that they have received a FlowStream RPC.\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package slprovider exposes an implementation of the sqlliveness.Provider
// interface.
package slprovider
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slinstance"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slstorage"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// New constructs a new Provider.
//
// sessionEvents, if not nil, gets notified of some session state transitions.
func New(
ambientCtx log.AmbientContext,
stopper *stop.Stopper,
clock *hlc.Clock,
db *kv.DB,
codec keys.SQLCodec,
settings *cluster.Settings,
testingKnobs *sqlliveness.TestingKnobs,
sessionEvents slinstance.SessionEventListener,
) sqlliveness.Provider {
storage := slstorage.NewStorage(ambientCtx, stopper, clock, db, codec, settings)
instance := slinstance.NewSQLInstance(stopper, clock, storage, settings, testingKnobs, sessionEvents)
return &provider{
Storage: storage,
Instance: instance,
}
}
type provider struct {
*slstorage.Storage
*slinstance.Instance
}
var _ sqlliveness.Provider = &provider{}
func (p *provider) Start(ctx context.Context, regionPhysicalRep []byte) {
p.Storage.Start(ctx)
p.Instance.Start(ctx, regionPhysicalRep)
}
func (p *provider) Metrics() metric.Struct {
return p.Storage.Metrics()
}
| pkg/sql/sqlliveness/slprovider/slprovider.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0002857382351066917,
0.00020070192113053054,
0.00016508936823811382,
0.00017595215467736125,
0.00004085379987373017
] |
{
"id": 5,
"code_window": [
"// Handshake messages.\n",
"//\n",
"// We do this by creating a mock server, dialing into it and capturing the\n",
"// server stream. The server-side RPC call will be blocked until the caller\n",
"// calls the returned cleanup function.\n",
"func createDummyStream() (\n",
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// calls the returned cleanup function. The cleanup function also \"drains\" the\n",
"// client-side stream.\n",
"func createDummyStream(\n",
"\tt *testing.T,\n",
") (\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 32
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9733074307441711,
0.31883567571640015,
0.00017426611157134175,
0.02203325182199478,
0.4213305115699768
] |
{
"id": 5,
"code_window": [
"// Handshake messages.\n",
"//\n",
"// We do this by creating a mock server, dialing into it and capturing the\n",
"// server stream. The server-side RPC call will be blocked until the caller\n",
"// calls the returned cleanup function.\n",
"func createDummyStream() (\n",
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// calls the returned cleanup function. The cleanup function also \"drains\" the\n",
"// client-side stream.\n",
"func createDummyStream(\n",
"\tt *testing.T,\n",
") (\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 32
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package scpb
// GetTransientEquivalent returns the equivalent TRANSIENT_ Status for a given
// status. If false is returned, there is no equivalent for the provided
// Status.
func GetTransientEquivalent(s Status) (Status, bool) {
equiv, ok := transientEquivalent[s]
return equiv, ok
}
var transientEquivalent = map[Status]Status{
Status_DELETE_ONLY: Status_TRANSIENT_DELETE_ONLY,
Status_WRITE_ONLY: Status_TRANSIENT_WRITE_ONLY,
Status_ABSENT: Status_TRANSIENT_ABSENT,
Status_PUBLIC: Status_TRANSIENT_PUBLIC,
Status_BACKFILL_ONLY: Status_TRANSIENT_BACKFILL_ONLY,
Status_BACKFILLED: Status_TRANSIENT_BACKFILLED,
Status_MERGE_ONLY: Status_TRANSIENT_MERGE_ONLY,
Status_MERGED: Status_TRANSIENT_MERGED,
Status_VALIDATED: Status_TRANSIENT_VALIDATED,
Status_DROPPED: Status_TRANSIENT_DROPPED,
}
| pkg/sql/schemachanger/scpb/transient.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0003173653094563633,
0.00024047339684329927,
0.00016893506108317524,
0.00023779658658895642,
0.00006859130371594802
] |
{
"id": 5,
"code_window": [
"// Handshake messages.\n",
"//\n",
"// We do this by creating a mock server, dialing into it and capturing the\n",
"// server stream. The server-side RPC call will be blocked until the caller\n",
"// calls the returned cleanup function.\n",
"func createDummyStream() (\n",
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// calls the returned cleanup function. The cleanup function also \"drains\" the\n",
"// client-side stream.\n",
"func createDummyStream(\n",
"\tt *testing.T,\n",
") (\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 32
} | load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data")
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
go_library(
name = "whoownsit_lib",
srcs = ["whoownsit.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/cmd/whoownsit",
visibility = ["//visibility:private"],
deps = [
"//pkg/internal/codeowners",
"//pkg/internal/reporoot",
],
)
go_binary(
name = "whoownsit",
embed = [":whoownsit_lib"],
visibility = ["//visibility:public"],
)
get_x_data(name = "get_x_data")
| pkg/cmd/whoownsit/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0001716468104859814,
0.00017066141299437732,
0.00017005795962177217,
0.0001702794397715479,
7.026302455415134e-7
] |
{
"id": 5,
"code_window": [
"// Handshake messages.\n",
"//\n",
"// We do this by creating a mock server, dialing into it and capturing the\n",
"// server stream. The server-side RPC call will be blocked until the caller\n",
"// calls the returned cleanup function.\n",
"func createDummyStream() (\n",
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// calls the returned cleanup function. The cleanup function also \"drains\" the\n",
"// client-side stream.\n",
"func createDummyStream(\n",
"\tt *testing.T,\n",
") (\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 32
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package schemaexpr
import (
"context"
"strconv"
"strings"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/transform"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sem/volatility"
"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/errors"
)
// ValidateComputedColumnExpression verifies that an expression is a valid
// computed column expression. It returns the serialized expression and its type
// if valid, and an error otherwise. The returned type is only useful if d has
// type Any which indicates the expression's type is unknown and does not have
// to match a specific type.
//
// A computed column expression is valid if all of the following are true:
//
// - It does not have a default value.
// - It does not reference other computed columns.
//
// TODO(mgartner): Add unit tests for Validate.
func ValidateComputedColumnExpression(
ctx context.Context,
desc catalog.TableDescriptor,
d *tree.ColumnTableDef,
tn *tree.TableName,
context string,
semaCtx *tree.SemaContext,
) (serializedExpr string, _ *types.T, _ error) {
if d.HasDefaultExpr() {
return "", nil, pgerror.Newf(
pgcode.InvalidTableDefinition,
"%s cannot have default values",
context,
)
}
var depColIDs catalog.TableColSet
// First, check that no column in the expression is an inaccessible or
// computed column.
err := iterColDescriptors(desc, d.Computed.Expr, func(c catalog.Column) error {
if c.IsInaccessible() {
return pgerror.Newf(
pgcode.UndefinedColumn,
"column %q is inaccessible and cannot be referenced in a computed column expression",
c.GetName(),
)
}
if c.IsComputed() {
return pgerror.Newf(
pgcode.InvalidTableDefinition,
"%s expression cannot reference computed columns",
context,
)
}
depColIDs.Add(c.GetID())
return nil
})
if err != nil {
return "", nil, err
}
// Resolve the type of the computed column expression.
defType, err := tree.ResolveType(ctx, d.Type, semaCtx.GetTypeResolver())
if err != nil {
return "", nil, err
}
// Check that the type of the expression is of type defType and that there
// are no variable expressions (besides dummyColumnItems) and no impure
// functions. In order to safely serialize user defined types and their
// members, we need to serialize the typed expression here.
expr, typ, _, err := DequalifyAndValidateExpr(
ctx,
desc,
d.Computed.Expr,
defType,
context,
semaCtx,
volatility.Immutable,
tn,
)
if err != nil {
return "", nil, err
}
// Virtual computed columns must not refer to mutation columns because it
// would not be safe in the case that the mutation column was being
// backfilled and the virtual computed column value needed to be computed
// for the purpose of writing to a secondary index.
if d.IsVirtual() {
var mutationColumnNames []string
var err error
depColIDs.ForEach(func(colID descpb.ColumnID) {
if err != nil {
return
}
var col catalog.Column
if col, err = desc.FindColumnWithID(colID); err != nil {
err = errors.WithAssertionFailure(err)
return
}
if !col.Public() {
mutationColumnNames = append(mutationColumnNames,
strconv.Quote(col.GetName()))
}
})
if err != nil {
return "", nil, err
}
if len(mutationColumnNames) > 0 {
if context == "index element" {
return "", nil, unimplemented.Newf(
"index element expression referencing mutation columns",
"index element expression referencing columns (%s) added in the current transaction",
strings.Join(mutationColumnNames, ", "))
}
return "", nil, unimplemented.Newf(
"virtual computed columns referencing mutation columns",
"virtual computed column %q referencing columns (%s) added in the "+
"current transaction", d.Name, strings.Join(mutationColumnNames, ", "))
}
}
return expr, typ, nil
}
// ValidateColumnHasNoDependents verifies that the input column has no dependent
// computed columns. It returns an error if any existing or ADD mutation
// computed columns reference the given column.
// TODO(mgartner): Add unit tests.
func ValidateColumnHasNoDependents(desc catalog.TableDescriptor, col catalog.Column) error {
for _, c := range desc.NonDropColumns() {
if !c.IsComputed() {
continue
}
expr, err := parser.ParseExpr(c.GetComputeExpr())
if err != nil {
// At this point, we should be able to parse the computed expression.
return errors.WithAssertionFailure(err)
}
err = iterColDescriptors(desc, expr, func(colVar catalog.Column) error {
if colVar.GetID() == col.GetID() {
return sqlerrors.NewColumnReferencedByComputedColumnError(col.GetName(), c.GetName())
}
return nil
})
if err != nil {
return err
}
}
return nil
}
// MakeComputedExprs returns a slice of the computed expressions for the
// slice of input column descriptors, or nil if none of the input column
// descriptors have computed expressions. The caller provides the set of
// sourceColumns to which the expr may refer.
//
// The length of the result slice matches the length of the input column
// descriptors. For every column that has no computed expression, a NULL
// expression is reported.
//
// Note that the order of input is critical. Expressions cannot reference
// columns that come after them in input.
func MakeComputedExprs(
ctx context.Context,
input, sourceColumns []catalog.Column,
tableDesc catalog.TableDescriptor,
tn *tree.TableName,
evalCtx *eval.Context,
semaCtx *tree.SemaContext,
) (_ []tree.TypedExpr, refColIDs catalog.TableColSet, _ error) {
// Check to see if any of the columns have computed expressions. If there
// are none, we don't bother with constructing the map as the expressions
// are all NULL.
haveComputed := false
for i := range input {
if input[i].IsComputed() {
haveComputed = true
break
}
}
if !haveComputed {
return nil, catalog.TableColSet{}, nil
}
// Build the computed expressions map from the parsed statement.
computedExprs := make([]tree.TypedExpr, 0, len(input))
exprStrings := make([]string, 0, len(input))
for _, col := range input {
if col.IsComputed() {
exprStrings = append(exprStrings, col.GetComputeExpr())
}
}
exprs, err := parser.ParseExprs(exprStrings)
if err != nil {
return nil, catalog.TableColSet{}, err
}
nr := newNameResolver(evalCtx, tableDesc.GetID(), tn, sourceColumns)
nr.addIVarContainerToSemaCtx(semaCtx)
var txCtx transform.ExprTransformContext
compExprIdx := 0
for _, col := range input {
if !col.IsComputed() {
computedExprs = append(computedExprs, tree.DNull)
nr.addColumn(col)
continue
}
// Collect all column IDs that are referenced in the partial index
// predicate expression.
colIDs, err := ExtractColumnIDs(tableDesc, exprs[compExprIdx])
if err != nil {
return nil, refColIDs, err
}
refColIDs.UnionWith(colIDs)
expr, err := nr.resolveNames(exprs[compExprIdx])
if err != nil {
return nil, catalog.TableColSet{}, err
}
typedExpr, err := tree.TypeCheck(ctx, expr, semaCtx, col.GetType())
if err != nil {
return nil, catalog.TableColSet{}, err
}
if typedExpr, err = txCtx.NormalizeExpr(ctx, evalCtx, typedExpr); err != nil {
return nil, catalog.TableColSet{}, err
}
computedExprs = append(computedExprs, typedExpr)
compExprIdx++
nr.addColumn(col)
}
return computedExprs, refColIDs, nil
}
| pkg/sql/catalog/schemaexpr/computed_column.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0006263030809350312,
0.00023148121545091271,
0.00016654682985972613,
0.0001753466931404546,
0.00013147658319212496
] |
{
"id": 6,
"code_window": [
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n",
"\terr error,\n",
") {\n",
"\tstopper := stop.NewStopper()\n",
"\tctx := context.Background()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 37
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9985670447349548,
0.5490066409111023,
0.00017707677034195513,
0.8481491208076477,
0.47748300433158875
] |
{
"id": 6,
"code_window": [
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n",
"\terr error,\n",
") {\n",
"\tstopper := stop.NewStopper()\n",
"\tctx := context.Background()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 37
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package streamingtest
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/stretchr/testify/require"
)
// EncodeKV encodes primary key with the specified "values". Values must be
// specified in the same order as the columns in the primary family.
func EncodeKV(
t *testing.T, codec keys.SQLCodec, descr catalog.TableDescriptor, pkeyVals ...interface{},
) roachpb.KeyValue {
require.Equal(t, 1, descr.NumFamilies(), "there can be only one")
primary := descr.GetPrimaryIndex()
require.LessOrEqual(t, primary.NumKeyColumns(), len(pkeyVals))
var datums tree.Datums
var colMap catalog.TableColMap
for i, val := range pkeyVals {
datums = append(datums, nativeToDatum(t, val))
col, err := descr.FindColumnWithID(descpb.ColumnID(i + 1))
require.NoError(t, err)
colMap.Set(col.GetID(), col.Ordinal())
}
const includeEmpty = true
indexEntries, err := rowenc.EncodePrimaryIndex(codec, descr, primary,
colMap, datums, includeEmpty)
require.NoError(t, err)
require.Equal(t, 1, len(indexEntries))
indexEntries[0].Value.InitChecksum(indexEntries[0].Key)
return roachpb.KeyValue{Key: indexEntries[0].Key, Value: indexEntries[0].Value}
}
func nativeToDatum(t *testing.T, native interface{}) tree.Datum {
t.Helper()
switch v := native.(type) {
case bool:
return tree.MakeDBool(tree.DBool(v))
case int:
return tree.NewDInt(tree.DInt(v))
case string:
return tree.NewDString(v)
case nil:
return tree.DNull
case tree.Datum:
return v
default:
t.Fatalf("unexpected value type %T", v)
return nil
}
}
| pkg/ccl/streamingccl/streamingtest/encoding.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.000176616755197756,
0.00017314557044301182,
0.00016555596084799618,
0.00017505150754004717,
0.000004129182798351394
] |
{
"id": 6,
"code_window": [
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n",
"\terr error,\n",
") {\n",
"\tstopper := stop.NewStopper()\n",
"\tctx := context.Background()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 37
} | # LogicTest: local
statement ok
CREATE TABLE kv (
k INT PRIMARY KEY,
v INT
)
# Use implicit target columns (which can use blind KV Put).
query T
EXPLAIN (VERBOSE) UPSERT INTO kv TABLE kv ORDER BY v DESC LIMIT 2
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: kv(k, v)
│ auto commit
│
└── • project
│ columns: (k, v, v)
│
└── • top-k
│ columns: (k, v)
│ ordering: -v
│ estimated row count: 2 (missing stats)
│ order: -v
│ k: 2
│
└── • scan
columns: (k, v)
estimated row count: 1,000 (missing stats)
table: kv@kv_pkey
spans: FULL SCAN
# Use explicit target columns (which can use blind KV Put).
query T
EXPLAIN (VERBOSE) UPSERT INTO kv (k, v) TABLE kv ORDER BY v DESC LIMIT 2
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: kv(k, v)
│ auto commit
│
└── • project
│ columns: (k, v, v)
│
└── • top-k
│ columns: (k, v)
│ ordering: -v
│ estimated row count: 2 (missing stats)
│ order: -v
│ k: 2
│
└── • scan
columns: (k, v)
estimated row count: 1,000 (missing stats)
table: kv@kv_pkey
spans: FULL SCAN
# Add RETURNING clause (should still use blind KV Put).
query T
EXPLAIN (VERBOSE) UPSERT INTO kv (k, v) TABLE kv ORDER BY v DESC LIMIT 2 RETURNING *
----
distribution: local
vectorized: true
·
• upsert
│ columns: (k, v)
│ estimated row count: 2 (missing stats)
│ into: kv(k, v)
│ auto commit
│
└── • project
│ columns: (k, v, v)
│
└── • top-k
│ columns: (k, v)
│ ordering: -v
│ estimated row count: 2 (missing stats)
│ order: -v
│ k: 2
│
└── • scan
columns: (k, v)
estimated row count: 1,000 (missing stats)
table: kv@kv_pkey
spans: FULL SCAN
# Use subset of explicit target columns (which cannot use blind KV Put).
query T
EXPLAIN (VERBOSE) UPSERT INTO kv (k) SELECT k FROM kv ORDER BY v DESC LIMIT 2
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: kv(k, v)
│ auto commit
│ arbiter indexes: kv_pkey
│
└── • lookup join (inner)
│ columns: (k, v_default, k)
│ estimated row count: 2 (missing stats)
│ table: kv@kv_pkey
│ equality: (k) = (k)
│ equality cols are key
│
└── • distinct
│ columns: (v_default, k)
│ estimated row count: 2 (missing stats)
│ distinct on: k
│ nulls are distinct
│ error on duplicate
│
└── • render
│ columns: (v_default, k)
│ render v_default: CAST(NULL AS INT8)
│ render k: k
│
└── • top-k
│ columns: (k, v)
│ ordering: -v
│ estimated row count: 2 (missing stats)
│ order: -v
│ k: 2
│
└── • scan
columns: (k, v)
estimated row count: 1,000 (missing stats)
table: kv@kv_pkey
spans: FULL SCAN
# Use Upsert with indexed table, default columns, computed columns, and check
# columns.
statement ok
CREATE TABLE indexed (
a INT PRIMARY KEY,
b INT,
c INT DEFAULT(10),
d INT AS (a + c) STORED,
FAMILY (a, b, c, d),
UNIQUE INDEX secondary (d, b),
CHECK (c > 0)
)
# Should fetch existing values since there is a secondary index.
query T
EXPLAIN (VERBOSE) UPSERT INTO indexed VALUES (1)
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: indexed(a, b, c, d)
│ auto commit
│ arbiter indexes: indexed_pkey
│
└── • project
│ columns: (column1, b_default, c_default, d_comp, a, b, c, d, b_default, c_default, d_comp, a, check1)
│
└── • render
│ columns: (check1, column1, b_default, c_default, d_comp, a, b, c, d)
│ render check1: c_default > 0
│ render column1: column1
│ render b_default: b_default
│ render c_default: c_default
│ render d_comp: d_comp
│ render a: a
│ render b: b
│ render c: c
│ render d: d
│
└── • cross join (left outer)
│ columns: (column1, b_default, c_default, d_comp, a, b, c, d)
│ estimated row count: 1 (missing stats)
│
├── • values
│ columns: (column1, b_default, c_default, d_comp)
│ size: 4 columns, 1 row
│ row 0, expr 0: 1
│ row 0, expr 1: CAST(NULL AS INT8)
│ row 0, expr 2: 10
│ row 0, expr 3: 11
│
└── • scan
columns: (a, b, c, d)
estimated row count: 1 (missing stats)
table: indexed@indexed_pkey
spans: /1/0
locking strength: for update
query T
EXPLAIN (VERBOSE) UPSERT INTO indexed VALUES (1), (2), (3), (4)
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: indexed(a, b, c, d)
│ auto commit
│ arbiter indexes: indexed_pkey
│
└── • project
│ columns: (column1, b_default, c_default, d_comp, a, b, c, d, b_default, c_default, d_comp, a, check1)
│
└── • render
│ columns: (check1, column1, b_default, c_default, d_comp, a, b, c, d)
│ render check1: c_default > 0
│ render column1: column1
│ render b_default: b_default
│ render c_default: c_default
│ render d_comp: d_comp
│ render a: a
│ render b: b
│ render c: c
│ render d: d
│
└── • lookup join (left outer)
│ columns: (d_comp, b_default, c_default, column1, a, b, c, d)
│ estimated row count: 4 (missing stats)
│ table: indexed@indexed_pkey
│ equality: (column1) = (a)
│ equality cols are key
│ locking strength: for update
│
└── • render
│ columns: (d_comp, b_default, c_default, column1)
│ render d_comp: column1 + 10
│ render b_default: CAST(NULL AS INT8)
│ render c_default: 10
│ render column1: column1
│
└── • values
columns: (column1)
size: 1 column, 4 rows
row 0, expr 0: 1
row 1, expr 0: 2
row 2, expr 0: 3
row 3, expr 0: 4
query T
EXPLAIN (VERBOSE)
INSERT INTO indexed
VALUES (1, 2, 3)
ON CONFLICT (a)
DO UPDATE SET b = 2, c = 3
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: indexed(a, b, c, d)
│ auto commit
│ arbiter indexes: indexed_pkey
│
└── • project
│ columns: (column1, column2, column3, d_comp, a, b, c, d, upsert_b, upsert_c, upsert_d, a, check1)
│
└── • render
│ columns: (check1, column1, column2, column3, d_comp, a, b, c, d, upsert_b, upsert_c, upsert_d)
│ render check1: upsert_c > 0
│ render column1: column1
│ render column2: column2
│ render column3: column3
│ render d_comp: d_comp
│ render a: a
│ render b: b
│ render c: c
│ render d: d
│ render upsert_b: upsert_b
│ render upsert_c: upsert_c
│ render upsert_d: upsert_d
│
└── • render
│ columns: (upsert_b, upsert_c, upsert_d, column1, column2, column3, d_comp, a, b, c, d)
│ render upsert_b: CASE WHEN a IS NULL THEN column2 ELSE 2 END
│ render upsert_c: CASE WHEN a IS NULL THEN column3 ELSE 3 END
│ render upsert_d: CASE WHEN a IS NULL THEN d_comp ELSE a + 3 END
│ render column1: column1
│ render column2: column2
│ render column3: column3
│ render d_comp: d_comp
│ render a: a
│ render b: b
│ render c: c
│ render d: d
│
└── • cross join (left outer)
│ columns: (column1, column2, column3, d_comp, a, b, c, d)
│ estimated row count: 1 (missing stats)
│
├── • values
│ columns: (column1, column2, column3, d_comp)
│ size: 4 columns, 1 row
│ row 0, expr 0: 1
│ row 0, expr 1: 2
│ row 0, expr 2: 3
│ row 0, expr 3: 4
│
└── • scan
columns: (a, b, c, d)
estimated row count: 1 (missing stats)
table: indexed@indexed_pkey
spans: /1/0
locking strength: for update
# Drop index and verify that existing values no longer need to be fetched.
statement ok
DROP INDEX indexed@secondary CASCADE
query T
EXPLAIN (VERBOSE) UPSERT INTO indexed VALUES (1) RETURNING *
----
distribution: local
vectorized: true
·
• upsert
│ columns: (a, b, c, d)
│ estimated row count: 1
│ into: indexed(a, b, c, d)
│ auto commit
│
└── • project
│ columns: (column1, b_default, c_default, d_comp, b_default, c_default, d_comp, check1)
│
└── • values
columns: (column1, b_default, c_default, d_comp, check1)
size: 5 columns, 1 row
row 0, expr 0: 1
row 0, expr 1: CAST(NULL AS INT8)
row 0, expr 2: 10
row 0, expr 3: 11
row 0, expr 4: true
subtest regression_32473
statement ok
CREATE TABLE customers (
customer_id serial PRIMARY KEY,
name VARCHAR UNIQUE,
email VARCHAR NOT NULL
);
statement ok
INSERT INTO customers (name, email) VALUES ('bob', '[email protected]') ON CONFLICT (name)
DO UPDATE SET (name, email) = (
SELECT 'bob', '[email protected]'
)
query TT
SELECT name, email FROM customers
----
bob [email protected]
# This statement only works with the optimizer enabled.
statement ok
INSERT INTO customers (name, email) VALUES ('bob', '[email protected]') ON CONFLICT (name)
DO UPDATE SET (name, email) = (
SELECT 'bob2', '[email protected]'
)
query TT
SELECT name, email FROM customers
----
bob2 [email protected]
statement ok
DROP TABLE customers
# The CBO behaves differently than the HP and PG in this case. It only checks
# constraints if an insert or update actually occurs. In this case, the DO
# NOTHING clause skips the update, so there is no need to check the constraint.
statement ok
CREATE TABLE t5 (k INT PRIMARY KEY, a INT, b int CHECK (a > b))
statement ok
INSERT INTO t5 VALUES (1, 10, 9) ON CONFLICT (k) DO NOTHING
statement ok
INSERT INTO t5 VALUES (1, 10, 20) ON CONFLICT (k) DO NOTHING
# Regression test for #35564: make sure we use the Upsert's input required
# ordering for the internal projection.
statement ok
CREATE TABLE abc (a INT, b INT, c INT, INDEX(c) STORING(a,b))
statement ok
CREATE TABLE xyz (x INT, y INT, z INT)
query T
EXPLAIN (VERBOSE) SELECT * FROM [UPSERT INTO xyz SELECT a, b, c FROM abc RETURNING z] ORDER BY z
----
distribution: local
vectorized: true
·
• root
│ columns: (z)
│
├── • sort
│ │ columns: (z)
│ │ ordering: +z
│ │ estimated row count: 1,000 (missing stats)
│ │ order: +z
│ │
│ └── • scan buffer
│ columns: (z)
│ estimated row count: 1,000 (missing stats)
│ label: buffer 1
│
└── • subquery
│ id: @S1
│ original sql: UPSERT INTO xyz SELECT a, b, c FROM abc RETURNING z
│ exec mode: all rows
│
└── • buffer
│ columns: (z)
│ label: buffer 1
│
└── • project
│ columns: (z)
│
└── • upsert
│ columns: (z, rowid)
│ estimated row count: 1,000 (missing stats)
│ into: xyz(x, y, z, rowid)
│
└── • project
│ columns: (a, b, c, rowid_default, a, b, c)
│
└── • render
│ columns: (rowid_default, a, b, c)
│ render rowid_default: unique_rowid()
│ render a: a
│ render b: b
│ render c: c
│
└── • scan
columns: (a, b, c)
estimated row count: 1,000 (missing stats)
table: abc@abc_pkey
spans: FULL SCAN
# ------------------------------------------------------------------------------
# Regression for #35364. This tests behavior that is different between the CBO
# and the HP. The CBO will (deliberately) round any input columns *before*
# evaluating any computed columns, as well as rounding the output.
# ------------------------------------------------------------------------------
statement ok
CREATE TABLE t35364(
x DECIMAL(10,0) CHECK(round(x) = x) PRIMARY KEY,
y DECIMAL(10,0) DEFAULT (1.5),
z DECIMAL(10,0) AS (x+y+2.5) STORED CHECK(z >= 7)
)
query TTT
UPSERT INTO t35364 (x) VALUES (1.5) RETURNING *
----
2 2 7
query TTT
UPSERT INTO t35364 (x, y) VALUES (1.5, 2.5) RETURNING *
----
2 3 8
query TTT
INSERT INTO t35364 (x) VALUES (1.5) ON CONFLICT (x) DO UPDATE SET x=2.5 RETURNING *
----
3 3 9
statement error pq: failed to satisfy CHECK constraint \(z >= 7:::DECIMAL\)
UPSERT INTO t35364 (x) VALUES (0)
# ------------------------------------------------------------------------------
# Regression for #38627. Combined with the equivalent logic test, make sure that
# UPSERT in the presence of column mutations uses a lookup join without a
# problem.
# ------------------------------------------------------------------------------
statement ok
CREATE TABLE table38627 (a INT PRIMARY KEY, b INT, FAMILY (a, b)); INSERT INTO table38627 VALUES(1,1)
statement ok
BEGIN; ALTER TABLE table38627 ADD COLUMN c INT NOT NULL DEFAULT 5
query T
EXPLAIN (VERBOSE) UPSERT INTO table38627 SELECT * FROM table38627 WHERE a=1
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: table38627(a, b)
│ arbiter indexes: table38627_pkey
│
└── • project
│ columns: (a, b, a, b, c, b, a)
│
└── • lookup join (inner)
│ columns: (a, b, a, b, c)
│ estimated row count: 1 (missing stats)
│ table: table38627@table38627_pkey
│ equality: (a) = (a)
│ equality cols are key
│
└── • scan
columns: (a, b)
estimated row count: 1 (missing stats)
table: table38627@table38627_pkey
spans: /1/0
statement ok
COMMIT
# ------------------------------------------------------------------------------
# Show UPSERT plans with Distinct execution operator.
# ------------------------------------------------------------------------------
statement ok
CREATE TABLE tdup (x INT PRIMARY KEY, y INT, z INT, UNIQUE (y, z))
# Show unsorted upsert-distinct-on. Plan should not contain "order key".
# Ensure this test stays synchronized to the test in logic_test/upsert.
query T
EXPLAIN (VERBOSE)
INSERT INTO tdup VALUES (2, 2, 2), (3, 2, 2) ON CONFLICT (z, y) DO UPDATE SET z=1
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: tdup(x, y, z)
│ auto commit
│ arbiter indexes: tdup_y_z_key
│
└── • project
│ columns: (column1, column2, column3, x, y, z, upsert_z, x)
│
└── • render
│ columns: (upsert_z, column1, column2, column3, x, y, z)
│ render upsert_z: CASE WHEN x IS NULL THEN column3 ELSE 1 END
│ render column1: column1
│ render column2: column2
│ render column3: column3
│ render x: x
│ render y: y
│ render z: z
│
└── • lookup join (left outer)
│ columns: (column1, column2, column3, x, y, z)
│ estimated row count: 2 (missing stats)
│ table: tdup@tdup_y_z_key
│ equality: (column2, column3) = (y,z)
│ equality cols are key
│
└── • distinct
│ columns: (column1, column2, column3)
│ estimated row count: 2
│ distinct on: column2, column3
│ nulls are distinct
│ error on duplicate
│
└── • values
columns: (column1, column2, column3)
size: 3 columns, 2 rows
row 0, expr 0: 2
row 0, expr 1: 2
row 0, expr 2: 2
row 1, expr 0: 3
row 1, expr 1: 2
row 1, expr 2: 2
statement ok
CREATE TABLE target (a INT PRIMARY KEY, b INT, c INT, UNIQUE (b, c))
statement ok
CREATE TABLE source (x INT PRIMARY KEY, y INT, z INT, INDEX (y, z))
# Show sorted upsert-distinct-on. "order key = y, z" should be set below.
# Ensure this test stays synchronized to the test in logic_test/upsert.
query T
EXPLAIN (VERBOSE)
INSERT INTO target SELECT x, y, z FROM source WHERE (y IS NULL OR y > 0) AND x <> 1
ON CONFLICT (b, c) DO UPDATE SET b=5
----
distribution: local
vectorized: true
·
• upsert
│ columns: ()
│ estimated row count: 0 (missing stats)
│ into: target(a, b, c)
│ auto commit
│ arbiter indexes: target_b_c_key
│
└── • project
│ columns: (x, y, z, a, b, c, upsert_b, a)
│
└── • render
│ columns: (upsert_b, x, y, z, a, b, c)
│ render upsert_b: CASE WHEN a IS NULL THEN y ELSE 5 END
│ render x: x
│ render y: y
│ render z: z
│ render a: a
│ render b: b
│ render c: c
│
└── • merge join (right outer)
│ columns: (a, b, c, x, y, z)
│ estimated row count: 311 (missing stats)
│ equality: (b, c) = (y, z)
│ merge ordering: +"(b=y)",+"(c=z)"
│
├── • scan
│ columns: (a, b, c)
│ ordering: +b,+c
│ estimated row count: 1,000 (missing stats)
│ table: target@target_b_c_key
│ spans: FULL SCAN
│
└── • distinct
│ columns: (x, y, z)
│ ordering: +y,+z
│ estimated row count: 311 (missing stats)
│ distinct on: y, z
│ nulls are distinct
│ error on duplicate
│ order key: y, z
│
└── • filter
│ columns: (x, y, z)
│ ordering: +y,+z
│ estimated row count: 311 (missing stats)
│ filter: x != 1
│
└── • scan
columns: (x, y, z)
ordering: +y,+z
estimated row count: 333 (missing stats)
table: source@source_y_z_idx
spans: /NULL-/!NULL /1-
# Regression test for #25726.
# UPSERT over tables with column families, on the fast path, uses the
# INSERT logic. This has special casing for column families of 1
# column, and another special casing for column families of 2+
# columns. The special casing is only for families that do not include
# the primary key. So we need a table with 3 families: 1 for the PK, 1
# with just 1 col, and 1 with 2+ cols.
statement ok
CREATE TABLE tu (a INT PRIMARY KEY, b INT, c INT, d INT, FAMILY (a), FAMILY (b), FAMILY (c,d));
INSERT INTO tu VALUES (1, 2, 3, 4)
# Force the leasing of the `tu` descriptor, otherwise the trace below is
# polluted.
statement ok
SELECT 1 FROM tu;
statement ok
SET tracing = on,kv,results; UPSERT INTO tu VALUES (1, NULL, NULL, NULL); SET tracing = off
query T
SELECT message FROM [SHOW KV TRACE FOR SESSION]
WHERE operation != 'dist sender send'
----
Put /Table/117/1/1/0 -> /TUPLE/
Del /Table/117/1/1/1/1
Del /Table/117/1/1/2/1
fast path completed
rows affected: 1
# KV operations.
statement ok
CREATE DATABASE t; CREATE TABLE t.kv(k INT PRIMARY KEY, v INT, FAMILY "primary" (k, v))
statement ok
CREATE UNIQUE INDEX woo ON t.kv(v)
statement ok
SET tracing = on,kv,results; UPSERT INTO t.kv(k, v) VALUES (2,3); SET tracing = off
query TT
SELECT operation, message FROM [SHOW KV TRACE FOR SESSION]
WHERE operation != 'dist sender send' AND operation != 'kv.DistSender: sending partial batch'
----
colbatchscan Scan /Table/120/1/2/0
count CPut /Table/120/1/2/0 -> /TUPLE/2:2:Int/3
count InitPut /Table/120/2/3/0 -> /BYTES/0x8a
count fast path completed
sql query rows affected: 1
statement ok
SET tracing = on,kv,results; UPSERT INTO t.kv(k, v) VALUES (1,2); SET tracing = off
query TT
SELECT operation, message FROM [SHOW KV TRACE FOR SESSION]
WHERE operation != 'dist sender send' AND operation != 'kv.DistSender: sending partial batch'
----
colbatchscan Scan /Table/120/1/1/0
count CPut /Table/120/1/1/0 -> /TUPLE/2:2:Int/2
count InitPut /Table/120/2/2/0 -> /BYTES/0x89
count fast path completed
sql query rows affected: 1
statement error duplicate key value
SET tracing = on,kv,results; UPSERT INTO t.kv(k, v) VALUES (2,2); SET tracing = off
query TT
set tracing=off;
SELECT operation, message FROM [SHOW KV TRACE FOR SESSION]
WHERE operation != 'dist sender send' AND operation != 'kv.DistSender: sending partial batch'
----
colbatchscan Scan /Table/120/1/2/0
colbatchscan fetched: /kv/kv_pkey/2/v -> /3
count Put /Table/120/1/2/0 -> /TUPLE/2:2:Int/2
count Del /Table/120/2/3/0
count CPut /Table/120/2/2/0 -> /BYTES/0x8a (expecting does not exist)
sql query execution failed after 0 rows: duplicate key value violates unique constraint "woo"
| pkg/sql/opt/exec/execbuilder/testdata/upsert | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0002252771082567051,
0.00017199874855577946,
0.0001624822907615453,
0.00017135395319201052,
0.000007906897735665552
] |
{
"id": 6,
"code_window": [
"\tserverStream execinfrapb.DistSQL_FlowStreamServer,\n",
"\tclientStream execinfrapb.DistSQL_FlowStreamClient,\n",
"\tcleanup func(),\n",
"\terr error,\n",
") {\n",
"\tstopper := stop.NewStopper()\n",
"\tctx := context.Background()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 37
} | # The MVCC merge operator is non-transactional and stores data in the
# intent, so that subsequent reads disregard the MVCC values stored on
# the same key.
#
# This is a very unique feature and is currently only used in CockroachDB's
# built-in timeseries database.
run ok
with t=A
txn_begin ts=11
put k=a v=abc resolve
txn_remove
----
>> at end:
data: "a"/11.000000000,0 -> /BYTES/abc
# Merge appends data in the intent and ignores the regular k/v pairs.
run trace ok
merge k=a v=def ts=22
merge k=a v=ghi ts=22
----
>> merge k=a v=def ts=22
meta: "a"/0,0 -> txn={<nil>} ts=0,0 del=false klen=0 vlen=0 raw=/BYTES/def mergeTs=<nil> txnDidNotUpdateMeta=false
data: "a"/11.000000000,0 -> /BYTES/abc
>> merge k=a v=ghi ts=22
meta: "a"/0,0 -> txn={<nil>} ts=0,0 del=false klen=0 vlen=0 raw=/BYTES/defghi mergeTs=<nil> txnDidNotUpdateMeta=false
data: "a"/11.000000000,0 -> /BYTES/abc
# After a merge, only the data in the intent is ever used.
run ok
with t=A
txn_begin ts=33
get k=a
----
get: "a" -> /BYTES/defghi @0,0
>> at end:
txn: "A" meta={id=00000000 key=/Min pri=0.00000000 epo=0 ts=33.000000000,0 min=0,0 seq=0} lock=true stat=PENDING rts=33.000000000,0 wto=false gul=0,0
| pkg/storage/testdata/mvcc_histories/merges | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017351155111100525,
0.00017182083684019744,
0.00017035624478012323,
0.00017170778301078826,
0.0000011215767017347389
] |
{
"id": 7,
"code_window": [
"\tctx := context.Background()\n",
"\tclock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)\n",
"\tstorageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 44
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
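//
// A minimal usage sketch (assuming a *testing.T named t is in scope in the
// calling test):
//
//	serverStream, clientStream, cleanup, err := createDummyStream()
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer cleanup()
//	// clientStream can now send Handshake or other ConsumerSignal messages;
//	// serverStream stands in for the inbound stream under test.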
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9981603026390076,
0.2856783866882324,
0.0001621158153284341,
0.00017821912479121238,
0.44999822974205017
] |
{
"id": 7,
"code_window": [
"\tctx := context.Background()\n",
"\tclock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)\n",
"\tstorageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 44
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package timeutil
// FullTimeFormat is the time format used to display any unknown timestamp
// type, and always shows the full time zone offset.
const FullTimeFormat = "2006-01-02 15:04:05.999999-07:00:00"
// TimestampWithTZFormat is the time format used to display
// timestamps with a time zone offset. The minutes and seconds
// offsets are only added if they are non-zero.
const TimestampWithTZFormat = "2006-01-02 15:04:05.999999-07"
// TimestampWithoutTZFormat is the time format used to display
// timestamps without a time zone offset. The minutes and seconds
// offsets are only added if they are non-zero.
const TimestampWithoutTZFormat = "2006-01-02 15:04:05.999999"
// TimeWithTZFormat is the time format used to display a time
// with a time zone offset.
const TimeWithTZFormat = "15:04:05.999999-07"
// TimeWithoutTZFormat is the time format used to display a time
// without a time zone offset.
const TimeWithoutTZFormat = "15:04:05.999999"
// DateFormat is the time format used to display a date.
const DateFormat = "2006-01-02"
| pkg/util/timeutil/timeutil.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00020725095237139612,
0.0001816702279029414,
0.0001639493857510388,
0.00017774029402062297,
0.00001583237281010952
] |
{
"id": 7,
"code_window": [
"\tctx := context.Background()\n",
"\tclock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)\n",
"\tstorageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 44
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package bulk
import (
"time"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/metric"
)
// Metrics contains pointers to the metrics for
// monitoring bulk operations.
type Metrics struct {
MaxBytesHist *metric.Histogram
CurBytesCount *metric.Gauge
}
// MetricStruct implements the metrics.Struct interface.
func (Metrics) MetricStruct() {}
var _ metric.Struct = Metrics{}
var (
metaMemMaxBytes = metric.Metadata{
Name: "sql.mem.bulk.max",
Help: "Memory usage per sql statement for bulk operations",
Measurement: "Memory",
Unit: metric.Unit_BYTES,
}
metaMemCurBytes = metric.Metadata{
Name: "sql.mem.bulk.current",
Help: "Current sql statement memory usage for bulk operations",
Measurement: "Memory",
Unit: metric.Unit_BYTES,
}
)
// MakeBulkMetrics instantiates the metrics holder for bulk operation monitoring.
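//
// A usage sketch (the registry and window are illustrative; any
// *metric.Registry and histogram window duration will do):
//
//	m := MakeBulkMetrics(10 * time.Second)
//	registry.AddMetricStruct(m)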
func MakeBulkMetrics(histogramWindow time.Duration) Metrics {
return Metrics{
MaxBytesHist: metric.NewHistogram(metaMemMaxBytes, histogramWindow, metric.MemoryUsage64MBBuckets),
CurBytesCount: metric.NewGauge(metaMemCurBytes),
}
}
type sz int64
func (b sz) String() string { return string(humanizeutil.IBytes(int64(b))) }
func (b sz) SafeValue() {}
type timing time.Duration
func (t timing) String() string { return time.Duration(t).Round(time.Second).String() }
func (t timing) SafeValue() {}
| pkg/kv/bulk/bulk_metrics.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0001787460205378011,
0.00017023437249008566,
0.0001591839682077989,
0.0001722040178719908,
0.000007567438842670526
] |
{
"id": 7,
"code_window": [
"\tctx := context.Background()\n",
"\tclock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)\n",
"\tstorageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 44
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import React from "react";
import { Anchor, Tooltip, Text } from "src/components";
import { nodeLivenessIssues, howItWork, capacityMetrics } from "src/util/docs";
import { LivenessStatus } from "src/redux/nodes";
import { NodeStatusRow } from "src/views/cluster/containers/nodesOverview/index";
import { AggregatedNodeStatus } from ".";
import { TooltipProps } from "src/components/tooltip/tooltip";
export const getStatusDescription = (status: LivenessStatus) => {
switch (status) {
case LivenessStatus.NODE_STATUS_LIVE:
return (
<div className="tooltip__table--title">
<p>
{"This node is online and updating its "}
<Anchor href={nodeLivenessIssues} target="_blank">
liveness record
</Anchor>
.
</p>
</div>
);
case LivenessStatus.NODE_STATUS_UNKNOWN:
case LivenessStatus.NODE_STATUS_UNAVAILABLE:
return (
<div className="tooltip__table--title">
<p>
{"This node has an "}
<Anchor href={nodeLivenessIssues} target="_blank">
unavailable liveness
</Anchor>
{" status."}
</p>
</div>
);
case LivenessStatus.NODE_STATUS_DEAD:
return (
<div className="tooltip__table--title">
<p>
{"This node has not updated its "}
<Anchor href={nodeLivenessIssues} target="_blank">
liveness record
</Anchor>
{" for 5 minutes. CockroachDB "}
<Anchor href={howItWork} target="_blank">
automatically rebalances replicas
</Anchor>
{" from dead nodes to live nodes."}
</p>
</div>
);
case LivenessStatus.NODE_STATUS_DECOMMISSIONING:
return (
<div className="tooltip__table--title">
<p>
{"This node is in the "}
<Anchor href={howItWork} target="_blank">
process of decommissioning
</Anchor>
{
" , and may need time to transfer its data to other nodes. When finished, the node will appear below in the list of decommissioned nodes."
}
</p>
</div>
);
default:
return (
"This node has not recently reported as being live. " +
"It may not be functioning correctly, but no automatic action has yet been taken."
);
}
};
export const getNodeStatusDescription = (status: AggregatedNodeStatus) => {
switch (status) {
case AggregatedNodeStatus.LIVE:
return (
<div className="tooltip__table--title">
<p>All nodes in this locality are live.</p>
</div>
);
case AggregatedNodeStatus.WARNING:
return (
<div className="tooltip__table--title">
<p>
This locality has 1 or more <code>SUSPECT</code> or{" "}
<code>DECOMMISSIONING</code> nodes.
</p>
</div>
);
case AggregatedNodeStatus.DEAD:
return (
<div className="tooltip__table--title">
<p>
This locality has 1 or more <code>DEAD</code> nodes.
</p>
</div>
);
default:
return "This node is decommissioned and has been permanently removed from this cluster.";
}
};
type PlainTooltip = React.FC<TooltipProps>;
export const NodeCountTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>Number of nodes in the locality.</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const UptimeTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>Amount of time the node has been running.</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const ReplicasTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>Number of replicas on the node or in the locality.</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const NodelistCapacityUsageTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>
Percentage of usable disk space occupied by CockroachDB data at the
locality or node.
</p>
<p>
<Anchor href={capacityMetrics} target="_blank">
How is this metric calculated?
</Anchor>
</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const MemoryUseTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>
Percentage of total memory at the locality or node in use by
CockroachDB.
</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const CPUsTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>Number of vCPUs on the machine.</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const VersionTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>Build tag of the CockroachDB version installed on the node.</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const StatusTooltip: PlainTooltip = props => (
<Tooltip
{...props}
placement="bottom"
title={
<div className="tooltip__table--title">
<p>
Node status can be live, suspect, dead, decommissioning, or
decommissioned. Hover over the status for each node to learn more.
</p>
</div>
}
>
<span className={"column-title"}>{props.children}</span>
</Tooltip>
);
export const plainNodeTooltips: PlainTooltip[] = [
NodeCountTooltip,
UptimeTooltip,
ReplicasTooltip,
NodelistCapacityUsageTooltip,
MemoryUseTooltip,
CPUsTooltip,
VersionTooltip,
StatusTooltip,
];
export const NodeLocalityColumn: React.FC<{
record: NodeStatusRow;
visible?: boolean;
}> = ({ record: { tiers, region }, ...props }) => {
return (
<Text>
<Tooltip
{...props}
placement={"bottom"}
title={
<div>
{tiers.map((tier, idx) => (
<div key={idx}>{`${tier.key} = ${tier.value}`}</div>
))}
</div>
}
>
{region}
</Tooltip>
</Text>
);
};
| pkg/ui/workspaces/db-console/src/views/cluster/containers/nodesOverview/tooltips.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017920609388966113,
0.00017272743571083993,
0.0001641496055526659,
0.0001724237808957696,
0.0000025776653274078853
] |
{
"id": 8,
"code_window": [
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n",
"\tconn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),\n",
"\t\trpc.DefaultClass).Connect(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 51
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9979923963546753,
0.4276203513145447,
0.00015985385107342154,
0.004252910614013672,
0.49145960807800293
] |
{
"id": 8,
"code_window": [
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n",
"\tconn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),\n",
"\t\trpc.DefaultClass).Connect(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 51
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package uncertainty
import (
"time"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
)
// ComputeInterval returns the provided request's uncertainty interval to be
// used when evaluating under the specified lease.
//
// If the function returns an empty Interval{} then the request should bypass
// all uncertainty checks.
//
// The computation uses observed timestamps gathered from the leaseholder's node
// to limit the interval's local uncertainty limit. This prevents unnecessary
// uncertainty restarts caused by reading a value written at a timestamp between
// txn.ReadTimestamp and txn.GlobalUncertaintyLimit.
//
// There is another case that impacts the use of the transaction's observed
// timestamp.
//
// If both these conditions hold:
// - A transaction already has an observed timestamp value for a node
// - That node was not the leaseholder for some or all of the range as of the
// time of the observed timestamp, but it is now.
//
// Then the transaction's observed timestamp is not (entirely) respected when
// computing a local uncertainty limit.
//
// As background, for efficiency reasons, observed timestamp tracking is done at
// a node level, but more precisely it refers to the ranges that node is the
// leaseholder for. This discrepancy is accounted for by the
// minValidObservedTimestamp parameter, and two specific cases are covered in
// more detail below.
//
// Here is the hazard that can occur without this field for a lease transfer.
//
// 1. put(k on leaseholder n1), gateway chooses t=1.0
// 2. begin; read(unrelated key on n2); gateway chooses t=0.98
// 3. pick up observed timestamp for n2 of t=0.99
// 4. n1 transfers lease for range with k to n2 @ t=1.1
// 5. read(k) on leaseholder n2 at ReadTimestamp=0.98 should get
// ReadWithinUncertaintyInterval because of the write in step 1, so
// even though we observed n2's timestamp in step 3 we must expand
// the uncertainty interval to the lease's start time, which is
// guaranteed to be greater than any write which occurred under
// the previous leaseholder.
//
// A similar hazard applies to range merges.
//
// 1. put(k2 on n2, r2); gateway chooses t=1.0
// 2. begin; read(k on n1, r1); gateway chooses t=0.98
// 3. pick up observed timestamp for n1 of t=0.99
// 4. r1 merged right-hand neighbor r2 @ t=1.1
// 5. read(k2) on joint range at ReadTimestamp=0.98 should get
// ReadWithinUncertaintyInterval because of the write in step 1, so
// even though we observed n1's timestamp in step 3 we must expand
// the uncertainty interval to the range merge freeze time, which
// is guaranteed to be greater than any write which occurred on the
// right-hand side.
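//
// A minimal call-site sketch (the surrounding variable names are illustrative,
// not taken from this package's real callers):
//
//	in := ComputeInterval(&ba.Header, leaseStatus, maxOffset)
//	// in.GlobalLimit and in.LocalLimit then bound the uncertainty checks
//	// performed during MVCC reads for this request.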
func ComputeInterval(
h *roachpb.Header, status kvserverpb.LeaseStatus, maxOffset time.Duration,
) Interval {
if h.Txn != nil {
return computeIntervalForTxn(h.Txn, status)
}
return computeIntervalForNonTxn(h, status, maxOffset)
}
func computeIntervalForTxn(txn *roachpb.Transaction, status kvserverpb.LeaseStatus) Interval {
in := Interval{
// The transaction's global uncertainty limit is computed by its coordinator
// when the transaction is initiated. It stays constant across all requests
// issued by the transaction and across all retries.
GlobalLimit: txn.GlobalUncertaintyLimit,
}
if status.State != kvserverpb.LeaseState_VALID {
// If the lease is invalid, this must be a follower read. In such cases, we
// must use the most pessimistic uncertainty limit.
return in
}
// For calls that read data within a txn, we keep track of timestamps observed
// from the various participating nodes' HLC clocks. If we have a timestamp on
// file for the leaseholder's node which is smaller than GlobalLimit, we can
// lower LocalLimit accordingly. If GlobalLimit drops below ReadTimestamp, we
// effectively can't see uncertainty restarts anymore.
//
// Note that we care about an observed timestamp from the leaseholder's node,
// even if this is a follower read on a different node. See the comment in
// doc.go about "Follower Reads" for more.
obsTs, ok := txn.GetObservedTimestamp(status.Lease.Replica.NodeID)
if !ok {
return in
}
in.LocalLimit = obsTs
// Adjust the uncertainty interval to account for lease changes or merges.
// See the comment on ComputeInterval for an explanation of cases where observed
// timestamps captured on the current leaseholder's node are not applicable to
// data written by prior leaseholders.
in.LocalLimit.Forward(status.MinValidObservedTimestamp)
// The local uncertainty limit should always be <= the global uncertainty
// limit.
in.LocalLimit.BackwardWithTimestamp(in.GlobalLimit)
return in
}
func computeIntervalForNonTxn(
h *roachpb.Header, status kvserverpb.LeaseStatus, maxOffset time.Duration,
) Interval {
if h.TimestampFromServerClock == nil || h.ReadConsistency != roachpb.CONSISTENT {
// Non-transactional requests with client-provided timestamps do not
// guarantee linearizability. Neither do entirely inconsistent requests.
// As a result, they do not have uncertainty intervals.
return Interval{}
}
// Non-transactional requests that defer their timestamp allocation to the
// leaseholder of their (single) range do have uncertainty intervals. As a
// result, they do guarantee linearizability.
in := Interval{
// Even though the non-transactional request received its timestamp from the
// leaseholder of its range, it can still observe writes that were performed
// before it in real-time that have MVCC timestamps above its timestamp. In
// these cases, it needs to perform an uncertainty restart.
//
// For example, the non-transactional request may observe an intent with a
// provisional timestamp below its server-assigned timestamp. It will begin
// waiting on this intent. It is possible for the intent to then be resolved
// (asynchronously with respect to the intent's txn commit) with a timestamp
// above its server-assigned timestamp. To guarantee linearizability, the
// non-transactional request must observe the effect of the intent write, so
// it must perform a (server-side) uncertainty restart to a timestamp above
// the now-resolved write.
//
// See the comment on D7 in doc.go for an example.
GlobalLimit: h.TimestampFromServerClock.ToTimestamp().Add(maxOffset.Nanoseconds(), 0),
}
if status.State != kvserverpb.LeaseState_VALID {
// If the lease is invalid, this is either a lease request or we are computing
// the request's uncertainty interval before grabbing latches and checking for
// the current lease. Either way, return without a local limit.
return in
}
// The request's timestamp was selected on this server, so it can serve the
// role of an observed timestamp and as the local uncertainty limit.
in.LocalLimit = *h.TimestampFromServerClock
// Adjust the uncertainty interval to account for lease changes or merges.
// See the comment on ComputeInterval for an explanation of cases where observed
// timestamps captured on the current leaseholder's node are not applicable to
// data written by prior leaseholders.
in.LocalLimit.Forward(status.MinValidObservedTimestamp)
// The local uncertainty limit should always be <= the global uncertainty
// limit.
in.LocalLimit.BackwardWithTimestamp(in.GlobalLimit)
return in
}
| pkg/kv/kvserver/uncertainty/compute.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017957916134037077,
0.00017091320478357375,
0.00016616469656582922,
0.00017063991981558502,
0.000003264449787820922
] |
{
"id": 8,
"code_window": [
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n",
"\tconn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),\n",
"\t\trpc.DefaultClass).Connect(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 51
} | subtest create-external-connection-global-privilege
initialize tenant=10
----
exec-sql
CREATE USER testuser;
----
exec-sql user=testuser
CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo'
----
pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION
exec-sql
GRANT SYSTEM EXTERNALCONNECTION TO testuser;
----
exec-sql user=testuser
CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo'
----
inspect-system-table
----
global-privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} testuser
exec-sql
DROP EXTERNAL CONNECTION "global-privileged";
----
exec-sql
REVOKE SYSTEM EXTERNALCONNECTION FROM testuser;
----
exec-sql user=testuser
CREATE EXTERNAL CONNECTION "global-privileged" AS 'nodelocal://1/foo'
----
pq: only users with the EXTERNALCONNECTION system privilege are allowed to CREATE EXTERNAL CONNECTION
subtest end
subtest drop-external-storage-privilege
exec-sql
CREATE EXTERNAL CONNECTION "drop-privileged" AS 'nodelocal://1/foo'
----
# Create another External Connection.
exec-sql
CREATE EXTERNAL CONNECTION 'drop-privileged-dup' AS 'nodelocal://1/foo'
----
exec-sql user=testuser
DROP EXTERNAL CONNECTION "drop-privileged"
----
pq: user testuser does not have DROP privilege on external_connection drop-privileged
inspect-system-table
----
drop-privileged STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} root
drop-privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} root
exec-sql
GRANT DROP ON EXTERNAL CONNECTION "drop-privileged" TO testuser;
----
exec-sql user=testuser
DROP EXTERNAL CONNECTION "drop-privileged"
----
# Try to drop the second external connection, testuser should be disallowed.
exec-sql user=testuser
DROP EXTERNAL CONNECTION 'drop-privileged-dup'
----
pq: user testuser does not have DROP privilege on external_connection drop-privileged-dup
inspect-system-table
----
drop-privileged-dup STORAGE {"provider": "nodelocal", "simpleUri": {"uri": "nodelocal://1/foo"}} root
exec-sql
DROP EXTERNAL CONNECTION 'drop-privileged-dup'
----
subtest end
subtest create-grants-all
# Reset the user.
exec-sql
DROP USER testuser
----
exec-sql
CREATE USER testuser
----
exec-sql
GRANT SYSTEM EXTERNALCONNECTION TO testuser
----
# Create an EC as root, testuser cannot use this.
exec-sql
CREATE EXTERNAL CONNECTION root AS 'userfile:///foo'
----
exec-sql user=testuser
CREATE TABLE foo (id INT)
----
exec-sql user=testuser
BACKUP TABLE foo INTO 'external://foo'
----
pq: user testuser does not have USAGE privilege on external_connection foo
# Now create an EC as testuser, they should be able to use this EC since on
# creation they are given `ALL` privileges.
exec-sql user=testuser
CREATE EXTERNAL CONNECTION 'not-root' AS 'userfile:///bar'
----
exec-sql user=testuser
BACKUP TABLE foo INTO 'external://not-root'
----
subtest end
| pkg/ccl/cloudccl/externalconn/testdata/multi-tenant/privileges_external_connection | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00023646181216463447,
0.00017212714010383934,
0.0001600475370651111,
0.00016400254389736801,
0.000020071747712790966
] |
{
"id": 8,
"code_window": [
"\n",
"\trpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)\n",
"\tconn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),\n",
"\t\trpc.DefaultClass).Connect(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 51
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package workload
import (
"context"
"strings"
"sync/atomic"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"golang.org/x/sync/errgroup"
)
// MultiConnPool maintains a set of pgx ConnPools (to different servers).
type MultiConnPool struct {
Pools []*pgxpool.Pool
// Atomic counter used by Get().
counter uint32
mu struct {
syncutil.RWMutex
// preparedStatements is a map from name to SQL. The statements in the map
// are prepared whenever a new connection is acquired from the pool.
preparedStatements map[string]string
}
}
// MultiConnPoolCfg encapsulates the knobs passed to NewMultiConnPool.
type MultiConnPoolCfg struct {
// MaxTotalConnections is the total maximum number of connections across all
// pools.
MaxTotalConnections int
// MaxConnsPerPool is the maximum number of connections in any single pool.
// Limiting this is useful especially for prepared statements, which are
// prepared on each connection inside a pool (serially).
// If 0, there is no per-pool maximum (other than the total maximum number of
// connections which still applies).
MaxConnsPerPool int
}
// pgxLogger implements the pgx.Logger interface.
type pgxLogger struct{}
var _ pgx.Logger = pgxLogger{}
// Log implements the pgx.Logger interface.
func (p pgxLogger) Log(
ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{},
) {
if ctx.Err() != nil {
// Don't log anything from pgx if the context was canceled by the workload
// runner. It would result in spam at the end of every workload.
return
}
if strings.Contains(msg, "restart transaction") {
// Our workloads have a lot of contention, so "restart transaction" messages
// are expected and noisy.
return
}
// data may contain error with "restart transaction" -- skip those as well.
if data != nil {
ev := data["err"]
if err, ok := ev.(error); ok && strings.Contains(err.Error(), "restart transaction") {
return
}
}
log.Infof(ctx, "pgx logger [%s]: %s logParams=%v", level.String(), msg, data)
}
// NewMultiConnPool creates a new MultiConnPool.
//
// Each URL gets one or more pools, and each pool has at most MaxConnsPerPool
// connections.
//
// The pools have approximately the same number of max connections, adding up to
// MaxTotalConnections.
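//
// A usage sketch (connection counts and URLs are illustrative):
//
//	cfg := MultiConnPoolCfg{MaxTotalConnections: 64, MaxConnsPerPool: 16}
//	mcp, err := NewMultiConnPool(ctx, cfg, urls...)
//	if err != nil {
//		return err
//	}
//	defer mcp.Close()
//	pool := mcp.Get() // round-robins across the underlying pgx pools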
func NewMultiConnPool(
ctx context.Context, cfg MultiConnPoolCfg, urls ...string,
) (*MultiConnPool, error) {
m := &MultiConnPool{}
m.mu.preparedStatements = map[string]string{}
connsPerURL := distribute(cfg.MaxTotalConnections, len(urls))
maxConnsPerPool := cfg.MaxConnsPerPool
if maxConnsPerPool == 0 {
maxConnsPerPool = cfg.MaxTotalConnections
}
var warmupConns [][]*pgxpool.Conn
for i := range urls {
connsPerPool := distributeMax(connsPerURL[i], maxConnsPerPool)
for _, numConns := range connsPerPool {
connCfg, err := pgxpool.ParseConfig(urls[i])
if err != nil {
return nil, err
}
// Disable the automatic prepared statement cache. We've seen a lot of
// churn in this cache since workloads create many of different queries.
connCfg.ConnConfig.BuildStatementCache = nil
connCfg.ConnConfig.LogLevel = pgx.LogLevelWarn
connCfg.ConnConfig.Logger = pgxLogger{}
connCfg.MaxConns = int32(numConns)
connCfg.BeforeAcquire = func(ctx context.Context, conn *pgx.Conn) bool {
m.mu.RLock()
defer m.mu.RUnlock()
for name, sql := range m.mu.preparedStatements {
// Note that calling `Prepare` with a name that has already been
// prepared is idempotent and short-circuits before doing any
// communication to the server.
if _, err := conn.Prepare(ctx, name, sql); err != nil {
log.Warningf(ctx, "error preparing statement. name=%s sql=%s %v", name, sql, err)
return false
}
}
return true
}
p, err := pgxpool.ConnectConfig(ctx, connCfg)
if err != nil {
return nil, err
}
warmupConns = append(warmupConns, make([]*pgxpool.Conn, numConns))
m.Pools = append(m.Pools, p)
}
}
// "Warm up" the pools so we don't have to establish connections later (which
// would affect the observed latencies of the first requests, especially when
// prepared statements are used). We do this by
// acquiring connections (in parallel), then releasing them back to the
// pool.
var g errgroup.Group
// Limit concurrent connection establishment. Allowing this to run
// at maximum parallelism would trigger syn flood protection on the
// host, which combined with any packet loss could cause Acquire to
// return an error and fail the whole function. The value 100 is
// chosen because it is less than the default value for SOMAXCONN
// (128).
sem := make(chan struct{}, 100)
for i, p := range m.Pools {
p := p
conns := warmupConns[i]
for j := range conns {
j := j
sem <- struct{}{}
g.Go(func() error {
var err error
conns[j], err = p.Acquire(ctx)
<-sem
return err
})
}
}
if err := g.Wait(); err != nil {
return nil, err
}
for i := range m.Pools {
for _, c := range warmupConns[i] {
c.Release()
}
}
return m, nil
}
// AddPreparedStatement adds the given sql statement to the map of
// statements that will be prepared when a new connection is retrieved
// from the pool.
func (m *MultiConnPool) AddPreparedStatement(name string, statement string) {
m.mu.Lock()
defer m.mu.Unlock()
m.mu.preparedStatements[name] = statement
}
// Get returns one of the pools, in round-robin manner.
func (m *MultiConnPool) Get() *pgxpool.Pool {
if len(m.Pools) == 1 {
return m.Pools[0]
}
i := atomic.AddUint32(&m.counter, 1) - 1
return m.Pools[i%uint32(len(m.Pools))]
}
// Close closes all the pools.
func (m *MultiConnPool) Close() {
for _, p := range m.Pools {
p.Close()
}
}
// distribute returns a slice of <num> integers that add up to <total> and are
// within +/-1 of each other.
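// For example, distribute(10, 3) returns [3 4 3]: the values sum to 10 and
// differ from each other by at most 1.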
func distribute(total, num int) []int {
res := make([]int, num)
for i := range res {
// Use the average number of remaining connections.
div := len(res) - i
res[i] = (total + div/2) / div
total -= res[i]
}
return res
}
// distributeMax returns a slice of integers that are at most `max` and add up
// to <total>. The slice is as short as possible and the values are within +/-1
// of each other.
func distributeMax(total, max int) []int {
return distribute(total, (total+max-1)/max)
}
| pkg/workload/pgx_helpers.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.39653846621513367,
0.017790978774428368,
0.0001618591049918905,
0.00017008227587211877,
0.08075565844774246
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tstreamNotification := <-mockServer.InboundStreams\n",
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 56
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9981957077980042,
0.2862091064453125,
0.0001785179483704269,
0.002203670796006918,
0.4499210715293884
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tstreamNotification := <-mockServer.InboundStreams\n",
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 56
} | statement ok
CREATE TABLE t (
a INT PRIMARY KEY,
b STRING COLLATE en
)
statement ok
INSERT INTO t VALUES (1, 'foo' COLLATE en), (2, NULL), (3, 'bar' COLLATE en)
statement ok
CREATE INDEX ON t (b, a)
# Test that unspecifying b is like specifying NULL.
statement ok
INSERT INTO t (a) VALUES(4)
statement ok
INSERT INTO t VALUES(5)
query T
SELECT b FROM t ORDER BY b
----
NULL
NULL
NULL
bar
foo
| pkg/sql/logictest/testdata/logic_test/collatedstring_nullinindex | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00016537579358555377,
0.00016304239397868514,
0.00015988935774657875,
0.00016386201605200768,
0.000002313595359737519
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tstreamNotification := <-mockServer.InboundStreams\n",
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 56
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package server_test
import (
"context"
"io"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
"github.com/kr/pretty"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
)
// TestDrain tests the Drain RPC.
func TestDrain(t *testing.T) {
defer leaktest.AfterTest(t)()
skip.UnderRaceWithIssue(t, 86974, "flaky test")
defer log.Scope(t).Close(t)
doTestDrain(t)
}
// doTestDrain runs the drain test.
func doTestDrain(tt *testing.T) {
var drainSleepCallCount = 0
t := newTestDrainContext(tt, &drainSleepCallCount)
defer t.Close()
// Issue a probe. We're not draining yet, so the probe should
// reflect that.
resp := t.sendProbe()
t.assertDraining(resp, false)
t.assertRemaining(resp, false)
t.assertEqual(0, drainSleepCallCount)
// Issue a drain without shutdown, so we can probe more afterwards.
resp = t.sendDrainNoShutdown()
t.assertDraining(resp, true)
t.assertRemaining(resp, true)
t.assertEqual(1, drainSleepCallCount)
// Issue another probe. This checks that the server is still running
// (i.e. Shutdown: false was effective), the draining status is
// still properly reported, and the server slept once.
resp = t.sendProbe()
t.assertDraining(resp, true)
// probe-only has no remaining.
t.assertRemaining(resp, false)
t.assertEqual(1, drainSleepCallCount)
// Issue another drain. Verify that the remaining is zero (i.e. complete)
// and that the server did not sleep again.
resp = t.sendDrainNoShutdown()
t.assertDraining(resp, true)
t.assertRemaining(resp, false)
t.assertEqual(1, drainSleepCallCount)
// Now issue a drain request without drain but with shutdown.
// We're expecting the node to be shut down after that.
resp = t.sendShutdown()
if resp != nil {
t.assertDraining(resp, true)
t.assertRemaining(resp, false)
t.assertEqual(1, drainSleepCallCount)
}
// Now expect the server to be shut down.
testutils.SucceedsSoon(t, func() error {
_, err := t.c.Drain(context.Background(), &serverpb.DrainRequest{Shutdown: false})
if grpcutil.IsClosedConnection(err) {
return nil
}
// It is incorrect to use errors.Wrap since that will result in a nil
// return value if err is nil, which is not desired.
return errors.Newf("server not yet refusing RPC, got %v", err) // nolint:errwrap
})
}
func TestEnsureSQLStatsAreFlushedDuringDrain(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
var drainSleepCallCount = 0
drainCtx := newTestDrainContext(t, &drainSleepCallCount)
defer drainCtx.Close()
var (
ts = drainCtx.tc.Server(0).SQLServer().(*sql.Server)
sqlDB = sqlutils.MakeSQLRunner(drainCtx.tc.ServerConn(0))
)
// Issue queries to be registered in stats.
sqlDB.Exec(t, `
CREATE DATABASE t;
CREATE TABLE t.test (x INT PRIMARY KEY);
INSERT INTO t.test VALUES (1);
INSERT INTO t.test VALUES (2);
INSERT INTO t.test VALUES (3);
`)
// Find the in-memory stats for the queries.
stats, err := ts.GetScrubbedStmtStats(ctx)
require.NoError(t, err)
require.Truef(t,
func(stats []roachpb.CollectedStatementStatistics) bool {
for _, stat := range stats {
if stat.Key.Query == "INSERT INTO _ VALUES (_)" {
return true
}
}
return false
}(stats),
"expected to find in-memory stats",
)
// Sanity check: verify that the statement statistics system table is empty.
sqlDB.CheckQueryResults(t,
`SELECT count(*) FROM system.statement_statistics WHERE node_id = 1`,
[][]string{{"0"}},
)
// Issue a drain.
drainCtx.sendDrainNoShutdown()
// Open a new SQL connection.
sqlDB = sqlutils.MakeSQLRunner(drainCtx.tc.ServerConn(1))
// Check that the stats were flushed into the statement stats system table.
// Verify that the number of statistics for node 1 are non-zero.
sqlDB.CheckQueryResults(t,
`SELECT count(*) > 0 FROM system.statement_statistics WHERE node_id = 1`,
[][]string{{"true"}},
)
}
type testDrainContext struct {
*testing.T
tc *testcluster.TestCluster
c serverpb.AdminClient
connCloser func()
}
func newTestDrainContext(t *testing.T, drainSleepCallCount *int) *testDrainContext {
tc := &testDrainContext{
T: t,
tc: testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
// We need to start the cluster insecure in order to not
// care about TLS settings for the RPC client connection.
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DrainSleepFn: func(time.Duration) {
*drainSleepCallCount++
},
},
},
Insecure: true,
},
}),
}
// We'll have the RPC talk to the first node.
var err error
tc.c, tc.connCloser, err = getAdminClientForServer(tc.tc.Server(0))
if err != nil {
tc.Close()
t.Fatal(err)
}
return tc
}
func (t *testDrainContext) Close() {
if t.connCloser != nil {
t.connCloser()
}
t.tc.Stopper().Stop(context.Background())
}
func (t *testDrainContext) sendProbe() *serverpb.DrainResponse {
return t.drainRequest(false /* drain */, false /* shutdown */)
}
func (t *testDrainContext) sendDrainNoShutdown() *serverpb.DrainResponse {
return t.drainRequest(true /* drain */, false /* shutdown */)
}
func (t *testDrainContext) drainRequest(drain, shutdown bool) *serverpb.DrainResponse {
// Issue a simple drain probe.
req := &serverpb.DrainRequest{Shutdown: shutdown}
if drain {
req.DoDrain = true
}
drainStream, err := t.c.Drain(context.Background(), req)
if err != nil {
t.Fatal(err)
}
resp, err := t.getDrainResponse(drainStream)
if err != nil {
t.Fatal(err)
}
return resp
}
func (t *testDrainContext) sendShutdown() *serverpb.DrainResponse {
req := &serverpb.DrainRequest{Shutdown: true}
drainStream, err := t.c.Drain(context.Background(), req)
if err != nil {
t.Fatal(err)
}
resp, err := t.getDrainResponse(drainStream)
if err != nil {
// It's possible we're getting "connection reset by peer" or some
// gRPC initialization failure because the server is shutting
// down. Tolerate that.
log.Infof(context.Background(), "RPC error: %v", err)
}
return resp
}
func (t *testDrainContext) assertDraining(resp *serverpb.DrainResponse, drain bool) {
t.Helper()
if resp.IsDraining != drain {
t.Fatalf("expected draining %v, got %v", drain, resp.IsDraining)
}
}
func (t *testDrainContext) assertRemaining(resp *serverpb.DrainResponse, remaining bool) {
t.Helper()
if actualRemaining := (resp.DrainRemainingIndicator > 0); remaining != actualRemaining {
t.Fatalf("expected remaining %v, got %v", remaining, actualRemaining)
}
}
func (t *testDrainContext) assertEqual(expected int, actual int) {
t.Helper()
if expected == actual {
return
}
t.Fatalf("expected sleep call count to be %v, got %v", expected, actual)
}
func (t *testDrainContext) getDrainResponse(
stream serverpb.Admin_DrainClient,
) (*serverpb.DrainResponse, error) {
resp, err := stream.Recv()
if err != nil {
return nil, err
}
unexpected, err := stream.Recv()
if err != io.EOF {
if unexpected != nil {
t.Fatalf("unexpected additional response: %# v // %v", pretty.Formatter(unexpected), err)
}
if err == nil {
err = errors.New("unexpected response")
}
return nil, err
}
return resp, nil
}
func getAdminClientForServer(
s serverutils.TestServerInterface,
) (c serverpb.AdminClient, closer func(), err error) {
//lint:ignore SA1019 grpc.WithInsecure is deprecated
conn, err := grpc.Dial(s.ServingRPCAddr(), grpc.WithInsecure())
if err != nil {
return nil, nil, err
}
client := serverpb.NewAdminClient(conn)
return client, func() {
_ = conn.Close() // nolint:grpcconnclose
}, nil
}
| pkg/server/drain_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0030163340270519257,
0.00041487120324745774,
0.00016309492639265954,
0.00017354346346110106,
0.0006251787417568266
] |
{
"id": 9,
"code_window": [
"\t}\n",
"\tclient := execinfrapb.NewDistSQLClient(conn)\n",
"\tclientStream, err = client.FlowStream(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, nil, nil, err\n",
"\t}\n",
"\tstreamNotification := <-mockServer.InboundStreams\n",
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tt.Fatal(err)\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 56
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package json
import (
"github.com/cockroachdb/apd/v3"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/errors"
)
// This file implements the format described in the JSONB encoding RFC.
const offlenStride = 32
const arrayContainerTag = 0x80000000
const objectContainerTag = 0x40000000
const scalarContainerTag = 0x20000000
const containerHeaderTypeMask = 0xE0000000
const containerHeaderLenMask = 0x1FFFFFFF
const maxByteLength = int(jEntryOffLenMask)
const containerHeaderLen = 4
const jEntryLen = 4
// checkLength ensures that an encoded value is not too long to fit into the
// JEntry header. This should never come up, since it would require a ~250MB
// JSON value, but check it just to be safe.
func checkLength(length int) error {
if length > maxByteLength {
return errors.Newf("JSON value too large: %d bytes", errors.Safe(length))
}
return nil
}
// Note: the encoding of each of null, true, and false is an encoding of length 0.
// Their values are purely dictated by their type.
func (jsonNull) encode(appendTo []byte) (e jEntry, b []byte, err error) {
return nullJEntry, appendTo, nil
}
func (jsonTrue) encode(appendTo []byte) (e jEntry, b []byte, err error) {
return trueJEntry, appendTo, nil
}
func (jsonFalse) encode(appendTo []byte) (e jEntry, b []byte, err error) {
return falseJEntry, appendTo, nil
}
func (j jsonString) encode(appendTo []byte) (e jEntry, b []byte, err error) {
if err := checkLength(len(j)); err != nil {
return jEntry{}, b, err
}
return makeStringJEntry(len(j)), append(appendTo, []byte(j)...), nil
}
func (j jsonNumber) encode(appendTo []byte) (e jEntry, b []byte, err error) {
decOffset := len(appendTo)
dec := apd.Decimal(j)
appendTo = encoding.EncodeUntaggedDecimalValue(appendTo, &dec)
lengthInBytes := len(appendTo) - decOffset
if err := checkLength(lengthInBytes); err != nil {
return jEntry{}, b, err
}
return makeNumberJEntry(lengthInBytes), appendTo, nil
}
// encodingModeForIdx determines which encoding mode we choose to use for a
// given i-th entry in an array or object.
func encodingModeForIdx(i int, offset uint32) encodingMode {
if i%offlenStride == 0 {
return offsetEncode(offset)
}
return lengthMode
}
func (j jsonArray) encode(appendTo []byte) (e jEntry, b []byte, err error) {
encodingStartPosition := len(appendTo)
// Array container header.
appendTo = encoding.EncodeUint32Ascending(appendTo, arrayContainerTag|uint32(len(j)))
// Reserve space for the JEntries and store where they start so we can fill them in later.
jEntryIdx := len(appendTo)
for i := 0; i < len(j); i++ {
appendTo = append(appendTo, 0, 0, 0, 0)
}
offset := uint32(0)
for i := 0; i < len(j); i++ {
var nextJEntry jEntry
nextJEntry, appendTo, err = j[i].encode(appendTo)
if err != nil {
return jEntry{}, appendTo, err
}
length := nextJEntry.length
offset += length
appendTo = encoding.PutUint32Ascending(appendTo, nextJEntry.encoded(encodingModeForIdx(i, offset)), jEntryIdx+i*4)
}
lengthInBytes := len(appendTo) - encodingStartPosition
if err := checkLength(lengthInBytes); err != nil {
return jEntry{}, b, err
}
return makeContainerJEntry(lengthInBytes), appendTo, nil
}
func (j jsonObject) encode(appendTo []byte) (e jEntry, b []byte, err error) {
encodingStartPosition := len(appendTo)
// Object container header.
appendTo = encoding.EncodeUint32Ascending(appendTo, objectContainerTag|uint32(len(j)))
// Reserve space for the key and value JEntries and store where they start so
// we can fill them in later.
jEntryIdx := len(appendTo)
for i := 0; i < len(j)*2; i++ {
appendTo = append(appendTo, 0, 0, 0, 0)
}
offset := uint32(0)
// Encode all keys.
for i := 0; i < len(j); i++ {
var nextJEntry jEntry
nextJEntry, appendTo, err = j[i].k.encode(appendTo)
if err != nil {
return jEntry{}, appendTo, err
}
length := nextJEntry.length
offset += length
appendTo = encoding.PutUint32Ascending(appendTo, nextJEntry.encoded(encodingModeForIdx(i, offset)), jEntryIdx+i*4)
}
// Encode all values.
for i := 0; i < len(j); i++ {
var nextJEntry jEntry
nextJEntry, appendTo, err = j[i].v.encode(appendTo)
if err != nil {
return jEntry{}, appendTo, err
}
length := nextJEntry.length
offset += length
appendTo = encoding.PutUint32Ascending(appendTo, nextJEntry.encoded(encodingModeForIdx(i, offset)), jEntryIdx+(len(j)+i)*4)
}
lengthInBytes := len(appendTo) - encodingStartPosition
if err := checkLength(lengthInBytes); err != nil {
return jEntry{}, b, err
}
return makeContainerJEntry(lengthInBytes), appendTo, nil
}
// EncodeJSON encodes a JSON value as a sequence of bytes.
func EncodeJSON(appendTo []byte, j JSON) ([]byte, error) {
switch j.Type() {
case ArrayJSONType, ObjectJSONType:
// We just discard the JEntry in these cases.
var err error
_, appendTo, err = j.encode(appendTo)
if err != nil {
return appendTo, err
}
return appendTo, nil
default: // j is a scalar, so we must construct a scalar container for it at the top level.
// Scalar container header.
appendTo = encoding.EncodeUint32Ascending(appendTo, scalarContainerTag)
// Reserve space for scalar jEntry.
jEntryIdx := len(appendTo)
appendTo = encoding.EncodeUint32Ascending(appendTo, 0)
var entry jEntry
var err error
entry, appendTo, err = j.encode(appendTo)
if err != nil {
return appendTo, err
}
appendTo = encoding.PutUint32Ascending(appendTo, entry.encoded(lengthMode), jEntryIdx)
return appendTo, nil
}
}
// DecodeJSON decodes a value encoded with EncodeJSON.
func DecodeJSON(b []byte) ([]byte, JSON, error) {
b, containerHeader, err := encoding.DecodeUint32Ascending(b)
if err != nil {
return b, nil, err
}
switch containerHeader & containerHeaderTypeMask {
case scalarContainerTag:
var entry jEntry
var err error
b, entry, err = decodeJEntry(b, 0)
if err != nil {
return b, nil, err
}
return decodeJSONValue(entry, b)
case arrayContainerTag:
return decodeJSONArray(containerHeader, b)
case objectContainerTag:
return decodeJSONObject(containerHeader, b)
}
return b, nil, errors.AssertionFailedf(
"error decoding JSON value, header: %x", errors.Safe(containerHeader))
}
// FromEncoding returns a JSON value which is lazily decoded.
func FromEncoding(b []byte) (JSON, error) {
return newEncodedFromRoot(b)
}
func decodeJSONArray(containerHeader uint32, b []byte) ([]byte, JSON, error) {
length := containerHeader & containerHeaderLenMask
b, jEntries, err := decodeJEntries(int(length), b)
if err != nil {
return b, nil, err
}
result := make(jsonArray, length)
for i := uint32(0); i < length; i++ {
var nextJSON JSON
b, nextJSON, err = decodeJSONValue(jEntries[i], b)
if err != nil {
return b, nil, err
}
result[i] = nextJSON
}
return b, result, nil
}
func decodeJEntries(n int, b []byte) ([]byte, []jEntry, error) {
var err error
jEntries := make([]jEntry, n)
off := uint32(0)
for i := 0; i < n; i++ {
var nextJEntry jEntry
b, nextJEntry, err = decodeJEntry(b, off)
if err != nil {
return b, nil, err
}
off += nextJEntry.length
jEntries[i] = nextJEntry
}
return b, jEntries, nil
}
func decodeJSONObject(containerHeader uint32, b []byte) ([]byte, JSON, error) {
length := int(containerHeader & containerHeaderLenMask)
b, jEntries, err := decodeJEntries(length*2, b)
if err != nil {
return b, nil, err
}
// There are `length` key entries at the start and `length` value entries at the back.
keyJEntries := jEntries[:length]
valueJEntries := jEntries[length:]
result := make(jsonObject, length)
// Decode the keys.
for i := 0; i < length; i++ {
var nextJSON JSON
b, nextJSON, err = decodeJSONValue(keyJEntries[i], b)
if err != nil {
return b, nil, err
}
if key, ok := nextJSON.(jsonString); ok {
result[i].k = key
} else {
return b, nil, errors.AssertionFailedf(
"key encoded as non-string: %T", nextJSON)
}
}
// Decode the values.
for i := 0; i < length; i++ {
var nextJSON JSON
b, nextJSON, err = decodeJSONValue(valueJEntries[i], b)
if err != nil {
return b, nil, err
}
result[i].v = nextJSON
}
return b, result, nil
}
func decodeJSONNumber(b []byte) ([]byte, JSON, error) {
b, d, err := encoding.DecodeUntaggedDecimalValue(b)
if err != nil {
return b, nil, err
}
return b, jsonNumber(d), nil
}
func decodeJSONValue(e jEntry, b []byte) ([]byte, JSON, error) {
switch e.typCode {
case trueTag:
return b, TrueJSONValue, nil
case falseTag:
return b, FalseJSONValue, nil
case nullTag:
return b, NullJSONValue, nil
case stringTag:
return b[e.length:], jsonString(b[:e.length]), nil
case numberTag:
return decodeJSONNumber(b)
case containerTag:
return DecodeJSON(b)
}
return b, nil, errors.AssertionFailedf(
"error decoding JSON value, unexpected type code: %d", errors.Safe(e.typCode))
}
| pkg/util/json/encode.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0003401620197109878,
0.0001761620515026152,
0.0001610375038580969,
0.00016948668053373694,
0.00003109542012680322
] |
{
"id": 10,
"code_window": [
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n",
"\t\tclose(streamNotification.Donec)\n",
"\t\tstopper.Stop(ctx)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t// After the RPC is unblocked, we have to drain the client side in order\n",
"\t\t// to simulate what happens in production (where the watchdog goroutine\n",
"\t\t// of the outbox does this).\n",
"\t\tfor {\n",
"\t\t\t_, err := clientStream.Recv()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tif err == io.EOF {\n",
"\t\t\t\t\tbreak\n",
"\t\t\t\t}\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 62
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9976623058319092,
0.14604099094867706,
0.00016787188360467553,
0.0006487283390015364,
0.3477427065372467
] |
{
"id": 10,
"code_window": [
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n",
"\t\tclose(streamNotification.Donec)\n",
"\t\tstopper.Stop(ctx)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t// After the RPC is unblocked, we have to drain the client side in order\n",
"\t\t// to simulate what happens in production (where the watchdog goroutine\n",
"\t\t// of the outbox does this).\n",
"\t\tfor {\n",
"\t\t\t_, err := clientStream.Recv()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tif err == io.EOF {\n",
"\t\t\t\t\tbreak\n",
"\t\t\t\t}\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 62
} | setup
CREATE TABLE defaultdb.customers (id INT PRIMARY KEY, email STRING UNIQUE);
CREATE TABLE IF NOT EXISTS defaultdb.orders (
id INT PRIMARY KEY,
customer INT UNIQUE NOT NULL REFERENCES customers (id),
orderTotal DECIMAL(9,2),
INDEX (customer)
);
CREATE SEQUENCE defaultdb.SQ2;
CREATE TYPE defaultdb.typ AS ENUM('a');
CREATE TABLE defaultdb.shipments (
tracking_number UUID DEFAULT gen_random_uuid() PRIMARY KEY,
carrier STRING,
status STRING,
customer_id INT,
randcol INT DEFAULT nextval('defaultdb.sq2'),
val defaultdb.TYP AS ('a'::defaultdb.typ) STORED,
CONSTRAINT fk_customers FOREIGN KEY (customer_id) REFERENCES customers(id),
CONSTRAINT fk_orders FOREIGN KEY (customer_id) REFERENCES orders(customer)
);
CREATE SEQUENCE defaultdb.SQ1 OWNED BY defaultdb.shipments.carrier;
CREATE VIEW v1 as (select customer_id, carrier from defaultdb.shipments);
COMMENT ON TABLE defaultdb.shipments IS 'shipment is important';
COMMENT ON COLUMN defaultdb.shipments.tracking_number IS 'tracking_number is good';
COMMENT ON INDEX defaultdb.shipments@shipments_pkey IS 'pkey is good';
COMMENT ON CONSTRAINT fk_customers ON defaultdb.shipments IS 'customer is important';
----
build
DROP TABLE defaultdb.shipments CASCADE;
----
- [[Namespace:{DescID: 109, Name: shipments, ReferencedDescID: 100}, ABSENT], PUBLIC]
{databaseId: 100, descriptorId: 109, name: shipments, schemaId: 101}
- [[Owner:{DescID: 109}, ABSENT], PUBLIC]
{descriptorId: 109, owner: root}
- [[UserPrivileges:{DescID: 109, Name: admin}, ABSENT], PUBLIC]
{descriptorId: 109, privileges: 2, userName: admin}
- [[UserPrivileges:{DescID: 109, Name: root}, ABSENT], PUBLIC]
{descriptorId: 109, privileges: 2, userName: root}
- [[Table:{DescID: 109}, ABSENT], PUBLIC]
{tableId: 109}
- [[ObjectParent:{DescID: 109, ReferencedDescID: 101}, ABSENT], PUBLIC]
{objectId: 109, parentSchemaId: 101}
- [[TableComment:{DescID: 109, Comment: shipment is important}, ABSENT], PUBLIC]
{comment: shipment is important, tableId: 109}
- [[ColumnFamily:{DescID: 109, Name: primary, ColumnFamilyID: 0}, ABSENT], PUBLIC]
{name: primary, tableId: 109}
- [[Column:{DescID: 109, ColumnID: 1}, ABSENT], PUBLIC]
{columnId: 1, pgAttributeNum: 1, tableId: 109}
- [[ColumnName:{DescID: 109, Name: tracking_number, ColumnID: 1}, ABSENT], PUBLIC]
{columnId: 1, name: tracking_number, tableId: 109}
- [[ColumnType:{DescID: 109, ColumnFamilyID: 0, ColumnID: 1}, ABSENT], PUBLIC]
{columnId: 1, tableId: 109, type: {family: UuidFamily, oid: 2950}}
- [[ColumnDefaultExpression:{DescID: 109, ColumnID: 1}, ABSENT], PUBLIC]
{columnId: 1, expr: gen_random_uuid(), tableId: 109}
- [[ColumnComment:{DescID: 109, ColumnID: 1, Comment: tracking_number is good}, ABSENT], PUBLIC]
{columnId: 1, comment: tracking_number is good, pgAttributeNum: 1, tableId: 109}
- [[Column:{DescID: 109, ColumnID: 2}, ABSENT], PUBLIC]
{columnId: 2, pgAttributeNum: 2, tableId: 109}
- [[ColumnName:{DescID: 109, Name: carrier, ColumnID: 2}, ABSENT], PUBLIC]
{columnId: 2, name: carrier, tableId: 109}
- [[ColumnType:{DescID: 109, ColumnFamilyID: 0, ColumnID: 2}, ABSENT], PUBLIC]
{columnId: 2, isNullable: true, tableId: 109, type: {family: StringFamily, oid: 25}}
- [[SequenceOwner:{DescID: 109, ColumnID: 2, ReferencedDescID: 110}, ABSENT], PUBLIC]
{columnId: 2, sequenceId: 110, tableId: 109}
- [[Column:{DescID: 109, ColumnID: 3}, ABSENT], PUBLIC]
{columnId: 3, pgAttributeNum: 3, tableId: 109}
- [[ColumnName:{DescID: 109, Name: status, ColumnID: 3}, ABSENT], PUBLIC]
{columnId: 3, name: status, tableId: 109}
- [[ColumnType:{DescID: 109, ColumnFamilyID: 0, ColumnID: 3}, ABSENT], PUBLIC]
{columnId: 3, isNullable: true, tableId: 109, type: {family: StringFamily, oid: 25}}
- [[Column:{DescID: 109, ColumnID: 4}, ABSENT], PUBLIC]
{columnId: 4, pgAttributeNum: 4, tableId: 109}
- [[ColumnName:{DescID: 109, Name: customer_id, ColumnID: 4}, ABSENT], PUBLIC]
{columnId: 4, name: customer_id, tableId: 109}
- [[ColumnType:{DescID: 109, ColumnFamilyID: 0, ColumnID: 4}, ABSENT], PUBLIC]
{columnId: 4, isNullable: true, tableId: 109, type: {family: IntFamily, oid: 20, width: 64}}
- [[Column:{DescID: 109, ColumnID: 5}, ABSENT], PUBLIC]
{columnId: 5, pgAttributeNum: 5, tableId: 109}
- [[ColumnName:{DescID: 109, Name: randcol, ColumnID: 5}, ABSENT], PUBLIC]
{columnId: 5, name: randcol, tableId: 109}
- [[ColumnType:{DescID: 109, ColumnFamilyID: 0, ColumnID: 5}, ABSENT], PUBLIC]
{columnId: 5, isNullable: true, tableId: 109, type: {family: IntFamily, oid: 20, width: 64}}
- [[ColumnDefaultExpression:{DescID: 109, ColumnID: 5, ReferencedSequenceIDs: [106]}, ABSENT], PUBLIC]
{columnId: 5, expr: 'nextval(106:::REGCLASS)', tableId: 109, usesSequenceIds: [106]}
- [[Column:{DescID: 109, ColumnID: 6}, ABSENT], PUBLIC]
{columnId: 6, pgAttributeNum: 6, tableId: 109}
- [[ColumnName:{DescID: 109, Name: val, ColumnID: 6}, ABSENT], PUBLIC]
{columnId: 6, name: val, tableId: 109}
- [[ColumnType:{DescID: 109, ReferencedTypeIDs: [107 108], ColumnFamilyID: 0, ColumnID: 6}, ABSENT], PUBLIC]
{closedTypeIds: [107, 108], columnId: 6, computeExpr: {expr: 'x''80'':::@100107', usesTypeIds: [107, 108]}, isNullable: true, tableId: 109, type: {family: EnumFamily, oid: 100107, udtMetadata: {arrayTypeOid: 100108}}}
- [[Column:{DescID: 109, ColumnID: 4294967295}, ABSENT], PUBLIC]
{columnId: 4.294967295e+09, isHidden: true, isSystemColumn: true, pgAttributeNum: 4.294967295e+09, tableId: 109}
- [[ColumnName:{DescID: 109, Name: crdb_internal_mvcc_timestamp, ColumnID: 4294967295}, ABSENT], PUBLIC]
{columnId: 4.294967295e+09, name: crdb_internal_mvcc_timestamp, tableId: 109}
- [[ColumnType:{DescID: 109, ColumnFamilyID: 0, ColumnID: 4294967295}, ABSENT], PUBLIC]
{columnId: 4.294967295e+09, isNullable: true, tableId: 109, type: {family: DecimalFamily, oid: 1700}}
- [[Column:{DescID: 109, ColumnID: 4294967294}, ABSENT], PUBLIC]
{columnId: 4.294967294e+09, isHidden: true, isSystemColumn: true, pgAttributeNum: 4.294967294e+09, tableId: 109}
- [[ColumnName:{DescID: 109, Name: tableoid, ColumnID: 4294967294}, ABSENT], PUBLIC]
{columnId: 4.294967294e+09, name: tableoid, tableId: 109}
- [[ColumnType:{DescID: 109, ColumnFamilyID: 0, ColumnID: 4294967294}, ABSENT], PUBLIC]
{columnId: 4.294967294e+09, isNullable: true, tableId: 109, type: {family: OidFamily, oid: 26}}
- [[IndexColumn:{DescID: 109, ColumnID: 1, IndexID: 1}, ABSENT], PUBLIC]
{columnId: 1, indexId: 1, tableId: 109}
- [[IndexColumn:{DescID: 109, ColumnID: 2, IndexID: 1}, ABSENT], PUBLIC]
{columnId: 2, indexId: 1, kind: STORED, tableId: 109}
- [[IndexColumn:{DescID: 109, ColumnID: 3, IndexID: 1}, ABSENT], PUBLIC]
{columnId: 3, indexId: 1, kind: STORED, ordinalInKind: 1, tableId: 109}
- [[IndexColumn:{DescID: 109, ColumnID: 4, IndexID: 1}, ABSENT], PUBLIC]
{columnId: 4, indexId: 1, kind: STORED, ordinalInKind: 2, tableId: 109}
- [[IndexColumn:{DescID: 109, ColumnID: 5, IndexID: 1}, ABSENT], PUBLIC]
{columnId: 5, indexId: 1, kind: STORED, ordinalInKind: 3, tableId: 109}
- [[IndexColumn:{DescID: 109, ColumnID: 6, IndexID: 1}, ABSENT], PUBLIC]
{columnId: 6, indexId: 1, kind: STORED, ordinalInKind: 4, tableId: 109}
- [[PrimaryIndex:{DescID: 109, IndexID: 1, ConstraintID: 1}, ABSENT], PUBLIC]
{constraintId: 1, indexId: 1, isUnique: true, tableId: 109}
- [[IndexName:{DescID: 109, Name: shipments_pkey, IndexID: 1}, ABSENT], PUBLIC]
{indexId: 1, name: shipments_pkey, tableId: 109}
- [[IndexComment:{DescID: 109, IndexID: 1, Comment: pkey is good}, ABSENT], PUBLIC]
{comment: pkey is good, indexId: 1, tableId: 109}
- [[IndexData:{DescID: 109, IndexID: 1}, ABSENT], PUBLIC]
{indexId: 1, tableId: 109}
- [[ForeignKeyConstraint:{DescID: 109, ConstraintID: 2, ReferencedDescID: 104}, ABSENT], PUBLIC]
{columnIds: [4], constraintId: 2, referencedColumnIds: [1], referencedTableId: 104, tableId: 109}
- [[ConstraintWithoutIndexName:{DescID: 109, Name: fk_customers, ConstraintID: 2}, ABSENT], PUBLIC]
{constraintId: 2, name: fk_customers, tableId: 109}
- [[ConstraintComment:{DescID: 109, ConstraintID: 2, Comment: customer is important}, ABSENT], PUBLIC]
{comment: customer is important, constraintId: 2, tableId: 109}
- [[ForeignKeyConstraint:{DescID: 109, ConstraintID: 3, ReferencedDescID: 105}, ABSENT], PUBLIC]
{columnIds: [4], constraintId: 3, referencedColumnIds: [2], referencedTableId: 105, tableId: 109}
- [[ConstraintWithoutIndexName:{DescID: 109, Name: fk_orders, ConstraintID: 3}, ABSENT], PUBLIC]
{constraintId: 3, name: fk_orders, tableId: 109}
- [[TableData:{DescID: 109, ReferencedDescID: 100}, ABSENT], PUBLIC]
{databaseId: 100, tableId: 109}
- [[Namespace:{DescID: 110, Name: sq1, ReferencedDescID: 100}, ABSENT], PUBLIC]
{databaseId: 100, descriptorId: 110, name: sq1, schemaId: 101}
- [[Owner:{DescID: 110}, ABSENT], PUBLIC]
{descriptorId: 110, owner: root}
- [[UserPrivileges:{DescID: 110, Name: admin}, ABSENT], PUBLIC]
{descriptorId: 110, privileges: 2, userName: admin}
- [[UserPrivileges:{DescID: 110, Name: root}, ABSENT], PUBLIC]
{descriptorId: 110, privileges: 2, userName: root}
- [[Sequence:{DescID: 110}, ABSENT], PUBLIC]
{sequenceId: 110}
- [[ObjectParent:{DescID: 110, ReferencedDescID: 101}, ABSENT], PUBLIC]
{objectId: 110, parentSchemaId: 101}
- [[TableData:{DescID: 110, ReferencedDescID: 100}, ABSENT], PUBLIC]
{databaseId: 100, tableId: 110}
- [[Namespace:{DescID: 111, Name: v1, ReferencedDescID: 100}, ABSENT], PUBLIC]
{databaseId: 100, descriptorId: 111, name: v1, schemaId: 101}
- [[Owner:{DescID: 111}, ABSENT], PUBLIC]
{descriptorId: 111, owner: root}
- [[UserPrivileges:{DescID: 111, Name: admin}, ABSENT], PUBLIC]
{descriptorId: 111, privileges: 2, userName: admin}
- [[UserPrivileges:{DescID: 111, Name: root}, ABSENT], PUBLIC]
{descriptorId: 111, privileges: 2, userName: root}
- [[View:{DescID: 111}, ABSENT], PUBLIC]
{forwardReferences: [{columnIds: [2, 4], toId: 109}], usesRelationIds: [109], viewId: 111}
- [[ObjectParent:{DescID: 111, ReferencedDescID: 101}, ABSENT], PUBLIC]
{objectId: 111, parentSchemaId: 101}
- [[Column:{DescID: 111, ColumnID: 1}, ABSENT], PUBLIC]
{columnId: 1, pgAttributeNum: 1, tableId: 111}
- [[ColumnName:{DescID: 111, Name: customer_id, ColumnID: 1}, ABSENT], PUBLIC]
{columnId: 1, name: customer_id, tableId: 111}
- [[ColumnType:{DescID: 111, ColumnFamilyID: 0, ColumnID: 1}, ABSENT], PUBLIC]
{columnId: 1, isNullable: true, tableId: 111, type: {family: IntFamily, oid: 20, width: 64}}
- [[Column:{DescID: 111, ColumnID: 2}, ABSENT], PUBLIC]
{columnId: 2, pgAttributeNum: 2, tableId: 111}
- [[ColumnName:{DescID: 111, Name: carrier, ColumnID: 2}, ABSENT], PUBLIC]
{columnId: 2, name: carrier, tableId: 111}
- [[ColumnType:{DescID: 111, ColumnFamilyID: 0, ColumnID: 2}, ABSENT], PUBLIC]
{columnId: 2, isNullable: true, tableId: 111, type: {family: StringFamily, oid: 25}}
- [[Column:{DescID: 111, ColumnID: 4294967295}, ABSENT], PUBLIC]
{columnId: 4.294967295e+09, isHidden: true, isSystemColumn: true, pgAttributeNum: 4.294967295e+09, tableId: 111}
- [[ColumnName:{DescID: 111, Name: crdb_internal_mvcc_timestamp, ColumnID: 4294967295}, ABSENT], PUBLIC]
{columnId: 4.294967295e+09, name: crdb_internal_mvcc_timestamp, tableId: 111}
- [[ColumnType:{DescID: 111, ColumnFamilyID: 0, ColumnID: 4294967295}, ABSENT], PUBLIC]
{columnId: 4.294967295e+09, isNullable: true, tableId: 111, type: {family: DecimalFamily, oid: 1700}}
- [[Column:{DescID: 111, ColumnID: 4294967294}, ABSENT], PUBLIC]
{columnId: 4.294967294e+09, isHidden: true, isSystemColumn: true, pgAttributeNum: 4.294967294e+09, tableId: 111}
- [[ColumnName:{DescID: 111, Name: tableoid, ColumnID: 4294967294}, ABSENT], PUBLIC]
{columnId: 4.294967294e+09, name: tableoid, tableId: 111}
- [[ColumnType:{DescID: 111, ColumnFamilyID: 0, ColumnID: 4294967294}, ABSENT], PUBLIC]
{columnId: 4.294967294e+09, isNullable: true, tableId: 111, type: {family: OidFamily, oid: 26}}
| pkg/sql/schemachanger/scbuild/testdata/drop_table | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017323232896160334,
0.00016893382417038083,
0.000165487639605999,
0.00016921103815548122,
0.0000016775920812506229
] |
{
"id": 10,
"code_window": [
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n",
"\t\tclose(streamNotification.Donec)\n",
"\t\tstopper.Stop(ctx)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t// After the RPC is unblocked, we have to drain the client side in order\n",
"\t\t// to simulate what happens in production (where the watchdog goroutine\n",
"\t\t// of the outbox does this).\n",
"\t\tfor {\n",
"\t\t\t_, err := clientStream.Recv()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tif err == io.EOF {\n",
"\t\t\t\t\tbreak\n",
"\t\t\t\t}\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 62
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import {
StatementInsightDetails,
StatementInsightDetailsStateProps,
StatementInsightDetailsDispatchProps,
} from "@cockroachlabs/cluster-ui";
import { connect } from "react-redux";
import { RouteComponentProps, withRouter } from "react-router-dom";
import { AdminUIState } from "src/redux/state";
import { refreshExecutionInsights } from "src/redux/apiReducers";
import { selectStatementInsightDetails } from "src/views/insights/insightsSelectors";
import { setGlobalTimeScaleAction } from "src/redux/statements";
const mapStateToProps = (
state: AdminUIState,
props: RouteComponentProps,
): StatementInsightDetailsStateProps => {
const insightStatements = selectStatementInsightDetails(state, props);
const insightError = state.cachedData?.executionInsights?.lastError;
return {
insightEventDetails: insightStatements,
insightError,
};
};
const mapDispatchToProps: StatementInsightDetailsDispatchProps = {
setTimeScale: setGlobalTimeScaleAction,
refreshStatementInsights: refreshExecutionInsights,
};
const StatementInsightDetailsPage = withRouter(
connect<
StatementInsightDetailsStateProps,
StatementInsightDetailsDispatchProps,
RouteComponentProps
>(
mapStateToProps,
mapDispatchToProps,
)(StatementInsightDetails),
);
export default StatementInsightDetailsPage;
| pkg/ui/workspaces/db-console/src/views/insights/statementInsightDetailsPage.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.0001766571804182604,
0.00017100210243370384,
0.00016591390885878354,
0.00017004404799081385,
0.0000038818484426883515
] |
{
"id": 10,
"code_window": [
"\tserverStream = streamNotification.Stream\n",
"\tcleanup = func() {\n",
"\t\tclose(streamNotification.Donec)\n",
"\t\tstopper.Stop(ctx)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t// After the RPC is unblocked, we have to drain the client side in order\n",
"\t\t// to simulate what happens in production (where the watchdog goroutine\n",
"\t\t// of the outbox does this).\n",
"\t\tfor {\n",
"\t\t\t_, err := clientStream.Recv()\n",
"\t\t\tif err != nil {\n",
"\t\t\t\tif err == io.EOF {\n",
"\t\t\t\t\tbreak\n",
"\t\t\t\t}\n",
"\t\t\t\tt.Fatal(err)\n",
"\t\t\t}\n",
"\t\t}\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "add",
"edit_start_line_idx": 62
} | // Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package s1000
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/simple"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range simple.Analyzers {
if analyzer.Analyzer.Name == "S1000" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
| build/bazelutil/staticcheckanalyzers/s1000/analyzer.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017299249884672463,
0.00017064310668502003,
0.0001677976397331804,
0.0001711392105789855,
0.0000021496080080396496
] |
{
"id": 11,
"code_window": [
"\t\tstopper.Stop(ctx)\n",
"\t}\n",
"\treturn serverStream, clientStream, cleanup, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn serverStream, clientStream, cleanup\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package flowinfra
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/stop"
)
// createDummyStream creates the server and client side of a FlowStream stream.
// This can be used by tests to pretend that they have received a FlowStream RPC.
// The stream can be used to send messages (ConsumerSignal's) on it (within a
// gRPC window limit since nobody's reading from the stream), for example
// Handshake messages.
//
// We do this by creating a mock server, dialing into it and capturing the
// server stream. The server-side RPC call will be blocked until the caller
// calls the returned cleanup function.
func createDummyStream() (
serverStream execinfrapb.DistSQL_FlowStreamServer,
clientStream execinfrapb.DistSQL_FlowStreamClient,
cleanup func(),
err error,
) {
stopper := stop.NewStopper()
ctx := context.Background()
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */)
storageClusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(ctx, clock, stopper, execinfra.StaticSQLInstanceID)
if err != nil {
return nil, nil, nil, err
}
rpcContext := rpc.NewInsecureTestingContextWithClusterID(ctx, clock, stopper, storageClusterID)
conn, err := rpcContext.GRPCDialNode(addr.String(), roachpb.NodeID(execinfra.StaticSQLInstanceID),
rpc.DefaultClass).Connect(ctx)
if err != nil {
return nil, nil, nil, err
}
client := execinfrapb.NewDistSQLClient(conn)
clientStream, err = client.FlowStream(ctx)
if err != nil {
return nil, nil, nil, err
}
streamNotification := <-mockServer.InboundStreams
serverStream = streamNotification.Stream
cleanup = func() {
close(streamNotification.Donec)
stopper.Stop(ctx)
}
return serverStream, clientStream, cleanup, nil
}
| pkg/sql/flowinfra/utils_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.9670515060424805,
0.1388646811246872,
0.0001652878272579983,
0.000651236274279654,
0.3381064832210541
] |
{
"id": 11,
"code_window": [
"\t\tstopper.Stop(ctx)\n",
"\t}\n",
"\treturn serverStream, clientStream, cleanup, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn serverStream, clientStream, cleanup\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package invertedidx_test
import (
"context"
"math"
"strconv"
"testing"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
"github.com/cockroachdb/cockroach/pkg/geo/geopb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/inverted"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/invertedidx"
"github.com/cockroachdb/cockroach/pkg/sql/opt/norm"
"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils"
"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/stretchr/testify/require"
)
func TestTryJoinGeoIndex(t *testing.T) {
semaCtx := tree.MakeSemaContext()
st := cluster.MakeTestingClusterSettings()
evalCtx := eval.NewTestingEvalContext(st)
tc := testcat.New()
// Create the input table.
if _, err := tc.ExecuteDDL(
"CREATE TABLE t1 (geom1 GEOMETRY, geog1 GEOGRAPHY, geom11 GEOMETRY, geog11 GEOGRAPHY, " +
"inet1 INET, bbox1 box2d)",
); err != nil {
t.Fatal(err)
}
// Create the indexed table.
if _, err := tc.ExecuteDDL(
"CREATE TABLE t2 (geom2 GEOMETRY, geog2 GEOGRAPHY, inet2 INET, bbox2 box2d, " +
"INVERTED INDEX (geom2), INVERTED INDEX (geog2))",
); err != nil {
t.Fatal(err)
}
var f norm.Factory
f.Init(context.Background(), evalCtx, tc)
md := f.Metadata()
tn1 := tree.NewUnqualifiedTableName("t1")
tn2 := tree.NewUnqualifiedTableName("t2")
tab1 := md.AddTable(tc.Table(tn1), tn1)
tab2 := md.AddTable(tc.Table(tn2), tn2)
geomOrd, geogOrd := 1, 2
testCases := []struct {
filters string
indexOrd int
invertedExpr string
}{
{
filters: "st_covers(geom1, geom2)",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2)",
},
{
filters: "st_covers(geom2, geom1)",
indexOrd: geomOrd,
invertedExpr: "st_coveredby(geom1, geom2)",
},
{
filters: "st_coveredby(geog1, geog2)",
indexOrd: geogOrd,
invertedExpr: "st_coveredby(geog1, geog2)",
},
{
filters: "st_coveredby(geog2, geog1)",
indexOrd: geogOrd,
invertedExpr: "st_covers(geog1, geog2)",
},
{
filters: "st_containsproperly(geom2, geom1)",
indexOrd: geomOrd,
invertedExpr: "st_coveredby(geom1, geom2)",
},
{
filters: "st_dwithin(geog2, geog1, 1)",
indexOrd: geogOrd,
invertedExpr: "st_dwithin(geog1, geog2, 1)",
},
{
filters: "st_dfullywithin(geom2, geom1, 1)",
indexOrd: geomOrd,
invertedExpr: "st_dfullywithin(geom1, geom2, 1)",
},
{
filters: "st_intersects(geom1, geom2)",
indexOrd: geomOrd,
invertedExpr: "st_intersects(geom1, geom2)",
},
{
filters: "st_overlaps(geom2, geom1)",
indexOrd: geomOrd,
invertedExpr: "st_intersects(geom1, geom2)",
},
{
// Wrong index ordinal.
filters: "st_covers(geom1, geom2)",
indexOrd: geogOrd,
invertedExpr: "",
},
{
// We can perform a join using two geospatial functions on the same
// indexed column, even if the input columns are different.
filters: "st_covers(geom1, geom2) AND st_covers(geom2, geom11)",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2) AND st_coveredby(geom11, geom2)",
},
{
// We can perform a join using two geospatial functions on the same
// indexed column, even if the input columns are different.
filters: "st_covers(geog2, geog1) AND st_dwithin(geog11, geog2, 10)",
indexOrd: geogOrd,
invertedExpr: "st_coveredby(geog1, geog2) AND st_dwithin(geog11, geog2, 10)",
},
{
// We can perform a join using two geospatial functions on the same
// indexed column, even if the input columns are different.
filters: "st_covers(geom1, geom2) OR st_covers(geom2, geom11)",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2) OR st_coveredby(geom11, geom2)",
},
{
// When functions affecting two different geospatial variables are OR-ed,
// we cannot perform an inverted join.
filters: "st_covers(geom1, geom2) OR st_covers(geog1, geog2)",
indexOrd: geomOrd,
invertedExpr: "",
},
{
// We can constrain either index when the functions are AND-ed.
filters: "st_covers(geom1, geom2) AND st_covers(geog1, geog2)",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2)",
},
{
// We can constrain either index when the functions are AND-ed.
filters: "st_covers(geom1, geom2) AND st_covers(geog1, geog2)",
indexOrd: geogOrd,
invertedExpr: "st_covers(geog1, geog2)",
},
{
// Join conditions can be combined with index constraints.
filters: "st_covers(geom1, geom2) AND " +
"st_covers(geom2, 'LINESTRING ( 0 0, 0 2 )'::geometry)",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2) AND " +
"st_coveredby('LINESTRING ( 0 0, 0 2 )'::geometry, geom2)",
},
{
// Join conditions can be combined with index constraints.
filters: "st_covers(geom1, geom2) AND " +
"st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom2) AND " +
"st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom1)",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2) AND " +
"st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom2)",
},
{
// At least one column from the input is required.
filters: "st_covers(geom2, 'LINESTRING ( 0 0, 0 2 )'::geometry)",
indexOrd: geomOrd,
invertedExpr: "",
},
{
// AND with a non-geospatial function.
filters: "st_covers(geom1, geom2) AND inet_same_family(inet1, inet2)",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2)",
},
{
// OR with a non-geospatial function.
filters: "st_covers(geom1, geom2) OR inet_same_family(inet1, inet2)",
indexOrd: geomOrd,
invertedExpr: "",
},
{
// Arbitrarily complex join condition.
filters: "st_covers(geog2, geog1) OR (" +
"st_dwithin(geog11, geog2, 100) AND st_covers(geom1, geom2) AND " +
"st_covers(geog2, 'SRID=4326;POINT(-40.23456 70.456772)'::geography)) AND " +
"st_overlaps(geom2, geom1) AND " +
"st_covers('SRID=4326;POINT(-42.89456 75.938299)'::geography, geog2)",
indexOrd: geogOrd,
invertedExpr: "st_coveredby(geog1, geog2) OR (" +
"st_dwithin(geog11, geog2, 100) AND " +
"st_coveredby('SRID=4326;POINT(-40.23456 70.456772)'::geography, geog2)) AND " +
"st_covers('SRID=4326;POINT(-42.89456 75.938299)'::geography, geog2)",
},
// Bounding box operators.
{
filters: "bbox1 ~ geom2",
indexOrd: geomOrd,
invertedExpr: "st_covers(bbox1::geometry, geom2)",
},
{
filters: "geom2 ~ bbox1",
indexOrd: geomOrd,
invertedExpr: "st_coveredby(bbox1::geometry, geom2)",
},
{
filters: "geom1 ~ geom2",
indexOrd: geomOrd,
invertedExpr: "st_covers(geom1, geom2)",
},
{
filters: "geom2 ~ geom1",
indexOrd: geomOrd,
invertedExpr: "st_coveredby(geom1, geom2)",
},
{
filters: "bbox1 && geom2",
indexOrd: geomOrd,
invertedExpr: "st_intersects(bbox1::geometry, geom2)",
},
{
filters: "geom2 && bbox1",
indexOrd: geomOrd,
invertedExpr: "st_intersects(bbox1::geometry, geom2)",
},
{
filters: "geom1 && geom2",
indexOrd: geomOrd,
invertedExpr: "st_intersects(geom1, geom2)",
},
{
filters: "geom2 && geom1",
indexOrd: geomOrd,
invertedExpr: "st_intersects(geom1, geom2)",
},
{
filters: "geom2 && geom1 AND 'BOX(1 2, 3 4)'::box2d ~ geom2",
indexOrd: geomOrd,
invertedExpr: "st_intersects(geom1, geom2) AND " +
"st_covers('BOX(1 2, 3 4)'::box2d::geometry, geom2)",
},
{
// Wrong index ordinal.
filters: "bbox1 ~ geom2",
indexOrd: geogOrd,
invertedExpr: "",
},
{
// At least one column from the input is required.
filters: "bbox2 ~ geom2",
indexOrd: geomOrd,
invertedExpr: "",
},
{
// At least one column from the input is required.
filters: "'BOX(1 2, 3 4)'::box2d ~ geom2",
indexOrd: geomOrd,
invertedExpr: "",
},
{
// Wrong types.
filters: "geom1::string ~ geom2::string",
indexOrd: geomOrd,
invertedExpr: "",
},
}
for _, tc := range testCases {
t.Logf("test case: %v", tc)
filters := testutils.BuildFilters(t, &f, &semaCtx, evalCtx, tc.filters)
var inputCols opt.ColSet
for i, n := 0, md.Table(tab1).ColumnCount(); i < n; i++ {
inputCols.Add(tab1.ColumnID(i))
}
actInvertedExpr := invertedidx.TryJoinInvertedIndex(
context.Background(), &f, filters, tab2, md.Table(tab2).Index(tc.indexOrd), inputCols,
)
if actInvertedExpr == nil {
if tc.invertedExpr != "" {
t.Fatalf("expected %s, got <nil>", tc.invertedExpr)
}
continue
}
if tc.invertedExpr == "" {
t.Fatalf("expected <nil>, got %v", actInvertedExpr)
}
expInvertedExpr := testutils.BuildScalar(t, &f, &semaCtx, evalCtx, tc.invertedExpr)
if actInvertedExpr.String() != expInvertedExpr.String() {
t.Errorf("expected %v, got %v", expInvertedExpr, actInvertedExpr)
}
}
}
func TestTryFilterGeoIndex(t *testing.T) {
semaCtx := tree.MakeSemaContext()
st := cluster.MakeTestingClusterSettings()
evalCtx := eval.NewTestingEvalContext(st)
tc := testcat.New()
if _, err := tc.ExecuteDDL(
"CREATE TABLE t (geom GEOMETRY, geog GEOGRAPHY, INVERTED INDEX (geom), INVERTED INDEX (geog))",
); err != nil {
t.Fatal(err)
}
var f norm.Factory
f.Init(context.Background(), evalCtx, tc)
md := f.Metadata()
tn := tree.NewUnqualifiedTableName("t")
tab := md.AddTable(tc.Table(tn), tn)
geomOrd, geogOrd := 1, 2
testCases := []struct {
filters string
indexOrd int
ok bool
preFilterExpr string
preFilterCol opt.ColumnID
preFilterTypeFamily types.Family
}{
{
filters: "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
// Still works with arguments commuted.
filters: "st_intersects(geom, 'LINESTRING ( 0 0, 0 2 )'::geometry)",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
indexOrd: geogOrd,
ok: true,
preFilterExpr: "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
preFilterCol: 2,
preFilterTypeFamily: types.GeographyFamily,
},
{
// Still works with arguments commuted.
filters: "st_covers(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
indexOrd: geogOrd,
ok: true,
preFilterExpr: "st_coveredby('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
preFilterCol: 2,
preFilterTypeFamily: types.GeographyFamily,
},
{
// Wrong index ordinal.
filters: "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
indexOrd: geomOrd,
ok: false,
},
{
// Wrong index ordinal.
filters: "st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
indexOrd: geogOrd,
ok: false,
},
{
// When functions affecting two different geospatial variables are OR-ed,
// we cannot constrain either index.
filters: "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom) OR " +
"st_coveredby(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
indexOrd: geomOrd,
ok: false,
},
{
// We can constrain either index when the functions are AND-ed.
filters: "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom) AND " +
"st_coveredby(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
// We can constrain either index when the functions are AND-ed.
filters: "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom) AND " +
"st_coveredby(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
indexOrd: geogOrd,
ok: true,
preFilterExpr: "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
preFilterCol: 2,
preFilterTypeFamily: types.GeographyFamily,
},
// Bounding box operators.
{
filters: "'BOX(1 2, 3 4)'::box2d ~ geom",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_covers('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "geom ~ 'BOX(1 2, 3 4)'::box2d",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_coveredby('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "'LINESTRING ( 0 0, 0 2 )'::geometry ~ geom",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "geom ~ 'LINESTRING ( 0 0, 0 2 )'::geometry",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_coveredby('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "'BOX(1 2, 3 4)'::box2d && geom",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_intersects('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "geom && 'BOX(1 2, 3 4)'::box2d",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_intersects('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "'LINESTRING ( 0 0, 0 2 )'::geometry && geom",
indexOrd: geomOrd,
ok: true,
preFilterExpr: "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
},
{
filters: "geom && 'LINESTRING ( 0 0, 0 2 )'::geometry",
indexOrd: geomOrd,
preFilterExpr: "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
preFilterCol: 1,
preFilterTypeFamily: types.GeometryFamily,
ok: true,
},
{
// Wrong index ordinal.
filters: "'BOX(1 2, 3 4)'::box2d ~ geom",
indexOrd: geogOrd,
ok: false,
},
}
for _, tc := range testCases {
t.Logf("test case: %v", tc)
filters := testutils.BuildFilters(t, &f, &semaCtx, evalCtx, tc.filters)
// We're not testing that the correct SpanExpression is returned here;
// that is tested elsewhere. This is just testing that we are constraining
// the index when we expect to.
spanExpr, _, remainingFilters, pfState, ok := invertedidx.TryFilterInvertedIndex(
context.Background(),
evalCtx,
&f,
filters,
nil, /* optionalFilters */
tab,
md.Table(tab).Index(tc.indexOrd),
nil, /* computedColumns */
)
if tc.ok != ok {
t.Fatalf("expected %v, got %v", tc.ok, ok)
}
if ok {
if spanExpr.Unique {
t.Fatalf("span expressions for geospatial indexes should never have Unique=true")
}
if spanExpr.Tight {
t.Fatalf("span expressions for geospatial indexes should never have Tight=true")
}
if remainingFilters.String() != filters.String() {
t.Errorf("expected remainingFilters=%v, got %v", filters, remainingFilters)
}
if len(tc.preFilterExpr) == 0 {
require.Nil(t, pfState)
} else {
require.NotNil(t, pfState)
pfExpr := testutils.BuildScalar(t, &f, &semaCtx, evalCtx, tc.preFilterExpr)
require.Equal(t, pfExpr.String(), pfState.Expr.String())
require.Equal(t, tc.preFilterCol, pfState.Col)
require.Equal(t, tc.preFilterTypeFamily, pfState.Typ.Family())
}
}
}
}
func TestPreFilterer(t *testing.T) {
// Test cases do pre-filtering for (geoShapes[i], geoShapes[j]) for all i,
// j.
geoShapes := []string{
"SRID=4326;POINT(0 0)",
"SRID=4326;POINT(5 5)",
"SRID=4326;LINESTRING(8 8, 9 9)",
"SRID=4326;POLYGON((0 0, 5 0, 5 5, 0 5, 0 0))",
}
testCases := []struct {
// The typ, relationship and relationshipParams determine how the
// PreFilterer works.
typ *types.T
relationship geoindex.RelationshipType
relationshipParams []tree.Datum
shapes []string
expected [][]bool
// excludeFromPreFilters excludes shapes at the given indexes from being
// used in Bind calls.
excludeFromPreFilters []int
}{
{
typ: types.Geometry,
relationship: geoindex.Intersects,
shapes: geoShapes,
expected: [][]bool{
{true, false, false, true},
{false, true, false, true},
{false, false, true, false},
{true, true, false, true},
},
},
{
typ: types.Geometry,
relationship: geoindex.Covers,
shapes: geoShapes,
expected: [][]bool{
{true, false, false, true},
{false, true, false, true},
{false, false, true, false},
{false, false, false, true},
},
},
{
typ: types.Geometry,
relationship: geoindex.CoveredBy,
shapes: geoShapes,
expected: [][]bool{
{true, false, false, false},
{false, true, false, false},
{false, false, true, false},
{true, true, false, true},
},
},
{
typ: types.Geometry,
relationship: geoindex.DWithin,
relationshipParams: []tree.Datum{tree.NewDFloat(3)},
shapes: geoShapes,
expected: [][]bool{
{true, false, false, true},
{false, true, true, true},
{false, true, true, true},
{true, true, true, true},
},
},
{
typ: types.Geometry,
relationship: geoindex.DFullyWithin,
relationshipParams: []tree.Datum{tree.NewDFloat(3)},
shapes: geoShapes,
expected: [][]bool{
{true, false, false, false},
{false, true, false, false},
{false, true, true, false},
{true, true, false, true},
},
},
{
typ: types.Geography,
relationship: geoindex.Intersects,
shapes: geoShapes,
expected: [][]bool{
{true, false, false, true},
{false, true, false, true},
{false, false, true, false},
{true, true, false, true},
},
},
{
typ: types.Geography,
relationship: geoindex.Covers,
shapes: geoShapes,
expected: [][]bool{
{true, false, false, true},
{false, true, false, true},
{false, false, true, false},
{false, false, false, true},
},
},
{
typ: types.Geography,
relationship: geoindex.CoveredBy,
shapes: geoShapes,
expected: [][]bool{
{true, false, false, false},
{false, true, false, false},
{false, false, true, false},
{true, true, false, true},
},
},
{
typ: types.Geography,
relationship: geoindex.DWithin,
relationshipParams: []tree.Datum{tree.NewDFloat(3)},
shapes: geoShapes,
expected: [][]bool{
{true, false, false, true},
{false, true, false, true},
{false, false, true, false},
{true, true, false, true},
},
},
{
typ: types.Geography,
relationship: geoindex.DWithin,
relationshipParams: []tree.Datum{tree.NewDFloat(3)},
shapes: geoShapes,
excludeFromPreFilters: []int{2},
expected: [][]bool{
{true, false, true},
{false, true, true},
{false, false, false},
{true, true, true},
},
},
}
encodeInv := func(bbox geopb.BoundingBox) inverted.EncVal {
var b []byte
b = encoding.EncodeGeoInvertedAscending(b)
// Arbitrary cellid
b = encoding.EncodeUvarintAscending(b, math.MaxUint32)
b = encoding.EncodeGeoInvertedBBox(b, bbox.LoX, bbox.LoY, bbox.HiX, bbox.HiY)
return b
}
for i, tc := range testCases {
t.Run(strconv.Itoa(i+1), func(t *testing.T) {
filterer := invertedidx.NewPreFilterer(tc.typ, tc.relationship, tc.relationshipParams)
var toBind []tree.Datum
var toPreFilter []inverted.EncVal
includeBind := func(index int) bool {
for _, exclude := range tc.excludeFromPreFilters {
if exclude == index {
return false
}
}
return true
}
for i, shape := range tc.shapes {
switch tc.typ {
case types.Geometry:
g, err := geo.ParseGeometry(shape)
require.NoError(t, err)
if includeBind(i) {
toBind = append(toBind, tree.NewDGeometry(g))
}
toPreFilter = append(toPreFilter, encodeInv(*g.BoundingBoxRef()))
case types.Geography:
g, err := geo.ParseGeography(shape)
require.NoError(t, err)
if includeBind(i) {
toBind = append(toBind, tree.NewDGeography(g))
}
rect := g.BoundingRect()
toPreFilter = append(toPreFilter,
encodeInv(geopb.BoundingBox{
LoX: rect.Lng.Lo,
HiX: rect.Lng.Hi,
LoY: rect.Lat.Lo,
HiY: rect.Lat.Hi,
}))
}
}
var preFilterState []interface{}
for _, d := range toBind {
preFilterState = append(preFilterState, filterer.Bind(d))
}
result := make([]bool, len(preFilterState))
for i, enc := range toPreFilter {
res, err := filterer.PreFilter(enc, preFilterState, result)
require.NoError(t, err)
expectedRes := false
for _, b := range result {
expectedRes = expectedRes || b
}
require.Equal(t, expectedRes, res)
require.Equal(t, tc.expected[i], result)
}
})
}
}
// TODO(sumeer): test for NewGeoDatumsToInvertedExpr, geoDatumsToInvertedExpr.
| pkg/sql/opt/invertedidx/geo_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00019071184215135872,
0.00017042223771568388,
0.00016482167120557278,
0.00016982282977551222,
0.0000036876190279144794
] |
{
"id": 11,
"code_window": [
"\t\tstopper.Stop(ctx)\n",
"\t}\n",
"\treturn serverStream, clientStream, cleanup, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn serverStream, clientStream, cleanup\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 64
} | echo
----
db0.Put(ctx, tk(1), sv(1)) // @0.000000001,0 <nil>
db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
txn.Put(ctx, tk(2), sv(2)) // <nil>
txn.DelRange(ctx, tk(1), tk(3), true /* @s3 */) // (/Table/100/"0000000000000001", /Table/100/"0000000000000002", <nil>)
return nil
}) // @0.000000002,0 <nil>
/Table/100/"0000000000000001"/0.000000001,0 @ s1 v1
/Table/100/"0000000000000001"/0.000000002,0 @ s3 <nil>
/Table/100/"0000000000000002"/0.000000003,0 @ s3 <nil>
committed txn non-atomic timestamps: [w]/Table/100/"0000000000000002":missing->v2@s2 [dr.d]/Table/100/"0000000000000001":0.000000002,0-><nil>@s3 [dr.d]/Table/100/"0000000000000002":0.000000003,0-><nil>@s3 [dr.s]/Table/100/"000000000000000{1"-3"}:{gap:[<min>, 0.000000001,0),[0.000000003,0, <max>)}->[]
| pkg/kv/kvnemesis/testdata/TestValidate/one_transactional_put_shadowed_by_deleterange_after_writes_with_write_timestamp_disagreement | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00017130246851593256,
0.00016878033056855202,
0.00016625819262117147,
0.00016878033056855202,
0.0000025221379473805428
] |
{
"id": 11,
"code_window": [
"\t\tstopper.Stop(ctx)\n",
"\t}\n",
"\treturn serverStream, clientStream, cleanup, nil\n",
"}"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\treturn serverStream, clientStream, cleanup\n"
],
"file_path": "pkg/sql/flowinfra/utils_test.go",
"type": "replace",
"edit_start_line_idx": 64
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package poison
import (
"fmt"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/errors"
)
// NewPoisonedError instantiates a *PoisonedError referencing a poisoned latch
// (as identified by span and timestamp).
func NewPoisonedError(span roachpb.Span, ts hlc.Timestamp) *PoisonedError {
return &PoisonedError{Span: span, Timestamp: ts}
}
var _ errors.SafeFormatter = (*PoisonedError)(nil)
var _ fmt.Formatter = (*PoisonedError)(nil)
// SafeFormatError implements errors.SafeFormatter.
func (e *PoisonedError) SafeFormatError(p errors.Printer) error {
p.Printf("encountered poisoned latch %s@%s", e.Span, e.Timestamp)
return nil
}
// Format implements fmt.Formatter.
func (e *PoisonedError) Format(s fmt.State, verb rune) { errors.FormatError(e, s, verb) }
// Error implements error.
func (e *PoisonedError) Error() string {
return fmt.Sprint(e)
}
| pkg/kv/kvserver/concurrency/poison/error.go | 0 | https://github.com/cockroachdb/cockroach/commit/b2e5a1172377ea612528a7d0bdd7841228d707c0 | [
0.00026234835968352854,
0.00019930629059672356,
0.00017034407937899232,
0.00017555415979586542,
0.000035966208088211715
] |
{
"id": 0,
"code_window": [
"func dataSourceAwsAvailabilityZones() *schema.Resource {\n",
"\treturn &schema.Resource{\n",
"\t\tRead: dataSourceAwsAvailabilityZonesRead,\n",
"\n",
"\t\tSchema: map[string]*schema.Schema{\n",
"\t\t\t\"instance\": &schema.Schema{\n",
"\t\t\t\tType: schema.TypeList,\n",
"\t\t\t\tComputed: true,\n",
"\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n",
"\t\t\t},\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"names\": &schema.Schema{\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 18
} | package aws
import (
"fmt"
"reflect"
"sort"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSAvailabilityZones_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckAwsAvailabilityZonesConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsAvailabilityZonesMeta("data.aws_availability_zones.availability_zones"),
),
},
},
})
}
func testAccCheckAwsAvailabilityZonesMeta(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Can't find AZ resource: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("AZ resource ID not set")
}
actual, err := testAccCheckAwsAvailabilityZonesBuildAvailable(rs.Primary.Attributes)
if err != nil {
return err
}
expected := actual
sort.Strings(expected)
if reflect.DeepEqual(expected, actual) != true {
return fmt.Errorf("AZs not sorted - expected %v, got %v", expected, actual)
}
return nil
}
}
func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) {
v, ok := attrs["instance.#"]
if !ok {
return nil, fmt.Errorf("Available AZ list is missing")
}
qty, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
if qty < 1 {
return nil, fmt.Errorf("No AZs found in region, this is probably a bug.")
}
zones := make([]string, qty)
for n := range zones {
zone, ok := attrs["instance."+strconv.Itoa(n)]
if !ok {
return nil, fmt.Errorf("AZ list corrupt, this is definitely a bug")
}
zones[n] = zone
}
return zones, nil
}
const testAccCheckAwsAvailabilityZonesConfig = `
data "aws_availability_zones" "availability_zones" {
}
`
| builtin/providers/aws/data_source_availability_zones_test.go | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.2508305609226227,
0.03270432725548744,
0.00016518132179044187,
0.0009071071399375796,
0.07760536670684814
] |
{
"id": 0,
"code_window": [
"func dataSourceAwsAvailabilityZones() *schema.Resource {\n",
"\treturn &schema.Resource{\n",
"\t\tRead: dataSourceAwsAvailabilityZonesRead,\n",
"\n",
"\t\tSchema: map[string]*schema.Schema{\n",
"\t\t\t\"instance\": &schema.Schema{\n",
"\t\t\t\tType: schema.TypeList,\n",
"\t\t\t\tComputed: true,\n",
"\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n",
"\t\t\t},\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"names\": &schema.Schema{\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 18
} | ---
layout: "aws"
page_title: "AWS: aws_elasticache_security_group"
sidebar_current: "docs-aws-resource-elasticache-security-group"
description: |-
Provides an ElastiCache Security Group to control access to one or more cache clusters.
---
# aws\_elasticache\_security\_<wbr>group
Provides an ElastiCache Security Group to control access to one or more cache
clusters.
~> **NOTE:** ElastiCache Security Groups are for use only when working with an
ElastiCache cluster **outside** of a VPC. If you are using a VPC, see the
[ElastiCache Subnet Group resource](elasticache_subnet_group.html).
## Example Usage
```
resource "aws_security_group" "bar" {
name = "security-group"
}
resource "aws_elasticache_security_group" "bar" {
name = "elasticache-security-group"
security_group_names = ["${aws_security_group.bar.name}"]
}
```
## Argument Reference
The following arguments are supported:
* `name` – (Required) Name for the cache security group. This value is stored as a lowercase string.
* `description` – (Optional) description for the cache security group. Defaults to "Managed by Terraform".
* `security_group_names` – (Required) List of EC2 security group names to be
authorized for ingress to the cache security group
## Attributes Reference
The following attributes are exported:
* `description`
* `name`
* `security_group_names`
| website/source/docs/providers/aws/r/elasticache_security_group.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00016793272516224533,
0.00016516089090146124,
0.0001598603994352743,
0.00016630259051453322,
0.0000027797671009466285
] |
{
"id": 0,
"code_window": [
"func dataSourceAwsAvailabilityZones() *schema.Resource {\n",
"\treturn &schema.Resource{\n",
"\t\tRead: dataSourceAwsAvailabilityZonesRead,\n",
"\n",
"\t\tSchema: map[string]*schema.Schema{\n",
"\t\t\t\"instance\": &schema.Schema{\n",
"\t\t\t\tType: schema.TypeList,\n",
"\t\t\t\tComputed: true,\n",
"\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n",
"\t\t\t},\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"names\": &schema.Schema{\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 18
} | #!/usr/bin/env bash
export OS_AUTH_URL=http://KEYSTONE.ENDPOINT.URL:5000/v2.0
export OS_TENANT_NAME=YOUR_TENANT_NAME
export OS_USERNAME=YOUR_USERNAME
export OS_PASSWORD=YOUR_PASSWORD
export OS_REGION_NAME=YOUR_REGION_NAME
| examples/openstack-with-networking/openrc.sample | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00016927656542975456,
0.00016927656542975456,
0.00016927656542975456,
0.00016927656542975456,
0
] |
{
"id": 0,
"code_window": [
"func dataSourceAwsAvailabilityZones() *schema.Resource {\n",
"\treturn &schema.Resource{\n",
"\t\tRead: dataSourceAwsAvailabilityZonesRead,\n",
"\n",
"\t\tSchema: map[string]*schema.Schema{\n",
"\t\t\t\"instance\": &schema.Schema{\n",
"\t\t\t\tType: schema.TypeList,\n",
"\t\t\t\tComputed: true,\n",
"\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n",
"\t\t\t},\n",
"\t\t},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"names\": &schema.Schema{\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 18
} | package godo
import "fmt"
// ArgError is an error that represents an error with an input to godo. It
// identifies the argument and the cause (if possible).
type ArgError struct {
arg string
reason string
}
var _ error = &ArgError{}
// NewArgError creates an InputError.
func NewArgError(arg, reason string) *ArgError {
return &ArgError{
arg: arg,
reason: reason,
}
}
func (e *ArgError) Error() string {
return fmt.Sprintf("%s is invalid because %s", e.arg, e.reason)
}
| vendor/github.com/digitalocean/godo/errors.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0007114852778613567,
0.00041108866571448743,
0.0002570573997218162,
0.00026472326135262847,
0.00021243553783278912
] |
{
"id": 1,
"code_window": [
"\n",
"\tsort.Strings(raw)\n",
"\n",
"\tif err := d.Set(\"instance\", raw); err != nil {\n",
"\t\treturn fmt.Errorf(\"[WARN] Error setting availability zones\")\n",
"\t}\n",
"\n",
"\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := d.Set(\"names\", raw); err != nil {\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 46
} | package aws
import (
"fmt"
"log"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAvailabilityZones() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAvailabilityZonesRead,
Schema: map[string]*schema.Schema{
"instance": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
}
}
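// dataSourceAwsAvailabilityZonesRead queries EC2 for the availability zones
// visible to the account, sorts the zone names, and stores the result in the
// "instance" list attribute.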
func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
log.Printf("[DEBUG] Reading availability zones")
d.SetId(time.Now().UTC().String())
req := &ec2.DescribeAvailabilityZonesInput{DryRun: aws.Bool(false)}
azresp, err := conn.DescribeAvailabilityZones(req)
if err != nil {
return fmt.Errorf("Error listing availability zones: %s", err)
}
raw := make([]string, len(azresp.AvailabilityZones))
for i, v := range azresp.AvailabilityZones {
raw[i] = *v.ZoneName
}
sort.Strings(raw)
if err := d.Set("instance", raw); err != nil {
return fmt.Errorf("[WARN] Error setting availability zones")
}
return nil
}
| builtin/providers/aws/data_source_availability_zones.go | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.9982463121414185,
0.16703732311725616,
0.00016576905909460038,
0.0004651919298339635,
0.3717290461063385
] |
{
"id": 1,
"code_window": [
"\n",
"\tsort.Strings(raw)\n",
"\n",
"\tif err := d.Set(\"instance\", raw); err != nil {\n",
"\t\treturn fmt.Errorf(\"[WARN] Error setting availability zones\")\n",
"\t}\n",
"\n",
"\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := d.Set(\"names\", raw); err != nil {\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 46
} | provider "aws" {}
resource "aws_instance" "foo" {}
| terraform/test-fixtures/validate-module-pc-inherit/child/main.tf | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0001690605713520199,
0.0001690605713520199,
0.0001690605713520199,
0.0001690605713520199,
0
] |
{
"id": 1,
"code_window": [
"\n",
"\tsort.Strings(raw)\n",
"\n",
"\tif err := d.Set(\"instance\", raw); err != nil {\n",
"\t\treturn fmt.Errorf(\"[WARN] Error setting availability zones\")\n",
"\t}\n",
"\n",
"\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := d.Set(\"names\", raw); err != nil {\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 46
} | /*
Copyright (c) 2015 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
type DistributedVirtualSwitch struct {
Common
InventoryPath string
}
func NewDistributedVirtualSwitch(c *vim25.Client, ref types.ManagedObjectReference) *DistributedVirtualSwitch {
return &DistributedVirtualSwitch{
Common: NewCommon(c, ref),
}
}
func (s DistributedVirtualSwitch) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) {
return nil, ErrNotSupported // TODO: just to satisfy NetworkReference interface for the finder
}
func (s DistributedVirtualSwitch) Reconfigure(ctx context.Context, spec types.BaseDVSConfigSpec) (*Task, error) {
req := types.ReconfigureDvs_Task{
This: s.Reference(),
Spec: spec,
}
res, err := methods.ReconfigureDvs_Task(ctx, s.Client(), &req)
if err != nil {
return nil, err
}
return NewTask(s.Client(), res.Returnval), nil
}
func (s DistributedVirtualSwitch) AddPortgroup(ctx context.Context, spec []types.DVPortgroupConfigSpec) (*Task, error) {
req := types.AddDVPortgroup_Task{
This: s.Reference(),
Spec: spec,
}
res, err := methods.AddDVPortgroup_Task(ctx, s.Client(), &req)
if err != nil {
return nil, err
}
return NewTask(s.Client(), res.Returnval), nil
}
| vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017806098912842572,
0.00017001830565277487,
0.0001650591439101845,
0.00016808186774142087,
0.000004787579655385343
] |
{
"id": 1,
"code_window": [
"\n",
"\tsort.Strings(raw)\n",
"\n",
"\tif err := d.Set(\"instance\", raw); err != nil {\n",
"\t\treturn fmt.Errorf(\"[WARN] Error setting availability zones\")\n",
"\t}\n",
"\n",
"\treturn nil\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := d.Set(\"names\", raw); err != nil {\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones.go",
"type": "replace",
"edit_start_line_idx": 46
} | package dns
import "github.com/jen20/riviera/azure"
type GetNSRecordSetResponse struct {
ID string `mapstructure:"id"`
Name string `mapstructure:"name"`
Location string `mapstructure:"location"`
Tags map[string]*string `mapstructure:"tags"`
TTL *int `mapstructure:"TTL"`
NSRecords []NSRecord `mapstructure:"NSRecords"`
}
type GetNSRecordSet struct {
Name string `json:"-"`
ResourceGroupName string `json:"-"`
ZoneName string `json:"-"`
}
func (command GetNSRecordSet) APIInfo() azure.APIInfo {
return azure.APIInfo{
APIVersion: apiVersion,
Method: "GET",
URLPathFunc: dnsRecordSetDefaultURLPathFunc(command.ResourceGroupName, command.ZoneName, "NS", command.Name),
ResponseTypeFunc: func() interface{} {
return &GetNSRecordSetResponse{}
},
}
}
| vendor/github.com/jen20/riviera/dns/get_dns_ns_recordset.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0002006709692068398,
0.00018641905626282096,
0.00016530689026694745,
0.00019327930931467563,
0.000015230491953843739
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) {\n",
"\tv, ok := attrs[\"instance.#\"]\n",
"\tif !ok {\n",
"\t\treturn nil, fmt.Errorf(\"Available AZ list is missing\")\n",
"\t}\n",
"\tqty, err := strconv.Atoi(v)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tv, ok := attrs[\"names.#\"]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 54
} | ---
layout: "aws"
page_title: "AWS: aws_availability_zones"
sidebar_current: "docs-aws-datasource-availability-zones"
description: |-
Provides a list of availability zones which can be used by an AWS account
---
# aws\_availability\_zones
The Availability Zones data source allows access to the list of AWS
Availability Zones which can be accessed by an AWS account within the region
configured in the provider.
## Example Usage
```
# Declare the data source
data "aws_availability_zones" "zones" {}
# Create a subnet in each availability zone
resource "aws_subnet" "public" {
count = "${length(data.aws_availability_zones.zones.instance)}"
availability_zone = "${data.aws_availability_zones.zones.instance[count.index]}"
# Other properties...
}
```
## Argument Reference
There are no arguments for this data source.
## Attributes Reference
The following attributes are exported:
* `instance` - A list of the availability zone names available to the account.
| website/source/docs/providers/aws/d/availability_zones.html.markdown | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00033484536106698215,
0.0002135398972313851,
0.00016333305393345654,
0.00017799061606638134,
0.0000702928300597705
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) {\n",
"\tv, ok := attrs[\"instance.#\"]\n",
"\tif !ok {\n",
"\t\treturn nil, fmt.Errorf(\"Available AZ list is missing\")\n",
"\t}\n",
"\tqty, err := strconv.Atoi(v)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tv, ok := attrs[\"names.#\"]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 54
} | ---
layout: "aws"
page_title: "AWS: aws_opsworks_stack"
sidebar_current: "docs-aws-resource-opsworks-stack"
description: |-
Provides an OpsWorks stack resource.
---
# aws\_opsworks\_stack
Provides an OpsWorks stack resource.
## Example Usage
```
resource "aws_opsworks_stack" "main" {
name = "awesome-stack"
region = "us-west-1"
service_role_arn = "${aws_iam_role.opsworks.arn}"
default_instance_profile_arn = "${aws_iam_instance_profile.opsworks.arn}"
custom_json = <<EOT
{
"foobar": {
"version": "1.0.0"
}
}
EOT
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) The name of the stack.
* `region` - (Required) The name of the region where the stack will exist.
* `service_role_arn` - (Required) The ARN of an IAM role that the OpsWorks service will act as.
* `default_instance_profile_arn` - (Required) The ARN of an IAM Instance Profile that created instances
will have by default.
* `agent_version` - (Optional) If set to `"LATEST"`, OpsWorks will automatically install the latest version.
* `berkshelf_version` - (Optional) If `manage_berkshelf` is enabled, the version of Berkshelf to use.
* `color` - (Optional) Color to paint next to the stack's resources in the OpsWorks console.
* `default_availability_zone` - (Optional) Name of the availability zone where instances will be created
by default. This is required unless you set `vpc_id`.
* `configuration_manager_name` - (Optional) Name of the configuration manager to use. Defaults to "Chef".
* `configuration_manager_version` - (Optional) Version of the configuration manager to use. Defaults to "11.4".
* `custom_cookbooks_source` - (Optional) When `use_custom_cookbooks` is set, provide this sub-object as
described below.
* `custom_json` - (Optional) User defined JSON passed to "Chef". Use a "here doc" for multiline JSON.
* `default_os` - (Optional) Name of OS that will be installed on instances by default.
* `default_root_device_type` - (Optional) Name of the type of root device instances will have by default.
* `default_ssh_key_name` - (Optional) Name of the SSH keypair that instances will have by default.
* `default_subnet_id` - (Optional) Id of the subnet in which instances will be created by default. Mandatory
if `vpc_id` is set, and forbidden if it isn't.
* `hostname_theme` - (Optional) Keyword representing the naming scheme that will be used for instance hostnames
within this stack.
* `manage_berkshelf` - (Optional) Boolean value controlling whether Opsworks will run Berkshelf for this stack.
* `use_custom_cookbooks` - (Optional) Boolean value controlling whether the custom cookbook settings are
enabled.
* `use_opsworks_security_groups` - (Optional) Boolean value controlling whether the standard OpsWorks
security groups apply to created instances.
* `vpc_id` - (Optional) The id of the VPC that this stack belongs to.
The `custom_cookbooks_source` block supports the following arguments (see the example after the list):
* `type` - (Required) The type of source to use. For example, "archive".
* `url` - (Required) The URL where the cookbooks resource can be found.
* `username` - (Optional) Username to use when authenticating to the source.
* `password` - (Optional) Password to use when authenticating to the source.
* `ssh_key` - (Optional) SSH key to use when authenticating to the source.
* `revision` - (Optional) For sources that are version-aware, the revision to use.
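For example, a stack that pulls its cookbooks from an archive might be
configured as below; the URL is illustrative and the stack's other required
arguments are omitted:
```
resource "aws_opsworks_stack" "main" {
  # ... required stack arguments as shown above ...
  use_custom_cookbooks = true
  custom_cookbooks_source {
    type = "archive"
    url  = "https://example.com/cookbooks.tar.gz"
  }
}
```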
## Attributes Reference
The following attributes are exported:
* `id` - The id of the stack.
| website/source/docs/providers/aws/r/opsworks_stack.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017833862511906773,
0.00016794427938293666,
0.00016186590073630214,
0.00016693861107341945,
0.000004500617706071353
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) {\n",
"\tv, ok := attrs[\"instance.#\"]\n",
"\tif !ok {\n",
"\t\treturn nil, fmt.Errorf(\"Available AZ list is missing\")\n",
"\t}\n",
"\tqty, err := strconv.Atoi(v)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tv, ok := attrs[\"names.#\"]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 54
} | variable "dnsimple_domain" {
description = "The domain we are creating a record for."
}
| examples/cross-provider/variables.tf | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0001741981686791405,
0.0001741981686791405,
0.0001741981686791405,
0.0001741981686791405,
0
] |
{
"id": 2,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) {\n",
"\tv, ok := attrs[\"instance.#\"]\n",
"\tif !ok {\n",
"\t\treturn nil, fmt.Errorf(\"Available AZ list is missing\")\n",
"\t}\n",
"\tqty, err := strconv.Atoi(v)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tv, ok := attrs[\"names.#\"]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 54
} | package aws
import (
"encoding/json"
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayIntegration() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayIntegrationCreate,
Read: resourceAwsApiGatewayIntegrationRead,
Update: resourceAwsApiGatewayIntegrationUpdate,
Delete: resourceAwsApiGatewayIntegrationDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"resource_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"http_method": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateHTTPMethod,
},
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if value != "MOCK" && value != "AWS" && value != "HTTP" {
errors = append(errors, fmt.Errorf(
"%q must be one of 'AWS', 'MOCK', 'HTTP'", k))
}
return
},
},
"uri": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"credentials": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"integration_http_method": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateHTTPMethod,
},
"request_templates": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Elem: schema.TypeString,
},
"request_parameters_in_json": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
}
}
func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
var integrationHttpMethod *string
if v, ok := d.GetOk("integration_http_method"); ok {
integrationHttpMethod = aws.String(v.(string))
}
var uri *string
if v, ok := d.GetOk("uri"); ok {
uri = aws.String(v.(string))
}
templates := make(map[string]string)
for k, v := range d.Get("request_templates").(map[string]interface{}) {
templates[k] = v.(string)
}
parameters := make(map[string]string)
if v, ok := d.GetOk("request_parameters_in_json"); ok {
if err := json.Unmarshal([]byte(v.(string)), ¶meters); err != nil {
return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err)
}
}
var credentials *string
if val, ok := d.GetOk("credentials"); ok {
credentials = aws.String(val.(string))
}
_, err := conn.PutIntegration(&apigateway.PutIntegrationInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
Type: aws.String(d.Get("type").(string)),
IntegrationHttpMethod: integrationHttpMethod,
Uri: uri,
// TODO reimplement once [GH-2143](https://github.com/hashicorp/terraform/issues/2143) has been implemented
RequestParameters: aws.StringMap(parameters),
RequestTemplates: aws.StringMap(templates),
Credentials: credentials,
CacheNamespace: nil,
CacheKeyParameters: nil,
})
if err != nil {
return fmt.Errorf("Error creating API Gateway Integration: %s", err)
}
d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
return nil
}
func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Integration %s", d.Id())
integration, err := conn.GetIntegration(&apigateway.GetIntegrationInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Integration: %s", integration)
d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
// AWS converts "" to null on their side, convert it back
if v, ok := integration.RequestTemplates["application/json"]; ok && v == nil {
integration.RequestTemplates["application/json"] = aws.String("")
}
d.Set("request_templates", aws.StringValueMap(integration.RequestTemplates))
d.Set("credentials", integration.Credentials)
d.Set("type", integration.Type)
d.Set("uri", integration.Uri)
d.Set("request_parameters_in_json", aws.StringValueMap(integration.RequestParameters))
return nil
}
func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interface{}) error {
return resourceAwsApiGatewayIntegrationCreate(d, meta)
}
func resourceAwsApiGatewayIntegrationDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Integration: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteIntegration(&apigateway.DeleteIntegrationInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err == nil {
return nil
}
apigatewayErr, ok := err.(awserr.Error)
if apigatewayErr.Code() == "NotFoundException" {
return nil
}
if !ok {
return resource.NonRetryableError(err)
}
return resource.NonRetryableError(err)
})
}
| builtin/providers/aws/resource_aws_api_gateway_integration.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.47285544872283936,
0.02471909672021866,
0.00016245980805251747,
0.0002014512720052153,
0.10283000767230988
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\tzones := make([]string, qty)\n",
"\tfor n := range zones {\n",
"\t\tzone, ok := attrs[\"instance.\"+strconv.Itoa(n)]\n",
"\t\tif !ok {\n",
"\t\t\treturn nil, fmt.Errorf(\"AZ list corrupt, this is definitely a bug\")\n",
"\t\t}\n",
"\t\tzones[n] = zone\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tzone, ok := attrs[\"names.\"+strconv.Itoa(n)]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 67
} | package aws
import (
"fmt"
"log"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAvailabilityZones() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAvailabilityZonesRead,
Schema: map[string]*schema.Schema{
"instance": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
}
}
func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
log.Printf("[DEBUG] Reading availability zones")
d.SetId(time.Now().UTC().String())
req := &ec2.DescribeAvailabilityZonesInput{DryRun: aws.Bool(false)}
azresp, err := conn.DescribeAvailabilityZones(req)
if err != nil {
return fmt.Errorf("Error listing availability zones: %s", err)
}
raw := make([]string, len(azresp.AvailabilityZones))
for i, v := range azresp.AvailabilityZones {
raw[i] = *v.ZoneName
}
sort.Strings(raw)
if err := d.Set("instance", raw); err != nil {
return fmt.Errorf("[WARN] Error setting availability zones")
}
return nil
}
| builtin/providers/aws/data_source_availability_zones.go | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.002855890430510044,
0.0006915331468917429,
0.00016661877452861518,
0.00023857603082433343,
0.0009744614944793284
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\tzones := make([]string, qty)\n",
"\tfor n := range zones {\n",
"\t\tzone, ok := attrs[\"instance.\"+strconv.Itoa(n)]\n",
"\t\tif !ok {\n",
"\t\t\treturn nil, fmt.Errorf(\"AZ list corrupt, this is definitely a bug\")\n",
"\t\t}\n",
"\t\tzones[n] = zone\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tzone, ok := attrs[\"names.\"+strconv.Itoa(n)]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 67
} | package remote
import (
"fmt"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/service/s3"
)
func TestS3Client_impl(t *testing.T) {
var _ Client = new(S3Client)
}
func TestS3Factory(t *testing.T) {
// This test just instantiates the client. Shouldn't make any actual
// requests nor incur any costs.
config := make(map[string]string)
// Empty config is an error
_, err := s3Factory(config)
if err == nil {
t.Fatalf("Empty config should be error")
}
config["region"] = "us-west-1"
config["bucket"] = "foo"
config["key"] = "bar"
config["encrypt"] = "1"
// For this test we'll provide the credentials as config. The
// acceptance tests implicitly test passing credentials as
// environment variables.
config["access_key"] = "bazkey"
config["secret_key"] = "bazsecret"
client, err := s3Factory(config)
if err != nil {
t.Fatalf("Error for valid config")
}
s3Client := client.(*S3Client)
if *s3Client.nativeClient.Config.Region != "us-west-1" {
t.Fatalf("Incorrect region was populated")
}
if s3Client.bucketName != "foo" {
t.Fatalf("Incorrect bucketName was populated")
}
if s3Client.keyName != "bar" {
t.Fatalf("Incorrect keyName was populated")
}
credentials, err := s3Client.nativeClient.Config.Credentials.Get()
if err != nil {
t.Fatalf("Error when requesting credentials")
}
if credentials.AccessKeyID != "bazkey" {
t.Fatalf("Incorrect Access Key Id was populated")
}
if credentials.SecretAccessKey != "bazsecret" {
t.Fatalf("Incorrect Secret Access Key was populated")
}
}
func TestS3Client(t *testing.T) {
// This test creates a bucket in S3 and populates it.
// It may incur costs, so it will only run if AWS credential environment
// variables are present.
accessKeyId := os.Getenv("AWS_ACCESS_KEY_ID")
if accessKeyId == "" {
t.Skipf("skipping; AWS_ACCESS_KEY_ID must be set")
}
regionName := os.Getenv("AWS_DEFAULT_REGION")
if regionName == "" {
regionName = "us-west-2"
}
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
testData := []byte(`testing data`)
config := make(map[string]string)
config["region"] = regionName
config["bucket"] = bucketName
config["key"] = keyName
config["encrypt"] = "1"
client, err := s3Factory(config)
if err != nil {
t.Fatalf("Error for valid config")
}
s3Client := client.(*S3Client)
nativeClient := s3Client.nativeClient
createBucketReq := &s3.CreateBucketInput{
Bucket: &bucketName,
}
// Be clear about what we're doing in case the user needs to clean
// this up later.
t.Logf("Creating S3 bucket %s in %s", bucketName, regionName)
_, err = nativeClient.CreateBucket(createBucketReq)
if err != nil {
t.Skipf("Failed to create test S3 bucket, so skipping")
}
// Ensure we can perform a PUT request with the encryption header
err = s3Client.Put(testData)
if err != nil {
t.Logf("WARNING: Failed to send test data to S3 bucket. (error was %s)", err)
}
defer func() {
deleteBucketReq := &s3.DeleteBucketInput{
Bucket: &bucketName,
}
_, err := nativeClient.DeleteBucket(deleteBucketReq)
if err != nil {
t.Logf("WARNING: Failed to delete the test S3 bucket. It has been left in your AWS account and may incur storage charges. (error was %s)", err)
}
}()
testClient(t, client)
}
| state/remote/s3_test.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017662468599155545,
0.0001723522145766765,
0.00016598243382759392,
0.0001726949994917959,
0.000003507315113893128
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\tzones := make([]string, qty)\n",
"\tfor n := range zones {\n",
"\t\tzone, ok := attrs[\"instance.\"+strconv.Itoa(n)]\n",
"\t\tif !ok {\n",
"\t\t\treturn nil, fmt.Errorf(\"AZ list corrupt, this is definitely a bug\")\n",
"\t\t}\n",
"\t\tzones[n] = zone\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tzone, ok := attrs[\"names.\"+strconv.Itoa(n)]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 67
} | // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package kms
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
"github.com/aws/aws-sdk-go/private/signer/v4"
)
// AWS Key Management Service (AWS KMS) is an encryption and key management
// web service. This guide describes the AWS KMS operations that you can call
// programmatically. For general information about AWS KMS, see the AWS Key
// Management Service Developer Guide (http://docs.aws.amazon.com/kms/latest/developerguide/).
//
// AWS provides SDKs that consist of libraries and sample code for various
// programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.).
// The SDKs provide a convenient way to create programmatic access to AWS KMS
// and other AWS services. For example, the SDKs take care of tasks such as
// signing requests (see below), managing errors, and retrying requests automatically.
// For more information about the AWS SDKs, including how to download and install
// them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
//
// We recommend that you use the AWS SDKs to make programmatic API calls to
// AWS KMS.
//
// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS
// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy
// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral
// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support
// these modes.
//
// Signing Requests
//
// Requests must be signed by using an access key ID and a secret access key.
// We strongly recommend that you do not use your AWS account access key ID
// and secret key for everyday work with AWS KMS. Instead, use the access key
// ID and secret access key for an IAM user, or you can use the AWS Security
// Token Service to generate temporary security credentials that you can use
// to sign requests.
//
// All AWS KMS operations require Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
//
// Logging API Requests
//
// AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related
// events for your AWS account and delivers them to an Amazon S3 bucket that
// you specify. By using the information collected by CloudTrail, you can determine
// what requests were made to AWS KMS, who made the request, when it was made,
// and so on. To learn more about CloudTrail, including how to turn it on and
// find your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/).
//
// Additional Resources
//
// For more information about credentials and request signing, see the following:
//
// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
// - This topic provides general information about the types of credentials
// used for accessing AWS. AWS Security Token Service (http://docs.aws.amazon.com/STS/latest/UsingSTS/)
// - This guide describes how to create and use temporary security credentials.
// Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// - This set of topics walks you through the process of signing a request using
// an access key ID and a secret access key. Commonly Used APIs
//
// Of the APIs discussed in this guide, the following will prove the most
// useful for most applications. You will likely perform actions other than
// these, such as creating keys and assigning policies, by using the console.
//
// Encrypt Decrypt GenerateDataKey GenerateDataKeyWithoutPlaintext
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type KMS struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// A ServiceName is the name of the service the client will make API calls to.
const ServiceName = "kms"
// New creates a new instance of the KMS client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a KMS client from just a session.
// svc := kms.New(mySession)
//
// // Create a KMS client with additional configuration
// svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS {
c := p.ClientConfig(ServiceName, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *KMS {
svc := &KMS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2014-11-01",
JSONVersion: "1.1",
TargetPrefix: "TrentService",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a KMS operation and runs any
// custom request initialization.
func (c *KMS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
| vendor/github.com/aws/aws-sdk-go/service/kms/service.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017560839478392154,
0.00016801126184873283,
0.00016386092465836555,
0.00016673622303642333,
0.000004073345280630747
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\tzones := make([]string, qty)\n",
"\tfor n := range zones {\n",
"\t\tzone, ok := attrs[\"instance.\"+strconv.Itoa(n)]\n",
"\t\tif !ok {\n",
"\t\t\treturn nil, fmt.Errorf(\"AZ list corrupt, this is definitely a bug\")\n",
"\t\t}\n",
"\t\tzones[n] = zone\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tzone, ok := attrs[\"names.\"+strconv.Itoa(n)]\n"
],
"file_path": "builtin/providers/aws/data_source_availability_zones_test.go",
"type": "replace",
"edit_start_line_idx": 67
} | package artifactory
func (c *ArtifactoryClient) GetSystemSecurityConfiguration() (s string, e error) {
d, e := c.Get("/api/system/security", make(map[string]string))
return string(d), e
}
| vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017039118392858654,
0.00017039118392858654,
0.00017039118392858654,
0.00017039118392858654,
0
] |
{
"id": 4,
"code_window": [
"\n",
"## Example Usage\n",
"\n",
"```\n",
"# Declare the data source\n",
"data \"aws_availability_zones\" \"zones\" {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"data \"aws_availability_zones\" \"available\" {}\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 18
} | package aws
import (
"fmt"
"reflect"
"sort"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSAvailabilityZones_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckAwsAvailabilityZonesConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsAvailabilityZonesMeta("data.aws_availability_zones.availability_zones"),
),
},
},
})
}
func testAccCheckAwsAvailabilityZonesMeta(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Can't find AZ resource: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("AZ resource ID not set")
}
actual, err := testAccCheckAwsAvailabilityZonesBuildAvailable(rs.Primary.Attributes)
if err != nil {
return err
}
		// Copy before sorting so the check can actually detect unsorted output.
		expected := append([]string(nil), actual...)
		sort.Strings(expected)
		if !reflect.DeepEqual(expected, actual) {
return fmt.Errorf("AZs not sorted - expected %v, got %v", expected, actual)
}
return nil
}
}
func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) {
v, ok := attrs["instance.#"]
if !ok {
return nil, fmt.Errorf("Available AZ list is missing")
}
qty, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
if qty < 1 {
return nil, fmt.Errorf("No AZs found in region, this is probably a bug.")
}
zones := make([]string, qty)
for n := range zones {
zone, ok := attrs["instance."+strconv.Itoa(n)]
if !ok {
return nil, fmt.Errorf("AZ list corrupt, this is definitely a bug")
}
zones[n] = zone
}
return zones, nil
}
const testAccCheckAwsAvailabilityZonesConfig = `
data "aws_availability_zones" "availability_zones" {
}
`
| builtin/providers/aws/data_source_availability_zones_test.go | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.034473810344934464,
0.007646610494703054,
0.00017223948088940233,
0.0014097703387960792,
0.011193928308784962
] |
{
"id": 4,
"code_window": [
"\n",
"## Example Usage\n",
"\n",
"```\n",
"# Declare the data source\n",
"data \"aws_availability_zones\" \"zones\" {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"data \"aws_availability_zones\" \"available\" {}\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 18
} | variable "amis" {
default = {
us-east-1 = "foo"
us-west-2 = "bar"
}
}
variable "bar" {
default = "baz"
}
variable "foo" {}
resource "aws_instance" "foo" {
num = "2"
bar = "${var.bar}"
}
resource "aws_instance" "bar" {
foo = "${var.foo}"
bar = "${lookup(var.amis, var.foo)}"
}
| terraform/test-fixtures/input-vars/main.tf | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00023151792993303388,
0.00019344336760696024,
0.00017393029702361673,
0.00017488186131231487,
0.000026925587008008733
] |
{
"id": 4,
"code_window": [
"\n",
"## Example Usage\n",
"\n",
"```\n",
"# Declare the data source\n",
"data \"aws_availability_zones\" \"zones\" {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"data \"aws_availability_zones\" \"available\" {}\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 18
} | // Package v4 implements signing for AWS V4 signer
package v4
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/rest"
)
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
)
var ignoredHeaders = rules{
blacklist{
mapRule{
"Authorization": struct{}{},
"User-Agent": struct{}{},
},
},
}
// requiredSignedHeaders is a whitelist for build canonical headers.
var requiredSignedHeaders = rules{
whitelist{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
"X-Amz-Grant-Write": struct{}{},
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
},
},
patterns{"X-Amz-Meta-"},
}
// allowedHoisting is a whitelist for build query headers. The boolean value
// represents whether or not it is a pattern.
var allowedQueryHoisting = inclusiveRules{
blacklist{requiredSignedHeaders},
patterns{"X-Amz-"},
}
type signer struct {
Request *http.Request
Time time.Time
ExpireTime time.Duration
ServiceName string
Region string
CredValues credentials.Value
Credentials *credentials.Credentials
Query url.Values
Body io.ReadSeeker
Debug aws.LogLevelType
Logger aws.Logger
isPresign bool
formattedTime string
formattedShortTime string
signedHeaders string
canonicalHeaders string
canonicalString string
credentialString string
stringToSign string
signature string
authorization string
notHoist bool
signedHeaderVals http.Header
}
// Sign requests with signature version 4.
//
// Will sign the requests with the service config's Credentials object
// Signing is skipped if the credentials is the credentials.AnonymousCredentials
// object.
func Sign(req *request.Request) {
// If the request does not need to be signed ignore the signing of the
// request if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
region := req.ClientInfo.SigningRegion
if region == "" {
region = aws.StringValue(req.Config.Region)
}
name := req.ClientInfo.SigningName
if name == "" {
name = req.ClientInfo.ServiceName
}
s := signer{
Request: req.HTTPRequest,
Time: req.Time,
ExpireTime: req.ExpireTime,
Query: req.HTTPRequest.URL.Query(),
Body: req.Body,
ServiceName: name,
Region: region,
Credentials: req.Config.Credentials,
Debug: req.Config.LogLevel.Value(),
Logger: req.Config.Logger,
notHoist: req.NotHoist,
}
req.Error = s.sign()
req.Time = s.Time
req.SignedHeaderVals = s.signedHeaderVals
}
func (v4 *signer) sign() error {
if v4.ExpireTime != 0 {
v4.isPresign = true
}
if v4.isRequestSigned() {
if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) {
// If the request is already signed, and the credentials have not
// expired, and the request is not too old ignore the signing request.
return nil
}
v4.Time = time.Now()
		// The credentials have expired for this request. The current signing
		// is invalid, and needs to be redone because the request will fail.
if v4.isPresign {
v4.removePresign()
// Update the request's query string to ensure the values stays in
// sync in the case retrieving the new credentials fails.
v4.Request.URL.RawQuery = v4.Query.Encode()
}
}
var err error
v4.CredValues, err = v4.Credentials.Get()
if err != nil {
return err
}
if v4.isPresign {
v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
if v4.CredValues.SessionToken != "" {
v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
} else {
v4.Query.Del("X-Amz-Security-Token")
}
} else if v4.CredValues.SessionToken != "" {
v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
}
v4.build()
if v4.Debug.Matches(aws.LogDebugWithSigning) {
v4.logSigningInfo()
}
return nil
}
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
func (v4 *signer) logSigningInfo() {
signedURLMsg := ""
if v4.isPresign {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
}
msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
v4.Logger.Log(msg)
}
func (v4 *signer) build() {
v4.buildTime() // no depends
v4.buildCredentialString() // no depends
unsignedHeaders := v4.Request.Header
if v4.isPresign {
if !v4.notHoist {
urlValues := url.Values{}
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
for k := range urlValues {
v4.Query[k] = urlValues[k]
}
}
}
v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
v4.buildCanonicalString() // depends on canon headers / signed headers
v4.buildStringToSign() // depends on canon string
v4.buildSignature() // depends on string to sign
if v4.isPresign {
v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
} else {
parts := []string{
authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
"SignedHeaders=" + v4.signedHeaders,
"Signature=" + v4.signature,
}
v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
}
}
func (v4 *signer) buildTime() {
v4.formattedTime = v4.Time.UTC().Format(timeFormat)
v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
if v4.isPresign {
duration := int64(v4.ExpireTime / time.Second)
v4.Query.Set("X-Amz-Date", v4.formattedTime)
v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
} else {
v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
}
}
func (v4 *signer) buildCredentialString() {
v4.credentialString = strings.Join([]string{
v4.formattedShortTime,
v4.Region,
v4.ServiceName,
"aws4_request",
}, "/")
if v4.isPresign {
v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
}
}
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
for k, h := range header {
if r.IsValid(k) {
query[k] = h
} else {
unsignedHeaders[k] = h
}
}
return query, unsignedHeaders
}
func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) {
var headers []string
headers = append(headers, "host")
for k, v := range header {
canonicalKey := http.CanonicalHeaderKey(k)
if !r.IsValid(canonicalKey) {
continue // ignored header
}
if v4.signedHeaderVals == nil {
v4.signedHeaderVals = make(http.Header)
}
lowerCaseKey := strings.ToLower(k)
if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok {
// include additional values
v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...)
continue
}
headers = append(headers, lowerCaseKey)
v4.signedHeaderVals[lowerCaseKey] = v
}
sort.Strings(headers)
v4.signedHeaders = strings.Join(headers, ";")
if v4.isPresign {
v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
}
headerValues := make([]string, len(headers))
for i, k := range headers {
if k == "host" {
headerValues[i] = "host:" + v4.Request.URL.Host
} else {
headerValues[i] = k + ":" +
strings.Join(v4.signedHeaderVals[k], ",")
}
}
v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
}
func (v4 *signer) buildCanonicalString() {
v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
uri := v4.Request.URL.Opaque
if uri != "" {
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
} else {
uri = v4.Request.URL.Path
}
if uri == "" {
uri = "/"
}
if v4.ServiceName != "s3" {
uri = rest.EscapePath(uri, false)
}
v4.canonicalString = strings.Join([]string{
v4.Request.Method,
uri,
v4.Request.URL.RawQuery,
v4.canonicalHeaders + "\n",
v4.signedHeaders,
v4.bodyDigest(),
}, "\n")
}
func (v4 *signer) buildStringToSign() {
v4.stringToSign = strings.Join([]string{
authHeaderPrefix,
v4.formattedTime,
v4.credentialString,
hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
}, "\n")
}
func (v4 *signer) buildSignature() {
secret := v4.CredValues.SecretAccessKey
date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
region := makeHmac(date, []byte(v4.Region))
service := makeHmac(region, []byte(v4.ServiceName))
credentials := makeHmac(service, []byte("aws4_request"))
signature := makeHmac(credentials, []byte(v4.stringToSign))
v4.signature = hex.EncodeToString(signature)
}
func (v4 *signer) bodyDigest() string {
hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
if v4.isPresign && v4.ServiceName == "s3" {
hash = "UNSIGNED-PAYLOAD"
} else if v4.Body == nil {
hash = hex.EncodeToString(makeSha256([]byte{}))
} else {
hash = hex.EncodeToString(makeSha256Reader(v4.Body))
}
v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
}
return hash
}
// isRequestSigned returns if the request is currently signed or presigned
func (v4 *signer) isRequestSigned() bool {
if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
return true
}
if v4.Request.Header.Get("Authorization") != "" {
return true
}
return false
}
// unsign removes signing flags for both signed and presigned requests.
func (v4 *signer) removePresign() {
v4.Query.Del("X-Amz-Algorithm")
v4.Query.Del("X-Amz-Signature")
v4.Query.Del("X-Amz-Security-Token")
v4.Query.Del("X-Amz-Date")
v4.Query.Del("X-Amz-Expires")
v4.Query.Del("X-Amz-Credential")
v4.Query.Del("X-Amz-SignedHeaders")
}
func makeHmac(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256Reader(reader io.ReadSeeker) []byte {
hash := sha256.New()
start, _ := reader.Seek(0, 1)
defer reader.Seek(start, 0)
io.Copy(hash, reader)
return hash.Sum(nil)
}
func stripExcessSpaces(headerVals []string) []string {
vals := make([]string, len(headerVals))
for i, str := range headerVals {
stripped := ""
found := false
str = strings.TrimSpace(str)
for _, c := range str {
if !found && c == ' ' {
stripped += string(c)
found = true
} else if c != ' ' {
stripped += string(c)
found = false
}
}
vals[i] = stripped
}
return vals
}
| vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00023785537632647902,
0.0001740625302772969,
0.00016341981245204806,
0.00017250492237508297,
0.000011930301297979895
] |
{
"id": 4,
"code_window": [
"\n",
"## Example Usage\n",
"\n",
"```\n",
"# Declare the data source\n",
"data \"aws_availability_zones\" \"zones\" {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"data \"aws_availability_zones\" \"available\" {}\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 18
} | ## dnsimple
[](https://travis-ci.org/pearkes/dnsimple)
This package provides the `dnsimple` package which offers
an interface to the DNSimple API.
It's intentionally designed to make heavy use of built-ins and strings
in place of custom data structures and proper types. It also only implements
specific endpoints, and doesn't have full API coverage.
**For those reasons, I recommend looking elsewhere if you just need
a standard DNSimple API client.**
### Documentation
The full documentation is available on [Godoc](http://godoc.org/github.com/pearkes/dnsimple)
| vendor/github.com/pearkes/dnsimple/README.md | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0001686440664343536,
0.0001678007101872936,
0.0001669573539402336,
0.0001678007101872936,
8.433562470600009e-7
] |
{
"id": 5,
"code_window": [
"\n",
"# Create a subnet in each availability zone\n",
"resource \"aws_subnet\" \"public\" {\n",
" count = \"${length(data.aws_availability_zones.zones.instance)}\"\n",
" \n",
" availability_zone = \"${data.aws_availability_zones.zones.instance[count.index]}\"\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"# e.g. Create subnets in the first two available availability zones\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 20
} | ---
layout: "aws"
page_title: "AWS: aws_availability_zones"
sidebar_current: "docs-aws-datasource-availability-zones"
description: |-
Provides a list of availability zones which can be used by an AWS account
---
# aws\_availability\_zones
The Availability Zones data source allows access to the list of AWS
Availability Zones which can be accessed by an AWS account within the region
configured in the provider.
## Example Usage
```
# Declare the data source
data "aws_availability_zones" "zones" {}
# Create a subnet in each availability zone
resource "aws_subnet" "public" {
count = "${length(data.aws_availability_zones.zones.instance)}"
availability_zone = "${data.aws_availability_zones.zones.instance[count.index]}"
# Other properties...
}
```
## Argument Reference
There are no arguments for this data source.
## Attributes Reference
The following attributes are exported:
* `instance` - A list of the availability zone names available to the account.
| website/source/docs/providers/aws/d/availability_zones.html.markdown | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.9982408285140991,
0.2501891553401947,
0.0002294929145136848,
0.0011431543389335275,
0.43188804388046265
] |
{
"id": 5,
"code_window": [
"\n",
"# Create a subnet in each availability zone\n",
"resource \"aws_subnet\" \"public\" {\n",
" count = \"${length(data.aws_availability_zones.zones.instance)}\"\n",
" \n",
" availability_zone = \"${data.aws_availability_zones.zones.instance[count.index]}\"\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"# e.g. Create subnets in the first two available availability zones\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 20
} | package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSElasticSearchDomain_basic(t *testing.T) {
var domain elasticsearch.ElasticsearchDomainStatus
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckESDomainDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccESDomainConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain),
),
},
},
})
}
func TestAccAWSElasticSearchDomain_complex(t *testing.T) {
var domain elasticsearch.ElasticsearchDomainStatus
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckESDomainDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccESDomainConfig_complex,
Check: resource.ComposeTestCheckFunc(
testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain),
),
},
},
})
}
func TestAccAWSElasticSearch_tags(t *testing.T) {
var domain elasticsearch.ElasticsearchDomainStatus
var td elasticsearch.ListTagsOutput
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSELBDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccESDomainConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain),
),
},
resource.TestStep{
Config: testAccESDomainConfig_TagUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain),
testAccLoadESTags(&domain, &td),
testAccCheckElasticsearchServiceTags(&td.TagList, "foo", "bar"),
testAccCheckElasticsearchServiceTags(&td.TagList, "new", "type"),
),
},
},
})
}
func testAccLoadESTags(conf *elasticsearch.ElasticsearchDomainStatus, td *elasticsearch.ListTagsOutput) resource.TestCheckFunc {
return func(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).esconn
describe, err := conn.ListTags(&elasticsearch.ListTagsInput{
ARN: conf.ARN,
})
if err != nil {
return err
}
if len(describe.TagList) > 0 {
*td = *describe
}
return nil
}
}
func testAccCheckESDomainExists(n string, domain *elasticsearch.ElasticsearchDomainStatus) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ES Domain ID is set")
}
conn := testAccProvider.Meta().(*AWSClient).esconn
opts := &elasticsearch.DescribeElasticsearchDomainInput{
DomainName: aws.String(rs.Primary.Attributes["domain_name"]),
}
resp, err := conn.DescribeElasticsearchDomain(opts)
if err != nil {
return fmt.Errorf("Error describing domain: %s", err.Error())
}
*domain = *resp.DomainStatus
return nil
}
}
func testAccCheckESDomainDestroy(s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_elasticsearch_domain" {
continue
}
conn := testAccProvider.Meta().(*AWSClient).esconn
opts := &elasticsearch.DescribeElasticsearchDomainInput{
DomainName: aws.String(rs.Primary.Attributes["domain_name"]),
}
_, err := conn.DescribeElasticsearchDomain(opts)
// Verify the error is what we want
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" {
continue
}
return err
}
}
return nil
}
const testAccESDomainConfig = `
resource "aws_elasticsearch_domain" "example" {
domain_name = "tf-test-1"
}
`
const testAccESDomainConfig_TagUpdate = `
resource "aws_elasticsearch_domain" "example" {
domain_name = "tf-test-1"
tags {
foo = "bar"
new = "type"
}
}
`
const testAccESDomainConfig_complex = `
resource "aws_elasticsearch_domain" "example" {
domain_name = "tf-test-2"
advanced_options {
"indices.fielddata.cache.size" = 80
}
ebs_options {
ebs_enabled = false
}
cluster_config {
instance_count = 2
zone_awareness_enabled = true
}
snapshot_options {
automated_snapshot_start_hour = 23
}
tags {
bar = "complex"
}
}
`
| builtin/providers/aws/resource_aws_elasticsearch_domain_test.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0008123298175632954,
0.0003079719317611307,
0.00016727940237615258,
0.00019443512428551912,
0.00019099825294688344
] |
{
"id": 5,
"code_window": [
"\n",
"# Create a subnet in each availability zone\n",
"resource \"aws_subnet\" \"public\" {\n",
" count = \"${length(data.aws_availability_zones.zones.instance)}\"\n",
" \n",
" availability_zone = \"${data.aws_availability_zones.zones.instance[count.index]}\"\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"# e.g. Create subnets in the first two available availability zones\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 20
} | # http-link-go [](https://travis-ci.org/tent/http-link-go)
http-link-go implements parsing and serialization of Link header values as
defined in [RFC 5988](https://tools.ietf.org/html/rfc5988).
[**Documentation**](http://godoc.org/github.com/tent/http-link-go)
## Installation
```text
go get github.com/tent/http-link-go
```
| vendor/github.com/tent/http-link-go/README.md | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017622768064029515,
0.0001702297304291278,
0.0001642317947698757,
0.0001702297304291278,
0.000005997942935209721
] |
{
"id": 5,
"code_window": [
"\n",
"# Create a subnet in each availability zone\n",
"resource \"aws_subnet\" \"public\" {\n",
" count = \"${length(data.aws_availability_zones.zones.instance)}\"\n",
" \n",
" availability_zone = \"${data.aws_availability_zones.zones.instance[count.index]}\"\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"# e.g. Create subnets in the first two available availability zones\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 20
} | module "consul" {
foo = "${aws_security_group.firewall.foo}"
source = "./consul"
}
provider "aws" {}
resource "aws_security_group" "firewall" {}
resource "aws_instance" "web" {
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
"${module.consul.security_group}"
]
}
| terraform/test-fixtures/graph-modules/main.tf | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.002400058787316084,
0.0014608544297516346,
0.0005216499557718635,
0.0014608544297516346,
0.0009392044157721102
] |
{
"id": 6,
"code_window": [
"\n",
" # Other properties...\n",
"}\n",
"```\n",
"\n",
"## Argument Reference\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"resource \"aws_subnet\" \"primary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[0]}\"\n",
"\n",
" # Other properties...\n",
"}\n",
"\n",
"resource \"aws_subnet\" \"secondary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[1]}\"\n",
"\n",
" # Other properties...\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 26
} | package aws
import (
"fmt"
"reflect"
"sort"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSAvailabilityZones_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckAwsAvailabilityZonesConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsAvailabilityZonesMeta("data.aws_availability_zones.availability_zones"),
),
},
},
})
}
func testAccCheckAwsAvailabilityZonesMeta(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Can't find AZ resource: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("AZ resource ID not set")
}
actual, err := testAccCheckAwsAvailabilityZonesBuildAvailable(rs.Primary.Attributes)
if err != nil {
return err
}
expected := actual
sort.Strings(expected)
if reflect.DeepEqual(expected, actual) != true {
return fmt.Errorf("AZs not sorted - expected %v, got %v", expected, actual)
}
return nil
}
}
func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) {
v, ok := attrs["instance.#"]
if !ok {
return nil, fmt.Errorf("Available AZ list is missing")
}
qty, err := strconv.Atoi(v)
if err != nil {
return nil, err
}
if qty < 1 {
return nil, fmt.Errorf("No AZs found in region, this is probably a bug.")
}
zones := make([]string, qty)
for n := range zones {
zone, ok := attrs["instance."+strconv.Itoa(n)]
if !ok {
return nil, fmt.Errorf("AZ list corrupt, this is definitely a bug")
}
zones[n] = zone
}
return zones, nil
}
const testAccCheckAwsAvailabilityZonesConfig = `
data "aws_availability_zones" "availability_zones" {
}
`
| builtin/providers/aws/data_source_availability_zones_test.go | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00018108678341377527,
0.00017374164599459618,
0.00016750860959291458,
0.00017272004333790392,
0.000003774674269152456
] |
{
"id": 6,
"code_window": [
"\n",
" # Other properties...\n",
"}\n",
"```\n",
"\n",
"## Argument Reference\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"resource \"aws_subnet\" \"primary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[0]}\"\n",
"\n",
" # Other properties...\n",
"}\n",
"\n",
"resource \"aws_subnet\" \"secondary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[1]}\"\n",
"\n",
" # Other properties...\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 26
} | language: go
sudo: required
go:
- 1.4.2
- 1.5.3
- 1.6
- tip
os:
- linux
- osx
env:
- GOARCH=amd64 DOCKER_VERSION=1.8.3
- GOARCH=386 DOCKER_VERSION=1.8.3
- GOARCH=amd64 DOCKER_VERSION=1.9.1
- GOARCH=386 DOCKER_VERSION=1.9.1
- GOARCH=amd64 DOCKER_VERSION=1.10.3
- GOARCH=386 DOCKER_VERSION=1.10.3
install:
- travis_retry travis-scripts/install.bash
script:
- travis-scripts/run-tests.bash
services:
- docker
matrix:
fast_finish: true
allow_failures:
- go: tip
| vendor/github.com/fsouza/go-dockerclient/.travis.yml | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017282662156503648,
0.00016967392002698034,
0.00016419751045759767,
0.0001719975844025612,
0.000003887158527504653
] |
{
"id": 6,
"code_window": [
"\n",
" # Other properties...\n",
"}\n",
"```\n",
"\n",
"## Argument Reference\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"resource \"aws_subnet\" \"primary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[0]}\"\n",
"\n",
" # Other properties...\n",
"}\n",
"\n",
"resource \"aws_subnet\" \"secondary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[1]}\"\n",
"\n",
" # Other properties...\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 26
} | package artifactory
| vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017433127504773438,
0.00017433127504773438,
0.00017433127504773438,
0.00017433127504773438,
0
] |
{
"id": 6,
"code_window": [
"\n",
" # Other properties...\n",
"}\n",
"```\n",
"\n",
"## Argument Reference\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"resource \"aws_subnet\" \"primary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[0]}\"\n",
"\n",
" # Other properties...\n",
"}\n",
"\n",
"resource \"aws_subnet\" \"secondary\" {\n",
" availability_zone = \"${data.aws_availability_zones.available.names[1]}\"\n",
"\n",
" # Other properties...\n"
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 26
} | variable "aws_region" {
description = "The AWS region to create resources in."
default = "us-east-1"
}
# AMI's from http://cloud-images.ubuntu.com/locator/ec2/
variable "aws_amis" {
default = {
eu-west-1 = "ami-b1cf19c6"
us-east-1 = "ami-de7ab6b6"
us-west-1 = "ami-3f75767a"
us-west-2 = "ami-21f78e11"
}
}
| examples/consul/variables.tf | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017285878129769117,
0.0001692289370112121,
0.00016559907817281783,
0.0001692289370112121,
0.00000362985156243667
] |
{
"id": 7,
"code_window": [
"## Attributes Reference\n",
"\n",
"The following attributes are exported:\n",
"\n",
"* `instance` - A list of the availability zone names available to the account.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace"
],
"after_edit": [
"* `names` - A list of the availability zone names available to the account."
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 38
} | ---
layout: "aws"
page_title: "AWS: aws_availability_zones"
sidebar_current: "docs-aws-datasource-availability-zones"
description: |-
Provides a list of availability zones which can be used by an AWS account
---
# aws\_availability\_zones
The Availability Zones data source allows access to the list of AWS
Availability Zones which can be accessed by an AWS account within the region
configured in the provider.
## Example Usage
```
# Declare the data source
data "aws_availability_zones" "zones" {}
# Create a subnet in each availability zone
resource "aws_subnet" "public" {
count = "${length(data.aws_availability_zones.zones.instance)}"
availability_zone = "${data.aws_availability_zones.zones.instance[count.index]}"
# Other properties...
}
```
## Argument Reference
There are no arguments for this data source.
## Attributes Reference
The following attributes are exported:
* `instance` - A list of the availability zone names available to the account.
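As an illustrative sketch only (not part of the original page), a single zone can also be referenced by index from the exported list; the `aws_subnet.primary` resource name below is hypothetical:
```
# Pin one subnet to the first zone returned for this account (illustrative only)
resource "aws_subnet" "primary" {
  availability_zone = "${data.aws_availability_zones.zones.instance[0]}"
  # Other properties...
}
```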
| website/source/docs/providers/aws/d/availability_zones.html.markdown | 1 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.9947629570960999,
0.24899068474769592,
0.00020858494099229574,
0.000495601911097765,
0.43057185411453247
] |
{
"id": 7,
"code_window": [
"## Attributes Reference\n",
"\n",
"The following attributes are exported:\n",
"\n",
"* `instance` - A list of the availability zone names available to the account.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace"
],
"after_edit": [
"* `names` - A list of the availability zone names available to the account."
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 38
} | // Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios
| vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0001687069598119706,
0.0001685499446466565,
0.0001683929149294272,
0.0001685499446466565,
1.5702244127169251e-7
] |
{
"id": 7,
"code_window": [
"## Attributes Reference\n",
"\n",
"The following attributes are exported:\n",
"\n",
"* `instance` - A list of the availability zone names available to the account.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace"
],
"after_edit": [
"* `names` - A list of the availability zone names available to the account."
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 38
} | package google
/**
 * Note! You must run these tests one at a time. Google Cloud SQL does
 * not allow you to reuse a database for a short time after you have reserved it,
 * and for this reason the tests will fail if the same config is used several
 * times in short succession.
*/
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/sqladmin/v1beta4"
)
func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) {
var instance sqladmin.DatabaseInstance
databaseID := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: fmt.Sprintf(
testGoogleSqlDatabaseInstance_basic, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func TestAccGoogleSqlDatabaseInstance_basic2(t *testing.T) {
var instance sqladmin.DatabaseInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testGoogleSqlDatabaseInstance_basic2,
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) {
var instance sqladmin.DatabaseInstance
databaseID := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: fmt.Sprintf(
testGoogleSqlDatabaseInstance_settings, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) {
var instance sqladmin.DatabaseInstance
databaseID := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: fmt.Sprintf(
testGoogleSqlDatabaseInstance_basic, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
resource.TestStep{
Config: fmt.Sprintf(
testGoogleSqlDatabaseInstance_settings, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) {
var instance sqladmin.DatabaseInstance
databaseID := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: fmt.Sprintf(
testGoogleSqlDatabaseInstance_settings, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
resource.TestStep{
Config: fmt.Sprintf(
testGoogleSqlDatabaseInstance_basic, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
testAccCheckGoogleSqlDatabaseInstanceEquals(
"google_sql_database_instance.instance", &instance),
),
},
},
})
}
func testAccCheckGoogleSqlDatabaseInstanceEquals(n string,
instance *sqladmin.DatabaseInstance) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
attributes := rs.Primary.Attributes
server := instance.Name
local := attributes["name"]
if server != local {
return fmt.Errorf("Error name mismatch, (%s, %s)", server, local)
}
server = instance.Settings.Tier
local = attributes["settings.0.tier"]
if server != local {
return fmt.Errorf("Error settings.tier mismatch, (%s, %s)", server, local)
}
server = instance.MasterInstanceName
local = attributes["master_instance_name"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error master_instance_name mismatch, (%s, %s)", server, local)
}
server = instance.Settings.ActivationPolicy
local = attributes["settings.0.activation_policy"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.activation_policy mismatch, (%s, %s)", server, local)
}
if instance.Settings.BackupConfiguration != nil {
server = strconv.FormatBool(instance.Settings.BackupConfiguration.BinaryLogEnabled)
local = attributes["settings.0.backup_configuration.0.binary_log_enabled"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.backup_configuration.binary_log_enabled mismatch, (%s, %s)", server, local)
}
server = strconv.FormatBool(instance.Settings.BackupConfiguration.Enabled)
local = attributes["settings.0.backup_configuration.0.enabled"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.backup_configuration.enabled mismatch, (%s, %s)", server, local)
}
server = instance.Settings.BackupConfiguration.StartTime
local = attributes["settings.0.backup_configuration.0.start_time"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.backup_configuration.start_time mismatch, (%s, %s)", server, local)
}
}
server = strconv.FormatBool(instance.Settings.CrashSafeReplicationEnabled)
local = attributes["settings.0.crash_safe_replication"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.crash_safe_replication mismatch, (%s, %s)", server, local)
}
if instance.Settings.IpConfiguration != nil {
server = strconv.FormatBool(instance.Settings.IpConfiguration.Ipv4Enabled)
local = attributes["settings.0.ip_configuration.0.ipv4_enabled"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.ip_configuration.ipv4_enabled mismatch, (%s, %s)", server, local)
}
server = strconv.FormatBool(instance.Settings.IpConfiguration.RequireSsl)
local = attributes["settings.0.ip_configuration.0.require_ssl"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.ip_configuration.require_ssl mismatch, (%s, %s)", server, local)
}
}
if instance.Settings.LocationPreference != nil {
server = instance.Settings.LocationPreference.FollowGaeApplication
local = attributes["settings.0.location_preference.0.follow_gae_application"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.location_preference.follow_gae_application mismatch, (%s, %s)", server, local)
}
server = instance.Settings.LocationPreference.Zone
local = attributes["settings.0.location_preference.0.zone"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.location_preference.zone mismatch, (%s, %s)", server, local)
}
}
server = instance.Settings.PricingPlan
local = attributes["settings.0.pricing_plan"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error settings.pricing_plan mismatch, (%s, %s)", server, local)
}
if instance.ReplicaConfiguration != nil &&
instance.ReplicaConfiguration.MysqlReplicaConfiguration != nil {
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.CaCertificate
local = attributes["replica_configuration.0.ca_certificate"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.ca_certificate mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientCertificate
local = attributes["replica_configuration.0.client_certificate"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.client_certificate mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientKey
local = attributes["replica_configuration.0.client_key"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.client_key mismatch, (%s, %s)", server, local)
}
server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.ConnectRetryInterval, 10)
local = attributes["replica_configuration.0.connect_retry_interval"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.connect_retry_interval mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.DumpFilePath
local = attributes["replica_configuration.0.dump_file_path"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.dump_file_path mismatch, (%s, %s)", server, local)
}
server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.MasterHeartbeatPeriod, 10)
local = attributes["replica_configuration.0.master_heartbeat_period"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.master_heartbeat_period mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Password
local = attributes["replica_configuration.0.password"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.password mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.SslCipher
local = attributes["replica_configuration.0.ssl_cipher"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.ssl_cipher mismatch, (%s, %s)", server, local)
}
server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Username
local = attributes["replica_configuration.0.username"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.username mismatch, (%s, %s)", server, local)
}
server = strconv.FormatBool(instance.ReplicaConfiguration.MysqlReplicaConfiguration.VerifyServerCertificate)
local = attributes["replica_configuration.0.verify_server_certificate"]
if server != local && len(server) > 0 && len(local) > 0 {
return fmt.Errorf("Error replica_configuration.verify_server_certificate mismatch, (%s, %s)", server, local)
}
}
return nil
}
}
func testAccCheckGoogleSqlDatabaseInstanceExists(n string,
instance *sqladmin.DatabaseInstance) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
found, err := config.clientSqlAdmin.Instances.Get(config.Project,
rs.Primary.Attributes["name"]).Do()
*instance = *found
if err != nil {
return fmt.Errorf("Not found: %s", n)
}
return nil
}
}
func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
config := testAccProvider.Meta().(*Config)
if rs.Type != "google_sql_database_instance" {
continue
}
_, err := config.clientSqlAdmin.Instances.Get(config.Project,
rs.Primary.Attributes["name"]).Do()
if err == nil {
return fmt.Errorf("Database Instance still exists")
}
}
return nil
}
var testGoogleSqlDatabaseInstance_basic = `
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
region = "us-central"
settings {
tier = "D0"
crash_safe_replication = false
}
}
`
var testGoogleSqlDatabaseInstance_basic2 = `
resource "google_sql_database_instance" "instance" {
region = "us-central"
settings {
tier = "D0"
crash_safe_replication = false
}
}
`
var testGoogleSqlDatabaseInstance_settings = `
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
region = "us-central"
settings {
tier = "D0"
crash_safe_replication = false
replication_type = "ASYNCHRONOUS"
location_preference {
zone = "us-central1-f"
}
ip_configuration {
ipv4_enabled = "true"
authorized_networks {
value = "108.12.12.12"
name = "misc"
expiration_time = "2017-11-15T16:19:00.094Z"
}
}
backup_configuration {
enabled = "true"
start_time = "19:19"
}
activation_policy = "ON_DEMAND"
}
}
`
// Note - this test is not feasible to run unless we generate
// backups first.
var testGoogleSqlDatabaseInstance_replica = `
resource "google_sql_database_instance" "instance_master" {
name = "tf-lw-%d"
database_version = "MYSQL_5_6"
region = "us-east1"
settings {
tier = "D0"
crash_safe_replication = true
backup_configuration {
enabled = true
start_time = "00:00"
binary_log_enabled = true
}
}
}
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
database_version = "MYSQL_5_6"
region = "us-central"
settings {
tier = "D0"
}
master_instance_name = "${google_sql_database_instance.instance_master.name}"
replica_configuration {
ca_certificate = "${file("~/tmp/fake.pem")}"
client_certificate = "${file("~/tmp/fake.pem")}"
client_key = "${file("~/tmp/fake.pem")}"
connect_retry_interval = 100
master_heartbeat_period = 10000
password = "password"
username = "username"
ssl_cipher = "ALL"
verify_server_certificate = false
}
}
`
| builtin/providers/google/resource_sql_database_instance_test.go | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.0075531830079853535,
0.0006894513498991728,
0.00016730334027670324,
0.00026886939303949475,
0.0012375271180644631
] |
{
"id": 7,
"code_window": [
"## Attributes Reference\n",
"\n",
"The following attributes are exported:\n",
"\n",
"* `instance` - A list of the availability zone names available to the account.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace"
],
"after_edit": [
"* `names` - A list of the availability zone names available to the account."
],
"file_path": "website/source/docs/providers/aws/d/availability_zones.html.markdown",
"type": "replace",
"edit_start_line_idx": 38
} | <% wrap_layout :inner do %>
<% content_for :sidebar do %>
<div class="docs-sidebar hidden-print affix-top" role="complementary">
<ul class="nav docs-sidenav">
<li<%= sidebar_current("docs-home") %>>
<a href="/docs/providers/index.html">« Documentation Home</a>
</li>
<li<%= sidebar_current("docs-chef-index") %>>
<a href="/docs/providers/chef/index.html">Chef Provider</a>
</li>
<li<%= sidebar_current(/^docs-chef-resource/) %>>
<a href="#">Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-chef-resource-data-bag") %>>
<a href="/docs/providers/chef/r/data_bag.html">chef_data_bag</a>
</li>
<li<%= sidebar_current("docs-chef-resource-data-bag-item") %>>
<a href="/docs/providers/chef/r/data_bag_item.html">chef_data_bag_item</a>
</li>
<li<%= sidebar_current("docs-chef-resource-environment") %>>
<a href="/docs/providers/chef/r/environment.html">chef_environment</a>
</li>
<li<%= sidebar_current("docs-chef-resource-node") %>>
<a href="/docs/providers/chef/r/node.html">chef_node</a>
</li>
<li<%= sidebar_current("docs-chef-resource-role") %>>
<a href="/docs/providers/chef/r/role.html">chef_role</a>
</li>
</ul>
</li>
</ul>
</div>
<% end %>
<%= yield %>
<% end %>
| website/source/layouts/chef.erb | 0 | https://github.com/hashicorp/terraform/commit/ce447e8e2abc6804664631b04726ad619ac8de9b | [
0.00017109466716647148,
0.00016921231872402132,
0.00016783428145572543,
0.00016896016313694417,
0.0000012630870287466678
] |
{
"id": 0,
"code_window": [
"\n",
"func (b *backend) pathRoleCreateRead(\n",
"\treq *logical.Request, data *framework.FieldData) (*logical.Response, error) {\n",
"\tname := data.Get(\"name\").(string)\n",
"\tvar usernameLength int\n",
"\n",
"\t// Get the role\n",
"\trole, err := b.Role(req.Storage, name)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 33
} | package mysql
import (
"fmt"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
_ "github.com/lib/pq"
)
func pathRoleCreate(b *backend) *framework.Path {
return &framework.Path{
Pattern: "creds/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleCreateRead,
},
HelpSynopsis: pathRoleCreateReadHelpSyn,
HelpDescription: pathRoleCreateReadHelpDesc,
}
}
func (b *backend) pathRoleCreateRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
var usernameLength int
// Get the role
role, err := b.Role(req.Storage, name)
if err != nil {
return nil, err
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
}
// Determine if we have a lease
lease, err := b.Lease(req.Storage)
if err != nil {
return nil, err
}
if lease == nil {
lease = &configLease{}
}
	// Generate our username and password. MySQL limits usernames to 16 characters.
displayName := name
ul, ok := data.GetOk("username_length")
if ok == true {
usernameLength = ul.(int)
} else {
usernameLength = 10
}
if len(displayName) > usernameLength {
displayName = displayName[:usernameLength]
}
userUUID, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
username := fmt.Sprintf("%s-%s", displayName, userUUID)
if len(username) > 16 {
username = username[:16]
}
password, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
// Get our handle
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Start a transaction
tx, err := db.Begin()
if err != nil {
return nil, err
}
defer tx.Rollback()
// Execute each query
for _, query := range SplitSQL(role.SQL) {
stmt, err := tx.Prepare(Query(query, map[string]string{
"name": username,
"password": password,
}))
if err != nil {
return nil, err
}
defer stmt.Close()
if _, err := stmt.Exec(); err != nil {
return nil, err
}
}
// Commit the transaction
if err := tx.Commit(); err != nil {
return nil, err
}
// Return the secret
resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
"username": username,
"password": password,
}, map[string]interface{}{
"username": username,
})
resp.Secret.TTL = lease.Lease
return resp, nil
}
const pathRoleCreateReadHelpSyn = `
Request database credentials for a certain role.
`
const pathRoleCreateReadHelpDesc = `
This path reads database credentials for a certain role. The
database credentials will be generated on demand and will be automatically
revoked when the lease is up.
`
| builtin/logical/mysql/path_role_create.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.9987964630126953,
0.43275922536849976,
0.00016425433568656445,
0.02600432187318802,
0.48969462513923645
] |
{
"id": 0,
"code_window": [
"\n",
"func (b *backend) pathRoleCreateRead(\n",
"\treq *logical.Request, data *framework.FieldData) (*logical.Response, error) {\n",
"\tname := data.Get(\"name\").(string)\n",
"\tvar usernameLength int\n",
"\n",
"\t// Get the role\n",
"\trole, err := b.Role(req.Storage, name)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 33
} | /*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package netutil identifies the system userid responsible for
// localhost TCP connections.
package netutil // import "camlistore.org/pkg/netutil"
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"os/user"
"regexp"
"runtime"
"strconv"
"strings"
)
var (
ErrNotFound = errors.New("netutil: connection not found")
ErrUnsupportedOS = errors.New("netutil: not implemented on this operating system")
)
// ConnUserid returns the uid that owns the given localhost connection.
// The returned error is ErrNotFound if the connection wasn't found.
func ConnUserid(conn net.Conn) (uid int, err error) {
return AddrPairUserid(conn.LocalAddr(), conn.RemoteAddr())
}
// HostPortToIP parses a host:port to a TCPAddr without resolving names.
// If given a context IP, it will resolve localhost to match the context's IP family.
func HostPortToIP(hostport string, ctx *net.TCPAddr) (hostaddr *net.TCPAddr, err error) {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return nil, err
}
iport, err := strconv.Atoi(port)
if err != nil || iport < 0 || iport > 0xFFFF {
return nil, fmt.Errorf("invalid port %d", iport)
}
var addr net.IP
if ctx != nil && host == "localhost" {
if ctx.IP.To4() != nil {
addr = net.IPv4(127, 0, 0, 1)
} else {
addr = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
}
} else if addr = net.ParseIP(host); addr == nil {
return nil, fmt.Errorf("could not parse IP %s", host)
}
return &net.TCPAddr{IP: addr, Port: iport}, nil
}
// AddrPairUserid returns the local userid who owns the TCP connection
// given by the local and remote ip:port (lipport and ripport,
// respectively). Returns ErrNotFound for the error if the TCP connection
// isn't found.
func AddrPairUserid(local, remote net.Addr) (uid int, err error) {
lAddr, lOk := local.(*net.TCPAddr)
rAddr, rOk := remote.(*net.TCPAddr)
if !(lOk && rOk) {
return -1, fmt.Errorf("netutil: Could not convert Addr to TCPAddr.")
}
localv4 := (lAddr.IP.To4() != nil)
remotev4 := (rAddr.IP.To4() != nil)
if localv4 != remotev4 {
return -1, fmt.Errorf("netutil: address pairs of different families; localv4=%v, remotev4=%v",
localv4, remotev4)
}
switch runtime.GOOS {
case "darwin":
return uidFromLsof(lAddr.IP, lAddr.Port, rAddr.IP, rAddr.Port)
case "freebsd":
return uidFromSockstat(lAddr.IP, lAddr.Port, rAddr.IP, rAddr.Port)
case "linux":
file := "/proc/net/tcp"
if !localv4 {
file = "/proc/net/tcp6"
}
f, err := os.Open(file)
if err != nil {
return -1, fmt.Errorf("Error opening %s: %v", file, err)
}
defer f.Close()
return uidFromProcReader(lAddr.IP, lAddr.Port, rAddr.IP, rAddr.Port, f)
}
return 0, ErrUnsupportedOS
}
func toLinuxIPv4Order(b []byte) []byte {
binary.BigEndian.PutUint32(b, binary.LittleEndian.Uint32(b))
return b
}
func toLinuxIPv6Order(b []byte) []byte {
for i := 0; i < 16; i += 4 {
sb := b[i : i+4]
binary.BigEndian.PutUint32(sb, binary.LittleEndian.Uint32(sb))
}
return b
}
type maybeBrackets net.IP
func (p maybeBrackets) String() string {
s := net.IP(p).String()
if strings.Contains(s, ":") {
return "[" + s + "]"
}
return s
}
// Changed by tests.
var uidFromUsername = uidFromUsernameFn
func uidFromUsernameFn(username string) (uid int, err error) {
if uid := os.Getuid(); uid != 0 && username == os.Getenv("USER") {
return uid, nil
}
u, err := user.Lookup(username)
if err == nil {
uid, err := strconv.Atoi(u.Uid)
return uid, err
}
return 0, err
}
func uidFromLsof(lip net.IP, lport int, rip net.IP, rport int) (uid int, err error) {
seek := fmt.Sprintf("%s:%d->%s:%d", maybeBrackets(lip), lport, maybeBrackets(rip), rport)
seekb := []byte(seek)
if _, err = exec.LookPath("lsof"); err != nil {
return
}
cmd := exec.Command("lsof",
"-b", // avoid system calls that could block
"-w", // and don't warn about cases where -b fails
"-n", // don't resolve network names
"-P", // don't resolve network ports,
// TODO(bradfitz): pass down the uid we care about, then do: ?
//"-a", // AND the following together:
// "-u", strconv.Itoa(uid) // just this uid
"-itcp") // we only care about TCP connections
stdout, err := cmd.StdoutPipe()
if err != nil {
return
}
defer cmd.Wait()
defer stdout.Close()
err = cmd.Start()
if err != nil {
return
}
defer cmd.Process.Kill()
br := bufio.NewReader(stdout)
for {
line, err := br.ReadSlice('\n')
if err == io.EOF {
break
}
if err != nil {
return -1, err
}
if !bytes.Contains(line, seekb) {
continue
}
// SystemUIS 276 bradfitz 15u IPv4 0xffffff801a7c74e0 0t0 TCP 127.0.0.1:56718->127.0.0.1:5204 (ESTABLISHED)
f := bytes.Fields(line)
if len(f) < 8 {
continue
}
username := string(f[2])
return uidFromUsername(username)
}
return -1, ErrNotFound
}
func uidFromSockstat(lip net.IP, lport int, rip net.IP, rport int) (int, error) {
cmd := exec.Command("sockstat", "-Ptcp")
stdout, err := cmd.StdoutPipe()
if err != nil {
return -1, err
}
defer cmd.Wait()
defer stdout.Close()
err = cmd.Start()
if err != nil {
return -1, err
}
defer cmd.Process.Kill()
return uidFromSockstatReader(lip, lport, rip, rport, stdout)
}
func uidFromSockstatReader(lip net.IP, lport int, rip net.IP, rport int, r io.Reader) (int, error) {
pat, err := regexp.Compile(fmt.Sprintf(`^([^ ]+).*%s:%d *%s:%d$`,
lip.String(), lport, rip.String(), rport))
if err != nil {
return -1, err
}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
l := scanner.Text()
m := pat.FindStringSubmatch(l)
if len(m) == 2 {
return uidFromUsername(m[1])
}
}
if err := scanner.Err(); err != nil {
return -1, err
}
return -1, ErrNotFound
}
func uidFromProcReader(lip net.IP, lport int, rip net.IP, rport int, r io.Reader) (uid int, err error) {
buf := bufio.NewReader(r)
localHex := ""
remoteHex := ""
ipv4 := lip.To4() != nil
if ipv4 {
// In the kernel, the port is run through ntohs(), and
// the inet_request_socket in
// include/net/inet_socket.h says the "loc_addr" and
// "rmt_addr" fields are __be32, but get_openreq4's
// printf of them is raw, without byte order
// converstion.
localHex = fmt.Sprintf("%08X:%04X", toLinuxIPv4Order([]byte(lip.To4())), lport)
remoteHex = fmt.Sprintf("%08X:%04X", toLinuxIPv4Order([]byte(rip.To4())), rport)
} else {
localHex = fmt.Sprintf("%032X:%04X", toLinuxIPv6Order([]byte(lip.To16())), lport)
remoteHex = fmt.Sprintf("%032X:%04X", toLinuxIPv6Order([]byte(rip.To16())), rport)
}
for {
line, err := buf.ReadString('\n')
if err != nil {
return -1, ErrNotFound
}
parts := strings.Fields(strings.TrimSpace(line))
if len(parts) < 8 {
continue
}
// log.Printf("parts[1] = %q; localHex = %q", parts[1], localHex)
if parts[1] == localHex && parts[2] == remoteHex {
uid, err = strconv.Atoi(parts[7])
return uid, err
}
}
panic("unreachable")
}
| vendor/camlistore.org/pkg/netutil/ident.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.005796018522232771,
0.0004460346244741231,
0.00016153455362655222,
0.00017251723329536617,
0.0010512461885809898
] |
{
"id": 0,
"code_window": [
"\n",
"func (b *backend) pathRoleCreateRead(\n",
"\treq *logical.Request, data *framework.FieldData) (*logical.Response, error) {\n",
"\tname := data.Get(\"name\").(string)\n",
"\tvar usernameLength int\n",
"\n",
"\t// Get the role\n",
"\trole, err := b.Role(req.Storage, name)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 33
} | package command
import (
"bufio"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/kv-builder"
"github.com/hashicorp/vault/helper/password"
"github.com/hashicorp/vault/meta"
"github.com/mitchellh/mapstructure"
"github.com/ryanuber/columnize"
)
// AuthHandler is the interface that any auth handlers must implement
// to enable auth via the CLI.
type AuthHandler interface {
Auth(*api.Client, map[string]string) (string, error)
Help() string
}
// AuthCommand is a Command that handles authentication.
type AuthCommand struct {
meta.Meta
Handlers map[string]AuthHandler
// The fields below can be overwritten for tests
testStdin io.Reader
}
func (c *AuthCommand) Run(args []string) int {
var method, authPath string
var methods, methodHelp, noVerify bool
flags := c.Meta.FlagSet("auth", meta.FlagSetDefault)
flags.BoolVar(&methods, "methods", false, "")
flags.BoolVar(&methodHelp, "method-help", false, "")
flags.BoolVar(&noVerify, "no-verify", false, "")
flags.StringVar(&method, "method", "", "method")
flags.StringVar(&authPath, "path", "", "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
if methods {
return c.listMethods()
}
args = flags.Args()
tokenHelper, err := c.TokenHelper()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing token helper: %s\n\n"+
"Please verify that the token helper is available and properly\n"+
"configured for your system. Please refer to the documentation\n"+
"on token helpers for more information.",
err))
return 1
}
// token is where the final token will go
handler := c.Handlers[method]
// Read token from stdin if first arg is exactly "-"
var stdin io.Reader = os.Stdin
if c.testStdin != nil {
stdin = c.testStdin
}
if len(args) > 0 && args[0] == "-" {
stdinR := bufio.NewReader(stdin)
args[0], err = stdinR.ReadString('\n')
if err != nil && err != io.EOF {
c.Ui.Error(fmt.Sprintf("Error reading from stdin: %s", err))
return 1
}
args[0] = strings.TrimSpace(args[0])
}
if method == "" {
token := ""
if len(args) > 0 {
token = args[0]
}
handler = &tokenAuthHandler{Token: token}
args = nil
}
if handler == nil {
methods := make([]string, 0, len(c.Handlers))
for k := range c.Handlers {
methods = append(methods, k)
}
sort.Strings(methods)
c.Ui.Error(fmt.Sprintf(
"Unknown authentication method: %s\n\n"+
"Please use a supported authentication method. The list of supported\n"+
"authentication methods is shown below. Note that this list may not\n"+
"be exhaustive: Vault may support other auth methods. For auth methods\n"+
"unsupported by the CLI, please use the HTTP API.\n\n"+
"%s",
method,
strings.Join(methods, ", ")))
return 1
}
if methodHelp {
c.Ui.Output(handler.Help())
return 0
}
// Warn if the VAULT_TOKEN environment variable is set, as that will take
// precedence
if os.Getenv("VAULT_TOKEN") != "" {
c.Ui.Output("==> WARNING: VAULT_TOKEN environment variable set!\n")
c.Ui.Output(" The environment variable takes precedence over the value")
c.Ui.Output(" set by the auth command. Either update the value of the")
c.Ui.Output(" environment variable or unset it to use the new token.\n")
}
var vars map[string]string
if len(args) > 0 {
builder := kvbuilder.Builder{Stdin: os.Stdin}
if err := builder.Add(args...); err != nil {
c.Ui.Error(err.Error())
return 1
}
if err := mapstructure.Decode(builder.Map(), &vars); err != nil {
c.Ui.Error(fmt.Sprintf("Error parsing options: %s", err))
return 1
}
}
// Build the client so we can auth
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client to auth: %s", err))
return 1
}
if authPath != "" {
vars["mount"] = authPath
}
// Authenticate
token, err := handler.Auth(client, vars)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
// Cache the previous token so that it can be restored if authentication fails
var previousToken string
if previousToken, err = tokenHelper.Get(); err != nil {
c.Ui.Error(fmt.Sprintf("Error caching the previous token: %s\n\n", err))
return 1
}
// Store the token!
if err := tokenHelper.Store(token); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error storing token: %s\n\n"+
"Authentication was not successful and did not persist.\n"+
"Please reauthenticate, or fix the issue above if possible.",
err))
return 1
}
if noVerify {
c.Ui.Output(fmt.Sprintf(
"Authenticated - no token verification has been performed.",
))
return 0
}
// Build the client again so it can read the token we just wrote
client, err = c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client to verify the token: %s", err))
if err := tokenHelper.Store(previousToken); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error restoring the previous token: %s\n\n"+
"Please reauthenticate with a valid token.",
err))
}
return 1
}
// Verify the token
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error validating token: %s", err))
if err := tokenHelper.Store(previousToken); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error restoring the previous token: %s\n\n"+
"Please reauthenticate with a valid token.",
err))
}
return 1
}
if secret == nil {
c.Ui.Error(fmt.Sprintf("Error: Invalid token"))
if err := tokenHelper.Store(previousToken); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error restoring the previous token: %s\n\n"+
"Please reauthenticate with a valid token.",
err))
}
return 1
}
// Get the policies we have
policiesRaw, ok := secret.Data["policies"]
if !ok {
policiesRaw = []string{"unknown"}
}
var policies []string
for _, v := range policiesRaw.([]interface{}) {
policies = append(policies, v.(string))
}
output := "Successfully authenticated! You are now logged in."
if method != "" {
output += "\nThe token below is already saved in the session. You do not"
output += "\nneed to \"vault auth\" again with the token."
}
output += fmt.Sprintf("\ntoken: %s", secret.Data["id"])
output += fmt.Sprintf("\ntoken_duration: %s", secret.Data["ttl"].(json.Number).String())
if len(policies) > 0 {
output += fmt.Sprintf("\ntoken_policies: [%s]", strings.Join(policies, ", "))
}
c.Ui.Output(output)
return 0
}
func (c *AuthCommand) listMethods() int {
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client: %s", err))
return 1
}
auth, err := client.Sys().ListAuth()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error reading auth table: %s", err))
return 1
}
paths := make([]string, 0, len(auth))
for path := range auth {
paths = append(paths, path)
}
sort.Strings(paths)
columns := []string{"Path | Type | Default TTL | Max TTL | Description"}
for _, path := range paths {
auth := auth[path]
defTTL := "system"
if auth.Config.DefaultLeaseTTL != 0 {
defTTL = strconv.Itoa(auth.Config.DefaultLeaseTTL)
}
maxTTL := "system"
if auth.Config.MaxLeaseTTL != 0 {
maxTTL = strconv.Itoa(auth.Config.MaxLeaseTTL)
}
columns = append(columns, fmt.Sprintf(
"%s | %s | %s | %s | %s", path, auth.Type, defTTL, maxTTL, auth.Description))
}
c.Ui.Output(columnize.SimpleFormat(columns))
return 0
}
func (c *AuthCommand) Synopsis() string {
return "Prints information about how to authenticate with Vault"
}
func (c *AuthCommand) Help() string {
helpText := `
Usage: vault auth [options] [token or config...]
Authenticate with Vault with the given token or via any supported
authentication backend.
If no -method is specified, then the token is expected. If it is not
given on the command-line, it will be asked via user input. If the
token is "-", it will be read from stdin.
By specifying -method, alternate authentication methods can be used
such as OAuth or TLS certificates. For these, additional values for
configuration can be specified with "key=value" pairs just like
"vault write". Specify the "-method-help" flag to get help for a specific
method.
If an auth backend is enabled at a different path, such as enabling
"github" at "github-private", the "method" flag should still be "github".
The flag "-path" should be used to specify the path at which the auth
backend is enabled. For example:
"vault auth -method=github -path=github-private token=<github_token>"
The value of the "path" flag will be supplied to auth providers
as the "mount" option in the payload to specify the mount point.
See the "-method-help" for more info.
General Options:
` + meta.GeneralOptionsUsage() + `
Auth Options:
-method=name Outputs help for the authentication method with the given
name for the remote server. If this authentication method
is not available, exit with code 1.
-method-help If set, the help for the selected method will be shown.
-methods List the available auth methods.
-no-verify Do not verify the token after creation; avoids a use count
decrement.
-path The path at which the auth backend is enabled. If an auth
backend is mounted at multiple paths, this option can be
used to authenticate against specific paths.
`
return strings.TrimSpace(helpText)
}
// tokenAuthHandler handles retrieving the token from the command-line.
type tokenAuthHandler struct {
Token string
}
func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error) {
token := h.Token
if token == "" {
var err error
// No arguments given, read the token from user input
fmt.Printf("Token (will be hidden): ")
token, err = password.Read(os.Stdin)
fmt.Printf("\n")
if err != nil {
return "", fmt.Errorf(
"Error attempting to ask for token. The raw error message\n"+
"is shown below, but the most common reason for this error is\n"+
"that you attempted to pipe a value into auth. If you want to\n"+
"pipe the token, please pass '-' as the token argument.\n\n"+
"Raw error: %s", err)
}
}
if token == "" {
return "", fmt.Errorf(
"A token must be passed to auth. Please view the help\n" +
"for more information.")
}
return token, nil
}
func (h *tokenAuthHandler) Help() string {
help := `
No method selected with the "-method" flag, so the "auth" command assumes
you'll be using raw token authentication. For this, specify the token to
authenticate as, as the parameter to "vault auth". Example:
vault auth 123456
The token used to authenticate must come from some other source. A root
token is created when Vault is first initialized. After that, subsequent
tokens are created via the API or command line interface (with the
"token"-prefixed commands).
`
return strings.TrimSpace(help)
}
| command/auth.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017561022832524031,
0.0001693553349468857,
0.00016366931959055364,
0.00016913926810957491,
0.0000029267353056638967
] |
{
"id": 0,
"code_window": [
"\n",
"func (b *backend) pathRoleCreateRead(\n",
"\treq *logical.Request, data *framework.FieldData) (*logical.Response, error) {\n",
"\tname := data.Get(\"name\").(string)\n",
"\tvar usernameLength int\n",
"\n",
"\t// Get the role\n",
"\trole, err := b.Role(req.Storage, name)\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 33
} | package mssql
import (
"fmt"
"strings"
)
// SplitSQL is used to split a series of SQL statements
func SplitSQL(sql string) []string {
parts := strings.Split(sql, ";")
out := make([]string, 0, len(parts))
for _, p := range parts {
clean := strings.TrimSpace(p)
if len(clean) > 0 {
out = append(out, clean)
}
}
return out
}
// Query templates a query for us.
func Query(tpl string, data map[string]string) string {
for k, v := range data {
tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
}
return tpl
}
| builtin/logical/mssql/util.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0002278066531289369,
0.00019284815061837435,
0.0001734887046040967,
0.00017724907957017422,
0.000024767021386651322
] |
{
"id": 1,
"code_window": [
"\tif lease == nil {\n",
"\t\tlease = &configLease{}\n",
"\t}\n",
"\n",
"\t// Generate our username and password. MySQL limits user to 16 characters\n",
"\tdisplayName := name\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Generate our username and password. The username will be the name of\n",
"\t// the role, truncated to role.displaynameLength, appended to a uuid,\n",
"\t// with the entire string truncated to role.usernameLength.\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 53
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon separated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0017763314535841346,
0.00033222875208593905,
0.0001646911259740591,
0.00017083465354517102,
0.0004421173071023077
] |
{
"id": 1,
"code_window": [
"\tif lease == nil {\n",
"\t\tlease = &configLease{}\n",
"\t}\n",
"\n",
"\t// Generate our username and password. MySQL limits user to 16 characters\n",
"\tdisplayName := name\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Generate our username and password. The username will be the name of\n",
"\t// the role, truncated to role.displaynameLength, appended to a uuid,\n",
"\t// with the entire string truncated to role.usernameLength.\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 53
} | Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
| vendor/github.com/hashicorp/golang-lru/LICENSE | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017966702580451965,
0.00017566373571753502,
0.0001660706038819626,
0.00017672439571470022,
0.000002892292741307756
] |
{
"id": 1,
"code_window": [
"\tif lease == nil {\n",
"\t\tlease = &configLease{}\n",
"\t}\n",
"\n",
"\t// Generate our username and password. MySQL limits user to 16 characters\n",
"\tdisplayName := name\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Generate our username and password. The username will be the name of\n",
"\t// the role, truncated to role.displaynameLength, appended to a uuid,\n",
"\t// with the entire string truncated to role.usernameLength.\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 53
} | package command
import (
"testing"
"github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/meta"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/cli"
)
func TestMounts(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := http.TestServer(t, core)
defer ln.Close()
ui := new(cli.MockUi)
c := &MountsCommand{
Meta: meta.Meta{
ClientToken: token,
Ui: ui,
},
}
args := []string{
"-address", addr,
}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
}
| command/mounts_test.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017948311869986355,
0.00017278178711421788,
0.00016685949231032282,
0.0001723922323435545,
0.000004498367616179166
] |
{
"id": 1,
"code_window": [
"\tif lease == nil {\n",
"\t\tlease = &configLease{}\n",
"\t}\n",
"\n",
"\t// Generate our username and password. MySQL limits user to 16 characters\n",
"\tdisplayName := name\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// Generate our username and password. The username will be the name of\n",
"\t// the role, truncated to role.displaynameLength, appended to a uuid,\n",
"\t// with the entire string truncated to role.usernameLength.\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 53
} | package dockertest
/*
Copyright 2014 The Camlistore Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"log"
"math/rand"
"os/exec"
"regexp"
"strings"
"time"
// Import postgres driver
_ "github.com/lib/pq"
"github.com/pborman/uuid"
)
// runLongTest checks all the conditions for running a docker container
// based on image.
func runLongTest(image string) error {
DockerMachineAvailable = false
if haveDockerMachine() {
DockerMachineAvailable = true
if !startDockerMachine() {
log.Printf(`Starting docker machine "%s" failed.
This could be because the image is already running or because the image does not exist.
Tests will fail if the image does not exist.`, DockerMachineName)
}
} else if !haveDocker() {
return errors.New("Neither 'docker' nor 'docker-machine' available on this system.")
}
if ok, err := HaveImage(image); !ok || err != nil {
if err != nil {
return fmt.Errorf("Error checking for docker image %s: %v", image, err)
}
log.Printf("Pulling docker image %s ...", image)
if err := Pull(image); err != nil {
return fmt.Errorf("Error pulling %s: %v", image, err)
}
}
return nil
}
func runDockerCommand(command string, args ...string) *exec.Cmd {
if DockerMachineAvailable {
command = "/usr/local/bin/" + strings.Join(append([]string{command}, args...), " ")
cmd := exec.Command("docker-machine", "ssh", DockerMachineName, command)
return cmd
}
return exec.Command(command, args...)
}
// haveDockerMachine returns whether the "docker" command was found.
func haveDockerMachine() bool {
_, err := exec.LookPath("docker-machine")
return err == nil
}
// startDockerMachine starts the docker machine and returns false if the command failed to execute
func startDockerMachine() bool {
_, err := exec.Command("docker-machine", "start", DockerMachineName).Output()
return err == nil
}
// haveDocker returns whether the "docker" command was found.
func haveDocker() bool {
_, err := exec.LookPath("docker")
return err == nil
}
type dockerImage struct {
repo string
tag string
}
type dockerImageList []dockerImage
func (l dockerImageList) contains(repo string, tag string) bool {
if tag == "" {
tag = "latest"
}
for _, image := range l {
if image.repo == repo && image.tag == tag {
return true
}
}
return false
}
func parseDockerImagesOutput(data []byte) (images dockerImageList) {
lines := strings.Split(string(data), "\n")
if len(lines) < 2 {
return
}
	// skip first line with column names
images = make(dockerImageList, 0, len(lines)-1)
for _, line := range lines[1:] {
cols := strings.Fields(line)
if len(cols) < 2 {
continue
}
image := dockerImage{
repo: cols[0],
tag: cols[1],
}
images = append(images, image)
}
return
}
func parseImageName(name string) (repo string, tag string) {
if fields := strings.SplitN(name, ":", 2); len(fields) == 2 {
repo, tag = fields[0], fields[1]
} else {
repo = name
}
return
}
// HaveImage reports if docker have image 'name'.
func HaveImage(name string) (bool, error) {
out, err := runDockerCommand("docker", "images", "--no-trunc").Output()
if err != nil {
return false, err
}
repo, tag := parseImageName(name)
images := parseDockerImagesOutput(out)
return images.contains(repo, tag), nil
}
func run(args ...string) (containerID string, err error) {
var stdout, stderr bytes.Buffer
validID := regexp.MustCompile(`^([a-zA-Z0-9]+)$`)
cmd := runDockerCommand("docker", append([]string{"run"}, args...)...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
if err = cmd.Run(); err != nil {
err = fmt.Errorf("Error running docker\nStdOut: %s\nStdErr: %s\nError: %v\n\n", stdout.String(), stderr.String(), err)
return
}
containerID = strings.TrimSpace(string(stdout.String()))
if !validID.MatchString(containerID) {
return "", fmt.Errorf("Error running docker: %s", containerID)
}
if containerID == "" {
return "", errors.New("Unexpected empty output from `docker run`")
}
return containerID, nil
}
// KillContainer runs docker kill on a container.
func KillContainer(container string) error {
if container != "" {
return runDockerCommand("docker", "kill", container).Run()
}
return nil
}
// Pull retrieves the docker image with 'docker pull'.
func Pull(image string) error {
out, err := runDockerCommand("docker", "pull", image).CombinedOutput()
if err != nil {
err = fmt.Errorf("%v: %s", err, out)
}
return err
}
// IP returns the IP address of the container.
func IP(containerID string) (string, error) {
out, err := runDockerCommand("docker", "inspect", containerID).Output()
if err != nil {
return "", err
}
type networkSettings struct {
IPAddress string
}
type container struct {
NetworkSettings networkSettings
}
var c []container
if err := json.NewDecoder(bytes.NewReader(out)).Decode(&c); err != nil {
return "", err
}
if len(c) == 0 {
return "", errors.New("no output from docker inspect")
}
if ip := c[0].NetworkSettings.IPAddress; ip != "" {
return ip, nil
}
return "", errors.New("could not find an IP. Not running?")
}
// SetupMultiportContainer sets up a container, using the start function to run the given image.
// It also looks up the IP address of the container, and tests this address with the given
// ports and timeout. It returns the container ID and its IP address, or makes the test
// fail on error.
func SetupMultiportContainer(image string, ports []int, timeout time.Duration, start func() (string, error)) (c ContainerID, ip string, err error) {
err = runLongTest(image)
if err != nil {
return "", "", err
}
containerID, err := start()
if err != nil {
return "", "", err
}
c = ContainerID(containerID)
ip, err = c.lookup(ports, timeout)
if err != nil {
c.KillRemove()
return "", "", err
}
return c, ip, nil
}
// SetupContainer sets up a container, using the start function to run the given image.
// It also looks up the IP address of the container, and tests this address with the given
// port and timeout. It returns the container ID and its IP address, or makes the test
// fail on error.
func SetupContainer(image string, port int, timeout time.Duration, start func() (string, error)) (c ContainerID, ip string, err error) {
return SetupMultiportContainer(image, []int{port}, timeout, start)
}
// RandomPort returns a random non-privileged port.
func RandomPort() int {
min := 1025
max := 65534
return min + rand.Intn(max-min)
}
// GenerateContainerID generates a random container id.
func GenerateContainerID() string {
return ContainerPrefix + uuid.New()
}
func init() {
rand.Seed(time.Now().UTC().UnixNano())
} | vendor/github.com/ory-am/dockertest/docker.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0016342144226655364,
0.00023893822799436748,
0.0001675383246038109,
0.00017353304428979754,
0.00027990370290353894
] |
{
"id": 2,
"code_window": [
"\tdisplayName := name\n",
"\tul, ok := data.GetOk(\"username_length\")\n",
"\tif ok == true {\n",
"\t\tusernameLength = ul.(int)\n",
"\t} else {\n",
"\t\tusernameLength = 10\n",
"\t}\n",
"\tif len(displayName) > usernameLength {\n",
"\t\tdisplayName = displayName[:usernameLength]\n",
"\t}\n",
"\tuserUUID, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(displayName) > role.DisplaynameLength {\n",
"\t\tdisplayName = displayName[:role.DisplaynameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 55
} | ---
layout: "docs"
page_title: "Secret Backend: MySQL"
sidebar_current: "docs-secrets-mysql"
description: |-
The MySQL secret backend for Vault generates database credentials to access MySQL.
---
# MySQL Secret Backend
Name: `mysql`
The MySQL secret backend for Vault generates database credentials
dynamically based on configured roles. This means that services that need
to access a database no longer need to hardcode credentials: they can request
them from Vault, and use Vault's leasing mechanism to more easily roll keys.
Additionally, it introduces a new ability: with every service accessing
the database with unique credentials, it makes auditing much easier when
questionable data access is discovered: you can track it down to the specific
instance of a service based on the SQL username.
Vault makes use of its own internal revocation system to ensure that users
become invalid within a reasonable time of the lease expiring.
This page will show a quick start for this backend. For detailed documentation
on every path, use `vault path-help` after mounting the backend.
## Quick Start
The first step to using the mysql backend is to mount it.
Unlike the `generic` backend, the `mysql` backend is not mounted by default.
```
$ vault mount mysql
Successfully mounted 'mysql' at 'mysql'!
```
Next, we must configure Vault to know how to connect to the MySQL
instance. This is done by providing a DSN (Data Source Name):
```
$ vault write mysql/config/connection \
connection_url="root:root@tcp(192.168.33.10:3306)/"
Success! Data written to: mysql/config/connection
```
In this case, we've configured Vault with the user "root" and password "root",
connecting to an instance at "192.168.33.10" on port 3306. It is not necessary
that Vault has the root user, but the user must have privileges to create
other users, namely the `GRANT OPTION` privilege.
Optionally, we can configure the lease settings for credentials generated
by Vault. This is done by writing to the `config/lease` key:
```
$ vault write mysql/config/lease \
lease=1h \
lease_max=24h
Success! Data written to: mysql/config/lease
```
This restricts each credential to being valid or leased for 1 hour
at a time, with a maximum use period of 24 hours. This forces an
application to renew its credentials at least hourly, and to recycle
them once per day.
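An application would typically renew such a lease with the standard `vault
renew` command before it expires; the lease ID below is only a placeholder:
```
$ vault renew mysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897
```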
The next step is to configure a role. A role is a logical name that maps
to a policy used to generate those credentials. For example, let's create
a "readonly" role:
```
$ vault write mysql/roles/readonly \
sql="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';"
Success! Data written to: mysql/roles/readonly
```
By writing to the `roles/readonly` path we are defining the `readonly` role.
This role will be created by evaluating the given `sql` statements. By
default, the `{{name}}` and `{{password}}` fields will be populated by
Vault with dynamically generated values. This SQL statement is creating
the named user, and then granting it `SELECT` or read-only privileges
to tables in the database. More complex `GRANT` queries can be used to
customize the privileges of the role. See the [MySQL manual](https://dev.mysql.com/doc/refman/5.7/en/grant.html)
for more information.
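As a rough illustration (not part of the quick start; the `app` database
name is assumed), a role can also be scoped to a single database rather than
granting read access to everything:
```
$ vault write mysql/roles/app-readonly \
    sql="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON app.* TO '{{name}}'@'%';"
```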
To generate a new set of credentials, we simply read from that role:
```
$ vault read mysql/creds/readonly
Key Value
lease_id mysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897
lease_duration 3600
password 132ae3ef-5a64-7499-351e-bfe59f3a2a21
username root-aefa635a-18
```
By reading from the `creds/readonly` path, Vault has generated a new
set of credentials using the `readonly` role configuration. Here we
see the dynamically generated username and password, along with a one
hour lease.
Using ACLs, it is possible to restrict using the mysql backend such
that trusted operators can manage the role definitions, and both
users and applications are restricted in the credentials they are
allowed to read.
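For example, a policy sketch along these lines (the policy and file names
are placeholders, and the path assumes the backend is mounted at `mysql` as
above) would let an application read credentials for the `readonly` role and
nothing else:
```
$ cat > mysql-readonly.hcl <<EOF
path "mysql/creds/readonly" {
  policy = "read"
}
EOF
$ vault policy-write mysql-readonly mysql-readonly.hcl
```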
Optionally, you may configure the number of characters from the role
name that are used to form the mysql username interpolated into
the `{{name}}` field: the default is 10. Note that versions of
mysql prior to 5.8 have a 16 character total limit on user names, so
it is probably not safe to increase this above the default on versions
prior to that.
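For instance, a role could be written with a shorter truncation length; the
value of 8 below is purely illustrative:
```
$ vault write mysql/roles/readonly \
    username_length=8 \
    sql="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';"
```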
## API
### /mysql/config/connection
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Configures the connection DSN used to communicate with MySQL.
This is a root protected endpoint.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/config/connection`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">connection_url</span>
<span class="param-flags">required</span>
The MySQL DSN
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">value</span>
<span class="param-flags">optional</span>
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">max_open_connections</span>
<span class="param-flags">optional</span>
Maximum number of open connections to the database.
Defaults to 2.
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">verify-connection</span>
<span class="param-flags">optional</span>
If set, connection_url is verified by actually connecting to the database.
Defaults to true.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/config/lease
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Configures the lease settings for generated credentials.
If not configured, leases default to 1 hour. This is a root
protected endpoint.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/config/lease`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">lease</span>
<span class="param-flags">required</span>
The lease value provided as a string duration
with time suffix. Hour is the largest suffix.
</li>
<li>
<span class="param">lease_max</span>
<span class="param-flags">required</span>
The maximum lease value provided as a string duration
with time suffix. Hour is the largest suffix.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/roles/
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Creates or updates the role definition.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">sql</span>
<span class="param-flags">required</span>
The SQL statements executed to create and configure the role.
Must be semi-colon separated. The '{{name}}' and '{{password}}'
values will be substituted.
</li>
<li>
<span class="param">username_length</span>
<span class="param-flags">optional</span>
Determines how many characters from the role name will be used
to form the mysql username interpolated into the '{{name}}' field
of the sql parameter.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Queries the role definition.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"sql": "CREATE USER..."
}
}
```
</dd>
</dl>
#### LIST
<dl class="api">
<dt>Description</dt>
<dd>
Returns a list of available roles. Only the role names are returned, not
any values.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
  <dd>`/mysql/roles/?list=true`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"auth": null,
"data": {
"keys": ["dev", "prod"]
},
"lease_duration": 2592000,
"lease_id": "",
"renewable": false
}
```
</dd>
</dl>
#### DELETE
<dl class="api">
<dt>Description</dt>
<dd>
Deletes the role definition.
</dd>
<dt>Method</dt>
<dd>DELETE</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/creds/
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Generates a new set of dynamic credentials based on the named role.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/mysql/creds/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"username": "root-aefa635a-18",
"password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21"
}
}
```
</dd>
</dl>
| website/source/docs/secrets/mysql/index.html.md | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00020659202709794044,
0.00017109802865888923,
0.0001613832137081772,
0.0001718183048069477,
0.000007535308668593643
] |
{
"id": 2,
"code_window": [
"\tdisplayName := name\n",
"\tul, ok := data.GetOk(\"username_length\")\n",
"\tif ok == true {\n",
"\t\tusernameLength = ul.(int)\n",
"\t} else {\n",
"\t\tusernameLength = 10\n",
"\t}\n",
"\tif len(displayName) > usernameLength {\n",
"\t\tdisplayName = displayName[:usernameLength]\n",
"\t}\n",
"\tuserUUID, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(displayName) > role.DisplaynameLength {\n",
"\t\tdisplayName = displayName[:role.DisplaynameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 55
} |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017830358410719782,
0.0001766225614119321,
0.00017484999261796474,
0.00017663545440882444,
9.61253476816637e-7
] |
{
"id": 2,
"code_window": [
"\tdisplayName := name\n",
"\tul, ok := data.GetOk(\"username_length\")\n",
"\tif ok == true {\n",
"\t\tusernameLength = ul.(int)\n",
"\t} else {\n",
"\t\tusernameLength = 10\n",
"\t}\n",
"\tif len(displayName) > usernameLength {\n",
"\t\tdisplayName = displayName[:usernameLength]\n",
"\t}\n",
"\tuserUUID, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(displayName) > role.DisplaynameLength {\n",
"\t\tdisplayName = displayName[:role.DisplaynameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 55
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlsutil
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"io/ioutil"
)
// NewCertPool creates x509 certPool with provided CA files.
func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, CAFile := range CAFiles {
pemByte, err := ioutil.ReadFile(CAFile)
if err != nil {
return nil, err
}
for {
var block *pem.Block
block, pemByte = pem.Decode(pemByte)
if block == nil {
break
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certPool.AddCert(cert)
}
}
return certPool, nil
}
// NewCert generates TLS cert by using the given cert,key and parse function.
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
cert, err := ioutil.ReadFile(certfile)
if err != nil {
return nil, err
}
key, err := ioutil.ReadFile(keyfile)
if err != nil {
return nil, err
}
if parseFunc == nil {
parseFunc = tls.X509KeyPair
}
tlsCert, err := parseFunc(cert, key)
if err != nil {
return nil, err
}
return &tlsCert, nil
}
| vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017877566278912127,
0.00017052856856025755,
0.00016066113312263042,
0.00017142685828730464,
0.000005844377938046819
] |
{
"id": 2,
"code_window": [
"\tdisplayName := name\n",
"\tul, ok := data.GetOk(\"username_length\")\n",
"\tif ok == true {\n",
"\t\tusernameLength = ul.(int)\n",
"\t} else {\n",
"\t\tusernameLength = 10\n",
"\t}\n",
"\tif len(displayName) > usernameLength {\n",
"\t\tdisplayName = displayName[:usernameLength]\n",
"\t}\n",
"\tuserUUID, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(displayName) > role.DisplaynameLength {\n",
"\t\tdisplayName = displayName[:role.DisplaynameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 55
} | ---
layout: "docs"
page_title: "Auth Backend: Token"
sidebar_current: "docs-auth-token"
description: |-
The token store auth backend is used to authenticate using tokens.
---
# Auth Backend: Token
The token backend is the only auth backend that is built-in and
automatically available at `/auth/token` as well as with first-class
built-in CLI methods such as `vault token-create`. It allows users to
authenticate using a token, as well to create new tokens, revoke
secrets by token, and more.
When any other auth backend returns an identity, Vault core invokes the
token backend to create a new unique token for that identity.
The token store can also be used to bypass any other auth backend:
you can create tokens directly, as well as perform a variety of other
operations on tokens such as renewal and revocation.
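For example (a sketch only; the policy name and token values are
placeholders), these operations are all available directly from the CLI:
```
$ vault token-create -policy="web"
$ vault token-renew <token>
$ vault token-revoke <token>
```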
Please see the [token concepts](/docs/concepts/tokens.html) page dedicated
to tokens.
## Authentication
#### Via the CLI
```
$ vault auth <token>
...
```
#### Via the API
The token is set directly as a header for the HTTP API. The name
of the header should be "X-Vault-Token" and the value should be the token.
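A minimal sketch using curl (the address and token are placeholders):
```
$ curl \
    -H "X-Vault-Token: <token>" \
    http://127.0.0.1:8200/v1/auth/token/lookup-self
```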
## API
### /auth/token/create
### /auth/token/create-orphan
### /auth/token/create/[role_name]
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Creates a new token. Certain options are only available when called by a
root token. If used via the `/auth/token/create-orphan` endpoint, a root
token is not required to create an orphan token (otherwise set with the
`no_parent` option). If used with a role name in the path, the token will
be created against the specified role name; this may override options set
during this call.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URLs</dt>
<dd>`/auth/token/create`</dd>
<dd>`/auth/token/create-orphan`</dd>
<dd>`/auth/token/create/<role_name>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">id</span>
<span class="param-flags">optional</span>
The ID of the client token. Can only be specified by a root token.
Otherwise, the token ID is a randomly generated UUID.
</li>
<li>
<span class="param">policies</span>
<span class="param-flags">optional</span>
A list of policies for the token. This must be a subset of the
policies belonging to the token making the request, unless root.
If not specified, defaults to all the policies of the calling token.
</li>
<li>
<span class="param">meta</span>
<span class="param-flags">optional</span>
A map of string to string valued metadata. This is passed through
to the audit backends.
</li>
<li>
<span class="param">no_parent</span>
<span class="param-flags">optional</span>
If true and set by a root caller, the token will not have the
parent token of the caller. This creates a token with no parent.
</li>
<li>
<span class="param">no_default_policy</span>
<span class="param-flags">optional</span>
If true the `default` policy will not be a part of this token's
policy set.
</li>
<li>
<span class="param">renewable</span>
<span class="param-flags">optional</span>
Set to `false` to disable the ability of the token to be renewed past
its initial TTL. Specifying `true`, or omitting this option, will allow
the token to be renewable up to the system/mount maximum TTL.
</li>
<li>
<span class="param">lease</span>
<span class="param-flags">optional</span>
DEPRECATED; use "ttl" instead.
</li>
<li>
<span class="param">ttl</span>
<span class="param-flags">optional</span>
The TTL period of the token, provided as "1h", where hour is
the largest suffix. If not provided, the token is valid for the
[default lease TTL](/docs/config/index.html), or
indefinitely if the root policy is used.
</li>
<li>
<span class="param">explicit_max_ttl</span>
<span class="param-flags">optional</span>
If set, the token will have an explicit max TTL set upon it. This
maximum token TTL *cannot* be changed later, and unlike with normal
tokens, updates to the system/mount max TTL value will have no effect
at renewal time -- the token will never be able to be renewed or used
past the value set at issue time.
</li>
<li>
<span class="param">display_name</span>
<span class="param-flags">optional</span>
The display name of the token. Defaults to "token".
</li>
<li>
<span class="param">num_uses</span>
<span class="param-flags">optional</span>
The maximum uses for the given token. This can be used to create
a one-time-token or limited use token. Defaults to 0, which has
no limit to the number of uses.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"auth": {
"client_token": "ABCD",
"policies": ["web", "stage"],
"metadata": {"user": "armon"},
"lease_duration": 3600,
"renewable": true,
}
}
```
</dd>
</dl>
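
For the create endpoint above, a request might look like the following sketch; the local server address and the `/v1` API prefix are assumed defaults, and the token and payload values are illustrative:

```shell
# Illustrative values: the payload uses parameters documented above.
$ curl \
    -X POST \
    -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
    -d '{"policies": ["web", "stage"], "ttl": "1h", "display_name": "web-token"}' \
    http://127.0.0.1:8200/v1/auth/token/create
```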
### /auth/token/lookup-self
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Returns information about the current client token.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"id": "ClientToken",
"policies": ["web", "stage"],
"path": "auth/github/login",
"meta": {"user": "armon", "organization": "hashicorp"},
"display_name": "github-armon",
"num_uses": 0,
}
}
```
</dd>
</dl>
### /auth/token/lookup[/token]
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Returns information about the client token provided in the request path.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/auth/token/lookup/<token>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"id": "ClientToken",
"policies": ["web", "stage"],
"path": "auth/github/login",
"meta": {"user": "armon", "organization": "hashicorp"},
"display_name": "github-armon",
"num_uses": 0,
}
}
```
</dd>
</dl>
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Returns information about the client token provided in the request body.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/lookup`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">token</span>
<span class="param-flags">required</span>
Token to lookup.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"id": "ClientToken",
"policies": ["web", "stage"],
"path": "auth/github/login",
"meta": {"user": "armon", "organization": "hashicorp"},
"display_name": "github-armon",
"num_uses": 0,
}
}
```
</dd>
</dl>
### /auth/token/renew-self
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Renews a lease associated with the calling token. This is used to prevent
the expiration of a token and its automatic revocation. Token renewal is
possible only if there is a lease associated with the token. An example
request is shown at the end of this section.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/renew-self`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">increment</span>
<span class="param-flags">optional</span>
An optional requested lease increment can be provided. This
increment may be ignored.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"auth": {
"client_token": "ABCD",
"policies": ["web", "stage"],
"metadata": {"user": "armon"},
"lease_duration": 3600,
"renewable": true,
}
}
```
</dd>
</dl>
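
For the renew-self endpoint above, a request might look like the following sketch; the local server address and the `/v1` API prefix are assumed defaults, and the token and increment values are illustrative:

```shell
# Illustrative values: increment is an optional number of seconds.
$ curl \
    -X POST \
    -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
    -d '{"increment": 3600}' \
    http://127.0.0.1:8200/v1/auth/token/renew-self
```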
### /auth/token/renew[/token]
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Renews a lease associated with a token. This is used to prevent the
expiration of a token and its automatic revocation. Token renewal is
possible only if there is a lease associated with the token.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/renew/<token>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">token</span>
<span class="param-flags">required</span>
Token to renew. This can be part of the URL or the body.
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">increment</span>
<span class="param-flags">optional</span>
An optional requested lease increment can be provided. This
increment may be ignored.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"auth": {
"client_token": "ABCD",
"policies": ["web", "stage"],
"metadata": {"user": "armon"},
"lease_duration": 3600,
"renewable": true,
}
}
```
</dd>
</dl>
### /auth/token/revoke[/token]
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Revokes a token and all child tokens. When the token is revoked,
all secrets generated with it are also revoked.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/revoke/<token>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">token</span>
<span class="param-flags">required</span>
Token to revoke. This can be part of the URL or the body.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>`204` response code.
</dd>
</dl>
### /auth/token/revoke-self
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Revokes the token used to call it and all child tokens.
When the token is revoked, all secrets generated with
it are also revoked.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/revoke-self`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>`204` response code.
</dd>
</dl>
### /auth/token/revoke-orphan[/token]
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Revokes a token but not its child tokens. When the token is revoked, all
secrets generated with it are also revoked. All child tokens are orphaned,
but can be revoked subsequently using `/auth/token/revoke/`. This is a
root-protected endpoint.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/revoke-orphan/<token>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">token</span>
<span class="param-flags">required</span>
Token to revoke. This can be part of the URL or the body.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>`204` response code.
</dd>
</dl>
### /auth/token/roles/[role_name]
#### DELETE
<dl class="api">
<dt>Description</dt>
<dd>
Deletes the named role.
</dd>
<dt>Method</dt>
<dd>DELETE</dd>
<dt>URL</dt>
<dd>`/auth/token/roles/<role_name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Fetches the named role configuration.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/auth/token/roles/<role_name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"period": 3600,
"allowed_policies": ["web", "stage"],
"orphan": true,
"path_suffix": ""
}
}
```
</dd>
</dl>
#### LIST
<dl class="api">
<dt>Description</dt>
<dd>
Lists available roles.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/auth/token/roles?list=true`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"keys": ["role1", "role2"]
}
}
```
</dd>
</dl>
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Creates (or replaces) the named role. Roles enforce specific behavior when
creating tokens that allow token functionality that is otherwise not
available or would require `sudo`/root privileges to access. Role
parameters, when set, override any provided options to the `create`
endpoints. The role name is also included in the token path, allowing all
tokens created against a role to be revoked using the `sys/revoke-prefix`
endpoint. An example request is shown at the end of this section.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/roles/<role_name>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">allowed_policies</span>
<span class="param-flags">optional</span>
If set, tokens can be created with any subset of the policies in this
list, rather than the normal semantics of tokens being a subset of the
calling token's policies. The parameter is a comma-delimited string of
policy names.
</li>
<li>
<span class="param">orphan</span>
<span class="param-flags">optional</span>
If `true`, tokens created against this role will be orphan tokens
(they will have no parent). As such, they will not be automatically
revoked by the revocation of any other token.
</li>
<li>
<span class="param">period</span>
<span class="param-flags">optional</span>
If set, tokens created against this role will <i>not</i> have a maximum
lifetime. Instead, they will have a fixed TTL that is refreshed with
each renewal. So long as they continue to be renewed, they will never
expire. The parameter is an integer duration of seconds. Tokens issued
track updates to the role value; the new period takes effect upon next
renew. This cannot be used in conjunction with `explicit_max_ttl`.
</li>
<li>
<span class="param">renewable</span>
<span class="param-flags">optional</span>
Set to `false` to disable the ability of tokens created against this
role to be renewed past their initial TTL. Defaults to `true`, which
allows tokens to be renewed up to the system/mount maximum TTL.
</li>
<li>
<span class="param">path_suffix</span>
<span class="param-flags">optional</span>
If set, tokens created against this role will have the given suffix as
part of their path in addition to the role name. This can be useful in
certain scenarios, such as keeping the same role name in the future but
revoking all tokens created against it before some point in time. The
suffix can be changed, allowing new callers to have the new suffix as
part of their path, and then tokens with the old suffix can be revoked
via `sys/revoke-prefix`.
</li>
<li>
<span class="param">explicit_max_ttl</span>
<span class="param-flags">optional</span>
If set, tokens created with this role have an explicit max TTL set upon
them. This maximum token TTL *cannot* be changed later, and unlike with
normal tokens, updates to the role or the system/mount max TTL value
will have no effect at renewal time -- the token will never be able to
be renewed or used past the value set at issue time. This cannot be
used in conjunction with `period`.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` return code.
</dd>
</dl>
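
For the role endpoint above, a request might look like the following sketch; the local server address, the `/v1` API prefix, and the role name are assumed or illustrative, and the payload uses parameters documented above:

```shell
# Illustrative values: "app-role" is a made-up role name.
$ curl \
    -X POST \
    -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
    -d '{"allowed_policies": "web,stage", "period": 3600, "orphan": true}' \
    http://127.0.0.1:8200/v1/auth/token/roles/app-role
```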
### /auth/token/lookup-accessor[/accessor]
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Fetches the properties of the token associated with the accessor, except the token ID.
This is meant for cases where there is no access to the token ID but there is a need
to fetch the properties of a token.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/lookup-accessor/<accessor>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">accessor</span>
<span class="param-flags">required</span>
Accessor of the token to lookup. This can be part of the URL or the body.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"lease_id": "",
"renewable": false,
"lease_duration": 0,
"data": {
"creation_time": 1457533232,
"creation_ttl": 2592000,
"display_name": "token",
"id": "",
"meta": null,
"num_uses": 0,
"orphan": false,
"path": "auth/token/create",
"policies": ["default", "web"],
"ttl": 2591976
},
"warnings": null,
"auth": null
}
```
</dd>
</dl>
### /auth/token/revoke-accessor[/accessor]
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Revokes the token associated with the accessor and all of its child tokens.
This is meant for cases where there is no access to the token ID but
there is a need to revoke a token and its children.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/auth/token/revoke-accessor/<accessor>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">accessor</span>
<span class="param-flags">required</span>
Accessor of the token. This can be part of the URL or the body.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>`204` response code.
</dd>
</dl>
| website/source/docs/auth/token.html.md | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0001791796094039455,
0.00017216028936672956,
0.000162674201419577,
0.00017272794502787292,
0.0000030493899885186693
] |
{
"id": 3,
"code_window": [
"\t\treturn nil, err\n",
"\t}\n",
"\tusername := fmt.Sprintf(\"%s-%s\", displayName, userUUID)\n",
"\tif len(username) > 16 {\n",
"\t\tusername = username[:16]\n",
"\t}\n",
"\tpassword, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(username) > role.UsernameLength {\n",
"\t\tusername = username[:role.UsernameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 69
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon separated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0005608243518508971,
0.00021683443628717214,
0.0001627461751922965,
0.0001738200371619314,
0.0001056405235431157
] |
{
"id": 3,
"code_window": [
"\t\treturn nil, err\n",
"\t}\n",
"\tusername := fmt.Sprintf(\"%s-%s\", displayName, userUUID)\n",
"\tif len(username) > 16 {\n",
"\t\tusername = username[:16]\n",
"\t}\n",
"\tpassword, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(username) > role.UsernameLength {\n",
"\t\tusername = username[:role.UsernameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 69
} | # pkcs7
[](https://godoc.org/github.com/fullsailor/pkcs7)
pkcs7 implements parsing and creating signed and enveloped messages.
- Documentation on [GoDoc](http://godoc.org/github.com/fullsailor/pkcs7)
| vendor/github.com/fullsailor/pkcs7/README.md | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00016464636428281665,
0.00016464636428281665,
0.00016464636428281665,
0.00016464636428281665,
0
] |
{
"id": 3,
"code_window": [
"\t\treturn nil, err\n",
"\t}\n",
"\tusername := fmt.Sprintf(\"%s-%s\", displayName, userUUID)\n",
"\tif len(username) > 16 {\n",
"\t\tusername = username[:16]\n",
"\t}\n",
"\tpassword, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(username) > role.UsernameLength {\n",
"\t\tusername = username[:role.UsernameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 69
} | # go-homedir
This is a Go library for detecting the user's home directory without
the use of cgo, so the library can be used in cross-compilation environments.
Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
for a user, and `homedir.Expand()` to expand the `~` in a path to the home
directory.
**Why not just use `os/user`?** The built-in `os/user` package requires
cgo on Darwin systems. This means that any Go code that uses that package
cannot cross compile. But 99% of the time the use for `os/user` is just to
retrieve the home directory, which we can do for the current user without
cgo. This library does that, enabling cross-compilation.
| vendor/github.com/mitchellh/go-homedir/README.md | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00018323339463677257,
0.00017498512170277536,
0.00016673686332069337,
0.00017498512170277536,
0.0000082482656580396
] |
{
"id": 3,
"code_window": [
"\t\treturn nil, err\n",
"\t}\n",
"\tusername := fmt.Sprintf(\"%s-%s\", displayName, userUUID)\n",
"\tif len(username) > 16 {\n",
"\t\tusername = username[:16]\n",
"\t}\n",
"\tpassword, err := uuid.GenerateUUID()\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(username) > role.UsernameLength {\n",
"\t\tusername = username[:role.UsernameLength]\n"
],
"file_path": "builtin/logical/mysql/path_role_create.go",
"type": "replace",
"edit_start_line_idx": 69
} | // Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clock_seq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clock_seq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clock_seq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence a new random
// clock sequence is generated the first time a clock sequence is requested by
// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated
// for
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clock_seq == 0 {
setClockSequence(-1)
}
return int(clock_seq & 0x3fff)
}
// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
old_seq := clock_seq
clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if old_seq != clock_seq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. It returns false if uuid is not valid. The time is only well defined
// for version 1 and 2 UUIDs.
func (uuid UUID) Time() (Time, bool) {
if len(uuid) != 16 {
return 0, false
}
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time), true
}
// ClockSequence returns the clock sequence encoded in uuid. It returns false
// if uuid is not valid. The clock sequence is only well defined for version 1
// and 2 UUIDs.
func (uuid UUID) ClockSequence() (int, bool) {
if len(uuid) != 16 {
return 0, false
}
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
}
| vendor/github.com/pborman/uuid/time.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00033346566488035023,
0.0001902237709145993,
0.0001623363932594657,
0.00017074393690563738,
0.000047728004574310035
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDescription: \"SQL string to create a user. See help for more info.\",\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"username_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 10)\",\n",
"\t\t\t},\n",
"\t\t},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 16)\",\n",
"\t\t\t\tDefault: 16,\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"displayname_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate the rolename portion of generated mysql usernames to (default 10)\",\n",
"\t\t\t\tDefault: 10,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 39
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon separated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.96454918384552,
0.05166250839829445,
0.00016676897939760238,
0.0003157495229970664,
0.21517905592918396
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDescription: \"SQL string to create a user. See help for more info.\",\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"username_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 10)\",\n",
"\t\t\t},\n",
"\t\t},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 16)\",\n",
"\t\t\t\tDefault: 16,\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"displayname_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate the rolename portion of generated mysql usernames to (default 10)\",\n",
"\t\t\t\tDefault: 10,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for AMD64, FreeBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
| vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017629691865295172,
0.00017265212954953313,
0.000169429971720092,
0.0001722294691717252,
0.0000028193041998747503
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDescription: \"SQL string to create a user. See help for more info.\",\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"username_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 10)\",\n",
"\t\t\t},\n",
"\t\t},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 16)\",\n",
"\t\t\t\tDefault: 16,\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"displayname_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate the rolename portion of generated mysql usernames to (default 10)\",\n",
"\t\t\t\tDefault: 10,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 39
} | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// +build appengine
package internal
import (
"errors"
"fmt"
"net/http"
"time"
"appengine"
"appengine_internal"
basepb "appengine_internal/base"
"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
)
var contextKey = "holds an appengine.Context"
func fromContext(ctx netcontext.Context) appengine.Context {
c, _ := ctx.Value(&contextKey).(appengine.Context)
return c
}
// This is only for classic App Engine adapters.
func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
return fromContext(ctx)
}
func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
ctx := netcontext.WithValue(parent, &contextKey, c)
s := &basepb.StringProto{}
c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
if ns := s.GetValue(); ns != "" {
ctx = NamespacedContext(ctx, ns)
}
return ctx
}
func IncomingHeaders(ctx netcontext.Context) http.Header {
if c := fromContext(ctx); c != nil {
if req, ok := c.Request().(*http.Request); ok {
return req.Header
}
}
return nil
}
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
c := appengine.NewContext(req)
return withContext(parent, c)
}
type testingContext struct {
appengine.Context
req *http.Request
}
func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
if service == "__go__" && method == "GetNamespace" {
return nil
}
return fmt.Errorf("testingContext: unsupported Call")
}
func (t *testingContext) Request() interface{} { return t.req }
func ContextForTesting(req *http.Request) netcontext.Context {
return withContext(netcontext.Background(), &testingContext{req: req})
}
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
}
}
if f, ctx, ok := callOverrideFromContext(ctx); ok {
return f(ctx, service, method, in, out)
}
// Handle already-done contexts quickly.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
c := fromContext(ctx)
if c == nil {
// Give a good error message rather than a panic lower down.
return errors.New("not an App Engine context")
}
// Apply transaction modifications if we're in a transaction.
if t := transactionFromContext(ctx); t != nil {
if t.finished {
return errors.New("transaction context has expired")
}
applyTransaction(in, &t.transaction)
}
var opts *appengine_internal.CallOptions
if d, ok := ctx.Deadline(); ok {
opts = &appengine_internal.CallOptions{
Timeout: d.Sub(time.Now()),
}
}
err := c.Call(service, method, in, out, opts)
switch v := err.(type) {
case *appengine_internal.APIError:
return &APIError{
Service: v.Service,
Detail: v.Detail,
Code: v.Code,
}
case *appengine_internal.CallError:
return &CallError{
Detail: v.Detail,
Code: v.Code,
Timeout: v.Timeout,
}
}
return err
}
func handleHTTP(w http.ResponseWriter, r *http.Request) {
panic("handleHTTP called; this should be impossible")
}
func logf(c appengine.Context, level int64, format string, args ...interface{}) {
var fn func(format string, args ...interface{})
switch level {
case 0:
fn = c.Debugf
case 1:
fn = c.Infof
case 2:
fn = c.Warningf
case 3:
fn = c.Errorf
case 4:
fn = c.Criticalf
default:
// This shouldn't happen.
fn = c.Criticalf
}
fn(format, args...)
}
| vendor/google.golang.org/appengine/internal/api_classic.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0007020168704912066,
0.000206543889362365,
0.00016519470955245197,
0.00017189413483720273,
0.0001281958248000592
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDescription: \"SQL string to create a user. See help for more info.\",\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"username_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 10)\",\n",
"\t\t\t},\n",
"\t\t},\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tDescription: \"number of characters to truncate generated mysql usernames to (default 16)\",\n",
"\t\t\t\tDefault: 16,\n",
"\t\t\t},\n",
"\n",
"\t\t\t\"displayname_length\": &framework.FieldSchema{\n",
"\t\t\t\tType: framework.TypeInt,\n",
"\t\t\t\tDescription: \"number of characters to truncate the rolename portion of generated mysql usernames to (default 10)\",\n",
"\t\t\t\tDefault: 10,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 39
} | Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| vendor/golang.org/x/sys/LICENSE | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0001703538146102801,
0.0001664813607931137,
0.00016143015818670392,
0.00016766009503044188,
0.000003737198767339578
] |