| hunk (dict) | file (stringlengths 0-11.8M) | file_path (stringlengths 2-234) | label (int64 0-1) | commit_url (stringlengths 74-103) | dependency_score (sequencelengths 5-5) |
---|---|---|---|---|---|
{
"id": 4,
"code_window": [
"type TiDBStatement struct {\n",
"\tid uint32\n",
"\tnumParams int\n",
"\tboundParams [][]byte\n",
"\tctx *TiDBContext\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tparamsType []byte\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 52
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"github.com/juju/errors"
"github.com/pingcap/tidb"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/util/types"
)
// TiDBDriver implements IDriver.
type TiDBDriver struct {
store kv.Storage
}
// NewTiDBDriver creates a new TiDBDriver.
func NewTiDBDriver(store kv.Storage) *TiDBDriver {
driver := &TiDBDriver{
store: store,
}
return driver
}
// TiDBContext implements IContext.
type TiDBContext struct {
session tidb.Session
currentDB string
warningCount uint16
stmts map[int]*TiDBStatement
}
// TiDBStatement implements IStatement.
type TiDBStatement struct {
id uint32
numParams int
boundParams [][]byte
ctx *TiDBContext
}
// ID implements IStatement ID method.
func (ts *TiDBStatement) ID() int {
return int(ts.id)
}
// Execute implements IStatement Execute method.
func (ts *TiDBStatement) Execute(args ...interface{}) (rs ResultSet, err error) {
tidbRecordset, err := ts.ctx.session.ExecutePreparedStmt(ts.id, args...)
if err != nil {
return nil, errors.Trace(err)
}
if tidbRecordset == nil {
return
}
rs = &tidbResultSet{
recordSet: tidbRecordset,
}
return
}
// AppendParam implements IStatement AppendParam method.
func (ts *TiDBStatement) AppendParam(paramID int, data []byte) error {
if paramID >= len(ts.boundParams) {
return mysql.NewErr(mysql.ErrWrongArguments, "stmt_send_longdata")
}
ts.boundParams[paramID] = append(ts.boundParams[paramID], data...)
return nil
}
// NumParams implements IStatement NumParams method.
func (ts *TiDBStatement) NumParams() int {
return ts.numParams
}
// BoundParams implements IStatement BoundParams method.
func (ts *TiDBStatement) BoundParams() [][]byte {
return ts.boundParams
}
// Reset implements IStatement Reset method.
func (ts *TiDBStatement) Reset() {
for i := range ts.boundParams {
ts.boundParams[i] = nil
}
}
// Close implements IStatement Close method.
func (ts *TiDBStatement) Close() error {
//TODO close at tidb level
err := ts.ctx.session.DropPreparedStmt(ts.id)
if err != nil {
return errors.Trace(err)
}
delete(ts.ctx.stmts, int(ts.id))
return nil
}
// OpenCtx implements IDriver.
func (qd *TiDBDriver) OpenCtx(connID uint64, capability uint32, collation uint8, dbname string) (IContext, error) {
session, err := tidb.CreateSession(qd.store)
if err != nil {
return nil, errors.Trace(err)
}
session.SetClientCapability(capability)
session.SetConnectionID(connID)
if dbname != "" {
_, err = session.Execute("use " + dbname)
if err != nil {
return nil, errors.Trace(err)
}
}
tc := &TiDBContext{
session: session,
currentDB: dbname,
stmts: make(map[int]*TiDBStatement),
}
return tc, nil
}
// Status implements IContext Status method.
func (tc *TiDBContext) Status() uint16 {
return tc.session.Status()
}
// LastInsertID implements IContext LastInsertID method.
func (tc *TiDBContext) LastInsertID() uint64 {
return tc.session.LastInsertID()
}
// Value implements IContext Value method.
func (tc *TiDBContext) Value(key fmt.Stringer) interface{} {
return tc.session.Value(key)
}
// SetValue implements IContext SetValue method.
func (tc *TiDBContext) SetValue(key fmt.Stringer, value interface{}) {
tc.session.SetValue(key, value)
}
// CommitTxn implements IContext CommitTxn method.
func (tc *TiDBContext) CommitTxn() error {
return tc.session.CommitTxn()
}
// RollbackTxn implements IContext RollbackTxn method.
func (tc *TiDBContext) RollbackTxn() error {
return tc.session.RollbackTxn()
}
// AffectedRows implements IContext AffectedRows method.
func (tc *TiDBContext) AffectedRows() uint64 {
return tc.session.AffectedRows()
}
// CurrentDB implements IContext CurrentDB method.
func (tc *TiDBContext) CurrentDB() string {
return tc.currentDB
}
// WarningCount implements IContext WarningCount method.
func (tc *TiDBContext) WarningCount() uint16 {
return tc.warningCount
}
// Execute implements IContext Execute method.
func (tc *TiDBContext) Execute(sql string) (rs []ResultSet, err error) {
rsList, err := tc.session.Execute(sql)
if err != nil {
return
}
if len(rsList) == 0 { // result ok
return
}
rs = make([]ResultSet, len(rsList))
for i := 0; i < len(rsList); i++ {
rs[i] = &tidbResultSet{
recordSet: rsList[i],
}
}
return
}
// SetClientCapability implements IContext SetClientCapability method.
func (tc *TiDBContext) SetClientCapability(flags uint32) {
tc.session.SetClientCapability(flags)
}
// Close implements IContext Close method.
func (tc *TiDBContext) Close() (err error) {
return tc.session.Close()
}
// Auth implements IContext Auth method.
func (tc *TiDBContext) Auth(user string, auth []byte, salt []byte) bool {
return tc.session.Auth(user, auth, salt)
}
// FieldList implements IContext FieldList method.
func (tc *TiDBContext) FieldList(table string) (colums []*ColumnInfo, err error) {
rs, err := tc.Execute("SELECT * FROM `" + table + "` LIMIT 0")
if err != nil {
return nil, errors.Trace(err)
}
colums, err = rs[0].Columns()
if err != nil {
return nil, errors.Trace(err)
}
return
}
// GetStatement implements IContext GetStatement method.
func (tc *TiDBContext) GetStatement(stmtID int) IStatement {
tcStmt := tc.stmts[stmtID]
if tcStmt != nil {
return tcStmt
}
return nil
}
// Prepare implements IContext Prepare method.
func (tc *TiDBContext) Prepare(sql string) (statement IStatement, columns, params []*ColumnInfo, err error) {
stmtID, paramCount, fields, err := tc.session.PrepareStmt(sql)
if err != nil {
return
}
stmt := &TiDBStatement{
id: stmtID,
numParams: paramCount,
boundParams: make([][]byte, paramCount),
ctx: tc,
}
statement = stmt
columns = make([]*ColumnInfo, len(fields))
for i := range fields {
columns[i] = convertColumnInfo(fields[i])
}
params = make([]*ColumnInfo, paramCount)
for i := range params {
params[i] = &ColumnInfo{
Type: mysql.TypeBlob,
}
}
tc.stmts[int(stmtID)] = stmt
return
}
type tidbResultSet struct {
recordSet ast.RecordSet
}
func (trs *tidbResultSet) Next() ([]types.Datum, error) {
row, err := trs.recordSet.Next()
if err != nil {
return nil, errors.Trace(err)
}
if row != nil {
return row.Data, nil
}
return nil, nil
}
func (trs *tidbResultSet) Close() error {
return trs.recordSet.Close()
}
func (trs *tidbResultSet) Columns() ([]*ColumnInfo, error) {
fields, err := trs.recordSet.Fields()
if err != nil {
return nil, errors.Trace(err)
}
var columns []*ColumnInfo
for _, v := range fields {
columns = append(columns, convertColumnInfo(v))
}
return columns, nil
}
func convertColumnInfo(fld *ast.ResultField) (ci *ColumnInfo) {
ci = new(ColumnInfo)
ci.Name = fld.ColumnAsName.O
ci.OrgName = fld.Column.Name.O
ci.Table = fld.TableAsName.O
if fld.Table != nil {
ci.OrgTable = fld.Table.Name.O
}
ci.Schema = fld.DBName.O
ci.Flag = uint16(fld.Column.Flag)
ci.Charset = uint16(mysql.CharsetIDs[fld.Column.Charset])
if fld.Column.Flen == types.UnspecifiedLength {
ci.ColumnLength = 0
} else {
ci.ColumnLength = uint32(fld.Column.Flen)
}
if fld.Column.Decimal == types.UnspecifiedLength {
ci.Decimal = 0
} else {
ci.Decimal = uint8(fld.Column.Decimal)
}
ci.Type = uint8(fld.Column.Tp)
// Keep things compatible for old clients.
// Refer to mysql-server/sql/protocol.cc send_result_set_metadata()
if ci.Type == mysql.TypeVarchar {
ci.Type = mysql.TypeVarString
}
return
}
| server/driver_tidb.go | 1 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.9985635876655579,
0.26422813534736633,
0.00016691898053977638,
0.007981905713677406,
0.40728822350502014
] |
{
"id": 4,
"code_window": [
"type TiDBStatement struct {\n",
"\tid uint32\n",
"\tnumParams int\n",
"\tboundParams [][]byte\n",
"\tctx *TiDBContext\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tparamsType []byte\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 52
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package domain
import (
"math/rand"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/util/testleak"
)
type leaseGrantItem struct {
leaseGrantTS uint64
schemaVer int64
}
func (*testSuite) TestSchemaValidator(c *C) {
defer testleak.AfterTest(c)()
lease := 2 * time.Millisecond
leaseGrantCh := make(chan leaseGrantItem)
oracleCh := make(chan uint64)
exit := make(chan struct{})
go serverFunc(lease, leaseGrantCh, oracleCh, exit)
validator := newSchemaValidator(lease)
for i := 0; i < 10; i++ {
delay := time.Duration(100+rand.Intn(900)) * time.Microsecond
time.Sleep(delay)
// Reload can run arbitrarily, at any time.
reload(validator, leaseGrantCh)
}
// Take a lease, check it's valid.
item := <-leaseGrantCh
validator.Update(item.leaseGrantTS, item.schemaVer)
valid := validator.Check(item.leaseGrantTS, item.schemaVer)
c.Assert(valid, IsTrue)
// Sleep for a long time, check schema is invalid.
time.Sleep(lease)
ts := <-oracleCh
valid = validator.Check(ts, item.schemaVer)
c.Assert(valid, IsFalse)
reload(validator, leaseGrantCh)
valid = validator.Check(ts, item.schemaVer)
c.Assert(valid, IsFalse)
// Check the latest schema version must changed.
c.Assert(item.schemaVer, Less, validator.Latest())
exit <- struct{}{}
}
func reload(validator SchemaValidator, leaseGrantCh chan leaseGrantItem) {
item := <-leaseGrantCh
validator.Update(item.leaseGrantTS, item.schemaVer)
}
// serverFunc plays the role of a remote server, and runs in a separate goroutine.
// It can grant lease and provide timestamp oracle.
// Caller should communicate with it through channel to mock network.
func serverFunc(lease time.Duration, requireLease chan leaseGrantItem, oracleCh chan uint64, exit chan struct{}) {
var version int64
leaseTS := uint64(time.Now().UnixNano())
ticker := time.NewTicker(lease)
for {
select {
case <-ticker.C:
version++
leaseTS = uint64(time.Now().UnixNano())
case requireLease <- leaseGrantItem{
leaseGrantTS: leaseTS,
schemaVer: version,
}:
case oracleCh <- uint64(time.Now().UnixNano()):
case <-exit:
return
}
}
}
| domain/schema_validator_test.go | 0 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.0003419152635615319,
0.00019111306755803525,
0.00016801264428067952,
0.00017497752560302615,
0.00005051173866377212
] |
{
"id": 4,
"code_window": [
"type TiDBStatement struct {\n",
"\tid uint32\n",
"\tnumParams int\n",
"\tboundParams [][]byte\n",
"\tctx *TiDBContext\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tparamsType []byte\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 52
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
queryHistgram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "handle_query_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handled queries.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
})
queryCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "query_total",
Help: "Counter of queries.",
}, []string{"type"})
// Query handle result status.
querySucc = "query_succ"
queryFailed = "query_failed"
)
func init() {
prometheus.MustRegister(queryHistgram)
prometheus.MustRegister(queryCounter)
}
| distsql/metrics.go | 0 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.0001818552118493244,
0.00017606528126634657,
0.00017171916260849684,
0.00017643682076595724,
0.0000037656895983673166
] |
{
"id": 4,
"code_window": [
"type TiDBStatement struct {\n",
"\tid uint32\n",
"\tnumParams int\n",
"\tboundParams [][]byte\n",
"\tctx *TiDBContext\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tparamsType []byte\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 52
} | // Copyright (c) 2014, Suryandaru Triandana <[email protected]>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package util
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
type buffer struct {
b []byte
miss int
}
// BufferPool is a 'buffer pool'.
type BufferPool struct {
pool [6]chan []byte
size [5]uint32
sizeMiss [5]uint32
sizeHalf [5]uint32
baseline [4]int
baseline0 int
mu sync.RWMutex
closed bool
closeC chan struct{}
get uint32
put uint32
half uint32
less uint32
equal uint32
greater uint32
miss uint32
}
func (p *BufferPool) poolNum(n int) int {
if n <= p.baseline0 && n > p.baseline0/2 {
return 0
}
for i, x := range p.baseline {
if n <= x {
return i + 1
}
}
return len(p.baseline) + 1
}
// Get returns buffer with length of n.
func (p *BufferPool) Get(n int) []byte {
if p == nil {
return make([]byte, n)
}
p.mu.RLock()
defer p.mu.RUnlock()
if p.closed {
return make([]byte, n)
}
atomic.AddUint32(&p.get, 1)
poolNum := p.poolNum(n)
pool := p.pool[poolNum]
if poolNum == 0 {
// Fast path.
select {
case b := <-pool:
switch {
case cap(b) > n:
if cap(b)-n >= n {
atomic.AddUint32(&p.half, 1)
select {
case pool <- b:
default:
}
return make([]byte, n)
} else {
atomic.AddUint32(&p.less, 1)
return b[:n]
}
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
default:
atomic.AddUint32(&p.greater, 1)
}
default:
atomic.AddUint32(&p.miss, 1)
}
return make([]byte, n, p.baseline0)
} else {
sizePtr := &p.size[poolNum-1]
select {
case b := <-pool:
switch {
case cap(b) > n:
if cap(b)-n >= n {
atomic.AddUint32(&p.half, 1)
sizeHalfPtr := &p.sizeHalf[poolNum-1]
if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
atomic.StoreUint32(sizeHalfPtr, 0)
} else {
select {
case pool <- b:
default:
}
}
return make([]byte, n)
} else {
atomic.AddUint32(&p.less, 1)
return b[:n]
}
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
default:
atomic.AddUint32(&p.greater, 1)
if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
select {
case pool <- b:
default:
}
}
}
default:
atomic.AddUint32(&p.miss, 1)
}
if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
if size == 0 {
atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
} else {
sizeMissPtr := &p.sizeMiss[poolNum-1]
if atomic.AddUint32(sizeMissPtr, 1) == 20 {
atomic.StoreUint32(sizePtr, uint32(n))
atomic.StoreUint32(sizeMissPtr, 0)
}
}
return make([]byte, n)
} else {
return make([]byte, n, size)
}
}
}
// Put adds given buffer to the pool.
func (p *BufferPool) Put(b []byte) {
if p == nil {
return
}
p.mu.RLock()
defer p.mu.RUnlock()
if p.closed {
return
}
atomic.AddUint32(&p.put, 1)
pool := p.pool[p.poolNum(cap(b))]
select {
case pool <- b:
default:
}
}
func (p *BufferPool) Close() {
if p == nil {
return
}
p.mu.Lock()
if !p.closed {
p.closed = true
p.closeC <- struct{}{}
}
p.mu.Unlock()
}
func (p *BufferPool) String() string {
if p == nil {
return "<nil>"
}
return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
}
func (p *BufferPool) drain() {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
for _, ch := range p.pool {
select {
case <-ch:
default:
}
}
case <-p.closeC:
close(p.closeC)
for _, ch := range p.pool {
close(ch)
}
return
}
}
}
// NewBufferPool creates a new initialized 'buffer pool'.
func NewBufferPool(baseline int) *BufferPool {
if baseline <= 0 {
panic("baseline can't be <= 0")
}
p := &BufferPool{
baseline0: baseline,
baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
closeC: make(chan struct{}, 1),
}
for i, cap := range []int{2, 2, 4, 4, 2, 1} {
p.pool[i] = make(chan []byte, cap)
}
go p.drain()
return p
}
| _vendor/src/github.com/pingcap/goleveldb/leveldb/util/buffer_pool.go | 0 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.0007655611261725426,
0.00021123094484210014,
0.000163633594638668,
0.00017704966012388468,
0.00011937129602301866
] |
{
"id": 5,
"code_window": [
"func (ts *TiDBStatement) BoundParams() [][]byte {\n",
"\treturn ts.boundParams\n",
"}\n",
"\n",
"// Reset implements IStatement Reset method.\n",
"func (ts *TiDBStatement) Reset() {\n",
"\tfor i := range ts.boundParams {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetParamsType implements IStatement SetParamsType method.\n",
"func (ts *TiDBStatement) SetParamsType(paramsType []byte) {\n",
"\tts.paramsType = paramsType\n",
"}\n",
"\n",
"// GetParamsType implements IStatement GetParamsType method.\n",
"func (ts *TiDBStatement) GetParamsType() []byte {\n",
"\treturn ts.paramsType\n",
"}\n",
"\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 94
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"github.com/pingcap/tidb/util/types"
)
// IDriver opens IContext.
type IDriver interface {
// OpenCtx opens an IContext with connection id, client capability, collation and dbname.
OpenCtx(connID uint64, capability uint32, collation uint8, dbname string) (IContext, error)
}
// IContext is the interface to execute command.
type IContext interface {
// Status returns server status code.
Status() uint16
// LastInsertID returns last inserted ID.
LastInsertID() uint64
// AffectedRows returns affected rows of last executed command.
AffectedRows() uint64
// Value returns the value associated with this context for key.
Value(key fmt.Stringer) interface{}
// SetValue saves a value associated with this context for key.
SetValue(key fmt.Stringer, value interface{})
// CommitTxn commits the transaction operations.
CommitTxn() error
// RollbackTxn undoes the transaction operations.
RollbackTxn() error
// WarningCount returns warning count of last executed command.
WarningCount() uint16
// CurrentDB returns current DB.
CurrentDB() string
// Execute executes a SQL statement.
Execute(sql string) ([]ResultSet, error)
// SetClientCapability sets client capability flags
SetClientCapability(uint32)
// Prepare prepares a statement.
Prepare(sql string) (statement IStatement, columns, params []*ColumnInfo, err error)
// GetStatement gets IStatement by statement ID.
GetStatement(stmtID int) IStatement
// FieldList returns columns of a table.
FieldList(tableName string) (columns []*ColumnInfo, err error)
// Close closes the IContext.
Close() error
// Auth verifies user's authentication.
Auth(user string, auth []byte, salt []byte) bool
}
// IStatement is the interface to use a prepared statement.
type IStatement interface {
// ID returns statement ID
ID() int
// Execute executes the statement.
Execute(args ...interface{}) (ResultSet, error)
// AppendParam appends parameter to the statement.
AppendParam(paramID int, data []byte) error
// NumParams returns number of parameters.
NumParams() int
// BoundParams returns bound parameters.
BoundParams() [][]byte
// Reset removes all bound parameters.
Reset()
// Close closes the statement.
Close() error
}
// ResultSet is the result set of a query.
type ResultSet interface {
Columns() ([]*ColumnInfo, error)
Next() ([]types.Datum, error)
Close() error
}
| server/driver.go | 1 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.9965900182723999,
0.09147772938013077,
0.00016666787269059569,
0.0001778446458047256,
0.2862255275249481
] |
{
"id": 5,
"code_window": [
"func (ts *TiDBStatement) BoundParams() [][]byte {\n",
"\treturn ts.boundParams\n",
"}\n",
"\n",
"// Reset implements IStatement Reset method.\n",
"func (ts *TiDBStatement) Reset() {\n",
"\tfor i := range ts.boundParams {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetParamsType implements IStatement SetParamsType method.\n",
"func (ts *TiDBStatement) SetParamsType(paramsType []byte) {\n",
"\tts.paramsType = paramsType\n",
"}\n",
"\n",
"// GetParamsType implements IStatement GetParamsType method.\n",
"func (ts *TiDBStatement) GetParamsType() []byte {\n",
"\treturn ts.paramsType\n",
"}\n",
"\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 94
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"sync"
"time"
"unsafe"
"github.com/juju/errors"
"github.com/ngaut/log"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/kv"
"golang.org/x/net/context"
)
var (
_ kv.Snapshot = (*tikvSnapshot)(nil)
)
const (
scanBatchSize = 100
batchGetSize = 5120
)
// tikvSnapshot implements MvccSnapshot interface.
type tikvSnapshot struct {
store *tikvStore
version kv.Version
}
// newTiKVSnapshot creates a snapshot of an TiKV store.
func newTiKVSnapshot(store *tikvStore, ver kv.Version) *tikvSnapshot {
return &tikvSnapshot{
store: store,
version: ver,
}
}
// BatchGet gets all the keys' value from kv-server and returns a map contains key/value pairs.
// The map will not contain nonexistent keys.
func (s *tikvSnapshot) BatchGet(keys []kv.Key) (map[string][]byte, error) {
txnCmdCounter.WithLabelValues("batch_get").Inc()
start := time.Now()
defer func() { txnCmdHistogram.WithLabelValues("batch_get").Observe(time.Since(start).Seconds()) }()
// We want [][]byte instead of []kv.Key, use some magic to save memory.
bytesKeys := *(*[][]byte)(unsafe.Pointer(&keys))
bo := NewBackoffer(batchGetMaxBackoff, context.Background())
// Create a map to collect key-values from region servers.
var mu sync.Mutex
m := make(map[string][]byte)
err := s.batchGetKeysByRegions(bo, bytesKeys, func(k, v []byte) {
if len(v) == 0 {
return
}
mu.Lock()
m[string(k)] = v
mu.Unlock()
})
if err != nil {
return nil, errors.Trace(err)
}
return m, nil
}
func (s *tikvSnapshot) batchGetKeysByRegions(bo *Backoffer, keys [][]byte, collectF func(k, v []byte)) error {
groups, _, err := s.store.regionCache.GroupKeysByRegion(bo, keys)
if err != nil {
return errors.Trace(err)
}
var batches []batchKeys
for id, g := range groups {
batches = appendBatchBySize(batches, id, g, func([]byte) int { return 1 }, batchGetSize)
}
if len(batches) == 0 {
return nil
}
if len(batches) == 1 {
return errors.Trace(s.batchGetSingleRegion(bo, batches[0], collectF))
}
ch := make(chan error)
for _, batch := range batches {
go func(batch batchKeys) {
ch <- s.batchGetSingleRegion(bo.Fork(), batch, collectF)
}(batch)
}
for i := 0; i < len(batches); i++ {
if e := <-ch; e != nil {
log.Debugf("snapshot batchGet failed: %v, tid: %d", e, s.version.Ver)
err = e
}
}
return errors.Trace(err)
}
func (s *tikvSnapshot) batchGetSingleRegion(bo *Backoffer, batch batchKeys, collectF func(k, v []byte)) error {
pending := batch.keys
for {
req := &pb.Request{
Type: pb.MessageType_CmdBatchGet,
CmdBatchGetReq: &pb.CmdBatchGetRequest{
Keys: pending,
Version: s.version.Ver,
},
}
resp, err := s.store.SendKVReq(bo, req, batch.region, readTimeoutMedium)
if err != nil {
return errors.Trace(err)
}
if regionErr := resp.GetRegionError(); regionErr != nil {
err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = s.batchGetKeysByRegions(bo, pending, collectF)
return errors.Trace(err)
}
batchGetResp := resp.GetCmdBatchGetResp()
if batchGetResp == nil {
return errors.Trace(errBodyMissing)
}
var (
lockedKeys [][]byte
locks []*Lock
)
for _, pair := range batchGetResp.Pairs {
keyErr := pair.GetError()
if keyErr == nil {
collectF(pair.GetKey(), pair.GetValue())
continue
}
lock, err := extractLockFromKeyErr(keyErr)
if err != nil {
return errors.Trace(err)
}
lockedKeys = append(lockedKeys, lock.Key)
locks = append(locks, lock)
}
if len(lockedKeys) > 0 {
ok, err := s.store.lockResolver.ResolveLocks(bo, locks)
if err != nil {
return errors.Trace(err)
}
if !ok {
err = bo.Backoff(boTxnLock, errors.Errorf("batchGet lockedKeys: %d", len(lockedKeys)))
if err != nil {
return errors.Trace(err)
}
}
pending = lockedKeys
continue
}
return nil
}
}
// Get gets the value for key k from snapshot.
func (s *tikvSnapshot) Get(k kv.Key) ([]byte, error) {
val, err := s.get(NewBackoffer(getMaxBackoff, context.Background()), k)
if err != nil {
return nil, errors.Trace(err)
}
if len(val) == 0 {
return nil, kv.ErrNotExist
}
return val, nil
}
func (s *tikvSnapshot) get(bo *Backoffer, k kv.Key) ([]byte, error) {
req := &pb.Request{
Type: pb.MessageType_CmdGet,
CmdGetReq: &pb.CmdGetRequest{
Key: k,
Version: s.version.Ver,
},
}
for {
loc, err := s.store.regionCache.LocateKey(bo, k)
if err != nil {
return nil, errors.Trace(err)
}
resp, err := s.store.SendKVReq(bo, req, loc.Region, readTimeoutShort)
if err != nil {
return nil, errors.Trace(err)
}
if regionErr := resp.GetRegionError(); regionErr != nil {
err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
if err != nil {
return nil, errors.Trace(err)
}
continue
}
cmdGetResp := resp.GetCmdGetResp()
if cmdGetResp == nil {
return nil, errors.Trace(errBodyMissing)
}
val := cmdGetResp.GetValue()
if keyErr := cmdGetResp.GetError(); keyErr != nil {
lock, err := extractLockFromKeyErr(keyErr)
if err != nil {
return nil, errors.Trace(err)
}
ok, err := s.store.lockResolver.ResolveLocks(bo, []*Lock{lock})
if err != nil {
return nil, errors.Trace(err)
}
if !ok {
err = bo.Backoff(boTxnLock, errors.New(keyErr.String()))
if err != nil {
return nil, errors.Trace(err)
}
}
continue
}
return val, nil
}
}
// Seek return a list of key-value pair after `k`.
func (s *tikvSnapshot) Seek(k kv.Key) (kv.Iterator, error) {
scanner, err := newScanner(s, k, scanBatchSize)
return scanner, errors.Trace(err)
}
// SeekReverse creates a reversed Iterator positioned on the first entry which key is less than k.
func (s *tikvSnapshot) SeekReverse(k kv.Key) (kv.Iterator, error) {
return nil, kv.ErrNotImplemented
}
func extractLockFromKeyErr(keyErr *pb.KeyError) (*Lock, error) {
if locked := keyErr.GetLocked(); locked != nil {
return newLock(locked), nil
}
if keyErr.Retryable != "" {
err := errors.Errorf("tikv restarts txn: %s", keyErr.GetRetryable())
log.Debug(err)
return nil, errors.Annotate(err, txnRetryableMark)
}
if keyErr.Abort != "" {
err := errors.Errorf("tikv aborts txn: %s", keyErr.GetAbort())
log.Warn(err)
return nil, errors.Trace(err)
}
return nil, errors.Errorf("unexpected KeyError: %s", keyErr.String())
}
| store/tikv/snapshot.go | 0 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.0011483561247587204,
0.0002559569547884166,
0.00016658286040183157,
0.00017838132043834776,
0.00021131806715857238
] |
{
"id": 5,
"code_window": [
"func (ts *TiDBStatement) BoundParams() [][]byte {\n",
"\treturn ts.boundParams\n",
"}\n",
"\n",
"// Reset implements IStatement Reset method.\n",
"func (ts *TiDBStatement) Reset() {\n",
"\tfor i := range ts.boundParams {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetParamsType implements IStatement SetParamsType method.\n",
"func (ts *TiDBStatement) SetParamsType(paramsType []byte) {\n",
"\tts.paramsType = paramsType\n",
"}\n",
"\n",
"// GetParamsType implements IStatement GetParamsType method.\n",
"func (ts *TiDBStatement) GetParamsType() []byte {\n",
"\treturn ts.paramsType\n",
"}\n",
"\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 94
} | // Code generated by protoc-gen-gogo.
// source: binlog.proto
// DO NOT EDIT!
/*
Package binlog is a generated protocol buffer package.
It is generated from these files:
binlog.proto
It has these top-level messages:
TableMutation
PrewriteValue
Binlog
*/
package binlog
import (
"fmt"
proto "github.com/golang/protobuf/proto"
)
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MutationType int32
const (
MutationType_Insert MutationType = 0
MutationType_Update MutationType = 1
MutationType_DeleteID MutationType = 2
MutationType_DeletePK MutationType = 3
MutationType_DeleteRow MutationType = 4
)
var MutationType_name = map[int32]string{
0: "Insert",
1: "Update",
2: "DeleteID",
3: "DeletePK",
4: "DeleteRow",
}
var MutationType_value = map[string]int32{
"Insert": 0,
"Update": 1,
"DeleteID": 2,
"DeletePK": 3,
"DeleteRow": 4,
}
func (x MutationType) Enum() *MutationType {
p := new(MutationType)
*p = x
return p
}
func (x MutationType) String() string {
return proto.EnumName(MutationType_name, int32(x))
}
func (x *MutationType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(MutationType_value, data, "MutationType")
if err != nil {
return err
}
*x = MutationType(value)
return nil
}
func (MutationType) EnumDescriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{0} }
type BinlogType int32
const (
BinlogType_Prewrite BinlogType = 0
BinlogType_Commit BinlogType = 1
BinlogType_Rollback BinlogType = 2
BinlogType_PreDDL BinlogType = 3
BinlogType_PostDDL BinlogType = 4
)
var BinlogType_name = map[int32]string{
0: "Prewrite",
1: "Commit",
2: "Rollback",
3: "PreDDL",
4: "PostDDL",
}
var BinlogType_value = map[string]int32{
"Prewrite": 0,
"Commit": 1,
"Rollback": 2,
"PreDDL": 3,
"PostDDL": 4,
}
func (x BinlogType) Enum() *BinlogType {
p := new(BinlogType)
*p = x
return p
}
func (x BinlogType) String() string {
return proto.EnumName(BinlogType_name, int32(x))
}
func (x *BinlogType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(BinlogType_value, data, "BinlogType")
if err != nil {
return err
}
*x = BinlogType(value)
return nil
}
func (BinlogType) EnumDescriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{1} }
// TableMutation contains mutations in a table.
type TableMutation struct {
TableId int64 `protobuf:"varint,1,opt,name=table_id,json=tableId" json:"table_id"`
// For inserted rows and updated rows, we save all column values of the row.
InsertedRows [][]byte `protobuf:"bytes,2,rep,name=inserted_rows,json=insertedRows" json:"inserted_rows,omitempty"`
UpdatedRows [][]byte `protobuf:"bytes,3,rep,name=updated_rows,json=updatedRows" json:"updated_rows,omitempty"`
// If the table PK is handle, we can only save the id of the deleted row.
DeletedIds []int64 `protobuf:"varint,4,rep,name=deleted_ids,json=deletedIds" json:"deleted_ids,omitempty"`
// If the table has PK but PK is not handle, we save the PK of the deleted row.
DeletedPks [][]byte `protobuf:"bytes,5,rep,name=deleted_pks,json=deletedPks" json:"deleted_pks,omitempty"`
// If the table doesn't have PK, we save the row value of the deleted row.
DeletedRows [][]byte `protobuf:"bytes,6,rep,name=deleted_rows,json=deletedRows" json:"deleted_rows,omitempty"`
// Used to apply table mutations in original sequence.
Sequence []MutationType `protobuf:"varint,7,rep,name=sequence,enum=binlog.MutationType" json:"sequence,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *TableMutation) Reset() { *m = TableMutation{} }
func (m *TableMutation) String() string { return proto.CompactTextString(m) }
func (*TableMutation) ProtoMessage() {}
func (*TableMutation) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{0} }
func (m *TableMutation) GetTableId() int64 {
if m != nil {
return m.TableId
}
return 0
}
func (m *TableMutation) GetInsertedRows() [][]byte {
if m != nil {
return m.InsertedRows
}
return nil
}
func (m *TableMutation) GetUpdatedRows() [][]byte {
if m != nil {
return m.UpdatedRows
}
return nil
}
func (m *TableMutation) GetDeletedIds() []int64 {
if m != nil {
return m.DeletedIds
}
return nil
}
func (m *TableMutation) GetDeletedPks() [][]byte {
if m != nil {
return m.DeletedPks
}
return nil
}
func (m *TableMutation) GetDeletedRows() [][]byte {
if m != nil {
return m.DeletedRows
}
return nil
}
func (m *TableMutation) GetSequence() []MutationType {
if m != nil {
return m.Sequence
}
return nil
}
type PrewriteValue struct {
SchemaVersion int64 `protobuf:"varint,1,opt,name=schema_version,json=schemaVersion" json:"schema_version"`
Mutations []TableMutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations"`
XXX_unrecognized []byte `json:"-"`
}
func (m *PrewriteValue) Reset() { *m = PrewriteValue{} }
func (m *PrewriteValue) String() string { return proto.CompactTextString(m) }
func (*PrewriteValue) ProtoMessage() {}
func (*PrewriteValue) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{1} }
func (m *PrewriteValue) GetSchemaVersion() int64 {
if m != nil {
return m.SchemaVersion
}
return 0
}
func (m *PrewriteValue) GetMutations() []TableMutation {
if m != nil {
return m.Mutations
}
return nil
}
// Binlog contains all the changes in a transaction, which can be used to reconstruct SQL statement, then export to
// other systems.
type Binlog struct {
Tp BinlogType `protobuf:"varint,1,opt,name=tp,enum=binlog.BinlogType" json:"tp"`
// start_ts is used in Prewrite, Commit and Rollback binlog Type.
// It is used for pairing prewrite log to commit log or rollback log.
StartTs int64 `protobuf:"varint,2,opt,name=start_ts,json=startTs" json:"start_ts"`
// commit_ts is used only in binlog type Commit.
CommitTs int64 `protobuf:"varint,3,opt,name=commit_ts,json=commitTs" json:"commit_ts"`
// prewrite key is used only in Prewrite binlog type.
// It is the primary key of the transaction, is used to check that the transaction is
// commited or not if it failed to pair to commit log or rollback log within a time window.
PrewriteKey []byte `protobuf:"bytes,4,opt,name=prewrite_key,json=prewriteKey" json:"prewrite_key,omitempty"`
// prewrite_data is marshalled from PrewriteData type,
// we do not need to unmarshal prewrite data before the binlog have been successfully paired.
PrewriteValue []byte `protobuf:"bytes,5,opt,name=prewrite_value,json=prewriteValue" json:"prewrite_value,omitempty"`
// ddl_query is the original ddl statement query, used for PreDDL type.
DdlQuery []byte `protobuf:"bytes,6,opt,name=ddl_query,json=ddlQuery" json:"ddl_query,omitempty"`
// ddl_job_id is used for PreDDL and PostDDL binlog type.
// If PreDDL has matching PostDDL with the same job_id, we can execute the DDL right away, otherwise,
// we can use the job_id to check if the ddl statement has been successfully added to DDL job list.
DdlJobId int64 `protobuf:"varint,7,opt,name=ddl_job_id,json=ddlJobId" json:"ddl_job_id"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Binlog) Reset() { *m = Binlog{} }
func (m *Binlog) String() string { return proto.CompactTextString(m) }
func (*Binlog) ProtoMessage() {}
func (*Binlog) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{2} }
func (m *Binlog) GetTp() BinlogType {
if m != nil {
return m.Tp
}
return BinlogType_Prewrite
}
func (m *Binlog) GetStartTs() int64 {
if m != nil {
return m.StartTs
}
return 0
}
func (m *Binlog) GetCommitTs() int64 {
if m != nil {
return m.CommitTs
}
return 0
}
func (m *Binlog) GetPrewriteKey() []byte {
if m != nil {
return m.PrewriteKey
}
return nil
}
func (m *Binlog) GetPrewriteValue() []byte {
if m != nil {
return m.PrewriteValue
}
return nil
}
func (m *Binlog) GetDdlQuery() []byte {
if m != nil {
return m.DdlQuery
}
return nil
}
func (m *Binlog) GetDdlJobId() int64 {
if m != nil {
return m.DdlJobId
}
return 0
}
func init() {
proto.RegisterType((*TableMutation)(nil), "binlog.TableMutation")
proto.RegisterType((*PrewriteValue)(nil), "binlog.PrewriteValue")
proto.RegisterType((*Binlog)(nil), "binlog.Binlog")
proto.RegisterEnum("binlog.MutationType", MutationType_name, MutationType_value)
proto.RegisterEnum("binlog.BinlogType", BinlogType_name, BinlogType_value)
}
func (m *TableMutation) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *TableMutation) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
i++
i = encodeVarintBinlog(data, i, uint64(m.TableId))
if len(m.InsertedRows) > 0 {
for _, b := range m.InsertedRows {
data[i] = 0x12
i++
i = encodeVarintBinlog(data, i, uint64(len(b)))
i += copy(data[i:], b)
}
}
if len(m.UpdatedRows) > 0 {
for _, b := range m.UpdatedRows {
data[i] = 0x1a
i++
i = encodeVarintBinlog(data, i, uint64(len(b)))
i += copy(data[i:], b)
}
}
if len(m.DeletedIds) > 0 {
for _, num := range m.DeletedIds {
data[i] = 0x20
i++
i = encodeVarintBinlog(data, i, uint64(num))
}
}
if len(m.DeletedPks) > 0 {
for _, b := range m.DeletedPks {
data[i] = 0x2a
i++
i = encodeVarintBinlog(data, i, uint64(len(b)))
i += copy(data[i:], b)
}
}
if len(m.DeletedRows) > 0 {
for _, b := range m.DeletedRows {
data[i] = 0x32
i++
i = encodeVarintBinlog(data, i, uint64(len(b)))
i += copy(data[i:], b)
}
}
if len(m.Sequence) > 0 {
for _, num := range m.Sequence {
data[i] = 0x38
i++
i = encodeVarintBinlog(data, i, uint64(num))
}
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *PrewriteValue) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *PrewriteValue) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
i++
i = encodeVarintBinlog(data, i, uint64(m.SchemaVersion))
if len(m.Mutations) > 0 {
for _, msg := range m.Mutations {
data[i] = 0x12
i++
i = encodeVarintBinlog(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Binlog) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Binlog) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
i++
i = encodeVarintBinlog(data, i, uint64(m.Tp))
data[i] = 0x10
i++
i = encodeVarintBinlog(data, i, uint64(m.StartTs))
data[i] = 0x18
i++
i = encodeVarintBinlog(data, i, uint64(m.CommitTs))
if m.PrewriteKey != nil {
data[i] = 0x22
i++
i = encodeVarintBinlog(data, i, uint64(len(m.PrewriteKey)))
i += copy(data[i:], m.PrewriteKey)
}
if m.PrewriteValue != nil {
data[i] = 0x2a
i++
i = encodeVarintBinlog(data, i, uint64(len(m.PrewriteValue)))
i += copy(data[i:], m.PrewriteValue)
}
if m.DdlQuery != nil {
data[i] = 0x32
i++
i = encodeVarintBinlog(data, i, uint64(len(m.DdlQuery)))
i += copy(data[i:], m.DdlQuery)
}
data[i] = 0x38
i++
i = encodeVarintBinlog(data, i, uint64(m.DdlJobId))
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}
func encodeFixed64Binlog(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Binlog(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintBinlog(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *TableMutation) Size() (n int) {
var l int
_ = l
n += 1 + sovBinlog(uint64(m.TableId))
if len(m.InsertedRows) > 0 {
for _, b := range m.InsertedRows {
l = len(b)
n += 1 + l + sovBinlog(uint64(l))
}
}
if len(m.UpdatedRows) > 0 {
for _, b := range m.UpdatedRows {
l = len(b)
n += 1 + l + sovBinlog(uint64(l))
}
}
if len(m.DeletedIds) > 0 {
for _, e := range m.DeletedIds {
n += 1 + sovBinlog(uint64(e))
}
}
if len(m.DeletedPks) > 0 {
for _, b := range m.DeletedPks {
l = len(b)
n += 1 + l + sovBinlog(uint64(l))
}
}
if len(m.DeletedRows) > 0 {
for _, b := range m.DeletedRows {
l = len(b)
n += 1 + l + sovBinlog(uint64(l))
}
}
if len(m.Sequence) > 0 {
for _, e := range m.Sequence {
n += 1 + sovBinlog(uint64(e))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *PrewriteValue) Size() (n int) {
var l int
_ = l
n += 1 + sovBinlog(uint64(m.SchemaVersion))
if len(m.Mutations) > 0 {
for _, e := range m.Mutations {
l = e.Size()
n += 1 + l + sovBinlog(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Binlog) Size() (n int) {
var l int
_ = l
n += 1 + sovBinlog(uint64(m.Tp))
n += 1 + sovBinlog(uint64(m.StartTs))
n += 1 + sovBinlog(uint64(m.CommitTs))
if m.PrewriteKey != nil {
l = len(m.PrewriteKey)
n += 1 + l + sovBinlog(uint64(l))
}
if m.PrewriteValue != nil {
l = len(m.PrewriteValue)
n += 1 + l + sovBinlog(uint64(l))
}
if m.DdlQuery != nil {
l = len(m.DdlQuery)
n += 1 + l + sovBinlog(uint64(l))
}
n += 1 + sovBinlog(uint64(m.DdlJobId))
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovBinlog(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozBinlog(x uint64) (n int) {
return sovBinlog(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *TableMutation) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TableMutation: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TableMutation: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TableId", wireType)
}
m.TableId = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.TableId |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InsertedRows", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.InsertedRows = append(m.InsertedRows, make([]byte, postIndex-iNdEx))
copy(m.InsertedRows[len(m.InsertedRows)-1], data[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field UpdatedRows", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.UpdatedRows = append(m.UpdatedRows, make([]byte, postIndex-iNdEx))
copy(m.UpdatedRows[len(m.UpdatedRows)-1], data[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DeletedIds", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.DeletedIds = append(m.DeletedIds, v)
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeletedPks", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DeletedPks = append(m.DeletedPks, make([]byte, postIndex-iNdEx))
copy(m.DeletedPks[len(m.DeletedPks)-1], data[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeletedRows", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DeletedRows = append(m.DeletedRows, make([]byte, postIndex-iNdEx))
copy(m.DeletedRows[len(m.DeletedRows)-1], data[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
}
var v MutationType
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (MutationType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Sequence = append(m.Sequence, v)
default:
iNdEx = preIndex
skippy, err := skipBinlog(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthBinlog
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PrewriteValue) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PrewriteValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PrewriteValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaVersion", wireType)
}
m.SchemaVersion = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.SchemaVersion |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Mutations = append(m.Mutations, TableMutation{})
if err := m.Mutations[len(m.Mutations)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipBinlog(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthBinlog
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Binlog) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Binlog: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Binlog: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Tp", wireType)
}
m.Tp = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Tp |= (BinlogType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
}
m.StartTs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.StartTs |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CommitTs", wireType)
}
m.CommitTs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.CommitTs |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PrewriteKey", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PrewriteKey = append(m.PrewriteKey[:0], data[iNdEx:postIndex]...)
if m.PrewriteKey == nil {
m.PrewriteKey = []byte{}
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PrewriteValue", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PrewriteValue = append(m.PrewriteValue[:0], data[iNdEx:postIndex]...)
if m.PrewriteValue == nil {
m.PrewriteValue = []byte{}
}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DdlQuery", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthBinlog
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DdlQuery = append(m.DdlQuery[:0], data[iNdEx:postIndex]...)
if m.DdlQuery == nil {
m.DdlQuery = []byte{}
}
iNdEx = postIndex
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DdlJobId", wireType)
}
m.DdlJobId = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBinlog
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.DdlJobId |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipBinlog(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthBinlog
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipBinlog(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowBinlog
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowBinlog
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowBinlog
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthBinlog
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowBinlog
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipBinlog(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthBinlog = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowBinlog = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("binlog.proto", fileDescriptorBinlog) }
var fileDescriptorBinlog = []byte{
// 517 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x54, 0x92, 0xcd, 0x6e, 0x9b, 0x40,
0x14, 0x85, 0x03, 0x38, 0xfe, 0xb9, 0x06, 0x0b, 0x8d, 0x52, 0x09, 0xb5, 0x92, 0x4d, 0x5d, 0x55,
0x42, 0xa9, 0x94, 0x56, 0xde, 0x75, 0xeb, 0x7a, 0x43, 0xd3, 0x48, 0x04, 0xb9, 0xd9, 0x22, 0xf0,
0x8c, 0x5c, 0x6a, 0xcc, 0x10, 0x66, 0x08, 0xf2, 0xb2, 0x4f, 0xd0, 0x6d, 0x1f, 0x29, 0xcb, 0x3e,
0x41, 0x55, 0xb9, 0x2f, 0x52, 0xcd, 0x0c, 0xf8, 0x67, 0xc7, 0x3d, 0xf7, 0x9b, 0x7b, 0x98, 0x73,
0x07, 0xcc, 0x24, 0xcd, 0x33, 0xba, 0xbe, 0x29, 0x4a, 0xca, 0x29, 0xea, 0xaa, 0xea, 0xe5, 0xd5,
0x9a, 0xae, 0xa9, 0x94, 0xde, 0x8b, 0x2f, 0xd5, 0x9d, 0xfe, 0xd4, 0xc1, 0x5a, 0xc6, 0x49, 0x46,
0xee, 0x2a, 0x1e, 0xf3, 0x94, 0xe6, 0x68, 0x02, 0x7d, 0x2e, 0x84, 0x28, 0xc5, 0x8e, 0xe6, 0x6a,
0x9e, 0x31, 0xef, 0x3c, 0xff, 0x99, 0x5c, 0x84, 0x3d, 0xa9, 0xfa, 0x18, 0xbd, 0x01, 0x2b, 0xcd,
0x19, 0x29, 0x39, 0xc1, 0x51, 0x49, 0x6b, 0xe6, 0xe8, 0xae, 0xe1, 0x99, 0xa1, 0xd9, 0x8a, 0x21,
0xad, 0x19, 0x7a, 0x0d, 0x66, 0x55, 0xe0, 0xf8, 0xc0, 0x18, 0x92, 0x19, 0x36, 0x9a, 0x44, 0x26,
0x30, 0xc4, 0x24, 0x23, 0x02, 0x49, 0x31, 0x73, 0x3a, 0xae, 0xe1, 0x19, 0x21, 0x34, 0x92, 0x8f,
0xcf, 0x80, 0x62, 0xc3, 0x9c, 0x4b, 0x39, 0xa2, 0x05, 0x82, 0x8d, 0x34, 0x69, 0x01, 0x69, 0xd2,
0x55, 0x26, 0x8d, 0x26, 0x4d, 0x3e, 0x40, 0x9f, 0x91, 0xc7, 0x8a, 0xe4, 0x2b, 0xe2, 0xf4, 0x5c,
0xc3, 0x1b, 0xcd, 0xae, 0x6e, 0x9a, 0x78, 0xda, 0x1b, 0x2f, 0x77, 0x05, 0x09, 0x0f, 0xd4, 0xb4,
0x06, 0x2b, 0x28, 0x49, 0x5d, 0xa6, 0x9c, 0x3c, 0xc4, 0x59, 0x45, 0xd0, 0x3b, 0x18, 0xb1, 0xd5,
0x37, 0xb2, 0x8d, 0xa3, 0x27, 0x52, 0xb2, 0x94, 0xe6, 0x67, 0xb1, 0x58, 0xaa, 0xf7, 0xa0, 0x5a,
0xe8, 0x23, 0x0c, 0xb6, 0xcd, 0x5c, 0x15, 0xcc, 0x70, 0xf6, 0xa2, 0x35, 0x3c, 0xcb, 0xb9, 0x39,
0x7e, 0xa4, 0xa7, 0x3f, 0x74, 0xe8, 0xce, 0x25, 0x89, 0x3c, 0xd0, 0x79, 0x21, 0x6d, 0x46, 0x33,
0xd4, 0x1e, 0x57, 0x3d, 0xf1, 0xb7, 0xcd, 0x59, 0x9d, 0x17, 0x62, 0x5b, 0x8c, 0xc7, 0x25, 0x8f,
0xb8, 0xb0, 0x3b, 0xd9, 0x96, 0x54, 0x97, 0x22, 0xa3, 0xc1, 0x8a, 0x6e, 0xb7, 0xa9, 0x24, 0x8c,
0x13, 0xa2, 0xaf, 0x64, 0x89, 0x98, 0x45, 0x73, 0xe3, 0x68, 0x43, 0x76, 0x4e, 0xc7, 0xd5, 0x44,
0x8c, 0xad, 0x76, 0x4b, 0x76, 0xe8, 0x2d, 0x8c, 0x0e, 0xc8, 0x93, 0x48, 0xc5, 0xb9, 0x94, 0x90,
0x55, 0x9c, 0x45, 0xf5, 0x0a, 0x06, 0x18, 0x67, 0xd1, 0x63, 0x45, 0xca, 0x9d, 0xd3, 0x95, 0x44,
0x1f, 0xe3, 0xec, 0x5e, 0xd4, 0x68, 0x0a, 0x20, 0x9a, 0xdf, 0x69, 0x22, 0x9e, 0x56, 0xef, 0xf4,
0x57, 0x30, 0xce, 0x3e, 0xd3, 0xc4, 0xc7, 0xd7, 0xf7, 0x60, 0x9e, 0xae, 0x05, 0x01, 0x74, 0x7d,
0xf9, 0xac, 0xec, 0x0b, 0xf1, 0xfd, 0x55, 0x3e, 0x1f, 0x5b, 0x43, 0x26, 0xf4, 0x17, 0x72, 0xcb,
0xfe, 0xc2, 0xd6, 0x8f, 0x55, 0x70, 0x6b, 0x1b, 0xc8, 0x82, 0x81, 0xaa, 0x42, 0x5a, 0xdb, 0x9d,
0xeb, 0x3b, 0x80, 0x63, 0x72, 0x02, 0x6d, 0xb7, 0xab, 0x46, 0x7e, 0x92, 0x29, 0xa8, 0x91, 0x21,
0xcd, 0xb2, 0x24, 0x5e, 0x6d, 0x6c, 0x5d, 0x74, 0x82, 0x92, 0x2c, 0x16, 0x5f, 0x6c, 0x03, 0x0d,
0xa1, 0x17, 0x50, 0xc6, 0x45, 0xd1, 0x99, 0xdb, 0xcf, 0xfb, 0xb1, 0xf6, 0x7b, 0x3f, 0xd6, 0xfe,
0xee, 0xc7, 0xda, 0xaf, 0x7f, 0xe3, 0x8b, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xad, 0x69,
0x76, 0x6f, 0x03, 0x00, 0x00,
}
| _vendor/src/github.com/pingcap/tipb/go-binlog/binlog.pb.go | 0 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.9961187839508057,
0.024175450205802917,
0.00016604573465883732,
0.0001864673977252096,
0.1505628079175949
] |
{
"id": 5,
"code_window": [
"func (ts *TiDBStatement) BoundParams() [][]byte {\n",
"\treturn ts.boundParams\n",
"}\n",
"\n",
"// Reset implements IStatement Reset method.\n",
"func (ts *TiDBStatement) Reset() {\n",
"\tfor i := range ts.boundParams {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"// SetParamsType implements IStatement SetParamsType method.\n",
"func (ts *TiDBStatement) SetParamsType(paramsType []byte) {\n",
"\tts.paramsType = paramsType\n",
"}\n",
"\n",
"// GetParamsType implements IStatement GetParamsType method.\n",
"func (ts *TiDBStatement) GetParamsType() []byte {\n",
"\treturn ts.paramsType\n",
"}\n",
"\n"
],
"file_path": "server/driver_tidb.go",
"type": "add",
"edit_start_line_idx": 94
} | // Code generated by protoc-gen-gogo.
// source: pump.proto
// DO NOT EDIT!
/*
Package binlog is a generated protocol buffer package.
It is generated from these files:
pump.proto
It has these top-level messages:
WriteBinlogReq
WriteBinlogResp
PullBinlogReq
PullBinlogResp
Pos
Entity
*/
package binlog
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
io "io"
)
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type WriteBinlogReq struct {
// The identifier of tidb-cluster, which is given at tidb startup.
// Must specify the clusterID for each binlog to write.
ClusterID uint64 `protobuf:"varint,1,opt,name=clusterID,proto3" json:"clusterID,omitempty"`
// Payload bytes can be decoded back to binlog struct by the protobuf.
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
}
func (m *WriteBinlogReq) Reset() { *m = WriteBinlogReq{} }
func (m *WriteBinlogReq) String() string { return proto.CompactTextString(m) }
func (*WriteBinlogReq) ProtoMessage() {}
func (*WriteBinlogReq) Descriptor() ([]byte, []int) { return fileDescriptorPump, []int{0} }
type WriteBinlogResp struct {
// An empty errmsg returned means a successful write.
// Otherwise return the error description.
Errmsg string `protobuf:"bytes,1,opt,name=errmsg,proto3" json:"errmsg,omitempty"`
}
func (m *WriteBinlogResp) Reset() { *m = WriteBinlogResp{} }
func (m *WriteBinlogResp) String() string { return proto.CompactTextString(m) }
func (*WriteBinlogResp) ProtoMessage() {}
func (*WriteBinlogResp) Descriptor() ([]byte, []int) { return fileDescriptorPump, []int{1} }
type PullBinlogReq struct {
// Specifies which clusterID of binlog to pull.
ClusterID uint64 `protobuf:"varint,1,opt,name=clusterID,proto3" json:"clusterID,omitempty"`
// The position from which the binlog will be sent.
StartFrom Pos `protobuf:"bytes,2,opt,name=startFrom" json:"startFrom"`
// The max number of binlog in a batch to pull.
Batch int32 `protobuf:"varint,3,opt,name=batch,proto3" json:"batch,omitempty"`
}
func (m *PullBinlogReq) Reset() { *m = PullBinlogReq{} }
func (m *PullBinlogReq) String() string { return proto.CompactTextString(m) }
func (*PullBinlogReq) ProtoMessage() {}
func (*PullBinlogReq) Descriptor() ([]byte, []int) { return fileDescriptorPump, []int{2} }
func (m *PullBinlogReq) GetStartFrom() Pos {
if m != nil {
return m.StartFrom
}
return Pos{}
}
type PullBinlogResp struct {
	// An empty errmsg means a successful acquisition of binlogs.
Errmsg string `protobuf:"bytes,1,opt,name=errmsg,proto3" json:"errmsg,omitempty"`
// The binlog entities pulled in a batch
Entities []Entity `protobuf:"bytes,2,rep,name=entities" json:"entities"`
}
func (m *PullBinlogResp) Reset() { *m = PullBinlogResp{} }
func (m *PullBinlogResp) String() string { return proto.CompactTextString(m) }
func (*PullBinlogResp) ProtoMessage() {}
func (*PullBinlogResp) Descriptor() ([]byte, []int) { return fileDescriptorPump, []int{3} }
func (m *PullBinlogResp) GetEntities() []Entity {
if m != nil {
return m.Entities
}
return nil
}
// Binlogs are stored in a number of sequential files in a directory.
// The Pos describes the position of a binlog.
type Pos struct {
// The suffix of binlog file, like .000001 .000002
Suffix uint64 `protobuf:"varint,1,opt,name=suffix,proto3" json:"suffix,omitempty"`
// The binlog offset in a file.
Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
}
func (m *Pos) Reset() { *m = Pos{} }
func (m *Pos) String() string { return proto.CompactTextString(m) }
func (*Pos) ProtoMessage() {}
func (*Pos) Descriptor() ([]byte, []int) { return fileDescriptorPump, []int{4} }
type Entity struct {
// The position of the binlog entity.
Pos Pos `protobuf:"bytes,1,opt,name=pos" json:"pos"`
// The payload of binlog entity.
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
}
func (m *Entity) Reset() { *m = Entity{} }
func (m *Entity) String() string { return proto.CompactTextString(m) }
func (*Entity) ProtoMessage() {}
func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptorPump, []int{5} }
func (m *Entity) GetPos() Pos {
if m != nil {
return m.Pos
}
return Pos{}
}
func init() {
proto.RegisterType((*WriteBinlogReq)(nil), "binlog.WriteBinlogReq")
proto.RegisterType((*WriteBinlogResp)(nil), "binlog.WriteBinlogResp")
proto.RegisterType((*PullBinlogReq)(nil), "binlog.PullBinlogReq")
proto.RegisterType((*PullBinlogResp)(nil), "binlog.PullBinlogResp")
proto.RegisterType((*Pos)(nil), "binlog.Pos")
proto.RegisterType((*Entity)(nil), "binlog.Entity")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion3
// Client API for Pump service
type PumpClient interface {
// Writes a binlog to the local file on the pump machine.
// A response with an empty errmsg is returned if the binlog is written successfully.
WriteBinlog(ctx context.Context, in *WriteBinlogReq, opts ...grpc.CallOption) (*WriteBinlogResp, error)
// Obtains a batch of binlog from a given location.
PullBinlogs(ctx context.Context, in *PullBinlogReq, opts ...grpc.CallOption) (*PullBinlogResp, error)
}
type pumpClient struct {
cc *grpc.ClientConn
}
func NewPumpClient(cc *grpc.ClientConn) PumpClient {
return &pumpClient{cc}
}
func (c *pumpClient) WriteBinlog(ctx context.Context, in *WriteBinlogReq, opts ...grpc.CallOption) (*WriteBinlogResp, error) {
out := new(WriteBinlogResp)
err := grpc.Invoke(ctx, "/binlog.Pump/WriteBinlog", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *pumpClient) PullBinlogs(ctx context.Context, in *PullBinlogReq, opts ...grpc.CallOption) (*PullBinlogResp, error) {
out := new(PullBinlogResp)
err := grpc.Invoke(ctx, "/binlog.Pump/PullBinlogs", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Pump service
type PumpServer interface {
// Writes a binlog to the local file on the pump machine.
// A response with an empty errmsg is returned if the binlog is written successfully.
WriteBinlog(context.Context, *WriteBinlogReq) (*WriteBinlogResp, error)
// Obtains a batch of binlog from a given location.
PullBinlogs(context.Context, *PullBinlogReq) (*PullBinlogResp, error)
}
func RegisterPumpServer(s *grpc.Server, srv PumpServer) {
s.RegisterService(&_Pump_serviceDesc, srv)
}
func _Pump_WriteBinlog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(WriteBinlogReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PumpServer).WriteBinlog(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/binlog.Pump/WriteBinlog",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PumpServer).WriteBinlog(ctx, req.(*WriteBinlogReq))
}
return interceptor(ctx, in, info, handler)
}
func _Pump_PullBinlogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PullBinlogReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PumpServer).PullBinlogs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/binlog.Pump/PullBinlogs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PumpServer).PullBinlogs(ctx, req.(*PullBinlogReq))
}
return interceptor(ctx, in, info, handler)
}
var _Pump_serviceDesc = grpc.ServiceDesc{
ServiceName: "binlog.Pump",
HandlerType: (*PumpServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "WriteBinlog",
Handler: _Pump_WriteBinlog_Handler,
},
{
MethodName: "PullBinlogs",
Handler: _Pump_PullBinlogs_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: fileDescriptorPump,
}
func (m *WriteBinlogReq) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *WriteBinlogReq) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.ClusterID != 0 {
data[i] = 0x8
i++
i = encodeVarintPump(data, i, uint64(m.ClusterID))
}
if len(m.Payload) > 0 {
data[i] = 0x12
i++
i = encodeVarintPump(data, i, uint64(len(m.Payload)))
i += copy(data[i:], m.Payload)
}
return i, nil
}
func (m *WriteBinlogResp) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *WriteBinlogResp) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Errmsg) > 0 {
data[i] = 0xa
i++
i = encodeVarintPump(data, i, uint64(len(m.Errmsg)))
i += copy(data[i:], m.Errmsg)
}
return i, nil
}
func (m *PullBinlogReq) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *PullBinlogReq) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.ClusterID != 0 {
data[i] = 0x8
i++
i = encodeVarintPump(data, i, uint64(m.ClusterID))
}
data[i] = 0x12
i++
i = encodeVarintPump(data, i, uint64(m.StartFrom.Size()))
n1, err := m.StartFrom.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n1
if m.Batch != 0 {
data[i] = 0x18
i++
i = encodeVarintPump(data, i, uint64(m.Batch))
}
return i, nil
}
func (m *PullBinlogResp) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *PullBinlogResp) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Errmsg) > 0 {
data[i] = 0xa
i++
i = encodeVarintPump(data, i, uint64(len(m.Errmsg)))
i += copy(data[i:], m.Errmsg)
}
if len(m.Entities) > 0 {
for _, msg := range m.Entities {
data[i] = 0x12
i++
i = encodeVarintPump(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *Pos) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Pos) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Suffix != 0 {
data[i] = 0x8
i++
i = encodeVarintPump(data, i, uint64(m.Suffix))
}
if m.Offset != 0 {
data[i] = 0x10
i++
i = encodeVarintPump(data, i, uint64(m.Offset))
}
return i, nil
}
func (m *Entity) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Entity) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0xa
i++
i = encodeVarintPump(data, i, uint64(m.Pos.Size()))
n2, err := m.Pos.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n2
if len(m.Payload) > 0 {
data[i] = 0x12
i++
i = encodeVarintPump(data, i, uint64(len(m.Payload)))
i += copy(data[i:], m.Payload)
}
return i, nil
}
func encodeFixed64Pump(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Pump(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintPump(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *WriteBinlogReq) Size() (n int) {
var l int
_ = l
if m.ClusterID != 0 {
n += 1 + sovPump(uint64(m.ClusterID))
}
l = len(m.Payload)
if l > 0 {
n += 1 + l + sovPump(uint64(l))
}
return n
}
func (m *WriteBinlogResp) Size() (n int) {
var l int
_ = l
l = len(m.Errmsg)
if l > 0 {
n += 1 + l + sovPump(uint64(l))
}
return n
}
func (m *PullBinlogReq) Size() (n int) {
var l int
_ = l
if m.ClusterID != 0 {
n += 1 + sovPump(uint64(m.ClusterID))
}
l = m.StartFrom.Size()
n += 1 + l + sovPump(uint64(l))
if m.Batch != 0 {
n += 1 + sovPump(uint64(m.Batch))
}
return n
}
func (m *PullBinlogResp) Size() (n int) {
var l int
_ = l
l = len(m.Errmsg)
if l > 0 {
n += 1 + l + sovPump(uint64(l))
}
if len(m.Entities) > 0 {
for _, e := range m.Entities {
l = e.Size()
n += 1 + l + sovPump(uint64(l))
}
}
return n
}
func (m *Pos) Size() (n int) {
var l int
_ = l
if m.Suffix != 0 {
n += 1 + sovPump(uint64(m.Suffix))
}
if m.Offset != 0 {
n += 1 + sovPump(uint64(m.Offset))
}
return n
}
func (m *Entity) Size() (n int) {
var l int
_ = l
l = m.Pos.Size()
n += 1 + l + sovPump(uint64(l))
l = len(m.Payload)
if l > 0 {
n += 1 + l + sovPump(uint64(l))
}
return n
}
func sovPump(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozPump(x uint64) (n int) {
return sovPump(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *WriteBinlogReq) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: WriteBinlogReq: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: WriteBinlogReq: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
}
m.ClusterID = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ClusterID |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthPump
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Payload = append(m.Payload[:0], data[iNdEx:postIndex]...)
if m.Payload == nil {
m.Payload = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPump(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPump
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *WriteBinlogResp) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: WriteBinlogResp: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: WriteBinlogResp: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Errmsg", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPump
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Errmsg = string(data[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPump(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPump
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PullBinlogReq) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PullBinlogReq: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PullBinlogReq: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
}
m.ClusterID = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ClusterID |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StartFrom", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPump
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.StartFrom.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Batch", wireType)
}
m.Batch = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Batch |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipPump(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPump
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PullBinlogResp) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PullBinlogResp: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PullBinlogResp: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Errmsg", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPump
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Errmsg = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Entities", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPump
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Entities = append(m.Entities, Entity{})
if err := m.Entities[len(m.Entities)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPump(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPump
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Pos) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Pos: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Pos: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Suffix", wireType)
}
m.Suffix = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Suffix |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
}
m.Offset = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Offset |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipPump(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPump
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Entity) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Entity: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Entity: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Pos", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthPump
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Pos.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPump
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthPump
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Payload = append(m.Payload[:0], data[iNdEx:postIndex]...)
if m.Payload == nil {
m.Payload = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPump(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPump
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipPump(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPump
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPump
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPump
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthPump
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPump
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipPump(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthPump = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowPump = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("pump.proto", fileDescriptorPump) }
var fileDescriptorPump = []byte{
// 357 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x52, 0xcd, 0x4a, 0xf3, 0x40,
0x14, 0x6d, 0xbe, 0xb4, 0xf9, 0xec, 0x8d, 0x56, 0x19, 0x6a, 0x0d, 0x45, 0xaa, 0x8c, 0x1b, 0xdd,
0xb4, 0x52, 0x71, 0x2b, 0x52, 0xfc, 0xdd, 0x95, 0xd9, 0x08, 0xee, 0xd2, 0x3a, 0x89, 0x81, 0xb4,
0x33, 0xce, 0x4c, 0xc0, 0xbe, 0x81, 0x8f, 0xe0, 0x23, 0x75, 0xe9, 0x13, 0x88, 0xe8, 0x8b, 0x38,
0x99, 0xa4, 0x7f, 0x90, 0x82, 0x8b, 0x81, 0x9c, 0x73, 0xef, 0x9c, 0x73, 0xef, 0x9c, 0x00, 0xf0,
0x64, 0xc4, 0xdb, 0x5c, 0x30, 0xc5, 0x90, 0x33, 0x88, 0xc6, 0x31, 0x0b, 0x9b, 0xf5, 0x90, 0x85,
0xcc, 0x50, 0x9d, 0xf4, 0x2b, 0xab, 0xe2, 0x3b, 0xa8, 0x3d, 0x88, 0x48, 0xd1, 0x9e, 0x69, 0x22,
0xf4, 0x05, 0xed, 0x43, 0x75, 0x18, 0x27, 0x52, 0x51, 0x71, 0x7f, 0xe5, 0x59, 0x87, 0xd6, 0x71,
0x99, 0x2c, 0x08, 0xe4, 0xc1, 0x7f, 0xee, 0x4f, 0x62, 0xe6, 0x3f, 0x79, 0xff, 0x74, 0x6d, 0x93,
0xcc, 0x20, 0x3e, 0x81, 0xed, 0x15, 0x25, 0xc9, 0x51, 0x03, 0x1c, 0x2a, 0xc4, 0x48, 0x86, 0x46,
0xa7, 0x4a, 0x72, 0x84, 0x15, 0x6c, 0xf5, 0x93, 0x38, 0xfe, 0xab, 0x67, 0x07, 0xaa, 0x52, 0xf9,
0x42, 0xdd, 0x08, 0x36, 0x32, 0xae, 0x6e, 0xd7, 0x6d, 0x67, 0x5b, 0xb5, 0xfb, 0x4c, 0xf6, 0xca,
0xd3, 0xcf, 0x83, 0x12, 0x59, 0xf4, 0xa0, 0x3a, 0x54, 0x06, 0xbe, 0x1a, 0x3e, 0x7b, 0xb6, 0x6e,
0xae, 0x90, 0x0c, 0xe0, 0x47, 0xa8, 0x2d, 0xbb, 0xae, 0x9f, 0x0f, 0x9d, 0xc2, 0x06, 0x1d, 0xab,
0x48, 0x45, 0x54, 0x6a, 0x3f, 0x5b, 0xfb, 0xd5, 0x66, 0x7e, 0xd7, 0x29, 0x3f, 0xc9, 0x2d, 0xe7,
0x5d, 0xf8, 0x1c, 0x6c, 0x3d, 0x49, 0x2a, 0x28, 0x93, 0x20, 0x88, 0x5e, 0xf3, 0x25, 0x72, 0x94,
0xf2, 0x2c, 0x08, 0x24, 0x55, 0x66, 0x7c, 0x9b, 0xe4, 0x08, 0xdf, 0x82, 0x93, 0x09, 0xa2, 0x23,
0xb0, 0x39, 0x93, 0xe6, 0x5a, 0xe1, 0x76, 0x69, 0x75, 0xfd, 0xe3, 0x77, 0xdf, 0x2c, 0x28, 0xf7,
0x75, 0xe6, 0xe8, 0x12, 0xdc, 0xa5, 0x14, 0x50, 0x63, 0xa6, 0xb4, 0x1a, 0x72, 0x73, 0xaf, 0x90,
0x97, 0x1c, 0x97, 0xd0, 0x05, 0xb8, 0x8b, 0x67, 0x92, 0x68, 0x77, 0x3e, 0xcb, 0x72, 0x62, 0xcd,
0x46, 0x11, 0x9d, 0xde, 0xef, 0xed, 0x4c, 0xbf, 0x5b, 0xd6, 0x87, 0x3e, 0x5f, 0xfa, 0xbc, 0xff,
0xb4, 0x4a, 0x03, 0xc7, 0xfc, 0x6a, 0x67, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x3a, 0x1d,
0x93, 0x96, 0x02, 0x00, 0x00,
}
| _vendor/src/github.com/pingcap/tipb/go-binlog/pump.pb.go | 0 | https://github.com/pingcap/tidb/commit/b18033423193c39749403b34d87f80acdd5ced07 | [
0.9972530007362366,
0.05433102324604988,
0.0001655021624173969,
0.0001806044892873615,
0.2183523327112198
] |
{
"id": 0,
"code_window": [
"\t\toriStats, ok := sctx.GetSessionVars().GetSystemVar(variable.TiDBBuildStatsConcurrency)\n",
"\t\tif !ok {\n",
"\t\t\toriStats = strconv.Itoa(variable.DefBuildStatsConcurrency)\n",
"\t\t}\n",
"\t\toriScan := sctx.GetSessionVars().DistSQLScanConcurrency()\n",
"\t\toriIndex := sctx.GetSessionVars().IndexSerialScanConcurrency()\n",
"\t\toriIso, ok := sctx.GetSessionVars().GetSystemVar(variable.TxnIsolation)\n",
"\t\tif !ok {\n",
"\t\t\toriIso = \"REPEATABLE-READ\"\n",
"\t\t}\n",
"\t\tautoConcurrency, err1 := sctx.GetSessionVars().GetSessionOrGlobalSystemVar(ctx, variable.TiDBAutoBuildStatsConcurrency)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\toriScan := sctx.GetSessionVars().AnalyzeDistSQLScanConcurrency()\n"
],
"file_path": "pkg/executor/adapter.go",
"type": "replace",
"edit_start_line_idx": 515
} | // Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
stderrors "errors"
"math"
"slices"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/pkg/config"
"github.com/pingcap/tidb/pkg/domain"
"github.com/pingcap/tidb/pkg/expression"
"github.com/pingcap/tidb/pkg/metrics"
"github.com/pingcap/tidb/pkg/parser/ast"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/parser/mysql"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/statistics"
"github.com/pingcap/tidb/pkg/table"
"github.com/pingcap/tidb/pkg/tablecodec"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/chunk"
"github.com/pingcap/tidb/pkg/util/codec"
"github.com/pingcap/tidb/pkg/util/collate"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/memory"
"github.com/pingcap/tidb/pkg/util/ranger"
"github.com/pingcap/tidb/pkg/util/timeutil"
"github.com/pingcap/tipb/go-tipb"
"github.com/tiancaiamao/gp"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
// AnalyzeColumnsExecV2 is used to maintain v2 analyze process
type AnalyzeColumnsExecV2 struct {
*AnalyzeColumnsExec
}
func (e *AnalyzeColumnsExecV2) analyzeColumnsPushDownWithRetryV2(gp *gp.Pool) *statistics.AnalyzeResults {
analyzeResult := e.analyzeColumnsPushDownV2(gp)
if e.notRetryable(analyzeResult) {
return analyzeResult
}
finishJobWithLog(e.ctx, analyzeResult.Job, analyzeResult.Err)
statsHandle := domain.GetDomain(e.ctx).StatsHandle()
if statsHandle == nil {
return analyzeResult
}
var statsTbl *statistics.Table
tid := e.tableID.GetStatisticsID()
if tid == e.tableInfo.ID {
statsTbl = statsHandle.GetTableStats(e.tableInfo)
} else {
statsTbl = statsHandle.GetPartitionStats(e.tableInfo, tid)
}
if statsTbl == nil || statsTbl.RealtimeCount <= 0 {
return analyzeResult
}
newSampleRate := math.Min(1, float64(config.DefRowsForSampleRate)/float64(statsTbl.RealtimeCount))
if newSampleRate >= *e.analyzePB.ColReq.SampleRate {
return analyzeResult
}
*e.analyzePB.ColReq.SampleRate = newSampleRate
prepareV2AnalyzeJobInfo(e.AnalyzeColumnsExec, true)
AddNewAnalyzeJob(e.ctx, e.job)
StartAnalyzeJob(e.ctx, e.job)
return e.analyzeColumnsPushDownV2(gp)
}
// Do **not** retry if the analyze succeeded, the error is not an OOM error, it is not an auto-analyze, or the sample rate is not set.
func (e *AnalyzeColumnsExecV2) notRetryable(analyzeResult *statistics.AnalyzeResults) bool {
return analyzeResult.Err == nil || analyzeResult.Err != errAnalyzeOOM ||
!e.ctx.GetSessionVars().InRestrictedSQL ||
e.analyzePB.ColReq == nil || *e.analyzePB.ColReq.SampleRate <= 0
}
func (e *AnalyzeColumnsExecV2) analyzeColumnsPushDownV2(gp *gp.Pool) *statistics.AnalyzeResults {
var ranges []*ranger.Range
if hc := e.handleCols; hc != nil {
if hc.IsInt() {
ranges = ranger.FullIntRange(mysql.HasUnsignedFlag(hc.GetCol(0).RetType.GetFlag()))
} else {
ranges = ranger.FullNotNullRange()
}
} else {
ranges = ranger.FullIntRange(false)
}
collExtStats := e.ctx.GetSessionVars().EnableExtendedStats
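	// Collect the indexes that contain virtual or prefix columns; their NDV is computed by a separate index
	// pushdown (see handleNDVForSpecialIndexes) rather than from the row samples.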
specialIndexes := make([]*model.IndexInfo, 0, len(e.indexes))
specialIndexesOffsets := make([]int, 0, len(e.indexes))
for i, idx := range e.indexes {
isSpecial := false
for _, col := range idx.Columns {
colInfo := e.colsInfo[col.Offset]
isVirtualCol := colInfo.IsGenerated() && !colInfo.GeneratedStored
isPrefixCol := col.Length != types.UnspecifiedLength
if isVirtualCol || isPrefixCol {
isSpecial = true
break
}
}
if isSpecial {
specialIndexesOffsets = append(specialIndexesOffsets, i)
specialIndexes = append(specialIndexes, idx)
}
}
samplingStatsConcurrency, err := getBuildSamplingStatsConcurrency(e.ctx)
if err != nil {
e.memTracker.Release(e.memTracker.BytesConsumed())
return &statistics.AnalyzeResults{Err: err, Job: e.job}
}
statsConcurrncy, err := getBuildStatsConcurrency(e.ctx)
if err != nil {
e.memTracker.Release(e.memTracker.BytesConsumed())
return &statistics.AnalyzeResults{Err: err, Job: e.job}
}
idxNDVPushDownCh := make(chan analyzeIndexNDVTotalResult, 1)
	// subIndexWorkerWg would better be initialized in handleNDVForSpecialIndexes; however, if we do so, the Go race
	// detector reports an unexpected/unreasonable data race on subIndexWorkerWg when running the TestAnalyzeVirtualCol
	// test case with the `-race` flag.
wg := util.NewWaitGroupPool(gp)
wg.Run(func() {
e.handleNDVForSpecialIndexes(specialIndexes, idxNDVPushDownCh, statsConcurrncy)
})
defer wg.Wait()
count, hists, topNs, fmSketches, extStats, err := e.buildSamplingStats(gp, ranges, collExtStats, specialIndexesOffsets, idxNDVPushDownCh, samplingStatsConcurrency)
if err != nil {
e.memTracker.Release(e.memTracker.BytesConsumed())
return &statistics.AnalyzeResults{Err: err, Job: e.job}
}
cLen := len(e.analyzePB.ColReq.ColumnsInfo)
colGroupResult := &statistics.AnalyzeResult{
Hist: hists[cLen:],
TopNs: topNs[cLen:],
Fms: fmSketches[cLen:],
IsIndex: 1,
}
// Discard stats of _tidb_rowid.
	// The analyzing process keeps the order of results the same as the colsInfo in the analyze task,
	// and in `buildAnalyzeFullSamplingTask` we always place _tidb_rowid at the end of colsInfo, so if there are
	// stats for _tidb_rowid, they must be at the end of the column stats.
// Virtual column has no histogram yet. So we check nil here.
if hists[cLen-1] != nil && hists[cLen-1].ID == -1 {
cLen--
}
colResult := &statistics.AnalyzeResult{
Hist: hists[:cLen],
TopNs: topNs[:cLen],
Fms: fmSketches[:cLen],
}
return &statistics.AnalyzeResults{
TableID: e.tableID,
Ars: []*statistics.AnalyzeResult{colResult, colGroupResult},
Job: e.job,
StatsVer: e.StatsVersion,
Count: count,
Snapshot: e.snapshot,
ExtStats: extStats,
BaseCount: e.baseCount,
BaseModifyCnt: e.baseModifyCnt,
}
}
// decodeSampleDataWithVirtualColumn constructs the virtual columns by evaluating them from the decoded normal columns.
// It returns an error if decoding the samples or evaluating the virtual columns fails.
func (e *AnalyzeColumnsExecV2) decodeSampleDataWithVirtualColumn(
collector statistics.RowSampleCollector,
fieldTps []*types.FieldType,
virtualColIdx []int,
schema *expression.Schema,
) error {
totFts := make([]*types.FieldType, 0, e.schemaForVirtualColEval.Len())
for _, col := range e.schemaForVirtualColEval.Columns {
totFts = append(totFts, col.RetType)
}
chk := chunk.NewChunkWithCapacity(totFts, len(collector.Base().Samples))
decoder := codec.NewDecoder(chk, e.ctx.GetSessionVars().Location())
for _, sample := range collector.Base().Samples {
for i, columns := range sample.Columns {
if schema.Columns[i].VirtualExpr != nil {
continue
}
_, err := decoder.DecodeOne(columns.GetBytes(), i, e.schemaForVirtualColEval.Columns[i].RetType)
if err != nil {
return err
}
}
}
err := table.FillVirtualColumnValue(fieldTps, virtualColIdx, schema.Columns, e.colsInfo, e.ctx, chk)
if err != nil {
return err
}
iter := chunk.NewIterator4Chunk(chk)
for row, i := iter.Begin(), 0; row != iter.End(); row, i = iter.Next(), i+1 {
datums := row.GetDatumRow(totFts)
collector.Base().Samples[i].Columns = datums
}
return nil
}
func printAnalyzeMergeCollectorLog(oldRootCount, newRootCount, subCount, tableID, partitionID int64, isPartition bool, info string, index int) {
if index < 0 {
logutil.BgLogger().Debug(info,
zap.Int64("tableID", tableID),
zap.Int64("partitionID", partitionID),
zap.Bool("isPartitionTable", isPartition),
zap.Int64("oldRootCount", oldRootCount),
zap.Int64("newRootCount", newRootCount),
zap.Int64("subCount", subCount))
} else {
logutil.BgLogger().Debug(info,
zap.Int64("tableID", tableID),
zap.Int64("partitionID", partitionID),
zap.Bool("isPartitionTable", isPartition),
zap.Int64("oldRootCount", oldRootCount),
zap.Int64("newRootCount", newRootCount),
zap.Int64("subCount", subCount),
zap.Int("subCollectorIndex", index))
}
}
func (e *AnalyzeColumnsExecV2) buildSamplingStats(
gp *gp.Pool,
ranges []*ranger.Range,
needExtStats bool,
indexesWithVirtualColOffsets []int,
idxNDVPushDownCh chan analyzeIndexNDVTotalResult,
samplingStatsConcurrency int,
) (
count int64,
hists []*statistics.Histogram,
topns []*statistics.TopN,
fmSketches []*statistics.FMSketch,
extStats *statistics.ExtendedStatsColl,
err error,
) {
// Open memory tracker and resultHandler.
if err = e.open(ranges); err != nil {
return 0, nil, nil, nil, nil, err
}
defer func() {
if err1 := e.resultHandler.Close(); err1 != nil {
err = err1
}
}()
l := len(e.analyzePB.ColReq.ColumnsInfo) + len(e.analyzePB.ColReq.ColumnGroups)
rootRowCollector := statistics.NewRowSampleCollector(int(e.analyzePB.ColReq.SampleSize), e.analyzePB.ColReq.GetSampleRate(), l)
for i := 0; i < l; i++ {
rootRowCollector.Base().FMSketches = append(rootRowCollector.Base().FMSketches, statistics.NewFMSketch(maxSketchSize))
}
sc := e.ctx.GetSessionVars().StmtCtx
// Start workers to merge the result from collectors.
mergeResultCh := make(chan *samplingMergeResult, 1)
mergeTaskCh := make(chan []byte, 1)
var taskEg errgroup.Group
// Start read data from resultHandler and send them to mergeTaskCh.
taskEg.Go(func() (err error) {
defer func() {
if r := recover(); r != nil {
err = getAnalyzePanicErr(r)
}
}()
return readDataAndSendTask(e.ctx, e.resultHandler, mergeTaskCh, e.memTracker)
})
e.samplingMergeWg = &util.WaitGroupWrapper{}
e.samplingMergeWg.Add(samplingStatsConcurrency)
for i := 0; i < samplingStatsConcurrency; i++ {
id := i
gp.Go(func() {
e.subMergeWorker(mergeResultCh, mergeTaskCh, l, id)
})
}
// Merge the result from collectors.
mergeWorkerPanicCnt := 0
mergeEg, mergeCtx := errgroup.WithContext(context.Background())
mergeEg.Go(func() (err error) {
defer func() {
if r := recover(); r != nil {
err = getAnalyzePanicErr(r)
}
}()
for mergeWorkerPanicCnt < samplingStatsConcurrency {
mergeResult, ok := <-mergeResultCh
if !ok {
break
}
if mergeResult.err != nil {
err = mergeResult.err
if isAnalyzeWorkerPanic(mergeResult.err) {
mergeWorkerPanicCnt++
}
continue
}
oldRootCollectorSize := rootRowCollector.Base().MemSize
oldRootCollectorCount := rootRowCollector.Base().Count
// Merge the result from sub-collectors.
rootRowCollector.MergeCollector(mergeResult.collector)
newRootCollectorCount := rootRowCollector.Base().Count
printAnalyzeMergeCollectorLog(oldRootCollectorCount, newRootCollectorCount,
mergeResult.collector.Base().Count, e.tableID.TableID, e.tableID.PartitionID, e.tableID.IsPartitionTable(),
"merge subMergeWorker in AnalyzeColumnsExecV2", -1)
e.memTracker.Consume(rootRowCollector.Base().MemSize - oldRootCollectorSize - mergeResult.collector.Base().MemSize)
mergeResult.collector.DestroyAndPutToPool()
}
return err
})
err = taskEg.Wait()
if err != nil {
mergeCtx.Done()
if err1 := mergeEg.Wait(); err1 != nil {
err = stderrors.Join(err, err1)
}
return 0, nil, nil, nil, nil, getAnalyzePanicErr(err)
}
err = mergeEg.Wait()
defer e.memTracker.Release(rootRowCollector.Base().MemSize)
if err != nil {
return 0, nil, nil, nil, nil, err
}
// Decode the data from sample collectors.
virtualColIdx := buildVirtualColumnIndex(e.schemaForVirtualColEval, e.colsInfo)
if len(virtualColIdx) > 0 {
fieldTps := make([]*types.FieldType, 0, len(virtualColIdx))
for _, colOffset := range virtualColIdx {
fieldTps = append(fieldTps, e.schemaForVirtualColEval.Columns[colOffset].RetType)
}
err = e.decodeSampleDataWithVirtualColumn(rootRowCollector, fieldTps, virtualColIdx, e.schemaForVirtualColEval)
if err != nil {
return 0, nil, nil, nil, nil, err
}
} else {
		// If there is no virtual column, fall back to the normal decoding path.
for _, sample := range rootRowCollector.Base().Samples {
for i := range sample.Columns {
sample.Columns[i], err = tablecodec.DecodeColumnValue(sample.Columns[i].GetBytes(), &e.colsInfo[i].FieldType, sc.TimeZone())
if err != nil {
return 0, nil, nil, nil, nil, err
}
}
}
}
// Calculate handle from the row data for each row. It will be used to sort the samples.
for _, sample := range rootRowCollector.Base().Samples {
sample.Handle, err = e.handleCols.BuildHandleByDatums(sample.Columns)
if err != nil {
return 0, nil, nil, nil, nil, err
}
}
colLen := len(e.colsInfo)
	// The order of the samples is broken when merging samples from sub-collectors.
	// So we need to sort the samples by handle in order to calculate the correlation.
slices.SortFunc(rootRowCollector.Base().Samples, func(i, j *statistics.ReservoirRowSampleItem) int {
return i.Handle.Compare(j.Handle)
})
totalLen := len(e.colsInfo) + len(e.indexes)
hists = make([]*statistics.Histogram, totalLen)
topns = make([]*statistics.TopN, totalLen)
fmSketches = make([]*statistics.FMSketch, 0, totalLen)
buildResultChan := make(chan error, totalLen)
buildTaskChan := make(chan *samplingBuildTask, totalLen)
if totalLen < samplingStatsConcurrency {
samplingStatsConcurrency = totalLen
}
e.samplingBuilderWg = newNotifyErrorWaitGroupWrapper(gp, buildResultChan)
sampleCollectors := make([]*statistics.SampleCollector, len(e.colsInfo))
exitCh := make(chan struct{})
e.samplingBuilderWg.Add(samplingStatsConcurrency)
// Start workers to build stats.
for i := 0; i < samplingStatsConcurrency; i++ {
e.samplingBuilderWg.Run(func() {
e.subBuildWorker(buildResultChan, buildTaskChan, hists, topns, sampleCollectors, exitCh)
})
}
// Generate tasks for building stats.
for i, col := range e.colsInfo {
buildTaskChan <- &samplingBuildTask{
id: col.ID,
rootRowCollector: rootRowCollector,
tp: &col.FieldType,
isColumn: true,
slicePos: i,
}
fmSketches = append(fmSketches, rootRowCollector.Base().FMSketches[i])
}
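	// Wait for the NDV results of the special indexes computed by handleNDVForSpecialIndexes, and patch their
	// null counts and FM sketches into the root collector before generating the index build tasks.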
indexPushedDownResult := <-idxNDVPushDownCh
if indexPushedDownResult.err != nil {
close(exitCh)
e.samplingBuilderWg.Wait()
return 0, nil, nil, nil, nil, indexPushedDownResult.err
}
for _, offset := range indexesWithVirtualColOffsets {
ret := indexPushedDownResult.results[e.indexes[offset].ID]
rootRowCollector.Base().NullCount[colLen+offset] = ret.Count
rootRowCollector.Base().FMSketches[colLen+offset] = ret.Ars[0].Fms[0]
}
// Generate tasks for building stats for indexes.
for i, idx := range e.indexes {
buildTaskChan <- &samplingBuildTask{
id: idx.ID,
rootRowCollector: rootRowCollector,
tp: types.NewFieldType(mysql.TypeBlob),
isColumn: false,
slicePos: colLen + i,
}
fmSketches = append(fmSketches, rootRowCollector.Base().FMSketches[colLen+i])
}
close(buildTaskChan)
panicCnt := 0
for panicCnt < samplingStatsConcurrency {
err1, ok := <-buildResultChan
if !ok {
break
}
if err1 != nil {
err = err1
if isAnalyzeWorkerPanic(err1) {
panicCnt++
}
continue
}
}
defer func() {
totalSampleCollectorSize := int64(0)
for _, sampleCollector := range sampleCollectors {
if sampleCollector != nil {
totalSampleCollectorSize += sampleCollector.MemSize
}
}
e.memTracker.Release(totalSampleCollectorSize)
}()
if err != nil {
return 0, nil, nil, nil, nil, err
}
count = rootRowCollector.Base().Count
if needExtStats {
extStats, err = statistics.BuildExtendedStats(e.ctx, e.TableID.GetStatisticsID(), e.colsInfo, sampleCollectors)
if err != nil {
return 0, nil, nil, nil, nil, err
}
}
return
}
// handleNDVForSpecialIndexes deals with the logic to analyze the index containing the virtual column when the mode is full sampling.
func (e *AnalyzeColumnsExecV2) handleNDVForSpecialIndexes(indexInfos []*model.IndexInfo, totalResultCh chan analyzeIndexNDVTotalResult, statsConcurrncy int) {
defer func() {
if r := recover(); r != nil {
logutil.BgLogger().Error("analyze ndv for special index panicked", zap.Any("recover", r), zap.Stack("stack"))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
totalResultCh <- analyzeIndexNDVTotalResult{
err: getAnalyzePanicErr(r),
}
}
}()
tasks := e.buildSubIndexJobForSpecialIndex(indexInfos)
taskCh := make(chan *analyzeTask, len(tasks))
for _, task := range tasks {
AddNewAnalyzeJob(e.ctx, task.job)
}
resultsCh := make(chan *statistics.AnalyzeResults, len(tasks))
if len(tasks) < statsConcurrncy {
statsConcurrncy = len(tasks)
}
var subIndexWorkerWg = NewAnalyzeResultsNotifyWaitGroupWrapper(resultsCh)
subIndexWorkerWg.Add(statsConcurrncy)
for i := 0; i < statsConcurrncy; i++ {
subIndexWorkerWg.Run(func() { e.subIndexWorkerForNDV(taskCh, resultsCh) })
}
for _, task := range tasks {
taskCh <- task
}
close(taskCh)
panicCnt := 0
totalResult := analyzeIndexNDVTotalResult{
results: make(map[int64]*statistics.AnalyzeResults, len(indexInfos)),
}
var err error
for panicCnt < statsConcurrncy {
results, ok := <-resultsCh
if !ok {
break
}
if results.Err != nil {
err = results.Err
FinishAnalyzeJob(e.ctx, results.Job, err)
if isAnalyzeWorkerPanic(err) {
panicCnt++
}
continue
}
FinishAnalyzeJob(e.ctx, results.Job, nil)
totalResult.results[results.Ars[0].Hist[0].ID] = results
}
if err != nil {
totalResult.err = err
}
totalResultCh <- totalResult
}
// subIndexWorkerForNDV receives the tasks for each index and returns the results for them.
func (e *AnalyzeColumnsExecV2) subIndexWorkerForNDV(taskCh chan *analyzeTask, resultsCh chan *statistics.AnalyzeResults) {
var task *analyzeTask
defer func() {
if r := recover(); r != nil {
logutil.BgLogger().Error("analyze worker panicked", zap.Any("recover", r), zap.Stack("stack"))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultsCh <- &statistics.AnalyzeResults{
Err: getAnalyzePanicErr(r),
Job: task.job,
}
}
}()
for {
var ok bool
task, ok = <-taskCh
if !ok {
break
}
StartAnalyzeJob(e.ctx, task.job)
if task.taskType != idxTask {
resultsCh <- &statistics.AnalyzeResults{
Err: errors.Errorf("incorrect analyze type"),
Job: task.job,
}
continue
}
task.idxExec.job = task.job
resultsCh <- analyzeIndexNDVPushDown(task.idxExec)
}
}
// buildSubIndexJobForSpecialIndex builds the sub-index pushed-down tasks to calculate the NDV information for indexes containing virtual columns.
// This is needed because the calculation of virtual columns cannot be pushed down to the TiKV side.
func (e *AnalyzeColumnsExecV2) buildSubIndexJobForSpecialIndex(indexInfos []*model.IndexInfo) []*analyzeTask {
_, offset := timeutil.Zone(e.ctx.GetSessionVars().Location())
tasks := make([]*analyzeTask, 0, len(indexInfos))
sc := e.ctx.GetSessionVars().StmtCtx
var concurrency int
if e.ctx.GetSessionVars().InRestrictedSQL {
		// In restricted SQL, we use the default value of IndexSerialScanConcurrency. It is copied from tidb_sysproc_scan_concurrency.
concurrency = e.ctx.GetSessionVars().IndexSerialScanConcurrency()
} else {
concurrency = e.ctx.GetSessionVars().AnalyzeDistSQLScanConcurrency()
}
for _, indexInfo := range indexInfos {
base := baseAnalyzeExec{
ctx: e.ctx,
tableID: e.TableID,
concurrency: concurrency,
analyzePB: &tipb.AnalyzeReq{
Tp: tipb.AnalyzeType_TypeIndex,
Flags: sc.PushDownFlags(),
TimeZoneOffset: offset,
},
snapshot: e.snapshot,
}
idxExec := &AnalyzeIndexExec{
baseAnalyzeExec: base,
isCommonHandle: e.tableInfo.IsCommonHandle,
idxInfo: indexInfo,
}
idxExec.opts = make(map[ast.AnalyzeOptionType]uint64, len(ast.AnalyzeOptionString))
idxExec.opts[ast.AnalyzeOptNumTopN] = 0
idxExec.opts[ast.AnalyzeOptCMSketchDepth] = 0
idxExec.opts[ast.AnalyzeOptCMSketchWidth] = 0
idxExec.opts[ast.AnalyzeOptNumSamples] = 0
idxExec.opts[ast.AnalyzeOptNumBuckets] = 1
statsVersion := new(int32)
*statsVersion = statistics.Version1
// No Top-N
topnSize := int32(0)
idxExec.analyzePB.IdxReq = &tipb.AnalyzeIndexReq{
// One bucket to store the null for null histogram.
BucketSize: 1,
NumColumns: int32(len(indexInfo.Columns)),
TopNSize: &topnSize,
Version: statsVersion,
SketchSize: maxSketchSize,
}
if idxExec.isCommonHandle && indexInfo.Primary {
idxExec.analyzePB.Tp = tipb.AnalyzeType_TypeCommonHandle
}
// No CM-Sketch.
depth := int32(0)
width := int32(0)
idxExec.analyzePB.IdxReq.CmsketchDepth = &depth
idxExec.analyzePB.IdxReq.CmsketchWidth = &width
autoAnalyze := ""
if e.ctx.GetSessionVars().InRestrictedSQL {
autoAnalyze = "auto "
}
job := &statistics.AnalyzeJob{DBName: e.job.DBName, TableName: e.job.TableName, PartitionName: e.job.PartitionName, JobInfo: autoAnalyze + "analyze ndv for index " + indexInfo.Name.O}
idxExec.job = job
tasks = append(tasks, &analyzeTask{
taskType: idxTask,
idxExec: idxExec,
job: job,
})
}
return tasks
}
func (e *AnalyzeColumnsExecV2) subMergeWorker(resultCh chan<- *samplingMergeResult, taskCh <-chan []byte, l int, index int) {
// Only close the resultCh in the first worker.
closeTheResultCh := index == 0
defer func() {
if r := recover(); r != nil {
logutil.BgLogger().Error("analyze worker panicked", zap.Any("recover", r), zap.Stack("stack"))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultCh <- &samplingMergeResult{err: getAnalyzePanicErr(r)}
}
// Consume the remaining things.
for {
_, ok := <-taskCh
if !ok {
break
}
}
e.samplingMergeWg.Done()
if closeTheResultCh {
e.samplingMergeWg.Wait()
close(resultCh)
}
}()
failpoint.Inject("mockAnalyzeSamplingMergeWorkerPanic", func() {
panic("failpoint triggered")
})
failpoint.Inject("mockAnalyzeMergeWorkerSlowConsume", func(val failpoint.Value) {
times := val.(int)
for i := 0; i < times; i++ {
e.memTracker.Consume(5 << 20)
time.Sleep(100 * time.Millisecond)
}
})
retCollector := statistics.NewRowSampleCollector(int(e.analyzePB.ColReq.SampleSize), e.analyzePB.ColReq.GetSampleRate(), l)
for i := 0; i < l; i++ {
retCollector.Base().FMSketches = append(retCollector.Base().FMSketches, statistics.NewFMSketch(maxSketchSize))
}
for {
data, ok := <-taskCh
if !ok {
break
}
// Unmarshal the data.
dataSize := int64(cap(data))
colResp := &tipb.AnalyzeColumnsResp{}
err := colResp.Unmarshal(data)
if err != nil {
resultCh <- &samplingMergeResult{err: err}
return
}
// Consume the memory of the data.
colRespSize := int64(colResp.Size())
e.memTracker.Consume(colRespSize)
// Update processed rows.
subCollector := statistics.NewRowSampleCollector(int(e.analyzePB.ColReq.SampleSize), e.analyzePB.ColReq.GetSampleRate(), l)
subCollector.Base().FromProto(colResp.RowCollector, e.memTracker)
UpdateAnalyzeJob(e.ctx, e.job, subCollector.Base().Count)
// Print collect log.
oldRetCollectorSize := retCollector.Base().MemSize
oldRetCollectorCount := retCollector.Base().Count
retCollector.MergeCollector(subCollector)
newRetCollectorCount := retCollector.Base().Count
printAnalyzeMergeCollectorLog(oldRetCollectorCount, newRetCollectorCount, subCollector.Base().Count,
e.tableID.TableID, e.tableID.PartitionID, e.TableID.IsPartitionTable(),
"merge subCollector in concurrency in AnalyzeColumnsExecV2", index)
// Consume the memory of the result.
newRetCollectorSize := retCollector.Base().MemSize
subCollectorSize := subCollector.Base().MemSize
e.memTracker.Consume(newRetCollectorSize - oldRetCollectorSize - subCollectorSize)
e.memTracker.Release(dataSize + colRespSize)
subCollector.DestroyAndPutToPool()
}
resultCh <- &samplingMergeResult{collector: retCollector}
}
func (e *AnalyzeColumnsExecV2) subBuildWorker(resultCh chan error, taskCh chan *samplingBuildTask, hists []*statistics.Histogram, topns []*statistics.TopN, collectors []*statistics.SampleCollector, exitCh chan struct{}) {
defer func() {
if r := recover(); r != nil {
logutil.BgLogger().Error("analyze worker panicked", zap.Any("recover", r), zap.Stack("stack"))
metrics.PanicCounter.WithLabelValues(metrics.LabelAnalyze).Inc()
resultCh <- getAnalyzePanicErr(r)
}
}()
failpoint.Inject("mockAnalyzeSamplingBuildWorkerPanic", func() {
panic("failpoint triggered")
})
colLen := len(e.colsInfo)
bufferedMemSize := int64(0)
bufferedReleaseSize := int64(0)
defer e.memTracker.Consume(bufferedMemSize)
defer e.memTracker.Release(bufferedReleaseSize)
workLoop:
for {
select {
case task, ok := <-taskCh:
if !ok {
break workLoop
}
var collector *statistics.SampleCollector
if task.isColumn {
if e.colsInfo[task.slicePos].IsGenerated() && !e.colsInfo[task.slicePos].GeneratedStored {
hists[task.slicePos] = nil
topns[task.slicePos] = nil
continue
}
sampleNum := task.rootRowCollector.Base().Samples.Len()
sampleItems := make([]*statistics.SampleItem, 0, sampleNum)
				// Consume the mandatory memory up front (the empty SampleItems for every sample row); if the quota is exceeded, fail fast.
collectorMemSize := int64(sampleNum) * (8 + statistics.EmptySampleItemSize)
e.memTracker.Consume(collectorMemSize)
var collator collate.Collator
ft := e.colsInfo[task.slicePos].FieldType
// When it's new collation data, we need to use its collate key instead of original value because only
// the collate key can ensure the correct ordering.
// This is also corresponding to similar operation in (*statistics.Column).GetColumnRowCount().
if ft.EvalType() == types.ETString && ft.GetType() != mysql.TypeEnum && ft.GetType() != mysql.TypeSet {
collator = collate.GetCollator(ft.GetCollate())
}
for j, row := range task.rootRowCollector.Base().Samples {
if row.Columns[task.slicePos].IsNull() {
continue
}
val := row.Columns[task.slicePos]
					// Very large values are unlikely to repeat often, so skip recording them.
if len(val.GetBytes()) > statistics.MaxSampleValueLength {
continue
}
if collator != nil {
val.SetBytes(collator.Key(val.GetString()))
deltaSize := int64(cap(val.GetBytes()))
collectorMemSize += deltaSize
e.memTracker.BufferedConsume(&bufferedMemSize, deltaSize)
}
sampleItems = append(sampleItems, &statistics.SampleItem{
Value: val,
Ordinal: j,
})
// tmp memory usage
deltaSize := val.MemUsage() + 4 // content of SampleItem is copied
e.memTracker.BufferedConsume(&bufferedMemSize, deltaSize)
e.memTracker.BufferedRelease(&bufferedReleaseSize, deltaSize)
}
collector = &statistics.SampleCollector{
Samples: sampleItems,
NullCount: task.rootRowCollector.Base().NullCount[task.slicePos],
Count: task.rootRowCollector.Base().Count - task.rootRowCollector.Base().NullCount[task.slicePos],
FMSketch: task.rootRowCollector.Base().FMSketches[task.slicePos],
TotalSize: task.rootRowCollector.Base().TotalSizes[task.slicePos],
MemSize: collectorMemSize,
}
} else {
var tmpDatum types.Datum
var err error
idx := e.indexes[task.slicePos-colLen]
sampleNum := task.rootRowCollector.Base().Samples.Len()
sampleItems := make([]*statistics.SampleItem, 0, sampleNum)
				// Consume the mandatory memory up front (all SampleItems); if the quota is exceeded, fail fast.
				// The first 8 is the size of a reference, the second 8 is the size of "b := make([]byte, 0, 8)".
collectorMemSize := int64(sampleNum) * (8 + statistics.EmptySampleItemSize + 8)
e.memTracker.Consume(collectorMemSize)
errCtx := e.ctx.GetSessionVars().StmtCtx.ErrCtx()
indexSampleCollectLoop:
for _, row := range task.rootRowCollector.Base().Samples {
if len(idx.Columns) == 1 && row.Columns[idx.Columns[0].Offset].IsNull() {
continue
}
b := make([]byte, 0, 8)
for _, col := range idx.Columns {
						// If any column of the index value is too long, it is unlikely to repeat often, so skip the whole sample.
if len(row.Columns[col.Offset].GetBytes()) > statistics.MaxSampleValueLength {
continue indexSampleCollectLoop
}
if col.Length != types.UnspecifiedLength {
row.Columns[col.Offset].Copy(&tmpDatum)
ranger.CutDatumByPrefixLen(&tmpDatum, col.Length, &e.colsInfo[col.Offset].FieldType)
b, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx.TimeZone(), b, tmpDatum)
err = errCtx.HandleError(err)
if err != nil {
resultCh <- err
continue workLoop
}
continue
}
b, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx.TimeZone(), b, row.Columns[col.Offset])
err = errCtx.HandleError(err)
if err != nil {
resultCh <- err
continue workLoop
}
}
sampleItems = append(sampleItems, &statistics.SampleItem{
Value: types.NewBytesDatum(b),
})
// tmp memory usage
deltaSize := sampleItems[len(sampleItems)-1].Value.MemUsage()
e.memTracker.BufferedConsume(&bufferedMemSize, deltaSize)
e.memTracker.BufferedRelease(&bufferedReleaseSize, deltaSize)
}
collector = &statistics.SampleCollector{
Samples: sampleItems,
NullCount: task.rootRowCollector.Base().NullCount[task.slicePos],
Count: task.rootRowCollector.Base().Count - task.rootRowCollector.Base().NullCount[task.slicePos],
FMSketch: task.rootRowCollector.Base().FMSketches[task.slicePos],
TotalSize: task.rootRowCollector.Base().TotalSizes[task.slicePos],
MemSize: collectorMemSize,
}
}
if task.isColumn {
collectors[task.slicePos] = collector
}
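			// Only index collectors release their memory here; column collectors remain referenced in
			// `collectors` for later use and are released elsewhere.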
releaseCollectorMemory := func() {
if !task.isColumn {
e.memTracker.Release(collector.MemSize)
}
}
hist, topn, err := statistics.BuildHistAndTopN(e.ctx, int(e.opts[ast.AnalyzeOptNumBuckets]), int(e.opts[ast.AnalyzeOptNumTopN]), task.id, collector, task.tp, task.isColumn, e.memTracker, e.ctx.GetSessionVars().EnableExtendedStats)
if err != nil {
resultCh <- err
releaseCollectorMemory()
continue
}
finalMemSize := hist.MemoryUsage() + topn.MemoryUsage()
e.memTracker.Consume(finalMemSize)
hists[task.slicePos] = hist
topns[task.slicePos] = topn
resultCh <- nil
releaseCollectorMemory()
case <-exitCh:
return
}
}
}
type analyzeIndexNDVTotalResult struct {
results map[int64]*statistics.AnalyzeResults
err error
}
type samplingMergeResult struct {
collector statistics.RowSampleCollector
err error
}
type samplingBuildTask struct {
id int64
rootRowCollector statistics.RowSampleCollector
tp *types.FieldType
isColumn bool
slicePos int
}
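// readDataAndSendTask reads raw analyze responses from the table result handler, tracks their memory,
// and forwards them to the merge workers until the handler is exhausted or the statement is killed.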
func readDataAndSendTask(ctx sessionctx.Context, handler *tableResultHandler, mergeTaskCh chan []byte, memTracker *memory.Tracker) error {
	// Close mergeTaskCh once all tasks are sent so the merge workers know no more tasks are coming.
defer close(mergeTaskCh)
for {
failpoint.Inject("mockKillRunningV2AnalyzeJob", func() {
dom := domain.GetDomain(ctx)
dom.SysProcTracker().KillSysProcess(dom.GetAutoAnalyzeProcID())
})
if err := ctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil {
return err
}
failpoint.Inject("mockSlowAnalyzeV2", func() {
time.Sleep(1000 * time.Second)
})
data, err := handler.nextRaw(context.TODO())
if err != nil {
return errors.Trace(err)
}
if data == nil {
break
}
memTracker.Consume(int64(cap(data)))
mergeTaskCh <- data
}
return nil
}
| pkg/executor/analyze_col_v2.go | 1 | https://github.com/pingcap/tidb/commit/66401b5c06c50c2f5f595528a51a989c9a801759 | [
0.016772368922829628,
0.0003896417038049549,
0.00016017074813134968,
0.00017376999312546104,
0.0017323083011433482
] |
{
"id": 0,
"code_window": [
"\t\toriStats, ok := sctx.GetSessionVars().GetSystemVar(variable.TiDBBuildStatsConcurrency)\n",
"\t\tif !ok {\n",
"\t\t\toriStats = strconv.Itoa(variable.DefBuildStatsConcurrency)\n",
"\t\t}\n",
"\t\toriScan := sctx.GetSessionVars().DistSQLScanConcurrency()\n",
"\t\toriIndex := sctx.GetSessionVars().IndexSerialScanConcurrency()\n",
"\t\toriIso, ok := sctx.GetSessionVars().GetSystemVar(variable.TxnIsolation)\n",
"\t\tif !ok {\n",
"\t\t\toriIso = \"REPEATABLE-READ\"\n",
"\t\t}\n",
"\t\tautoConcurrency, err1 := sctx.GetSessionVars().GetSessionOrGlobalSystemVar(ctx, variable.TiDBAutoBuildStatsConcurrency)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\toriScan := sctx.GetSessionVars().AnalyzeDistSQLScanConcurrency()\n"
],
"file_path": "pkg/executor/adapter.go",
"type": "replace",
"edit_start_line_idx": 515
} | set tidb_cost_model_version=1;
CREATE TABLE `t` (
`a` char(10) DEFAULT NULL
);
CREATE TABLE `t1` (
`a` char(10) COLLATE utf8mb4_general_ci DEFAULT NULL
);
insert into t values ("A");
insert into t1 values ("a");
select a as a_col from t where t.a = all (select a collate utf8mb4_general_ci from t1);
a_col
select a as a_col from t where t.a != any (select a collate utf8mb4_general_ci from t1);
a_col
A
select a as a_col from t where t.a <= all (select a collate utf8mb4_general_ci from t1);
a_col
A
select a as a_col from t where t.a <= any (select a collate utf8mb4_general_ci from t1);
a_col
A
select a as a_col from t where t.a = (select a collate utf8mb4_general_ci from t1);
a_col
drop table if exists t;
create table t(a enum('a', 'b'), b varchar(20));
insert into t values ("a", "b");
select * from t where a in (a);
a b
a b
drop table if exists t;
create table t(a enum('a', 'b') charset utf8mb4 collate utf8mb4_general_ci, b varchar(20));
insert into t values ("b", "c");
insert into t values ("B", "b");
select * from t where 'B' collate utf8mb4_general_ci in (a);
a b
select * from t where 'B' collate utf8mb4_bin in (a);
a b
select * from t where 'B' collate utf8mb4_bin in (a, b);
a b
select * from t where 'B' collate utf8mb4_bin in (a, "a", 1);
a b
select * from t where 'B' collate utf8mb4_bin in (a, "B", 1);
a b
b c
select * from t where 1 in (a);
a b
select * from t where 2 in (a);
a b
b c
select * from t where 1 in (a, 0);
a b
select * from t where a between 1 and 2;
a b
b c
select * from t where a between 1 and "a";
a b
select * from t where a between "a" and "b";
a b
b c
select * from t where 2 between a and "c";
a b
select * from t where 2 between a and 3;
a b
b c
select * from t where "b" between a and a;
a b
b c
select * from t where "b" collate utf8mb4_bin between a and a;
a b
b c
select * from t where "b" between a and 3;
a b
drop table if exists t;
create table t(a set('a', 'b'), b varchar(20));
insert into t values ("a", "b");
select * from t where a in (a);
a b
a b
drop table if exists t;
create table t(a set('a', 'b') charset utf8mb4 collate utf8mb4_general_ci, b varchar(20));
insert into t values ("b", "c");
insert into t values ("B", "b");
select * from t where 'B' collate utf8mb4_general_ci in (a);
a b
select * from t where 'B' collate utf8mb4_bin in (a);
a b
select * from t where 'B' collate utf8mb4_bin in (a, b);
a b
select * from t where 'B' collate utf8mb4_bin in (a, "a", 1);
a b
select * from t where 'B' collate utf8mb4_bin in (a, "B", 1);
a b
b c
select * from t where 1 in (a);
a b
select * from t where 2 in (a);
a b
b c
select * from t where 1 in (a, 0);
a b
select * from t where a between 1 and 2;
a b
b c
select * from t where a between 1 and "a";
a b
select * from t where a between "a" and "b";
a b
b c
select * from t where 2 between a and "c";
a b
select * from t where 2 between a and 3;
a b
b c
select * from t where "b" between a and a;
a b
b c
select * from t where "b" collate utf8mb4_bin between a and a;
a b
b c
select * from t where "b" between a and 3;
a b
drop table if exists tbl_2;
create table tbl_2 ( col_20 bigint not null , col_21 smallint not null , col_22 decimal(24,10) default null , col_23 tinyint default 71 not null , col_24 bigint not null , col_25 tinyint default 18 , col_26 varchar(330) collate utf8_bin not null , col_27 char(77) collate utf8mb4_unicode_ci , col_28 char(46) collate utf8_general_ci not null , col_29 smallint unsigned not null , primary key idx_13 ( col_27(5) ) , key idx_14 ( col_24 ) , unique key idx_15 ( col_23,col_21,col_28,col_29,col_24 ) ) collate utf8_bin ;
insert ignore into tbl_2 values ( 5888267793391993829,5371,94.63,-109,5728076076919247337,89,'WUicqUTgdGJcjbC','SapBPqczTWWSN','xUSwH',49462 );
select col_25 from tbl_2 where ( tbl_2.col_27 > 'nSWYrpTH' or not( tbl_2.col_27 between 'CsWIuxlSjU' and 'SfwoyjUEzgg' ) ) and ( tbl_2.col_23 <= -95);
col_25
select col_25 from tbl_2 use index(primary) where ( tbl_2.col_27 > 'nSWYrpTH' or not( tbl_2.col_27 between 'CsWIuxlSjU' and 'SfwoyjUEzgg' ) ) and ( tbl_2.col_23 <= -95);
col_25
drop table if exists t1;
drop table if exists t2;
create table t1(a char(20));
create table t2(b binary(20), c binary(20));
insert into t1 value('-1');
insert into t2 value(0x2D31, 0x67);
insert into t2 value(0x2D31, 0x73);
select a from t1, t2 where t1.a between t2.b and t2.c;
a
select a from t1, t2 where cast(t1.a as binary(20)) between t2.b and t2.c;
a
-1
-1
drop table if exists t1;
drop table if exists t2;
create table t1(a char(20)) collate utf8mb4_general_ci;
create table t2(b binary(20), c char(20)) collate utf8mb4_general_ci;
insert into t1 values ('a');
insert into t2 values (0x0, 'A');
select * from t1, t2 where t1.a between t2.b and t2.c;
a b c
insert into t1 values ('-1');
insert into t2 values (0x2d31, '');
select * from t1, t2 where t1.a in (t2.b, 3);
a b c
drop table if exists t0;
drop table if exists t1;
CREATE TABLE t0(c0 BOOL, c1 INT);
CREATE TABLE t1 LIKE t0;
CREATE VIEW v0(c0) AS SELECT IS_IPV4(t0.c1) FROM t0, t1;
INSERT INTO t0(c0, c1) VALUES (true, 0);
INSERT INTO t1(c0, c1) VALUES (true, 2);
SELECT v0.c0 FROM v0;
c0
0
SELECT (v0.c0)NOT LIKE(BINARY v0.c0) FROM v0;
(v0.c0)NOT LIKE(BINARY v0.c0)
0
SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0);
c0
desc format='brief' SELECT v0.c0 FROM v0 WHERE (v0.c0)NOT LIKE(BINARY v0.c0);
id estRows task access object operator info
Projection 80000000.00 root is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20)))->Column#7
└─HashJoin 80000000.00 root CARTESIAN inner join
├─Selection(Build) 8000.00 root not(like(cast(is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20))), var_string(20)), cast(is_ipv4(cast(collation_check_use_collation.t0.c1, var_string(20))), binary(1)), 92))
│ └─TableReader 10000.00 root data:TableFullScan
│ └─TableFullScan 10000.00 cop[tikv] table:t0 keep order:false, stats:pseudo
└─TableReader(Probe) 10000.00 root data:TableFullScan
└─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
| tests/integrationtest/r/collation_check_use_collation_disabled.result | 0 | https://github.com/pingcap/tidb/commit/66401b5c06c50c2f5f595528a51a989c9a801759 | [
0.00017693618428893387,
0.00017312946147285402,
0.0001617110101506114,
0.00017356910393573344,
0.000003528237357386388
] |
{
"id": 0,
"code_window": [
"\t\toriStats, ok := sctx.GetSessionVars().GetSystemVar(variable.TiDBBuildStatsConcurrency)\n",
"\t\tif !ok {\n",
"\t\t\toriStats = strconv.Itoa(variable.DefBuildStatsConcurrency)\n",
"\t\t}\n",
"\t\toriScan := sctx.GetSessionVars().DistSQLScanConcurrency()\n",
"\t\toriIndex := sctx.GetSessionVars().IndexSerialScanConcurrency()\n",
"\t\toriIso, ok := sctx.GetSessionVars().GetSystemVar(variable.TxnIsolation)\n",
"\t\tif !ok {\n",
"\t\t\toriIso = \"REPEATABLE-READ\"\n",
"\t\t}\n",
"\t\tautoConcurrency, err1 := sctx.GetSessionVars().GetSessionOrGlobalSystemVar(ctx, variable.TiDBAutoBuildStatsConcurrency)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\toriScan := sctx.GetSessionVars().AnalyzeDistSQLScanConcurrency()\n"
],
"file_path": "pkg/executor/adapter.go",
"type": "replace",
"edit_start_line_idx": 515
} | [lightning]
task-info-schema-name = 'lightning_task_info_dupe_resolve_incremental'
index-concurrency = 10
table-concurrency = 10
[tikv-importer]
backend = "local"
duplicate-resolution = "remove"
incremental-import = true
[checkpoint]
enable = true
schema = "tidb_lightning_checkpoint_dupe_resolve_incremental1"
driver = "mysql"
[[mydumper.files]]
pattern = '(?i).*(-schema-trigger|-schema-post)\.sql$'
type = 'ignore'
[[mydumper.files]]
pattern = '(?i)^(?:[^/]*/)*([^/.]+)-schema-create\.sql$'
schema = '$1'
type = 'schema-schema'
[[mydumper.files]]
pattern = '(?i)^(?:[^/]*/)*([^/.]+)\.(.*?)-schema\.sql$'
schema = '$1'
table = '$2'
type = 'table-schema'
[[mydumper.files]]
pattern = '(?i)^(?:[^/]*/)*([^/.]+)\.(.*?)\.0\.sql$'
schema = '$1'
table = '$2'
key = '0'
type = 'sql'
[post-restore]
analyze = false
checksum = "optional"
| br/tests/lightning_duplicate_resolution_incremental/config1.toml | 0 | https://github.com/pingcap/tidb/commit/66401b5c06c50c2f5f595528a51a989c9a801759 | [
0.00017448727157898247,
0.000171674502780661,
0.0001661984424572438,
0.00017254424165003002,
0.00000290334196506592
] |
{
"id": 0,
"code_window": [
"\t\toriStats, ok := sctx.GetSessionVars().GetSystemVar(variable.TiDBBuildStatsConcurrency)\n",
"\t\tif !ok {\n",
"\t\t\toriStats = strconv.Itoa(variable.DefBuildStatsConcurrency)\n",
"\t\t}\n",
"\t\toriScan := sctx.GetSessionVars().DistSQLScanConcurrency()\n",
"\t\toriIndex := sctx.GetSessionVars().IndexSerialScanConcurrency()\n",
"\t\toriIso, ok := sctx.GetSessionVars().GetSystemVar(variable.TxnIsolation)\n",
"\t\tif !ok {\n",
"\t\t\toriIso = \"REPEATABLE-READ\"\n",
"\t\t}\n",
"\t\tautoConcurrency, err1 := sctx.GetSessionVars().GetSessionOrGlobalSystemVar(ctx, variable.TiDBAutoBuildStatsConcurrency)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\toriScan := sctx.GetSessionVars().AnalyzeDistSQLScanConcurrency()\n"
],
"file_path": "pkg/executor/adapter.go",
"type": "replace",
"edit_start_line_idx": 515
} | package storage
import (
"bytes"
"context"
"io"
"github.com/klauspost/compress/gzip"
"github.com/klauspost/compress/snappy"
"github.com/klauspost/compress/zstd"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"go.uber.org/zap"
)
// CompressType represents the type of compression.
type CompressType uint8
const (
// NoCompression won't compress given bytes.
NoCompression CompressType = iota
// Gzip will compress given bytes in gzip format.
Gzip
// Snappy will compress given bytes in snappy format.
Snappy
// Zstd will compress given bytes in zstd format.
Zstd
)
// DecompressConfig is the config used for decompression.
type DecompressConfig struct {
// ZStdDecodeConcurrency only used for ZStd decompress, see WithDecoderConcurrency.
// if not 1, ZStd will decode file asynchronously.
ZStdDecodeConcurrency int
}
type flusher interface {
Flush() error
}
type emptyFlusher struct{}
func (*emptyFlusher) Flush() error {
return nil
}
type interceptBuffer interface {
io.WriteCloser
flusher
Len() int
Cap() int
Bytes() []byte
Reset()
Compressed() bool
}
func createSuffixString(compressType CompressType) string {
txtSuffix := ".txt"
switch compressType {
case Gzip:
txtSuffix += ".gz"
case Snappy:
txtSuffix += ".snappy"
case Zstd:
txtSuffix += ".zst"
default:
return ""
}
return txtSuffix
}
func newInterceptBuffer(chunkSize int, compressType CompressType) interceptBuffer {
if compressType == NoCompression {
return newNoCompressionBuffer(chunkSize)
}
return newSimpleCompressBuffer(chunkSize, compressType)
}
func newCompressWriter(compressType CompressType, w io.Writer) simpleCompressWriter {
switch compressType {
case Gzip:
return gzip.NewWriter(w)
case Snappy:
return snappy.NewBufferedWriter(w)
case Zstd:
newWriter, err := zstd.NewWriter(w)
if err != nil {
log.Warn("Met error when creating new writer for Zstd type file", zap.Error(err))
}
return newWriter
default:
return nil
}
}
func newCompressReader(compressType CompressType, cfg DecompressConfig, r io.Reader) (io.Reader, error) {
switch compressType {
case Gzip:
return gzip.NewReader(r)
case Snappy:
return snappy.NewReader(r), nil
case Zstd:
options := []zstd.DOption{}
if cfg.ZStdDecodeConcurrency > 0 {
options = append(options, zstd.WithDecoderConcurrency(cfg.ZStdDecodeConcurrency))
}
return zstd.NewReader(r, options...)
default:
return nil, nil
}
}
type noCompressionBuffer struct {
*bytes.Buffer
}
func (*noCompressionBuffer) Flush() error {
return nil
}
func (*noCompressionBuffer) Close() error {
return nil
}
func (*noCompressionBuffer) Compressed() bool {
return false
}
func newNoCompressionBuffer(chunkSize int) *noCompressionBuffer {
return &noCompressionBuffer{bytes.NewBuffer(make([]byte, 0, chunkSize))}
}
type simpleCompressWriter interface {
io.WriteCloser
flusher
}
type simpleCompressBuffer struct {
*bytes.Buffer
compressWriter simpleCompressWriter
cap int
}
func (b *simpleCompressBuffer) Write(p []byte) (int, error) {
written, err := b.compressWriter.Write(p)
return written, errors.Trace(err)
}
func (b *simpleCompressBuffer) Len() int {
return b.Buffer.Len()
}
func (b *simpleCompressBuffer) Cap() int {
return b.cap
}
func (b *simpleCompressBuffer) Reset() {
b.Buffer.Reset()
}
func (b *simpleCompressBuffer) Flush() error {
return b.compressWriter.Flush()
}
func (b *simpleCompressBuffer) Close() error {
return b.compressWriter.Close()
}
func (*simpleCompressBuffer) Compressed() bool {
return true
}
func newSimpleCompressBuffer(chunkSize int, compressType CompressType) *simpleCompressBuffer {
bf := bytes.NewBuffer(make([]byte, 0, chunkSize))
return &simpleCompressBuffer{
Buffer: bf,
cap: chunkSize,
compressWriter: newCompressWriter(compressType, bf),
}
}
type bufferedWriter struct {
buf interceptBuffer
writer ExternalFileWriter
}
func (u *bufferedWriter) Write(ctx context.Context, p []byte) (int, error) {
bytesWritten := 0
for u.buf.Len()+len(p) > u.buf.Cap() {
// We won't fit p in this chunk
// Is this chunk full?
chunkToFill := u.buf.Cap() - u.buf.Len()
if chunkToFill > 0 {
// It's not full so we write enough of p to fill it
prewrite := p[0:chunkToFill]
w, err := u.buf.Write(prewrite)
bytesWritten += w
if err != nil {
return bytesWritten, errors.Trace(err)
}
p = p[w:]
			// Keep filling the buffer: with compression the underlying buffer may still have room,
			// since the compressed output can be smaller than Cap - Len.
if u.buf.Compressed() {
continue
}
}
_ = u.buf.Flush()
err := u.uploadChunk(ctx)
if err != nil {
return 0, errors.Trace(err)
}
}
w, err := u.buf.Write(p)
bytesWritten += w
return bytesWritten, errors.Trace(err)
}
func (u *bufferedWriter) uploadChunk(ctx context.Context) error {
if u.buf.Len() == 0 {
return nil
}
b := u.buf.Bytes()
u.buf.Reset()
_, err := u.writer.Write(ctx, b)
return errors.Trace(err)
}
func (u *bufferedWriter) Close(ctx context.Context) error {
u.buf.Close()
err := u.uploadChunk(ctx)
if err != nil {
return errors.Trace(err)
}
return u.writer.Close(ctx)
}
// NewUploaderWriter wraps the Writer interface over an uploader.
func NewUploaderWriter(writer ExternalFileWriter, chunkSize int, compressType CompressType) ExternalFileWriter {
return newBufferedWriter(writer, chunkSize, compressType)
}
// newBufferedWriter is used to build a buffered writer.
func newBufferedWriter(writer ExternalFileWriter, chunkSize int, compressType CompressType) *bufferedWriter {
return &bufferedWriter{
writer: writer,
buf: newInterceptBuffer(chunkSize, compressType),
}
}
// BytesWriter is a Writer implementation on top of bytes.Buffer that is useful for testing.
type BytesWriter struct {
buf *bytes.Buffer
}
// Write delegates to bytes.Buffer.
func (u *BytesWriter) Write(_ context.Context, p []byte) (int, error) {
return u.buf.Write(p)
}
// Close delegates to bytes.Buffer.
func (*BytesWriter) Close(_ context.Context) error {
// noop
return nil
}
// Bytes delegates to bytes.Buffer.
func (u *BytesWriter) Bytes() []byte {
return u.buf.Bytes()
}
// String delegates to bytes.Buffer.
func (u *BytesWriter) String() string {
return u.buf.String()
}
// Reset delegates to bytes.Buffer.
func (u *BytesWriter) Reset() {
u.buf.Reset()
}
// NewBufferWriter creates a Writer that simply writes to a buffer (useful for testing).
func NewBufferWriter() *BytesWriter {
return &BytesWriter{buf: &bytes.Buffer{}}
}
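// Illustrative usage sketch (not part of the original file): wrap an ExternalFileWriter with gzip
// compression and a 1 MiB chunk size, write a payload, then close to flush the final chunk. The
// variables `underlying`, `ctx` and `payload` are placeholders assumed to exist in the caller.
//
//	w := NewUploaderWriter(underlying, 1<<20, Gzip)
//	if _, err := w.Write(ctx, payload); err != nil {
//		return err
//	}
//	return w.Close(ctx)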
| br/pkg/storage/writer.go | 0 | https://github.com/pingcap/tidb/commit/66401b5c06c50c2f5f595528a51a989c9a801759 | [
0.0002245131036033854,
0.00017105875303968787,
0.00016075321764219552,
0.00016879512986633927,
0.000011422379429859575
] |
{
"id": 0,
"code_window": [
"\tif err != nil {\n",
"\t\treturn\n",
"\t}\n",
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsc := dest.StorageClass\n",
"\tif sc == \"\" {\n",
"\t\tsc = objInfo.StorageClass\n",
"\t}\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "add",
"edit_start_line_idx": 119
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"net/http"
"time"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return nil, BucketReplicationConfigNotFound{Bucket: bucketName}
}
return globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
}
// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.ReplicationArn)
if clnt == nil {
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
return false, BucketReplicationDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {
lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, rCfg.GetDestination().Bucket)
if err != nil || lock != "Enabled" {
return false, BucketReplicationDestinationMissingLock{Bucket: rCfg.GetDestination().Bucket}
}
}
}
// validate replication ARN against target endpoint
c, ok := globalBucketTargetSys.arnRemotesMap[rCfg.ReplicationArn]
if ok {
if c.EndpointURL().String() == clnt.EndpointURL().String() {
sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
return sameTarget, nil
}
}
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
// mustReplicate returns true if object meets replication criteria.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
if globalIsGateway {
return false
}
if rs, ok := meta[xhttp.AmzBucketReplicationStatus]; ok {
replStatus = rs
}
if replication.StatusType(replStatus) == replication.Replica {
return false
}
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return false
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return false
}
opts := replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(meta),
}
tagStr, ok := meta[xhttp.AmzObjectTagging]
if ok {
opts.UserTags = tagStr
}
return cfg.Replicate(opts)
}
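// putReplicationOpts builds the PutObjectOptions used to replicate objInfo to the destination,
// carrying over user metadata, tags, content headers, object-lock settings, SSE-S3 encryption and
// the source version, status and modification time.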
func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if k == xhttp.AmzBucketReplicationStatus {
continue
}
meta[k] = v
}
tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: dest.StorageClass,
ReplicationVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationMTime: objInfo.ModTime,
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}
return
}
// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, bucket, object, versionID string, objectAPI ObjectLayer, eventArg *eventArgs, healPending bool) {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return
}
tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.ReplicationArn)
if tgt == nil {
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
return
}
defer gr.Close()
objInfo := gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
return
}
dest := cfg.GetDestination()
if dest.Bucket == "" {
return
}
// In the rare event that replication is in pending state either due to
// server shut down/crash before replication completed or healing and PutObject
// race - do an additional stat to see if the version ID exists
if healPending {
_, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
if err == nil {
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed.
return
}
}
putOpts := putReplicationOpts(dest, objInfo)
replicationStatus := replication.Complete
_, err = tgt.PutObject(ctx, dest.Bucket, object, gr, size, "", "", putOpts)
if err != nil {
replicationStatus = replication.Failed
// Notify replication failure event.
if eventArg == nil {
eventArg = &eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
}
}
eventArg.EventName = event.OperationReplicationFailed
eventArg.Object.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
sendEvent(*eventArg)
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}
objInfo.metadataOnly = true // Perform only metadata updates.
if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{VersionID: objInfo.VersionID}); err != nil {
logger.LogIf(ctx, err)
}
}
| cmd/bucket-replication.go | 1 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.998826801776886,
0.18419671058654785,
0.00016496081661898643,
0.0018151882104575634,
0.3813583552837372
] |
{
"id": 0,
"code_window": [
"\tif err != nil {\n",
"\t\treturn\n",
"\t}\n",
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsc := dest.StorageClass\n",
"\tif sc == \"\" {\n",
"\t\tsc = objInfo.StorageClass\n",
"\t}\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "add",
"edit_start_line_idx": 119
} | /*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { shallow } from "enzyme"
import { ObjectContainer } from "../ObjectContainer"
describe("ObjectContainer", () => {
it("should render without crashing", () => {
shallow(<ObjectContainer object={{ name: "test1.jpg" }} />)
})
it("should render ObjectItem with props", () => {
const wrapper = shallow(<ObjectContainer object={{ name: "test1.jpg" }} />)
expect(wrapper.find("Connect(ObjectItem)").length).toBe(1)
expect(wrapper.find("Connect(ObjectItem)").prop("name")).toBe("test1.jpg")
})
it("should pass actions to ObjectItem", () => {
const wrapper = shallow(
<ObjectContainer object={{ name: "test1.jpg" }} checkedObjectsCount={0} />
)
expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).not.toBe(
undefined
)
})
it("should pass empty actions to ObjectItem when checkedObjectCount is more than 0", () => {
const wrapper = shallow(
<ObjectContainer object={{ name: "test1.jpg" }} checkedObjectsCount={1} />
)
expect(wrapper.find("Connect(ObjectItem)").prop("actionButtons")).toBe(
undefined
)
})
})
| browser/app/js/objects/__tests__/ObjectContainer.test.js | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00017733308777678758,
0.00017110277258325368,
0.00016772405069787055,
0.00017128449690062553,
0.0000034976787901541684
] |
{
"id": 0,
"code_window": [
"\tif err != nil {\n",
"\t\treturn\n",
"\t}\n",
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsc := dest.StorageClass\n",
"\tif sc == \"\" {\n",
"\t\tsc = objInfo.StorageClass\n",
"\t}\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "add",
"edit_start_line_idx": 119
} | // +build netbsd
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
// GetInfo returns total and free bytes available in a directory, e.g. `/`.
func GetInfo(path string) (info Info, err error) {
return Info{}, nil
}
| pkg/disk/stat_fallback.go | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.0019237661035731435,
0.0007573353941552341,
0.00017367441614624113,
0.00017456564819440246,
0.000824791204649955
] |
{
"id": 0,
"code_window": [
"\tif err != nil {\n",
"\t\treturn\n",
"\t}\n",
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsc := dest.StorageClass\n",
"\tif sc == \"\" {\n",
"\t\tsc = objInfo.StorageClass\n",
"\t}\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "add",
"edit_start_line_idx": 119
} | # Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 30
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 15
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- "security"
- "pending discussion"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
markComment: >-
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed after 15 days if no further activity
occurs. Thank you for your contributions.
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 1
# Limit to only `issues` or `pulls`
# only: issues
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
# daysUntilStale: 30
# markComment: >
# This pull request has been automatically marked as stale because it has not had
# recent activity. It will be closed if no further activity occurs. Thank you
# for your contributions.
# issues:
# exemptLabels:
# - confirmed
| .github/stale.yml | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00017309577378910035,
0.0001672749494900927,
0.00016324040188919753,
0.000166540703503415,
0.000003840394583676243
] |
{
"id": 1,
"code_window": [
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n",
"\t\tStorageClass: dest.StorageClass,\n",
"\t\tReplicationVersionID: objInfo.VersionID,\n",
"\t\tReplicationStatus: miniogo.ReplicationStatusReplica,\n",
"\t\tReplicationMTime: objInfo.ModTime,\n",
"\t}\n",
"\tif mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tStorageClass: sc,\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "replace",
"edit_start_line_idx": 124
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"net/http"
"time"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return nil, BucketReplicationConfigNotFound{Bucket: bucketName}
}
return globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
}
// validateReplicationDestination returns error if replication destination bucket missing or not configured
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.ReplicationArn)
if clnt == nil {
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
return false, BucketReplicationDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {
lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, rCfg.GetDestination().Bucket)
if err != nil || lock != "Enabled" {
return false, BucketReplicationDestinationMissingLock{Bucket: rCfg.GetDestination().Bucket}
}
}
}
// validate replication ARN against target endpoint
c, ok := globalBucketTargetSys.arnRemotesMap[rCfg.ReplicationArn]
if ok {
if c.EndpointURL().String() == clnt.EndpointURL().String() {
sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
return sameTarget, nil
}
}
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
// mustReplicate returns true if object meets replication criteria.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
if globalIsGateway {
return false
}
if rs, ok := meta[xhttp.AmzBucketReplicationStatus]; ok {
replStatus = rs
}
if replication.StatusType(replStatus) == replication.Replica {
return false
}
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return false
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return false
}
opts := replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(meta),
}
tagStr, ok := meta[xhttp.AmzObjectTagging]
if ok {
opts.UserTags = tagStr
}
return cfg.Replicate(opts)
}
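// putReplicationOpts builds the PutObjectOptions used to replicate objInfo to the destination,
// carrying over user metadata, tags, content headers, object-lock settings, SSE-S3 encryption and
// the source version, status and modification time.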
func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if k == xhttp.AmzBucketReplicationStatus {
continue
}
meta[k] = v
}
tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: dest.StorageClass,
ReplicationVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationMTime: objInfo.ModTime,
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}
return
}
// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, bucket, object, versionID string, objectAPI ObjectLayer, eventArg *eventArgs, healPending bool) {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return
}
tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.ReplicationArn)
if tgt == nil {
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
return
}
defer gr.Close()
objInfo := gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
return
}
dest := cfg.GetDestination()
if dest.Bucket == "" {
return
}
// In the rare event that replication is in pending state either due to
// server shut down/crash before replication completed or healing and PutObject
// race - do an additional stat to see if the version ID exists
if healPending {
_, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
if err == nil {
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed.
return
}
}
putOpts := putReplicationOpts(dest, objInfo)
replicationStatus := replication.Complete
_, err = tgt.PutObject(ctx, dest.Bucket, object, gr, size, "", "", putOpts)
if err != nil {
replicationStatus = replication.Failed
// Notify replication failure event.
if eventArg == nil {
eventArg = &eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
}
}
eventArg.EventName = event.OperationReplicationFailed
eventArg.Object.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
sendEvent(*eventArg)
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}
objInfo.metadataOnly = true // Perform only metadata updates.
if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{VersionID: objInfo.VersionID}); err != nil {
logger.LogIf(ctx, err)
}
}
| cmd/bucket-replication.go | 1 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.9951688647270203,
0.1891874223947525,
0.00016235698421951383,
0.0017508440650999546,
0.3805350959300995
] |
{
"id": 1,
"code_window": [
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n",
"\t\tStorageClass: dest.StorageClass,\n",
"\t\tReplicationVersionID: objInfo.VersionID,\n",
"\t\tReplicationStatus: miniogo.ReplicationStatusReplica,\n",
"\t\tReplicationMTime: objInfo.ModTime,\n",
"\t}\n",
"\tif mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tStorageClass: sc,\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "replace",
"edit_start_line_idx": 124
} | // +build linux darwin openbsd netbsd solaris
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sys
import (
"runtime"
"syscall"
)
// GetMaxOpenFileLimit - returns maximum file descriptor number that can be opened by this process.
func GetMaxOpenFileLimit() (curLimit, maxLimit uint64, err error) {
var rlimit syscall.Rlimit
if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err == nil {
curLimit = rlimit.Cur
maxLimit = rlimit.Max
}
return curLimit, maxLimit, err
}
// SetMaxOpenFileLimit - sets maximum file descriptor number that can be opened by this process.
func SetMaxOpenFileLimit(curLimit, maxLimit uint64) error {
if runtime.GOOS == "darwin" && curLimit > 10240 {
// The max file limit is 10240, even though
// the max returned by Getrlimit is 1<<63-1.
// This is OPEN_MAX in sys/syslimits.h.
// refer https://github.com/golang/go/issues/30401
curLimit = 10240
}
rlimit := syscall.Rlimit{Cur: curLimit, Max: maxLimit}
return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit)
}
| pkg/sys/rlimit-file_nix.go | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.0002966789179481566,
0.0001961039670277387,
0.00016445071378257126,
0.00017163994198199362,
0.000050438069592928514
] |
{
"id": 1,
"code_window": [
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n",
"\t\tStorageClass: dest.StorageClass,\n",
"\t\tReplicationVersionID: objInfo.VersionID,\n",
"\t\tReplicationStatus: miniogo.ReplicationStatusReplica,\n",
"\t\tReplicationMTime: objInfo.ModTime,\n",
"\t}\n",
"\tif mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tStorageClass: sc,\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "replace",
"edit_start_line_idx": 124
} | /*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strings"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/logger"
"github.com/minio/sio"
)
const (
// Represents Cache format json holding details on all other cache drives in use.
formatCache = "cache"
// formatCacheV1.Cache.Version
formatCacheVersionV1 = "1"
formatCacheVersionV2 = "2"
formatMetaVersion1 = "1"
formatCacheV1DistributionAlgo = "CRCMOD"
)
// Represents the current cache structure with list of
// disks comprising the disk cache
// formatCacheV1 - structure holds format config version '1'.
type formatCacheV1 struct {
formatMetaV1
Cache struct {
Version string `json:"version"` // Version of 'cache' format.
This string `json:"this"` // This field carries assigned disk uuid.
// Disks field carries the input disk order generated the first
// time when fresh disks were supplied.
Disks []string `json:"disks"`
// Distribution algorithm represents the hashing algorithm
// to pick the right set index for an object.
DistributionAlgo string `json:"distributionAlgo"`
} `json:"cache"` // Cache field holds cache format.
}
// formatCacheV2 is same as formatCacheV1
type formatCacheV2 = formatCacheV1
// Used to detect the version of "cache" format.
type formatCacheVersionDetect struct {
Cache struct {
Version string `json:"version"`
} `json:"cache"`
}
// Return a slice of format, to be used to format uninitialized disks.
func newFormatCacheV2(drives []string) []*formatCacheV2 {
diskCount := len(drives)
var disks = make([]string, diskCount)
var formats = make([]*formatCacheV2, diskCount)
for i := 0; i < diskCount; i++ {
format := &formatCacheV2{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV2
format.Cache.DistributionAlgo = formatCacheV1DistributionAlgo
format.Cache.This = mustGetUUID()
formats[i] = format
disks[i] = formats[i].Cache.This
}
for i := 0; i < diskCount; i++ {
format := formats[i]
format.Cache.Disks = disks
}
return formats
}
// Returns formatCache.Cache.Version
func formatCacheGetVersion(r io.ReadSeeker) (string, error) {
format := &formatCacheVersionDetect{}
if err := jsonLoad(r, format); err != nil {
return "", err
}
return format.Cache.Version, nil
}
// Creates a new cache format.json if unformatted.
func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
// open file using READ & WRITE permission
var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
// Close the locked file upon return.
defer file.Close()
fi, err := file.Stat()
if err != nil {
return err
}
if fi.Size() != 0 {
// format.json already got created because of another minio process's createFormatCache()
return nil
}
return jsonSave(file, format)
}
// This function creates a cache format file on disk and returns a slice
// of format cache config
func initFormatCache(ctx context.Context, drives []string) (formats []*formatCacheV2, err error) {
nformats := newFormatCacheV2(drives)
for i, drive := range drives {
if err = os.MkdirAll(pathJoin(drive, minioMetaBucket), 0777); err != nil {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err
}
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
// Fresh disk - create format.json for this cfs
if err = createFormatCache(cacheFormatPath, nformats[i]); err != nil {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err
}
}
return nformats, nil
}
func loadFormatCache(ctx context.Context, drives []string) ([]*formatCacheV2, bool, error) {
formats := make([]*formatCacheV2, len(drives))
var formatV2 *formatCacheV2
migrating := false
for i, drive := range drives {
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
f, err := os.OpenFile(cacheFormatPath, os.O_RDWR, 0)
if err != nil {
if os.IsNotExist(err) {
continue
}
logger.LogIf(ctx, err)
return nil, migrating, err
}
defer f.Close()
format, err := formatMetaCacheV1(f)
if err != nil {
continue
}
formatV2 = format
if format.Cache.Version != formatCacheVersionV2 {
migrating = true
}
formats[i] = formatV2
}
return formats, migrating, nil
}
// unmarshalls the cache format.json into formatCacheV1
func formatMetaCacheV1(r io.ReadSeeker) (*formatCacheV1, error) {
format := &formatCacheV1{}
if err := jsonLoad(r, format); err != nil {
return nil, err
}
return format, nil
}
func checkFormatCacheValue(format *formatCacheV2, migrating bool) error {
if format.Format != formatCache {
return fmt.Errorf("Unsupported cache format [%s] found", format.Format)
}
// during migration one or more cache drive(s) formats can be out of sync
if migrating {
// Validate format version and format type.
if format.Version != formatMetaVersion1 {
return fmt.Errorf("Unsupported version of cache format [%s] found", format.Version)
}
if format.Cache.Version != formatCacheVersionV2 && format.Cache.Version != formatCacheVersionV1 {
return fmt.Errorf("Unsupported Cache backend format found [%s]", format.Cache.Version)
}
return nil
}
// Validate format version and format type.
if format.Version != formatMetaVersion1 {
return fmt.Errorf("Unsupported version of cache format [%s] found", format.Version)
}
if format.Cache.Version != formatCacheVersionV2 {
return fmt.Errorf("Unsupported Cache backend format found [%s]", format.Cache.Version)
}
return nil
}
func checkFormatCacheValues(migrating bool, formats []*formatCacheV2) (int, error) {
for i, formatCache := range formats {
if formatCache == nil {
continue
}
if err := checkFormatCacheValue(formatCache, migrating); err != nil {
return i, err
}
if len(formats) != len(formatCache.Cache.Disks) {
return i, fmt.Errorf("Expected number of cache drives %d , got %d",
len(formatCache.Cache.Disks), len(formats))
}
}
return -1, nil
}
// checkCacheDisksConsistency - checks if "This" disk uuid on each disk is consistent with all "Disks" slices
// across disks.
func checkCacheDiskConsistency(formats []*formatCacheV2) error {
var disks = make([]string, len(formats))
// Collect currently available disk uuids.
for index, format := range formats {
if format == nil {
disks[index] = ""
continue
}
disks[index] = format.Cache.This
}
for i, format := range formats {
if format == nil {
continue
}
j := findCacheDiskIndex(disks[i], format.Cache.Disks)
if j == -1 {
return fmt.Errorf("UUID on positions %d:%d do not match with , expected %s", i, j, disks[i])
}
if i != j {
return fmt.Errorf("UUID on positions %d:%d do not match with , expected %s got %s", i, j, disks[i], format.Cache.Disks[j])
}
}
return nil
}
// checkCacheDisksSliceConsistency - validate cache Disks order if they are consistent.
func checkCacheDisksSliceConsistency(formats []*formatCacheV2) error {
var sentinelDisks []string
// Extract first valid Disks slice.
for _, format := range formats {
if format == nil {
continue
}
sentinelDisks = format.Cache.Disks
break
}
for _, format := range formats {
if format == nil {
continue
}
currentDisks := format.Cache.Disks
if !reflect.DeepEqual(sentinelDisks, currentDisks) {
return errors.New("inconsistent cache drives found")
}
}
return nil
}
// findCacheDiskIndex returns position of cache disk in JBOD.
func findCacheDiskIndex(disk string, disks []string) int {
for index, uuid := range disks {
if uuid == disk {
return index
}
}
return -1
}
// validates whether the cache drives order has changed
func validateCacheFormats(ctx context.Context, migrating bool, formats []*formatCacheV2) error {
count := 0
for _, format := range formats {
if format == nil {
count++
}
}
if count == len(formats) {
return errors.New("Cache format files missing on all drives")
}
if _, err := checkFormatCacheValues(migrating, formats); err != nil {
logger.LogIf(ctx, err)
return err
}
if err := checkCacheDisksSliceConsistency(formats); err != nil {
logger.LogIf(ctx, err)
return err
}
err := checkCacheDiskConsistency(formats)
logger.LogIf(ctx, err)
return err
}
// returns true if every cache drive in the list is a
// fresh (unformatted) disk
func cacheDrivesUnformatted(drives []string) bool {
count := 0
for _, drive := range drives {
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
if _, err := os.Stat(cacheFormatPath); os.IsNotExist(err) {
count++
}
}
return count == len(drives)
}
// creates format.json for each cache drive if it is a fresh disk, or loads the format from disk.
// It then validates the formats across all drives in the cache to ensure the order
// of cache drives has not changed.
func loadAndValidateCacheFormat(ctx context.Context, drives []string) (formats []*formatCacheV2, migrating bool, err error) {
if cacheDrivesUnformatted(drives) {
formats, err = initFormatCache(ctx, drives)
} else {
formats, migrating, err = loadFormatCache(ctx, drives)
}
if err != nil {
return nil, false, err
}
if err = validateCacheFormats(ctx, migrating, formats); err != nil {
return nil, false, err
}
return formats, migrating, nil
}
// reads the cached object on disk and writes it back after adding a bitrot
// hash sum per block, as required by the new disk cache format.
func migrateCacheData(ctx context.Context, c *diskCache, bucket, object, oldfile, destDir string, metadata map[string]string) error {
st, err := os.Stat(oldfile)
if err != nil {
err = osErrToFileErr(err)
return err
}
readCloser, err := readCacheFileStream(oldfile, 0, st.Size())
if err != nil {
return err
}
var reader io.Reader = readCloser
actualSize := uint64(st.Size())
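	// When cache encryption is configured, wrap the reader so the data is re-encrypted on write and account for the encryption overhead in the size passed to the bitrot writer.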
if globalCacheKMS != nil {
reader, err = newCacheEncryptReader(readCloser, bucket, object, metadata)
if err != nil {
return err
}
actualSize, _ = sio.EncryptedSize(uint64(st.Size()))
}
_, err = c.bitrotWriteToCache(destDir, cacheDataFile, reader, uint64(actualSize))
return err
}
// migrates cache contents from the old cacheFS format to the new backend format.
// The new format is flat:
// sha(bucket,object)/  <== dir name
// - part.1             <== data
// - cache.json         <== metadata
func migrateOldCache(ctx context.Context, c *diskCache) error {
oldCacheBucketsPath := path.Join(c.dir, minioMetaBucket, "buckets")
cacheFormatPath := pathJoin(c.dir, minioMetaBucket, formatConfigFile)
if _, err := os.Stat(oldCacheBucketsPath); err != nil {
// remove .minio.sys sub directories
removeAll(path.Join(c.dir, minioMetaBucket, "multipart"))
removeAll(path.Join(c.dir, minioMetaBucket, "tmp"))
removeAll(path.Join(c.dir, minioMetaBucket, "trash"))
removeAll(path.Join(c.dir, minioMetaBucket, "buckets"))
// just migrate cache format
return migrateCacheFormatJSON(cacheFormatPath)
}
buckets, err := readDir(oldCacheBucketsPath)
if err != nil {
return err
}
for _, bucket := range buckets {
bucket = strings.TrimSuffix(bucket, SlashSeparator)
var objMetaPaths []string
root := path.Join(oldCacheBucketsPath, bucket)
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if strings.HasSuffix(path, cacheMetaJSONFile) {
objMetaPaths = append(objMetaPaths, path)
}
return nil
})
if err != nil {
return err
}
for _, oMeta := range objMetaPaths {
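			// Derive the object name from the path of its cache.json metadata file.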
objSlice := strings.SplitN(oMeta, cacheMetaJSONFile, 2)
object := strings.TrimPrefix(objSlice[0], path.Join(oldCacheBucketsPath, bucket))
object = strings.TrimSuffix(object, "/")
destdir := getCacheSHADir(c.dir, bucket, object)
if err := os.MkdirAll(destdir, 0777); err != nil {
return err
}
prevCachedPath := path.Join(c.dir, bucket, object)
// get old cached metadata
oldMetaPath := pathJoin(oldCacheBucketsPath, bucket, object, cacheMetaJSONFile)
metaPath := pathJoin(destdir, cacheMetaJSONFile)
metaBytes, err := ioutil.ReadFile(oldMetaPath)
if err != nil {
return err
}
			// read the old cache metadata; version and stat info are added below before it is marshalled again
meta := &cacheMeta{}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(metaBytes, &meta); err != nil {
return err
}
// move cached object to new cache directory path
// migrate cache data and add bit-rot protection hash sum
// at the start of each block
if err := migrateCacheData(ctx, c, bucket, object, prevCachedPath, destdir, meta.Meta); err != nil {
continue
}
stat, err := os.Stat(prevCachedPath)
if err != nil {
if err == errFileNotFound {
continue
}
logger.LogIf(ctx, err)
return err
}
// old cached file can now be removed
if err := os.Remove(prevCachedPath); err != nil {
return err
}
// move cached metadata after changing cache metadata version
meta.Checksum = CacheChecksumInfoV1{Algorithm: HighwayHash256S.String(), Blocksize: cacheBlkSize}
meta.Version = cacheMetaVersion
meta.Stat.Size = stat.Size()
meta.Stat.ModTime = stat.ModTime()
jsonData, err := json.Marshal(meta)
if err != nil {
return err
}
if err = ioutil.WriteFile(metaPath, jsonData, 0644); err != nil {
return err
}
}
// delete old bucket from cache, now that all contents are cleared
removeAll(path.Join(c.dir, bucket))
}
// remove .minio.sys sub directories
removeAll(path.Join(c.dir, minioMetaBucket, "multipart"))
removeAll(path.Join(c.dir, minioMetaBucket, "tmp"))
removeAll(path.Join(c.dir, minioMetaBucket, "trash"))
removeAll(path.Join(c.dir, minioMetaBucket, "buckets"))
return migrateCacheFormatJSON(cacheFormatPath)
}
func migrateCacheFormatJSON(cacheFormatPath string) error {
// now migrate format.json
f, err := os.OpenFile(cacheFormatPath, os.O_RDWR, 0)
if err != nil {
return err
}
defer f.Close()
formatV1 := formatCacheV1{}
if err := jsonLoad(f, &formatV1); err != nil {
return err
}
formatV2 := &formatCacheV2{}
formatV2.formatMetaV1 = formatV1.formatMetaV1
formatV2.Version = formatMetaVersion1
formatV2.Cache = formatV1.Cache
formatV2.Cache.Version = formatCacheVersionV2
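	// Persist the upgraded V2 format back into the same format.json file.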
if err := jsonSave(f, formatV2); err != nil {
return err
}
return nil
}
| cmd/format-disk-cache.go | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.001970253186300397,
0.000212918093893677,
0.00016416102880612016,
0.0001700106222415343,
0.0002505083102732897
] |
{
"id": 1,
"code_window": [
"\tputOpts = miniogo.PutObjectOptions{\n",
"\t\tUserMetadata: meta,\n",
"\t\tUserTags: tag.ToMap(),\n",
"\t\tContentType: objInfo.ContentType,\n",
"\t\tContentEncoding: objInfo.ContentEncoding,\n",
"\t\tStorageClass: dest.StorageClass,\n",
"\t\tReplicationVersionID: objInfo.VersionID,\n",
"\t\tReplicationStatus: miniogo.ReplicationStatusReplica,\n",
"\t\tReplicationMTime: objInfo.ModTime,\n",
"\t}\n",
"\tif mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tStorageClass: sc,\n"
],
"file_path": "cmd/bucket-replication.go",
"type": "replace",
"edit_start_line_idx": 124
} | /*
* MinIO Cloud Storage (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from 'react'
import connect from 'react-redux/lib/components/connect'
import Tooltip from 'react-bootstrap/lib/Tooltip'
import OverlayTrigger from 'react-bootstrap/lib/OverlayTrigger'
let BrowserUpdate = ({latestUiVersion}) => {
// Don't show an update if we're already updated!
if (latestUiVersion === currentUiVersion) return ( <noscript></noscript> )
return (
<li className="hidden-xs hidden-sm">
<a href="">
<OverlayTrigger placement="left" overlay={ <Tooltip id="tt-version-update">
New update available. Click to refresh.
</Tooltip> }> <i className="fas fa-sync"></i> </OverlayTrigger>
</a>
</li>
)
}
export default connect(state => {
return {
latestUiVersion: state.latestUiVersion
}
})(BrowserUpdate)
| browser/app/js/components/BrowserUpdate.js | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00017583122826181352,
0.0001715315447654575,
0.00016690834308974445,
0.00017145379388239235,
0.0000028473878046497703
] |
{
"id": 2,
"code_window": [
" ]\n",
"}\n",
"```\n",
"\n",
"```\n",
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication::c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n",
"Replication configuration applied successfully to myminio/srcbucket.\n",
"```\n",
"\n",
"Apart from *ReplicationArn* , rest of the configuration follows [AWS S3 Spec](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html). Any objects uploaded to the source bucket that meet replication criteria will now be automatically replicated by the MinIO server to the remote destination bucket. Replication can be disabled at any time by disabling specific rules in the configuration or deleting the replication configuration entirely.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication:us-east-1:c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n"
],
"file_path": "docs/bucket/replication/README.md",
"type": "replace",
"edit_start_line_idx": 61
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package replication
import (
"bytes"
"encoding/xml"
)
// Status represents Enabled/Disabled status
type Status string
// Supported status types
const (
Enabled Status = "Enabled"
Disabled Status = "Disabled"
)
// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
type DeleteMarkerReplication struct {
Status Status `xml:"Status"` // should be set to "Disabled" by default
}
// IsEmpty returns true if DeleteMarkerReplication is not set
func (d DeleteMarkerReplication) IsEmpty() bool {
return len(d.Status) == 0
}
// Validate validates whether the status is disabled.
func (d DeleteMarkerReplication) Validate() error {
if d.IsEmpty() {
return errDeleteMarkerReplicationMissing
}
if d.Status != Disabled {
return errInvalidDeleteMarkerReplicationStatus
}
return nil
}
// Rule - a rule for replication configuration.
type Rule struct {
XMLName xml.Name `xml:"Rule" json:"Rule"`
ID string `xml:"ID,omitempty" json:"ID,omitempty"`
Status Status `xml:"Status" json:"Status"`
Priority int `xml:"Priority" json:"Priority"`
DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication" json:"DeleteMarkerReplication"`
Destination Destination `xml:"Destination" json:"Destination"`
Filter Filter `xml:"Filter" json:"Filter"`
}
var (
errInvalidRuleID = Errorf("ID must be less than 255 characters")
errEmptyRuleStatus = Errorf("Status should not be empty")
errInvalidRuleStatus = Errorf("Status must be set to either Enabled or Disabled")
errDeleteMarkerReplicationMissing = Errorf("DeleteMarkerReplication must be specified")
errPriorityMissing = Errorf("Priority must be specified")
errInvalidDeleteMarkerReplicationStatus = Errorf("Delete marker replication is currently not supported")
errDestinationSourceIdentical = Errorf("Destination bucket cannot be the same as the source bucket.")
)
// validateID - checks if ID is valid or not.
func (r Rule) validateID() error {
// cannot be longer than 255 characters
if len(r.ID) > 255 {
return errInvalidRuleID
}
return nil
}
// validateStatus - checks if status is valid or not.
func (r Rule) validateStatus() error {
// Status can't be empty
if len(r.Status) == 0 {
return errEmptyRuleStatus
}
// Status must be one of Enabled or Disabled
if r.Status != Enabled && r.Status != Disabled {
return errInvalidRuleStatus
}
return nil
}
func (r Rule) validateFilter() error {
if err := r.Filter.Validate(); err != nil {
return err
}
return nil
}
// Prefix - a rule can either have prefix under <filter></filter> or under
// <filter><and></and></filter>. This method returns the prefix from the
// location where it is available
func (r Rule) Prefix() string {
if r.Filter.Prefix != "" {
return r.Filter.Prefix
}
return r.Filter.And.Prefix
}
// Tags - a rule can either have tag under <filter></filter> or under
// <filter><and></and></filter>. This method returns all the tags from the
// rule in the format tag1=value1&tag2=value2
func (r Rule) Tags() string {
if !r.Filter.Tag.IsEmpty() {
return r.Filter.Tag.String()
}
if len(r.Filter.And.Tags) != 0 {
var buf bytes.Buffer
for _, t := range r.Filter.And.Tags {
if buf.Len() > 0 {
buf.WriteString("&")
}
buf.WriteString(t.String())
}
return buf.String()
}
return ""
}
// Validate - validates the rule element
func (r Rule) Validate(bucket string, sameTarget bool) error {
if err := r.validateID(); err != nil {
return err
}
if err := r.validateStatus(); err != nil {
return err
}
if err := r.validateFilter(); err != nil {
return err
}
if err := r.DeleteMarkerReplication.Validate(); err != nil {
return err
}
if r.Priority <= 0 {
return errPriorityMissing
}
if r.Destination.Bucket == bucket && sameTarget {
return errDestinationSourceIdentical
}
return nil
}
| pkg/bucket/replication/rule.go | 1 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.002377690514549613,
0.0004210141487419605,
0.00016309338388964534,
0.00016694908845238388,
0.0005627925856970251
] |
{
"id": 2,
"code_window": [
" ]\n",
"}\n",
"```\n",
"\n",
"```\n",
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication::c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n",
"Replication configuration applied successfully to myminio/srcbucket.\n",
"```\n",
"\n",
"Apart from *ReplicationArn* , rest of the configuration follows [AWS S3 Spec](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html). Any objects uploaded to the source bucket that meet replication criteria will now be automatically replicated by the MinIO server to the remote destination bucket. Replication can be disabled at any time by disabling specific rules in the configuration or deleting the replication configuration entirely.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication:us-east-1:c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n"
],
"file_path": "docs/bucket/replication/README.md",
"type": "replace",
"edit_start_line_idx": 61
} | <project xmlns:ivy="antlib:org.apache.ivy.ant" name="aws-sdk-java-tests" default="run">
<property name="ivy.install.version" value="2.5.0" />
<condition property="ivy.home" value="${env.IVY_HOME}">
<isset property="env.IVY_HOME" />
</condition>
<property name="ivy.home" value="${user.home}/.ant" />
<property name="ivy.jar.dir" value="${ivy.home}/lib" />
<property name="ivy.jar.file" value="${ivy.jar.dir}/ivy.jar" />
<target name="download-ivy" unless="offline">
<mkdir dir="${ivy.jar.dir}"/>
<get src="https://repo1.maven.org/maven2/org/apache/ivy/ivy/${ivy.install.version}/ivy-${ivy.install.version}.jar"
dest="${ivy.jar.file}" usetimestamp="true"/>
</target>
<target name="init-ivy" depends="download-ivy">
<path id="ivy.lib.path">
<fileset dir="${ivy.jar.dir}" includes="*.jar"/>
</path>
<taskdef resource="org/apache/ivy/ant/antlib.xml"
uri="antlib:org.apache.ivy.ant" classpathref="ivy.lib.path"/>
</target>
<target name="resolve" description="--> retrieve dependencies with ivy">
<ivy:retrieve />
</target>
<target name="clean">
<delete dir="build"/>
</target>
<path id="aws-s3-sdk-deps">
<fileset dir="lib">
<include name="*.jar"/>
</fileset>
</path>
<target name="compile">
<mkdir dir="build/classes"/>
<javac srcdir="src" destdir="build/classes">
<classpath refid="aws-s3-sdk-deps" />
</javac>
</target>
<target name="jar">
<mkdir dir="build/jar"/>
<jar destfile="build/jar/FunctionalTests.jar" basedir="build/classes">
<archives>
<zips>
<fileset dir="lib/" includes="*.jar"/>
</zips>
</archives>
<manifest>
<attribute name="Main-Class" value="io.minio.awssdk.tests.FunctionalTests"/>
</manifest>
</jar>
</target>
</project>
| mint/build/aws-sdk-java/build.xml | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00017300354375038296,
0.00016941210196819156,
0.00016597560897935182,
0.0001689334458205849,
0.000002611401441754424
] |
{
"id": 2,
"code_window": [
" ]\n",
"}\n",
"```\n",
"\n",
"```\n",
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication::c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n",
"Replication configuration applied successfully to myminio/srcbucket.\n",
"```\n",
"\n",
"Apart from *ReplicationArn* , rest of the configuration follows [AWS S3 Spec](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html). Any objects uploaded to the source bucket that meet replication criteria will now be automatically replicated by the MinIO server to the remote destination bucket. Replication can be disabled at any time by disabling specific rules in the configuration or deleting the replication configuration entirely.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication:us-east-1:c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n"
],
"file_path": "docs/bucket/replication/README.md",
"type": "replace",
"edit_start_line_idx": 61
} | /*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { shallow, mount } from "enzyme"
import { ChangePasswordModal } from "../ChangePasswordModal"
import jwtDecode from "jwt-decode"
jest.mock("jwt-decode")
jwtDecode.mockImplementation(() => ({ sub: "minio" }))
jest.mock("../../web", () => ({
GenerateAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
}),
SetAuth: jest.fn(
({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
if (
currentAccessKey == "minio" &&
currentSecretKey == "minio123" &&
newAccessKey == "test" &&
newSecretKey == "test1234"
) {
return Promise.resolve({})
} else {
return Promise.reject({
message: "Error"
})
}
}
),
GetToken: jest.fn(() => "")
}))
jest.mock("../../utils", () => ({
getRandomAccessKey: () => "raccesskey",
getRandomSecretKey: () => "rsecretkey"
}))
describe("ChangePasswordModal", () => {
const serverInfo = {
version: "test",
platform: "test",
runtime: "test",
info: {},
userInfo: { isIAMUser: true }
}
it("should render without crashing", () => {
shallow(<ChangePasswordModal serverInfo={serverInfo} />)
})
it("should not allow changing password when not IAM user", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isIAMUser: false }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should not allow changing password for STS user", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isTempUser: true }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should not generate accessKey for IAM User", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
wrapper.find("#generate-keys").simulate("click")
setImmediate(() => {
expect(wrapper.state("newAccessKey")).toBe("minio")
expect(wrapper.state("newSecretKey")).toBe("rsecretkey")
})
})
it("should not show new accessKey field for IAM User", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
expect(wrapper.find("#newAccesskey").exists()).toBeFalsy()
})
it("should disable Update button for secretKey", () => {
const showAlert = jest.fn()
const wrapper = shallow(
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
)
wrapper
.find("#currentSecretKey")
.simulate("change", { target: { value: "minio123" } })
wrapper
.find("#newSecretKey")
.simulate("change", { target: { value: "t1" } })
expect(wrapper.find("#update-keys").prop("disabled")).toBeTruthy()
})
it("should call hideChangePassword when Cancel button is clicked", () => {
const hideChangePassword = jest.fn()
const wrapper = shallow(
<ChangePasswordModal
serverInfo={serverInfo}
hideChangePassword={hideChangePassword}
/>
)
wrapper.find("#cancel-change-password").simulate("click")
expect(hideChangePassword).toHaveBeenCalled()
})
})
| browser/app/js/browser/__tests__/ChangePasswordModal.test.js | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00017156302055809647,
0.00016794978000689298,
0.000161960837431252,
0.00016856449656188488,
0.0000024253185983980075
] |
{
"id": 2,
"code_window": [
" ]\n",
"}\n",
"```\n",
"\n",
"```\n",
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication::c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n",
"Replication configuration applied successfully to myminio/srcbucket.\n",
"```\n",
"\n",
"Apart from *ReplicationArn* , rest of the configuration follows [AWS S3 Spec](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html). Any objects uploaded to the source bucket that meet replication criteria will now be automatically replicated by the MinIO server to the remote destination bucket. Replication can be disabled at any time by disabling specific rules in the configuration or deleting the replication configuration entirely.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"mc replicate add myminio/srcbucket/Tax --priority 1 --arn \"arn:minio:replication:us-east-1:c5be6b16-769d-432a-9ef1-4567081f3566:destbucket\" --tags \"Year=2019&Company=AcmeCorp\" --storage-class \"STANDARD\"\n"
],
"file_path": "docs/bucket/replication/README.md",
"type": "replace",
"edit_start_line_idx": 61
} | /*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { shallow } from "enzyme"
import { AboutModal } from "../AboutModal"
describe("AboutModal", () => {
const serverInfo = {
version: "test",
platform: "test",
runtime: "test"
}
it("should render without crashing", () => {
shallow(<AboutModal serverInfo={serverInfo} />)
})
it("should call hideAbout when close button is clicked", () => {
const hideAbout = jest.fn()
const wrapper = shallow(
<AboutModal serverInfo={serverInfo} hideAbout={hideAbout} />
)
wrapper.find("button").simulate("click")
expect(hideAbout).toHaveBeenCalled()
})
})
| browser/app/js/browser/__tests__/AboutModal.test.js | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.000185570286703296,
0.0001720311847748235,
0.000161960837431252,
0.0001707599003566429,
0.00000760257034926326
] |
{
"id": 3,
"code_window": [
"\n",
"// GetDestination returns destination bucket and storage class.\n",
"func (c Config) GetDestination() Destination {\n",
"\tfor _, rule := range c.Rules {\n",
"\t\tif rule.Status == Disabled {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\treturn rule.Destination\n",
"\t}\n",
"\treturn Destination{}\n",
"}\n",
"\n",
"// Replicate returns true if the object should be replicated.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(c.Rules) > 0 {\n",
"\t\treturn c.Rules[0].Destination\n"
],
"file_path": "pkg/bucket/replication/replication.go",
"type": "replace",
"edit_start_line_idx": 147
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package replication
import (
"bytes"
"encoding/xml"
)
// Status represents Enabled/Disabled status
type Status string
// Supported status types
const (
Enabled Status = "Enabled"
Disabled Status = "Disabled"
)
// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
type DeleteMarkerReplication struct {
Status Status `xml:"Status"` // should be set to "Disabled" by default
}
// IsEmpty returns true if DeleteMarkerReplication is not set
func (d DeleteMarkerReplication) IsEmpty() bool {
return len(d.Status) == 0
}
// Validate validates whether the status is disabled.
func (d DeleteMarkerReplication) Validate() error {
if d.IsEmpty() {
return errDeleteMarkerReplicationMissing
}
if d.Status != Disabled {
return errInvalidDeleteMarkerReplicationStatus
}
return nil
}
// Rule - a rule for replication configuration.
type Rule struct {
XMLName xml.Name `xml:"Rule" json:"Rule"`
ID string `xml:"ID,omitempty" json:"ID,omitempty"`
Status Status `xml:"Status" json:"Status"`
Priority int `xml:"Priority" json:"Priority"`
DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication" json:"DeleteMarkerReplication"`
Destination Destination `xml:"Destination" json:"Destination"`
Filter Filter `xml:"Filter" json:"Filter"`
}
var (
errInvalidRuleID = Errorf("ID must be less than 255 characters")
errEmptyRuleStatus = Errorf("Status should not be empty")
errInvalidRuleStatus = Errorf("Status must be set to either Enabled or Disabled")
errDeleteMarkerReplicationMissing = Errorf("DeleteMarkerReplication must be specified")
errPriorityMissing = Errorf("Priority must be specified")
errInvalidDeleteMarkerReplicationStatus = Errorf("Delete marker replication is currently not supported")
errDestinationSourceIdentical = Errorf("Destination bucket cannot be the same as the source bucket.")
)
// validateID - checks if ID is valid or not.
func (r Rule) validateID() error {
// cannot be longer than 255 characters
if len(r.ID) > 255 {
return errInvalidRuleID
}
return nil
}
// validateStatus - checks if status is valid or not.
func (r Rule) validateStatus() error {
// Status can't be empty
if len(r.Status) == 0 {
return errEmptyRuleStatus
}
// Status must be one of Enabled or Disabled
if r.Status != Enabled && r.Status != Disabled {
return errInvalidRuleStatus
}
return nil
}
func (r Rule) validateFilter() error {
if err := r.Filter.Validate(); err != nil {
return err
}
return nil
}
// Prefix - a rule can either have prefix under <filter></filter> or under
// <filter><and></and></filter>. This method returns the prefix from the
// location where it is available
func (r Rule) Prefix() string {
if r.Filter.Prefix != "" {
return r.Filter.Prefix
}
return r.Filter.And.Prefix
}
// Tags - a rule can either have tag under <filter></filter> or under
// <filter><and></and></filter>. This method returns all the tags from the
// rule in the format tag1=value1&tag2=value2
func (r Rule) Tags() string {
if !r.Filter.Tag.IsEmpty() {
return r.Filter.Tag.String()
}
if len(r.Filter.And.Tags) != 0 {
var buf bytes.Buffer
for _, t := range r.Filter.And.Tags {
if buf.Len() > 0 {
buf.WriteString("&")
}
buf.WriteString(t.String())
}
return buf.String()
}
return ""
}
// Validate - validates the rule element
func (r Rule) Validate(bucket string, sameTarget bool) error {
if err := r.validateID(); err != nil {
return err
}
if err := r.validateStatus(); err != nil {
return err
}
if err := r.validateFilter(); err != nil {
return err
}
if err := r.DeleteMarkerReplication.Validate(); err != nil {
return err
}
if r.Priority <= 0 {
return errPriorityMissing
}
if r.Destination.Bucket == bucket && sameTarget {
return errDestinationSourceIdentical
}
return nil
}
| pkg/bucket/replication/rule.go | 1 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.8930094242095947,
0.05937255546450615,
0.00016773877723608166,
0.0019108024425804615,
0.2153179794549942
] |
{
"id": 3,
"code_window": [
"\n",
"// GetDestination returns destination bucket and storage class.\n",
"func (c Config) GetDestination() Destination {\n",
"\tfor _, rule := range c.Rules {\n",
"\t\tif rule.Status == Disabled {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\treturn rule.Destination\n",
"\t}\n",
"\treturn Destination{}\n",
"}\n",
"\n",
"// Replicate returns true if the object should be replicated.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(c.Rules) > 0 {\n",
"\t\treturn c.Rules[0].Destination\n"
],
"file_path": "pkg/bucket/replication/replication.go",
"type": "replace",
"edit_start_line_idx": 147
} | # MinIO Server 配置指南 [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
## 配置目录
默认的配置目录是 `${HOME}/.minio`,你可以使用`--config-dir`命令行选项重写之。MinIO server在首次启动时会生成一个新的`config.json`,里面带有自动生成的访问凭据。
```sh
minio server --config-dir /etc/minio /data
```
截止到 MinIO `RELEASE.2018-08-02T23-11-36Z` 版本, MinIO server 的配置文件(`config.json`) 被存储在通过 `--config-dir` 指定的目录或者默认的 `${HOME}/.minio` 目录。 但是从 `RELEASE.2018-08-18T03-49-57Z` 版本之后, 配置文件 (仅仅), 已经被迁移到存储后端 (存储后端指的是启动一个服务器的时候,传递给MinIO server的目录)。
您可以使用`--config-dir`指定现有配置的位置, MinIO 会迁移 `config.json` 配置到你的存储后端。 迁移成功后,你当前 `--config-dir` 目录中的 `config.json` 将被重命名为 `config.json.deprecated`。 迁移后,所有现有配置都将得到保留。
此外,`--config-dir`现在是一个旧配置,计划在将来删除,因此请相应地更新本地startup和ansible脚本。
```sh
minio server /data
```
MinIO还使用管理员凭据对所有配置,IAM和策略内容进行加密。
### 证书目录
TLS证书存在``${HOME}/.minio/certs``目录下,你需要将证书放在该目录下来启用`HTTPS` 。如果你是一个乐学上进的好青年,这里有一本免费的秘籍传授一你: [如何使用TLS安全的访问minio](https://docs.min.io/cn/how-to-secure-access-to-minio-server-with-tls).
以下是一个具有TLS证书的MinIO server的目录结构。
```sh
$ mc tree --files ~/.minio
/home/user1/.minio
└─ certs
├─ CAs
├─ private.key
└─ public.crt
```
你可以使用`--certs-dir`命令行选项提供自定义certs目录。
#### 凭据
只能通过环境变量`MINIO_ACCESS_KEY` 和 `MINIO_SECRET_KEY` 更改MinIO的admin凭据和root凭据。使用这两个值的组合,MinIO加密存储在后端的配置
```
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
minio server /data
```
##### 使用新的凭据轮换加密
另外,如果您想更改管理员凭据,则MinIO将自动检测到该凭据,并使用新凭据重新加密,如下所示。一次只需要设置如下所示的环境变量即可轮换加密配置。
> 旧的环境变量永远不会在内存中被记住,并且在使用新凭据迁移现有内容后立即销毁。在服务器再次成功重启后,你可以安全的删除它们。
```
export MINIO_ACCESS_KEY=newminio
export MINIO_SECRET_KEY=newminio123
export MINIO_ACCESS_KEY_OLD=minio
export MINIO_SECRET_KEY_OLD=minio123
minio server /data
```
迁移完成后, 服务器会自动的取消进程空间中的`MINIO_ACCESS_KEY_OLD` and `MINIO_SECRET_KEY_OLD`设置。
> **注意: 在下一次服务重新启动前,要确保移除脚本或者服务文件中的 `MINIO_ACCESS_KEY_OLD` and `MINIO_SECRET_KEY_OLD`, 避免现有的内容被双重加密**
#### 区域
```
KEY:
region 服务器的物理位置标记
ARGS:
name (string) 服务器的物理位置名字,例如 "us-west-rack2"
comment (sentence) 为这个设置添加一个可选的注释
```
或者通过环境变量
```
KEY:
region 服务器的物理位置标记
ARGS:
MINIO_REGION_NAME (string) 服务器的物理位置名字,例如 "us-west-rack2"
MINIO_REGION_COMMENT (sentence) 为这个设置添加一个可选的注释
```
示例:
```sh
export MINIO_REGION_NAME="my_region"
minio server /data
```
### 存储类型
默认情况下,标准存储类型的奇偶校验值设置为N/2,低冗余的存储类型奇偶校验值设置为2。在[此处](https://github.com/minio/minio/blob/master/docs/zh_CN/erasure/storage-class/README.md)了解有关MinIO服务器存储类型的更多信息。
```
KEY:
storage_class 定义对象级冗余
ARGS:
standard (string) 设置默认标准存储类型的奇偶校验计数,例如"EC:4"
rrs (string) 设置默认低冗余存储类型的奇偶校验计数,例如"EC:2"
comment (sentence) 为这个设置添加一个可选的注释
```
或者通过环境变量
```
KEY:
storage_class 定义对象级冗余
ARGS:
MINIO_STORAGE_CLASS_STANDARD (string) 设置默认标准存储类型的奇偶校验计数,例如"EC:4"
MINIO_STORAGE_CLASS_RRS (string) 设置默认低冗余存储类型的奇偶校验计数,例如"EC:2"
MINIO_STORAGE_CLASS_COMMENT (sentence) 为这个设置添加一个可选的注释
```
### 缓存
MinIO为主要的网关部署提供了缓存存储层,使您可以缓存内容以实现更快的读取速度,并节省从云中重复下载的成本。
```
KEY:
cache 添加缓存存储层
ARGS:
drives* (csv) 逗号分隔的挂载点,例如 "/optane1,/optane2"
expiry (number) 缓存有效期限(天),例如 "90"
quota (number) 以百分比限制缓存驱动器的使用,例如 "90"
exclude (csv) 逗号分隔的通配符排除模式,例如 "bucket/*.tmp,*.exe"
after (number) 缓存对象之前的最小可访问次数
comment (sentence) 为这个设置添加一个可选的注释
```
或者通过环境变量
```
KEY:
cache 添加缓存存储层
ARGS:
MINIO_CACHE_DRIVES* (csv) 逗号分隔的挂载点,例如 "/optane1,/optane2"
MINIO_CACHE_EXPIRY (number) 缓存有效期限(天),例如 "90"
MINIO_CACHE_QUOTA (number) 以百分比限制缓存驱动器的使用,例如 "90"
MINIO_CACHE_EXCLUDE (csv) 逗号分隔的通配符排除模式,例如 "bucket/*.tmp,*.exe"
MINIO_CACHE_AFTER (number) 缓存对象之前的最小可访问次数
MINIO_CACHE_COMMENT (sentence) 为这个设置添加一个可选的注释
```
### 浏览器
|参数|类型|描述|
|:---|:---|:---|
|``browser``| _string_ | 开启或关闭浏览器访问,默认是开启的,你可以通过``MINIO_BROWSER``环境变量进行修改|
示例:
```sh
export MINIO_BROWSER=off
minio server /data
```
#### 通知
|参数|类型|描述|
|:---|:---|:---|
|``notify``| |通知通过以下方式开启存储桶事件通知,用于lambda计算|
|``notify.amqp``| |[通过AMQP发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#AMQP)|
|``notify.mqtt``| |[通过MQTT发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#MQTT)|
|``notify.elasticsearch``| |[通过Elasticsearch发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#Elasticsearch)|
|``notify.redis``| |[通过Redis发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#Redis)|
|``notify.nats``| |[通过NATS发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#NATS)|
|``notify.postgresql``| |[通过PostgreSQL发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#PostgreSQL)|
|``notify.kafka``| |[通过Apache Kafka发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#apache-kafka)|
|``notify.webhook``| |[通过Webhooks发布MinIO事件](https://docs.min.io/cn/minio-bucket-notification-guide#webhooks)|
## 了解更多
* [MinIO Quickstart Guide](https://docs.min.io/cn/minio-quickstart-guide)
| docs/zh_CN/config/README.md | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.000696034578140825,
0.0001967713178601116,
0.00016264905571006238,
0.00016802811296656728,
0.0001211213821079582
] |
{
"id": 3,
"code_window": [
"\n",
"// GetDestination returns destination bucket and storage class.\n",
"func (c Config) GetDestination() Destination {\n",
"\tfor _, rule := range c.Rules {\n",
"\t\tif rule.Status == Disabled {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\treturn rule.Destination\n",
"\t}\n",
"\treturn Destination{}\n",
"}\n",
"\n",
"// Replicate returns true if the object should be replicated.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(c.Rules) > 0 {\n",
"\t\treturn c.Rules[0].Destination\n"
],
"file_path": "pkg/bucket/replication/replication.go",
"type": "replace",
"edit_start_line_idx": 147
} | #!/usr/bin/env python3
import boto3
s3 = boto3.client('s3',
endpoint_url='http://localhost:9000',
aws_access_key_id='minio',
aws_secret_access_key='minio123',
region_name='us-east-1')
r = s3.select_object_content(
Bucket='mycsvbucket',
Key='sampledata/TotalPopulation.csv.gz',
ExpressionType='SQL',
Expression="select * from s3object s where s.Location like '%United States%'",
InputSerialization={
'CSV': {
"FileHeaderInfo": "USE",
},
'CompressionType': 'GZIP',
},
OutputSerialization={'CSV': {}},
)
for event in r['Payload']:
if 'Records' in event:
records = event['Records']['Payload'].decode('utf-8')
print(records)
elif 'Stats' in event:
statsDetails = event['Stats']['Details']
print("Stats details bytesScanned: ")
print(statsDetails['BytesScanned'])
print("Stats details bytesProcessed: ")
print(statsDetails['BytesProcessed'])
| docs/select/select.py | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00017544320144224912,
0.00017162293079309165,
0.00016582771786488593,
0.0001726104092085734,
0.000003645265451268642
] |
{
"id": 3,
"code_window": [
"\n",
"// GetDestination returns destination bucket and storage class.\n",
"func (c Config) GetDestination() Destination {\n",
"\tfor _, rule := range c.Rules {\n",
"\t\tif rule.Status == Disabled {\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\treturn rule.Destination\n",
"\t}\n",
"\treturn Destination{}\n",
"}\n",
"\n",
"// Replicate returns true if the object should be replicated.\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif len(c.Rules) > 0 {\n",
"\t\treturn c.Rules[0].Destination\n"
],
"file_path": "pkg/bucket/replication/replication.go",
"type": "replace",
"edit_start_line_idx": 147
} | # MinIO HDFS Gateway [](https://slack.minio.io)
MinIO HDFS gateway adds Amazon S3 API support to Hadoop HDFS filesystem. Applications can use both the S3 and file APIs concurrently without requiring any data migration. Since the gateway is stateless and shared-nothing, you may elastically provision as many MinIO instances as needed to distribute the load.
> NOTE: The intention of this gateway implementation is to make it easy to migrate your existing data on HDFS clusters to MinIO clusters using standard tools like `mc` or `aws-cli`. If the goal is to use HDFS perpetually, we recommend using HDFS directly for all write operations.
## Run MinIO Gateway for HDFS Storage
### Using Binary
Namenode information is obtained automatically by reading `core-site.xml` from the Hadoop installation pointed to by the *$HADOOP_HOME* environment variable.
```
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
minio gateway hdfs
```
You can also override the namenode endpoint as shown below.
```
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
minio gateway hdfs hdfs://namenode:8200
```
### Using Docker
Using Docker is experimental: most Hadoop environments are not dockerized and may require additional steps to get this working properly. In that situation you are better off using the binary.
```
docker run -p 9000:9000 \
--name hdfs-s3 \
-e "MINIO_ACCESS_KEY=minio" \
-e "MINIO_SECRET_KEY=minio123" \
minio/minio gateway hdfs hdfs://namenode:8200
```
## Test using MinIO Browser
*MinIO gateway* comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure that your server has started successfully.

## Test using MinIO Client `mc`
`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.
### Configure `mc`
```
mc config host add myhdfs http://gateway-ip:9000 access_key secret_key
```
### List buckets on hdfs
```
mc ls myhdfs
[2017-02-22 01:50:43 PST] 0B user/
[2017-02-26 21:43:51 PST] 0B datasets/
[2017-02-26 22:10:11 PST] 0B assets/
```
### Known limitations
Gateway inherits the following limitations of HDFS storage layer:
- No bucket policy support (HDFS has no such concept)
- Bucket notification APIs are not supported (HDFS has no support for fsnotify)
- No server side encryption support (Intentionally not implemented)
- No server side compression support (Intentionally not implemented)
- Concurrent multipart operations are not supported (HDFS lacks safe locking support, or it is poorly implemented)
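### Migrating data to MinIO
Since the gateway is intended primarily as a migration path, a typical next step is to mirror the HDFS-backed buckets into a standalone MinIO deployment. A minimal sketch, assuming a hypothetical `myminio` alias that points at the destination MinIO server:
```
mc config host add myminio https://minio-server:9000 access_key secret_key
mc mirror myhdfs/datasets myminio/datasets
```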
## Explore Further
- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
| docs/gateway/hdfs.md | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00018538633594289422,
0.00017048408335540444,
0.00015952327521517873,
0.00017024633416440338,
0.000007475638085452374
] |
{
"id": 4,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tif err := r.DeleteMarkerReplication.Validate(); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\tif r.Priority <= 0 {\n",
"\t\treturn errPriorityMissing\n",
"\t}\n",
"\tif r.Destination.Bucket == bucket && sameTarget {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif r.Priority < 0 {\n"
],
"file_path": "pkg/bucket/replication/rule.go",
"type": "replace",
"edit_start_line_idx": 148
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"net/http"
"time"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
// gets the replication config associated with a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return nil, BucketReplicationConfigNotFound{Bucket: bucketName}
}
return globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
}
// validateReplicationDestination returns an error if the replication destination bucket is missing or not configured.
// It also returns true if replication destination is same as this server.
func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config) (bool, error) {
clnt := globalBucketTargetSys.GetReplicationTargetClient(ctx, rCfg.ReplicationArn)
if clnt == nil {
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
if found, _ := clnt.BucketExists(ctx, rCfg.GetDestination().Bucket); !found {
return false, BucketReplicationDestinationNotFound{Bucket: rCfg.GetDestination().Bucket}
}
if ret, err := globalBucketObjectLockSys.Get(bucket); err == nil {
if ret.LockEnabled {
lock, _, _, _, err := clnt.GetObjectLockConfig(ctx, rCfg.GetDestination().Bucket)
if err != nil || lock != "Enabled" {
return false, BucketReplicationDestinationMissingLock{Bucket: rCfg.GetDestination().Bucket}
}
}
}
// validate replication ARN against target endpoint
c, ok := globalBucketTargetSys.arnRemotesMap[rCfg.ReplicationArn]
if ok {
if c.EndpointURL().String() == clnt.EndpointURL().String() {
sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
return sameTarget, nil
}
}
return false, BucketRemoteTargetNotFound{Bucket: bucket}
}
// mustReplicate returns true if object meets replication criteria.
func mustReplicate(ctx context.Context, r *http.Request, bucket, object string, meta map[string]string, replStatus string) bool {
if globalIsGateway {
return false
}
if rs, ok := meta[xhttp.AmzBucketReplicationStatus]; ok {
replStatus = rs
}
if replication.StatusType(replStatus) == replication.Replica {
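		// An object that is itself a replica must never be replicated again.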
return false
}
if s3Err := isPutActionAllowed(getRequestAuthType(r), bucket, object, r, iampolicy.GetReplicationConfigurationAction); s3Err != ErrNone {
return false
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
return false
}
opts := replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(meta),
}
tagStr, ok := meta[xhttp.AmzObjectTagging]
if ok {
opts.UserTags = tagStr
}
return cfg.Replicate(opts)
}
func putReplicationOpts(dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if k == xhttp.AmzBucketReplicationStatus {
continue
}
meta[k] = v
}
tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: dest.StorageClass,
ReplicationVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationMTime: objInfo.ModTime,
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}
return
}
// replicateObject replicates the specified version of the object to the destination bucket.
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, bucket, object, versionID string, objectAPI ObjectLayer, eventArg *eventArgs, healPending bool) {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
return
}
tgt := globalBucketTargetSys.GetReplicationTargetClient(ctx, cfg.ReplicationArn)
if tgt == nil {
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
return
}
defer gr.Close()
objInfo := gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
return
}
dest := cfg.GetDestination()
if dest.Bucket == "" {
return
}
	// In the rare event that replication is still pending - either because the server
	// shut down or crashed before replication completed, or because healing raced with
	// PutObject - do an additional stat to see if the version ID already exists on the target.
if healPending {
_, err := tgt.StatObject(ctx, dest.Bucket, object, miniogo.StatObjectOptions{VersionID: objInfo.VersionID})
if err == nil {
// object with same VersionID already exists, replication kicked off by
// PutObject might have completed.
return
}
}
putOpts := putReplicationOpts(dest, objInfo)
replicationStatus := replication.Complete
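	// Optimistically mark the replication as Complete; it is downgraded to Failed below if the remote PutObject fails.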
_, err = tgt.PutObject(ctx, dest.Bucket, object, gr, size, "", "", putOpts)
if err != nil {
replicationStatus = replication.Failed
// Notify replication failure event.
if eventArg == nil {
eventArg = &eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
}
}
eventArg.EventName = event.OperationReplicationFailed
eventArg.Object.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
sendEvent(*eventArg)
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
if objInfo.UserTags != "" {
objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
}
objInfo.metadataOnly = true // Perform only metadata updates.
if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{VersionID: objInfo.VersionID}); err != nil {
logger.LogIf(ctx, err)
}
}
| cmd/bucket-replication.go | 1 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.004599123727530241,
0.001225910265929997,
0.00016477357712574303,
0.00047974579501897097,
0.0013545783003792167
] |
{
"id": 4,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tif err := r.DeleteMarkerReplication.Validate(); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\tif r.Priority <= 0 {\n",
"\t\treturn errPriorityMissing\n",
"\t}\n",
"\tif r.Destination.Bucket == bucket && sameTarget {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif r.Priority < 0 {\n"
],
"file_path": "pkg/bucket/replication/rule.go",
"type": "replace",
"edit_start_line_idx": 148
} | /*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { connect } from "react-redux"
import { Modal, ModalHeader } from "react-bootstrap"
import * as actionsBuckets from "./actions"
import PolicyInput from "./PolicyInput"
import Policy from "./Policy"
export const BucketPolicyModal = ({ showBucketPolicy, currentBucket, hideBucketPolicy, policies }) => {
return (
<Modal className="modal-policy"
animation={ false }
show={ showBucketPolicy }
onHide={ hideBucketPolicy }
>
<ModalHeader>
Bucket Policy (
{ currentBucket })
<button className="close close-alt" onClick={ hideBucketPolicy }>
<span>×</span>
</button>
</ModalHeader>
<div className="pm-body">
<PolicyInput />
{ policies.map((policy, i) => <Policy key={ i } prefix={ policy.prefix } policy={ policy.policy } />
) }
</div>
</Modal>
)
}
const mapStateToProps = state => {
return {
currentBucket: state.buckets.currentBucket,
showBucketPolicy: state.buckets.showBucketPolicy,
policies: state.buckets.policies
}
}
const mapDispatchToProps = dispatch => {
return {
hideBucketPolicy: () => dispatch(actionsBuckets.hideBucketPolicy())
}
}
export default connect(mapStateToProps, mapDispatchToProps)(BucketPolicyModal) | browser/app/js/buckets/BucketPolicyModal.js | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00018552091205492616,
0.0001740939187584445,
0.00016726553440093994,
0.00017295009456574917,
0.000005376873104978586
] |
{
"id": 4,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tif err := r.DeleteMarkerReplication.Validate(); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\tif r.Priority <= 0 {\n",
"\t\treturn errPriorityMissing\n",
"\t}\n",
"\tif r.Destination.Bucket == bucket && sameTarget {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif r.Priority < 0 {\n"
],
"file_path": "pkg/bucket/replication/rule.go",
"type": "replace",
"edit_start_line_idx": 148
} | /*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package json
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/minio/minio/pkg/s3select/sql"
)
func TestNewReader(t *testing.T) {
files, err := ioutil.ReadDir("testdata")
if err != nil {
t.Fatal(err)
}
for _, file := range files {
t.Run(file.Name(), func(t *testing.T) {
f, err := os.Open(filepath.Join("testdata", file.Name()))
if err != nil {
t.Fatal(err)
}
r := NewReader(f, &ReaderArgs{})
var record sql.Record
for {
record, err = r.Read(record)
if err != nil {
break
}
}
r.Close()
if err != io.EOF {
t.Fatalf("Reading failed with %s, %s", err, file.Name())
}
})
t.Run(file.Name()+"-close", func(t *testing.T) {
f, err := os.Open(filepath.Join("testdata", file.Name()))
if err != nil {
t.Fatal(err)
}
r := NewReader(f, &ReaderArgs{})
r.Close()
var record sql.Record
for {
record, err = r.Read(record)
if err != nil {
break
}
}
if err != io.EOF {
t.Fatalf("Reading failed with %s, %s", err, file.Name())
}
})
}
}
func BenchmarkReader(b *testing.B) {
files, err := ioutil.ReadDir("testdata")
if err != nil {
b.Fatal(err)
}
for _, file := range files {
b.Run(file.Name(), func(b *testing.B) {
f, err := ioutil.ReadFile(filepath.Join("testdata", file.Name()))
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(f)))
b.ReportAllocs()
b.ResetTimer()
var record sql.Record
for i := 0; i < b.N; i++ {
r := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{})
for {
record, err = r.Read(record)
if err != nil {
break
}
}
r.Close()
if err != io.EOF {
b.Fatalf("Reading failed with %s, %s", err, file.Name())
}
}
})
}
}
| pkg/s3select/json/reader_test.go | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.0029977532103657722,
0.0006661543156951666,
0.0001658118999330327,
0.000177308582351543,
0.0008861529640853405
] |
{
"id": 4,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tif err := r.DeleteMarkerReplication.Validate(); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\tif r.Priority <= 0 {\n",
"\t\treturn errPriorityMissing\n",
"\t}\n",
"\tif r.Destination.Bucket == bucket && sameTarget {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif r.Priority < 0 {\n"
],
"file_path": "pkg/bucket/replication/rule.go",
"type": "replace",
"edit_start_line_idx": 148
} | /*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"os"
"path/filepath"
"testing"
)
// TestFSFormatFS - tests initFormatFS, formatMetaGetFormatBackendFS, formatFSGetVersion.
func TestFSFormatFS(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer os.RemoveAll(disk)
fsFormatPath := pathJoin(disk, minioMetaBucket, formatConfigFile)
// Assign a new UUID.
uuid := mustGetUUID()
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}
rlk, err := initFormatFS(context.Background(), disk)
if err != nil {
t.Fatal(err)
}
rlk.Close()
// Do the basic sanity checks to check if initFormatFS() did its job.
f, err := os.OpenFile(fsFormatPath, os.O_RDWR|os.O_SYNC, 0)
if err != nil {
t.Fatal(err)
}
defer f.Close()
format, err := formatMetaGetFormatBackendFS(f)
if err != nil {
t.Fatal(err)
}
if format != formatBackendFS {
t.Fatalf(`expected: %s, got: %s`, formatBackendFS, format)
}
version, err := formatFSGetVersion(f)
if err != nil {
t.Fatal(err)
}
if version != formatFSVersionV2 {
t.Fatalf(`expected: %s, got: %s`, formatFSVersionV2, version)
}
// Corrupt the format.json file and test the functions.
// formatMetaGetFormatBackendFS, formatFSGetVersion, initFormatFS should return errors.
if err = f.Truncate(0); err != nil {
t.Fatal(err)
}
if _, err = f.WriteString("b"); err != nil {
t.Fatal(err)
}
if _, err = formatMetaGetFormatBackendFS(f); err == nil {
t.Fatal("expected to fail")
}
if _, err = formatFSGetVersion(rlk); err == nil {
t.Fatal("expected to fail")
}
if _, err = initFormatFS(context.Background(), disk); err == nil {
t.Fatal("expected to fail")
}
// With unknown formatMetaV1.Version formatMetaGetFormatBackendFS, initFormatFS should return error.
if err = f.Truncate(0); err != nil {
t.Fatal(err)
}
// Here we set formatMetaV1.Version to "2"
if _, err = f.WriteString(`{"version":"2","format":"fs","fs":{"version":"1"}}`); err != nil {
t.Fatal(err)
}
if _, err = formatMetaGetFormatBackendFS(f); err == nil {
t.Fatal("expected to fail")
}
if _, err = initFormatFS(context.Background(), disk); err == nil {
t.Fatal("expected to fail")
}
}
| cmd/format-fs_test.go | 0 | https://github.com/minio/minio/commit/121164db56c18aee87af730e1e2f909af33bbd32 | [
0.00023286482610274106,
0.00017841764201875776,
0.00016100986977107823,
0.00017456681234762073,
0.000019538058040780015
] |
{
"id": 0,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tnow := s.timeSource.Now()\n",
"\tstate.update(now)\n",
"\tstate.Bucket.Reconfigure(\n",
"\t\tavailableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n",
"\t\tnow, state.Consumption.RU,\n",
"\t)\n",
"\tif err := h.updateTenantState(state); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tctx, tenantID, availableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/configure.go",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
// Package tenanttokenbucket implements the tenant token bucket server-side
// algorithm described in the distributed token bucket RFC. It has minimal
// dependencies and is meant to be testable on its own.
package tenanttokenbucket
import (
"context"
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// State of the distributed token bucket.
type State struct {
// RUBurstLimit is the burst limit in RUs.
// TODO(radu): this is ignored for now.
RUBurstLimit float64
// RURefillRate is the refill rate in RUs/second.
RURefillRate float64
// RUCurrent is the available (burst) RUs.
RUCurrent float64
// CurrentShareSum is the sum of the last reported share value for
// each active SQL pod for the tenant.
CurrentShareSum float64
}
// fallbackRateTimeFrame is a time frame used to calculate a fallback rate.
//
// The fallback rate is used when the tenant can't get a TokenBucket request
// through. It is calculated so that all available (burst) RUs are used through
// this time period. We assume that this time frame is enough to react to and
// fix an infrastructure problem.
const fallbackRateTimeFrame = time.Hour
// Update accounts for passing of time, replenishing tokens according to the
// rate.
func (s *State) Update(since time.Duration) {
if since > 0 {
s.RUCurrent += s.RURefillRate * since.Seconds()
}
}
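// The sketch below is illustrative only (it is not part of the upstream file);
// it shows how Update replenishes the burst budget and how the fallback rate
// documented above is derived from the replenished state.
func exampleUpdateAndFallback() float64 {
	s := State{RURefillRate: 100, RUCurrent: 50}
	// Two seconds of refill: RUCurrent becomes 50 + 100*2 = 250 RU.
	s.Update(2 * time.Second)
	fallbackRate := s.RURefillRate
	if s.RUCurrent > 0 {
		// Spread the remaining burst RUs over fallbackRateTimeFrame (one hour).
		fallbackRate += s.RUCurrent / fallbackRateTimeFrame.Seconds()
	}
	// ≈ 100.07 RU/s: the refill rate plus enough to drain the burst in an hour.
	return fallbackRate
}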
// Request processes a request for more tokens and updates the State
// accordingly.
func (s *State) Request(
ctx context.Context, req *roachpb.TokenBucketRequest,
) roachpb.TokenBucketResponse {
var res roachpb.TokenBucketResponse
// Calculate the fallback rate.
res.FallbackRate = s.RURefillRate
if s.RUCurrent > 0 {
res.FallbackRate += s.RUCurrent / fallbackRateTimeFrame.Seconds()
}
if log.V(1) {
log.Infof(ctx, "token bucket request (tenant=%d requested=%g current=%g)", req.TenantID, req.RequestedRU, s.RUCurrent)
}
needed := req.RequestedRU
if needed <= 0 {
return res
}
if s.RUCurrent >= needed {
s.RUCurrent -= needed
res.GrantedRU = needed
if log.V(1) {
log.Infof(ctx, "request granted (tenant=%d remaining=%g)", req.TenantID, s.RUCurrent)
}
return res
}
var grantedTokens float64
if s.RUCurrent > 0 {
grantedTokens = s.RUCurrent
needed -= s.RUCurrent
}
availableRate := s.RURefillRate
if debt := -s.RUCurrent; debt > 0 {
// We pre-distribute tokens over the next TargetRefillPeriod; any debt over
// that is a systematic error we need to account for.
debt -= req.TargetRequestPeriod.Seconds() * s.RURefillRate
if debt > 0 {
// Say that we want to pay the debt over the next RefillPeriod (but use at
// most 95% of the rate for the debt).
// TODO(radu): make this configurable?
debtRate := debt / req.TargetRequestPeriod.Seconds()
availableRate -= debtRate
availableRate = math.Max(availableRate, 0.05*s.RURefillRate)
}
}
// TODO(radu): support multiple instances by giving out only a share of the rate.
// Without this, all instances will get roughly equal rates even if they have
// different levels of load (in addition, we are heavily relying on the debt
// mechanism above).
allowedRate := availableRate
duration := time.Duration(float64(time.Second) * (needed / allowedRate))
if duration <= req.TargetRequestPeriod {
grantedTokens += needed
} else {
// We don't want to plan ahead for more than the target period; give out
// fewer tokens.
duration = req.TargetRequestPeriod
grantedTokens += allowedRate * duration.Seconds()
}
s.RUCurrent -= grantedTokens
res.GrantedRU = grantedTokens
res.TrickleDuration = duration
if log.V(1) {
log.Infof(ctx, "request granted over time (tenant=%d granted=%g trickle=%s)", req.TenantID, res.GrantedRU, res.TrickleDuration)
}
return res
}
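// Illustrative sketch (not part of the upstream file): with an empty bucket and
// a 100 RU/s refill rate, a request for 1000 RUs over a 10s target period needs
// 1000/100 = 10s of refill, which fits within the target period, so the full
// amount is granted as a 10-second trickle.
func exampleTrickleGrant(ctx context.Context) roachpb.TokenBucketResponse {
	s := State{RURefillRate: 100, RUCurrent: 0}
	req := roachpb.TokenBucketRequest{
		RequestedRU:         1000,
		TargetRequestPeriod: 10 * time.Second,
	}
	// GrantedRU = 1000, TrickleDuration = 10s; the bucket goes into debt that
	// future refill pays back.
	return s.Request(ctx, &req)
}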
// Reconfigure updates the settings for the token bucket.
//
// Arguments:
//
// - availableRU is the amount of Request Units that the tenant can consume at
// will. Also known as "burst RUs".
//
// - refillRate is the amount of Request Units per second that the tenant
// receives.
//
// - maxBurstRU is the maximum amount of Request Units that can be accumulated
// from the refill rate, or 0 if there is no limit.
//
// - asOf is a timestamp; the reconfiguration request is assumed to be based on
// the consumption at that time. This timestamp is used to compensate for any
// refill that would have happened in the meantime.
//
// - asOfConsumedRequestUnits is the total number of consumed RUs based on
// which the reconfiguration values were calculated (i.e. at the asOf time).
// It is used to adjust availableRU with the consumption that happened in the
// meantime.
//
// - now is the current time.
//
// - currentConsumedRequestUnits is the current total number of consumed RUs.
//
func (s *State) Reconfigure(
availableRU float64,
refillRate float64,
maxBurstRU float64,
asOf time.Time,
asOfConsumedRequestUnits float64,
now time.Time,
currentConsumedRequestUnits float64,
) {
// TODO(radu): adjust available RUs according to asOf and asOfConsumedUnits
// and add tests.
s.RUCurrent = availableRU
s.RURefillRate = refillRate
s.RUBurstLimit = maxBurstRU
}
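// The sketch below is illustrative only and is not part of the upstream file:
// the TODO above leaves the asOf/asOfConsumedRequestUnits compensation
// unimplemented, and this is just one possible shape of the adjustment the doc
// comment describes (debit consumption since asOf, credit the refill that would
// have accumulated since then).
func exampleAdjustedAvailableRU(
	availableRU, refillRate, asOfConsumedRequestUnits float64,
	asOf, now time.Time,
	currentConsumedRequestUnits float64,
) float64 {
	adjusted := availableRU
	// Consumption that happened after the asOf snapshot was taken.
	adjusted -= currentConsumedRequestUnits - asOfConsumedRequestUnits
	// Refill that would have accumulated between asOf and now.
	adjusted += refillRate * now.Sub(asOf).Seconds()
	return adjusted
}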
| pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go | 1 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.003610140411183238,
0.0014218718279153109,
0.0001633275969652459,
0.0009832460200414062,
0.0012612093705683947
] |
{
"id": 0,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tnow := s.timeSource.Now()\n",
"\tstate.update(now)\n",
"\tstate.Bucket.Reconfigure(\n",
"\t\tavailableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n",
"\t\tnow, state.Consumption.RU,\n",
"\t)\n",
"\tif err := h.updateTenantState(state); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tctx, tenantID, availableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/configure.go",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
"net/http"
"strings"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
"github.com/cockroachdb/cockroach/pkg/roachprod/install"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/util/httputil"
)
// RunBuildInfo is a test that sanity checks the build info.
func RunBuildInfo(ctx context.Context, t test.Test, c cluster.Cluster) {
c.Put(ctx, t.Cockroach(), "./cockroach")
c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings())
var details serverpb.DetailsResponse
adminUIAddrs, err := c.ExternalAdminUIAddr(ctx, t.L(), c.Node(1))
if err != nil {
t.Fatal(err)
}
url := `http://` + adminUIAddrs[0] + `/_status/details/local`
err = httputil.GetJSON(http.Client{}, url, &details)
if err != nil {
t.Fatal(err)
}
bi := details.BuildInfo
testData := map[string]string{
"go_version": bi.GoVersion,
"tag": bi.Tag,
"time": bi.Time,
"revision": bi.Revision,
}
for key, val := range testData {
if val == "" {
t.Fatalf("build info not set for \"%s\"", key)
}
}
}
// RunBuildAnalyze performs static analysis on the built binary to
// ensure it's built as expected.
func RunBuildAnalyze(ctx context.Context, t test.Test, c cluster.Cluster) {
if c.IsLocal() {
// This test is linux-specific and needs to be able to install apt
// packages, so only run it on dedicated remote VMs.
t.Skip("local execution not supported")
}
c.Put(ctx, t.Cockroach(), "./cockroach")
// 1. Check for executable stack.
//
// Executable stack memory is a security risk (not a vulnerability
// in itself, but makes it easier to exploit other vulnerabilities).
// Whether or not the stack is executable is a property of the built
// executable, subject to some subtle heuristics. This test ensures
// that we're not hitting anything that causes our stacks to become
// executable.
//
// References:
// https://www.airs.com/blog/archives/518
// https://wiki.ubuntu.com/SecurityTeam/Roadmap/ExecutableStacks
// https://github.com/cockroachdb/cockroach/issues/37885
// There are several ways to do this analysis: `readelf -lW`,
// `scanelf -qe`, and `execstack -q`. `readelf` is part of binutils,
// so it's relatively ubiquitous, but we don't have it in the
// roachtest environment. Since we don't have anything preinstalled
// we can use, choose `scanelf` for being the simplest to use (empty
// output indicates everything's fine, non-empty means something
// bad).
c.Run(ctx, c.Node(1), "sudo apt-get update")
c.Run(ctx, c.Node(1), "sudo apt-get -qqy install pax-utils")
result, err := c.RunWithDetailsSingleNode(ctx, t.L(), c.Node(1), "scanelf -qe cockroach")
if err != nil {
t.Fatalf("scanelf failed: %s", err)
}
output := strings.TrimSpace(result.Stdout)
if len(output) > 0 {
t.Fatalf("scanelf returned non-empty output (executable stack): %s", output)
}
}
| pkg/cmd/roachtest/tests/build_info.go | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.0001798156154109165,
0.00017439544899389148,
0.000168746366398409,
0.00017468286387156695,
0.0000027189018965145806
] |
{
"id": 0,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tnow := s.timeSource.Now()\n",
"\tstate.update(now)\n",
"\tstate.Bucket.Reconfigure(\n",
"\t\tavailableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n",
"\t\tnow, state.Consumption.RU,\n",
"\t)\n",
"\tif err := h.updateTenantState(state); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tctx, tenantID, availableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/configure.go",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { createSlice, PayloadAction } from "@reduxjs/toolkit";
import { cockroach } from "@cockroachlabs/crdb-protobuf-client";
import { DOMAIN_NAME, noopReducer } from "../utils";
import { ICancelQueryRequest, ICancelSessionRequest } from ".";
type CancelQueryResponse = cockroach.server.serverpb.CancelQueryResponse;
export type TerminateQueryState = {
data: CancelQueryResponse;
lastError: Error;
valid: boolean;
};
const initialState: TerminateQueryState = {
data: null,
lastError: null,
valid: true,
};
const terminateQuery = createSlice({
name: `${DOMAIN_NAME}/terminateQuery`,
initialState,
reducers: {
terminateSession: (
_state,
_action: PayloadAction<ICancelSessionRequest>,
) => {},
terminateSessionCompleted: noopReducer,
terminateSessionFailed: (_state, _action: PayloadAction<Error>) => {},
terminateQuery: (_state, _action: PayloadAction<ICancelQueryRequest>) => {},
terminateQueryCompleted: noopReducer,
terminateQueryFailed: (_state, _action: PayloadAction<Error>) => {},
},
});
export const { reducer, actions } = terminateQuery;
| pkg/ui/workspaces/cluster-ui/src/store/terminateQuery/terminateQuery.reducer.ts | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.00017953704809769988,
0.000173037318745628,
0.00016761693404987454,
0.0001737738202791661,
0.000004072935098520247
] |
{
"id": 0,
"code_window": [
"\t\treturn err\n",
"\t}\n",
"\tnow := s.timeSource.Now()\n",
"\tstate.update(now)\n",
"\tstate.Bucket.Reconfigure(\n",
"\t\tavailableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n",
"\t\tnow, state.Consumption.RU,\n",
"\t)\n",
"\tif err := h.updateTenantState(state); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tctx, tenantID, availableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/configure.go",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
module.exports = {
projects: ["jest.testing.config.js", "jest.lint.config.js"],
setupFilesAfterEnv: ["./enzyme.setup.js"],
};
| pkg/ui/workspaces/cluster-ui/jest.config.js | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.00017953704809769988,
0.00017738077440299094,
0.00017522451526019722,
0.00017738077440299094,
0.000002156266418751329
] |
{
"id": 1,
"code_window": [
"//\n",
"// - currentConsumedRequestUnits is the current total number of consumed RUs.\n",
"//\n",
"func (s *State) Reconfigure(\n",
"\tavailableRU float64,\n",
"\trefillRate float64,\n",
"\tmaxBurstRU float64,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tctx context.Context,\n",
"\ttenantID roachpb.TenantID,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 156
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
// Package tenanttokenbucket implements the tenant token bucket server-side
// algorithm described in the distributed token bucket RFC. It has minimal
// dependencies and is meant to be testable on its own.
package tenanttokenbucket
import (
"context"
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// State of the distributed token bucket.
type State struct {
// RUBurstLimit is the burst limit in RUs.
// TODO(radu): this is ignored for now.
RUBurstLimit float64
// RURefillRate is the refill rate in RUs/second.
RURefillRate float64
// RUCurrent is the available (burst) RUs.
RUCurrent float64
// CurrentShareSum is the sum of the last reported share value for
// each active SQL pod for the tenant.
CurrentShareSum float64
}
// fallbackRateTimeFrame is a time frame used to calculate a fallback rate.
//
// The fallback rate is used when the tenant can't get a TokenBucket request
// through. It is calculated so that all available (burst) RUs are used through
// this time period. We assume that this time frame is enough to react to and
// fix an infrastructure problem.
const fallbackRateTimeFrame = time.Hour
// Update accounts for passing of time, replenishing tokens according to the
// rate.
func (s *State) Update(since time.Duration) {
if since > 0 {
s.RUCurrent += s.RURefillRate * since.Seconds()
}
}
// Request processes a request for more tokens and updates the State
// accordingly.
func (s *State) Request(
ctx context.Context, req *roachpb.TokenBucketRequest,
) roachpb.TokenBucketResponse {
var res roachpb.TokenBucketResponse
// Calculate the fallback rate.
res.FallbackRate = s.RURefillRate
if s.RUCurrent > 0 {
res.FallbackRate += s.RUCurrent / fallbackRateTimeFrame.Seconds()
}
if log.V(1) {
log.Infof(ctx, "token bucket request (tenant=%d requested=%g current=%g)", req.TenantID, req.RequestedRU, s.RUCurrent)
}
needed := req.RequestedRU
if needed <= 0 {
return res
}
if s.RUCurrent >= needed {
s.RUCurrent -= needed
res.GrantedRU = needed
if log.V(1) {
log.Infof(ctx, "request granted (tenant=%d remaining=%g)", req.TenantID, s.RUCurrent)
}
return res
}
var grantedTokens float64
if s.RUCurrent > 0 {
grantedTokens = s.RUCurrent
needed -= s.RUCurrent
}
availableRate := s.RURefillRate
if debt := -s.RUCurrent; debt > 0 {
// We pre-distribute tokens over the next TargetRefillPeriod; any debt over
// that is a systematic error we need to account for.
debt -= req.TargetRequestPeriod.Seconds() * s.RURefillRate
if debt > 0 {
// Say that we want to pay the debt over the next RefillPeriod (but use at
// most 95% of the rate for the debt).
// TODO(radu): make this configurable?
debtRate := debt / req.TargetRequestPeriod.Seconds()
availableRate -= debtRate
availableRate = math.Max(availableRate, 0.05*s.RURefillRate)
}
}
// TODO(radu): support multiple instances by giving out only a share of the rate.
// Without this, all instances will get roughly equal rates even if they have
// different levels of load (in addition, we are heavily relying on the debt
// mechanism above).
allowedRate := availableRate
duration := time.Duration(float64(time.Second) * (needed / allowedRate))
if duration <= req.TargetRequestPeriod {
grantedTokens += needed
} else {
// We don't want to plan ahead for more than the target period; give out
// fewer tokens.
duration = req.TargetRequestPeriod
grantedTokens += allowedRate * duration.Seconds()
}
s.RUCurrent -= grantedTokens
res.GrantedRU = grantedTokens
res.TrickleDuration = duration
if log.V(1) {
log.Infof(ctx, "request granted over time (tenant=%d granted=%g trickle=%s)", req.TenantID, res.GrantedRU, res.TrickleDuration)
}
return res
}
// Reconfigure updates the settings for the token bucket.
//
// Arguments:
//
// - availableRU is the amount of Request Units that the tenant can consume at
// will. Also known as "burst RUs".
//
// - refillRate is the amount of Request Units per second that the tenant
// receives.
//
// - maxBurstRU is the maximum amount of Request Units that can be accumulated
// from the refill rate, or 0 if there is no limit.
//
// - asOf is a timestamp; the reconfiguration request is assumed to be based on
// the consumption at that time. This timestamp is used to compensate for any
// refill that would have happened in the meantime.
//
// - asOfConsumedRequestUnits is the total number of consumed RUs based on
// which the reconfiguration values were calculated (i.e. at the asOf time).
// It is used to adjust availableRU with the consumption that happened in the
// meantime.
//
// - now is the current time.
//
// - currentConsumedRequestUnits is the current total number of consumed RUs.
//
func (s *State) Reconfigure(
availableRU float64,
refillRate float64,
maxBurstRU float64,
asOf time.Time,
asOfConsumedRequestUnits float64,
now time.Time,
currentConsumedRequestUnits float64,
) {
// TODO(radu): adjust available RUs according to asOf and asOfConsumedUnits
// and add tests.
s.RUCurrent = availableRU
s.RURefillRate = refillRate
s.RUBurstLimit = maxBurstRU
}
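// A sketch of where the surrounding edit hunks take this method: a context and
// tenant ID are threaded through the signature and the new settings are logged.
// This is an assumed shape pieced together from the hunk metadata in this
// document, not the final upstream code:
//
//	func (s *State) Reconfigure(
//		ctx context.Context,
//		tenantID roachpb.TenantID,
//		availableRU float64,
//		refillRate float64,
//		maxBurstRU float64,
//		asOf time.Time,
//		asOfConsumedRequestUnits float64,
//		now time.Time,
//		currentConsumedRequestUnits float64,
//	) {
//		// ... same assignments as above ...
//		log.Infof(
//			ctx, "token bucket for tenant %s reconfigured: available=%g refill-rate=%g burst-limit=%g",
//			tenantID.String(), s.RUCurrent, s.RURefillRate, s.RUBurstLimit,
//		)
//	}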
| pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go | 1 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.9979569911956787,
0.32648614048957825,
0.00017002665845211595,
0.06086511164903641,
0.4139411151409149
] |
{
"id": 1,
"code_window": [
"//\n",
"// - currentConsumedRequestUnits is the current total number of consumed RUs.\n",
"//\n",
"func (s *State) Reconfigure(\n",
"\tavailableRU float64,\n",
"\trefillRate float64,\n",
"\tmaxBurstRU float64,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tctx context.Context,\n",
"\ttenantID roachpb.TenantID,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 156
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package bank_test
import (
"os"
"testing"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/security/securitytest"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
)
func TestMain(m *testing.M) {
security.SetAssetLoader(securitytest.EmbeddedAssets)
randutil.SeedForTests()
serverutils.InitTestServerFactory(server.TestServerFactory)
serverutils.InitTestClusterFactory(testcluster.TestClusterFactory)
os.Exit(m.Run())
}
//go:generate ../../util/leaktest/add-leaktest.sh *_test.go
| pkg/workload/bank/main_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.00017733851564116776,
0.0001721254229778424,
0.00016933628648985177,
0.0001709134376142174,
0.0000031116810532694217
] |
{
"id": 1,
"code_window": [
"//\n",
"// - currentConsumedRequestUnits is the current total number of consumed RUs.\n",
"//\n",
"func (s *State) Reconfigure(\n",
"\tavailableRU float64,\n",
"\trefillRate float64,\n",
"\tmaxBurstRU float64,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tctx context.Context,\n",
"\ttenantID roachpb.TenantID,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 156
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import * as React from "react";
import classNames from "classnames/bind";
import styles from "./text.module.scss";
export interface TextProps {
textType?: TextTypes;
disabled?: boolean;
children: React.ReactNode;
className?: string;
noWrap?: boolean;
}
export enum TextTypes {
Heading1,
Heading2,
Heading3,
Heading4,
Heading5,
Heading6,
Body,
BodyStrong,
Caption,
CaptionStrong,
Code,
}
const getClassByTextType = (textType: TextTypes) => {
switch (textType) {
case TextTypes.Heading1:
return "text--heading-1";
case TextTypes.Heading2:
return "text--heading-2";
case TextTypes.Heading3:
return "text--heading-3";
case TextTypes.Heading4:
return "text--heading-4";
case TextTypes.Heading5:
return "text--heading-5";
case TextTypes.Heading6:
return "text--heading-6";
case TextTypes.Body:
return "text--body";
case TextTypes.BodyStrong:
return "text--body-strong";
case TextTypes.Caption:
return "text--caption";
case TextTypes.CaptionStrong:
return "text--caption-strong";
case TextTypes.Code:
return "text--code";
default:
return "text--body";
}
};
const cx = classNames.bind(styles);
const getElementByTextType = (textType: TextTypes) => {
switch (textType) {
case TextTypes.Heading1:
return "h1";
case TextTypes.Heading2:
return "h2";
case TextTypes.Heading3:
return "h3";
case TextTypes.Heading4:
return "h4";
case TextTypes.Heading5:
return "h5";
case TextTypes.Heading6:
return "h6";
case TextTypes.Body:
case TextTypes.BodyStrong:
case TextTypes.Caption:
case TextTypes.CaptionStrong:
case TextTypes.Code:
default:
return "span";
}
};
export function Text(props: TextProps) {
const { textType, disabled, noWrap, className } = props;
const textTypeClass = cx(
"text",
getClassByTextType(textType),
{
"text--disabled": disabled,
"text--no-wrap": noWrap,
},
className,
);
const elementName = getElementByTextType(textType);
const componentProps = {
className: textTypeClass,
};
return React.createElement(elementName, componentProps, props.children);
}
Text.defaultProps = {
textType: TextTypes.Body,
disabled: false,
className: "",
noWrap: false,
};
| pkg/ui/workspaces/cluster-ui/src/text/text.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.00017734493303578347,
0.00017270214448217303,
0.00016474891162943095,
0.00017291225958615541,
0.000003348627160448814
] |
{
"id": 1,
"code_window": [
"//\n",
"// - currentConsumedRequestUnits is the current total number of consumed RUs.\n",
"//\n",
"func (s *State) Reconfigure(\n",
"\tavailableRU float64,\n",
"\trefillRate float64,\n",
"\tmaxBurstRU float64,\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tctx context.Context,\n",
"\ttenantID roachpb.TenantID,\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 156
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracing_test
import (
"os"
"testing"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/security/securitytest"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
)
func TestMain(m *testing.M) {
security.SetAssetLoader(securitytest.EmbeddedAssets)
serverutils.InitTestServerFactory(server.TestServerFactory)
os.Exit(m.Run())
}
| pkg/util/tracing/main_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.00017739228496793658,
0.00017129439220298082,
0.00016769352077972144,
0.00016879737086128443,
0.0000043353466026019305
] |
{
"id": 2,
"code_window": [
"\ts.RUCurrent = availableRU\n",
"\ts.RURefillRate = refillRate\n",
"\ts.RUBurstLimit = maxBurstRU\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tlog.Infof(\n",
"\t\tctx, \"token bucket for tenant %s reconfigured: available=%g refill-rate=%g burst-limit=%g\",\n",
"\t\ttenantID.String(), s.RUCurrent, s.RURefillRate, s.RUBurstLimit,\n",
"\t)\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 169
} | // Copyright 2021 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package tenantcostserver
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/errors"
)
// ReconfigureTokenBucket updates a tenant's token bucket settings. It is part
// of the TenantUsageServer interface; see that for more details.
func (s *instance) ReconfigureTokenBucket(
ctx context.Context,
txn *kv.Txn,
tenantID roachpb.TenantID,
availableRU float64,
refillRate float64,
maxBurstRU float64,
asOf time.Time,
asOfConsumedRequestUnits float64,
) error {
if err := s.checkTenantID(ctx, txn, tenantID); err != nil {
return err
}
h := makeSysTableHelper(ctx, s.executor, txn, tenantID)
state, err := h.readTenantState()
if err != nil {
return err
}
now := s.timeSource.Now()
state.update(now)
state.Bucket.Reconfigure(
availableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,
now, state.Consumption.RU,
)
if err := h.updateTenantState(state); err != nil {
return err
}
return nil
}
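// Per the "after_edit" hunk that accompanies this file elsewhere in this
// document, the state.Bucket.Reconfigure call above is later updated to pass
// the context and tenant ID through as well, roughly:
//
//	state.Bucket.Reconfigure(
//		ctx, tenantID, availableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits,
//		now, state.Consumption.RU,
//	)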
// checkTenantID verifies that the tenant exists and is active.
func (s *instance) checkTenantID(
ctx context.Context, txn *kv.Txn, tenantID roachpb.TenantID,
) error {
row, err := s.executor.QueryRowEx(
ctx, "check-tenant", txn, sessiondata.NodeUserSessionDataOverride,
`SELECT active FROM system.tenants WHERE id = $1`, tenantID.ToUint64(),
)
if err != nil {
return err
}
if row == nil {
return pgerror.Newf(pgcode.UndefinedObject, "tenant %q does not exist", tenantID)
}
if active := *row[0].(*tree.DBool); !active {
return errors.Errorf("tenant %q is not active", tenantID)
}
return nil
}
| pkg/ccl/multitenantccl/tenantcostserver/configure.go | 1 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.036767374724149704,
0.005462902598083019,
0.0001690102944849059,
0.0007638296810910106,
0.011878320015966892
] |
{
"id": 2,
"code_window": [
"\ts.RUCurrent = availableRU\n",
"\ts.RURefillRate = refillRate\n",
"\ts.RUBurstLimit = maxBurstRU\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tlog.Infof(\n",
"\t\tctx, \"token bucket for tenant %s reconfigured: available=%g refill-rate=%g burst-limit=%g\",\n",
"\t\ttenantID.String(), s.RUCurrent, s.RURefillRate, s.RUBurstLimit,\n",
"\t)\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 169
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage
import (
"bytes"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
)
// RowCounter is a helper that counts how many distinct rows appear in the KVs
// that it is shown via `Count`. Note: the `DataSize` field of the BulkOpSummary
// is *not* populated by this and should be set separately.
type RowCounter struct {
roachpb.BulkOpSummary
prev roachpb.Key
}
// Count examines each key passed to it and increments the running count when it
// sees a key that belongs to a new row.
func (r *RowCounter) Count(key roachpb.Key) error {
// EnsureSafeSplitKey is usually used to avoid splitting a row across ranges,
// by returning the row's key prefix.
// We reuse it here to count "rows" by counting when it changes.
// Non-SQL keys are returned unchanged or may error -- we ignore them, since
// non-SQL keys are obviously thus not SQL rows.
//
// TODO(ajwerner): provide a separate mechanism to determine whether the key
// is a valid SQL key which explicitly indicates whether the key is valid as
// a split key independent of an error. See #43423.
row, err := keys.EnsureSafeSplitKey(key)
if err != nil || len(key) == len(row) {
// TODO(ajwerner): Determine which errors should be ignored and only
// ignore those.
return nil //nolint:returnerrcheck
}
// no change in key prefix => no new row.
if bytes.Equal(row, r.prev) {
return nil
}
r.prev = append(r.prev[:0], row...)
rem, _, err := keys.DecodeTenantPrefix(row)
if err != nil {
return err
}
_, tableID, indexID, err := keys.DecodeTableIDIndexID(rem)
if err != nil {
return err
}
if r.EntryCounts == nil {
r.EntryCounts = make(map[uint64]int64)
}
r.EntryCounts[roachpb.BulkOpSummaryID(uint64(tableID), uint64(indexID))]++
if indexID == 1 {
r.DeprecatedRows++
} else {
r.DeprecatedIndexEntries++
}
return nil
}
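// Illustrative usage sketch (not part of the upstream file): feed a batch of
// keys through the counter and read back the per-index entry counts it
// accumulates in the embedded BulkOpSummary.
func exampleCountRows(kvKeys []roachpb.Key) (roachpb.BulkOpSummary, error) {
	var rc RowCounter
	for _, k := range kvKeys {
		if err := rc.Count(k); err != nil {
			return roachpb.BulkOpSummary{}, err
		}
	}
	return rc.BulkOpSummary, nil
}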
| pkg/storage/row_counter.go | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.000178179529029876,
0.00017210943042300642,
0.00016825857164803892,
0.0001709699718048796,
0.0000034169993341492955
] |
{
"id": 2,
"code_window": [
"\ts.RUCurrent = availableRU\n",
"\ts.RURefillRate = refillRate\n",
"\ts.RUBurstLimit = maxBurstRU\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tlog.Infof(\n",
"\t\tctx, \"token bucket for tenant %s reconfigured: available=%g refill-rate=%g burst-limit=%g\",\n",
"\t\ttenantID.String(), s.RUCurrent, s.RURefillRate, s.RUBurstLimit,\n",
"\t)\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 169
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package opgen
import (
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
)
func init() {
opRegistry.register((*scpb.Column)(nil),
toPublic(
scpb.Status_ABSENT,
to(scpb.Status_DELETE_ONLY,
minPhase(scop.PreCommitPhase),
emit(func(this *scpb.Column) scop.Op {
return &scop.MakeAddedColumnDeleteOnly{
TableID: this.TableID,
ColumnID: this.ColumnID,
FamilyName: this.FamilyName,
FamilyID: this.FamilyID,
ColumnType: this.Type,
Nullable: this.Nullable,
DefaultExpr: this.DefaultExpr,
OnUpdateExpr: this.OnUpdateExpr,
Hidden: this.Hidden,
Inaccessible: this.Inaccessible,
GeneratedAsIdentityType: this.GeneratedAsIdentityType,
GeneratedAsIdentitySequenceOption: this.GeneratedAsIdentitySequenceOption,
UsesSequenceIDs: this.UsesSequenceIDs,
ComputerExpr: this.ComputerExpr,
PgAttributeNum: this.PgAttributeNum,
SystemColumnKind: this.SystemColumnKind,
Virtual: this.Virtual,
}
}),
emit(func(this *scpb.Column, ts scpb.TargetState) scop.Op {
return newLogEventOp(this, ts)
}),
),
to(scpb.Status_DELETE_AND_WRITE_ONLY,
minPhase(scop.PostCommitPhase),
emit(func(this *scpb.Column) scop.Op {
return &scop.MakeAddedColumnDeleteAndWriteOnly{
TableID: this.TableID,
ColumnID: this.ColumnID,
}
}),
),
to(scpb.Status_PUBLIC,
emit(func(this *scpb.Column) scop.Op {
return &scop.MakeColumnPublic{
TableID: this.TableID,
ColumnID: this.ColumnID,
}
}),
),
),
toAbsent(
scpb.Status_PUBLIC,
to(scpb.Status_DELETE_AND_WRITE_ONLY,
emit(func(this *scpb.Column) scop.Op {
return &scop.MakeDroppedColumnDeleteAndWriteOnly{
TableID: this.TableID,
ColumnID: this.ColumnID,
}
}),
emit(func(this *scpb.Column, ts scpb.TargetState) scop.Op {
return newLogEventOp(this, ts)
}),
),
to(scpb.Status_DELETE_ONLY,
minPhase(scop.PostCommitPhase),
revertible(false),
emit(func(this *scpb.Column) scop.Op {
return &scop.MakeDroppedColumnDeleteOnly{
TableID: this.TableID,
ColumnID: this.ColumnID,
}
}),
),
to(scpb.Status_ABSENT,
emit(func(this *scpb.Column) scop.Op {
return &scop.MakeColumnAbsent{
TableID: this.TableID,
ColumnID: this.ColumnID,
}
}),
),
),
)
}
| pkg/sql/schemachanger/scplan/internal/opgen/opgen_column.go | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.00017828098498284817,
0.00016968925774563104,
0.00016330438666045666,
0.00016846567450556904,
0.0000044365251596900634
] |
{
"id": 2,
"code_window": [
"\ts.RUCurrent = availableRU\n",
"\ts.RURefillRate = refillRate\n",
"\ts.RUBurstLimit = maxBurstRU\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tlog.Infof(\n",
"\t\tctx, \"token bucket for tenant %s reconfigured: available=%g refill-rate=%g burst-limit=%g\",\n",
"\t\ttenantID.String(), s.RUCurrent, s.RURefillRate, s.RUBurstLimit,\n",
"\t)\n"
],
"file_path": "pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket/tenant_token_bucket.go",
"type": "add",
"edit_start_line_idx": 169
} | {6ba7b810-9dad-11d1-80b4-00c04fd430c8}f | pkg/util/uuid/testdata/corpus/seed_invalid_5 | 0 | https://github.com/cockroachdb/cockroach/commit/486df176ad8a89c62035b2c96d47263e720faba6 | [
0.00016795545525383204,
0.00016795545525383204,
0.00016795545525383204,
0.00016795545525383204,
0
] |
{
"id": 0,
"code_window": [
"\t})\n",
"}\n",
"\n",
"func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"latest\", \"vaulttestdb\")\n",
"\tdefer cleanup()\n",
"\n",
"\tuc := userCreator(func(t *testing.T, username, password string) {\n",
"\t\ttestCreateDBUser(t, connURL, \"vaulttestdb\", username, password)\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"5.0.10\", \"vaulttestdb\")\n"
],
"file_path": "builtin/logical/database/rotation_test.go",
"type": "replace",
"edit_start_line_idx": 710
} | package mongodb
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
func TestMongoDB_Initialize(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
config := map[string]interface{}{
"connection_url": connURL,
}
// Make a copy since the original map could be modified by the Initialize call
expectedConfig := copyConfig(config)
req := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
resp := dbtesting.AssertInitialize(t, db, req)
if !reflect.DeepEqual(resp.Config, expectedConfig) {
t.Fatalf("Actual config: %#v\nExpected config: %#v", resp.Config, expectedConfig)
}
if !db.Initialized {
t.Fatal("Database should be initialized")
}
}
func TestNewUser_usernameTemplate(t *testing.T) {
type testCase struct {
usernameTemplate string
newUserReq dbplugin.NewUserRequest
expectedUsernameRegex string
}
tests := map[string]testCase{
"default username template": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"default username template with invalid chars": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "a.bad.account",
RoleName: "a.bad.role",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-a-bad-account-a-bad-role-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"custom username template": {
usernameTemplate: "{{random 2 | uppercase}}_{{unix_time}}_{{.RoleName | uppercase}}_{{.DisplayName | uppercase}}",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"username_template": test.usernameTemplate,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
ctx := context.Background()
newUserResp, err := db.NewUser(ctx, test.newUserReq)
require.NoError(t, err)
require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
assertCredsExist(t, newUserResp.Username, test.newUserReq.Password, connURL)
})
}
}
func TestMongoDB_CreateUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_CreateUser_writeConcern(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"write_concern": `{ "wmode": "majority", "wtimeout": 5000 }`,
},
VerifyConnection: true,
}
db := new()
defer dbtesting.AssertClose(t, db)
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_DeleteUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
// Test default revocation statement
delReq := dbplugin.DeleteUserRequest{
Username: createResp.Username,
}
dbtesting.AssertDeleteUser(t, db, delReq)
assertCredsDoNotExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_UpdateUser_Password(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
// The docker test method PrepareTestContainer defaults to a database "test"
// if none is provided
connURL = connURL + "/test"
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
// create the database user in advance, and test the connection
dbUser := "testmongouser"
startingPassword := "password"
createDBUser(t, connURL, "test", dbUser, startingPassword)
newPassword := "myreallysecurecredentials"
updateReq := dbplugin.UpdateUserRequest{
Username: dbUser,
Password: &dbplugin.ChangePassword{
NewPassword: newPassword,
},
}
dbtesting.AssertUpdateUser(t, db, updateReq)
assertCredsExist(t, dbUser, newPassword, connURL)
}
func TestGetTLSAuth(t *testing.T) {
ca := certhelpers.NewCert(t,
certhelpers.CommonName("certificate authority"),
certhelpers.IsCA(true),
certhelpers.SelfSign(),
)
cert := certhelpers.NewCert(t,
certhelpers.CommonName("test cert"),
certhelpers.Parent(ca),
)
type testCase struct {
username string
tlsCAData []byte
tlsKeyData []byte
expectOpts *options.ClientOptions
expectErr bool
}
tests := map[string]testCase{
"no TLS data set": {
expectOpts: nil,
expectErr: false,
},
"bad CA": {
tlsCAData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"bad key": {
tlsKeyData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"good ca": {
tlsCAData: cert.Pem,
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
RootCAs: appendToCertPool(t, x509.NewCertPool(), cert.Pem),
},
),
expectErr: false,
},
"good key": {
username: "unittest",
tlsKeyData: cert.CombinedPEM(),
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
Certificates: []tls.Certificate{cert.TLSCert},
},
).
SetAuth(options.Credential{
AuthMechanism: "MONGODB-X509",
Username: "unittest",
}),
expectErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
c := new()
c.Username = test.username
c.TLSCAData = test.tlsCAData
c.TLSCertificateKeyData = test.tlsKeyData
actual, err := c.getTLSAuth()
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
assertDeepEqual(t, test.expectOpts, actual)
})
}
}
func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.CertPool {
t.Helper()
ok := pool.AppendCertsFromPEM(caPem)
if !ok {
t.Fatalf("Unable to append cert to cert pool")
}
return pool
}
var cmpClientOptionsOpts = cmp.Options{
cmp.AllowUnexported(options.ClientOptions{}),
cmp.AllowUnexported(tls.Config{}),
cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}),
// 'lazyCerts' has a func field which can't be compared.
cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
cmp.AllowUnexported(x509.CertPool{}),
}
// Need a special comparison for ClientOptions because reflect.DeepEquals won't work in Go 1.16.
// See: https://github.com/golang/go/issues/45891
func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) {
t.Helper()
if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
func createDBUser(t testing.TB, connURL, db, username, password string) {
t.Helper()
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatal(err)
}
createUserCmd := &createUserCommand{
Username: username,
Password: password,
Roles: []interface{}{},
}
result := client.Database(db).RunCommand(ctx, createUserCmd, nil)
if result.Err() != nil {
t.Fatalf("failed to create user in mongodb: %s", result.Err())
}
assertCredsExist(t, username, password, connURL)
}
func assertCredsExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatalf("Failed to connect to mongo: %s", err)
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
t.Fatalf("Failed to ping mongo with user %q: %s", username, err)
}
}
func assertCredsDoNotExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
return // Creds don't exist as expected
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
return // Creds don't exist as expected
}
t.Fatalf("User %q exists and was able to authenticate", username)
}
func copyConfig(config map[string]interface{}) map[string]interface{} {
newConfig := map[string]interface{}{}
for k, v := range config {
newConfig[k] = v
}
return newConfig
}
| plugins/database/mongodb/mongodb_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9961909055709839,
0.29806721210479736,
0.00016724670422263443,
0.0005325002712197602,
0.4222652316093445
] |
{
"id": 0,
"code_window": [
"\t})\n",
"}\n",
"\n",
"func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"latest\", \"vaulttestdb\")\n",
"\tdefer cleanup()\n",
"\n",
"\tuc := userCreator(func(t *testing.T, username, password string) {\n",
"\t\ttestCreateDBUser(t, connURL, \"vaulttestdb\", username, password)\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"5.0.10\", \"vaulttestdb\")\n"
],
"file_path": "builtin/logical/database/rotation_test.go",
"type": "replace",
"edit_start_line_idx": 710
} | package cache
import (
"bufio"
"bytes"
"context"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-secure-stdlib/base62"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/cache/cacheboltdb"
"github.com/hashicorp/vault/command/agent/cache/cachememdb"
"github.com/hashicorp/vault/helper/namespace"
nshelper "github.com/hashicorp/vault/helper/namespace"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/cryptoutil"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/locksutil"
"github.com/hashicorp/vault/sdk/logical"
gocache "github.com/patrickmn/go-cache"
"go.uber.org/atomic"
)
const (
vaultPathTokenCreate = "/v1/auth/token/create"
vaultPathTokenRevoke = "/v1/auth/token/revoke"
vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self"
vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor"
vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan"
vaultPathTokenLookup = "/v1/auth/token/lookup"
vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self"
vaultPathTokenRenew = "/v1/auth/token/renew"
vaultPathTokenRenewSelf = "/v1/auth/token/renew-self"
vaultPathLeaseRevoke = "/v1/sys/leases/revoke"
vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force"
vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix"
)
var (
contextIndexID = contextIndex{}
errInvalidType = errors.New("invalid type provided")
revocationPaths = []string{
strings.TrimPrefix(vaultPathTokenRevoke, "/v1"),
strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"),
strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"),
strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"),
strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"),
strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"),
strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"),
}
)
type contextIndex struct{}
type cacheClearRequest struct {
Type string `json:"type"`
Value string `json:"value"`
Namespace string `json:"namespace"`
}
// LeaseCache is an implementation of Proxier that handles
// the caching of responses. It passes the incoming request
// to an underlying Proxier implementation.
type LeaseCache struct {
client *api.Client
proxier Proxier
logger hclog.Logger
db *cachememdb.CacheMemDB
baseCtxInfo *cachememdb.ContextInfo
l *sync.RWMutex
// idLocks is used during cache lookup to ensure that identical requests made
// in parallel won't trigger multiple renewal goroutines.
idLocks []*locksutil.LockEntry
// inflightCache keeps track of inflight requests
inflightCache *gocache.Cache
// ps is the persistent storage for tokens and leases
ps *cacheboltdb.BoltStorage
// shuttingDown is used to determine if cache needs to be evicted or not
// when the context is cancelled
shuttingDown atomic.Bool
}
// LeaseCacheConfig is the configuration for initializing a new
// LeaseCache.
type LeaseCacheConfig struct {
Client *api.Client
BaseContext context.Context
Proxier Proxier
Logger hclog.Logger
Storage *cacheboltdb.BoltStorage
}
type inflightRequest struct {
// ch is closed by the request that ends up processing the set of
// parallel requests
ch chan struct{}
// remaining is the number of remaining inflight requests that need to
// be processed before this object can be cleaned up
remaining *atomic.Uint64
}
func newInflightRequest() *inflightRequest {
return &inflightRequest{
ch: make(chan struct{}),
remaining: atomic.NewUint64(0),
}
}
// NewLeaseCache creates a new instance of a LeaseCache.
func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) {
if conf == nil {
return nil, errors.New("nil configuration provided")
}
if conf.Proxier == nil || conf.Logger == nil {
return nil, fmt.Errorf("missing configuration required params: %v", conf)
}
if conf.Client == nil {
return nil, fmt.Errorf("nil API client")
}
db, err := cachememdb.New()
if err != nil {
return nil, err
}
// Create a base context for the lease cache layer
baseCtxInfo := cachememdb.NewContextInfo(conf.BaseContext)
return &LeaseCache{
client: conf.Client,
proxier: conf.Proxier,
logger: conf.Logger,
db: db,
baseCtxInfo: baseCtxInfo,
l: &sync.RWMutex{},
idLocks: locksutil.CreateLocks(),
inflightCache: gocache.New(gocache.NoExpiration, gocache.NoExpiration),
ps: conf.Storage,
}, nil
}
// SetShuttingDown is a setter for the shuttingDown field
func (c *LeaseCache) SetShuttingDown(in bool) {
c.shuttingDown.Store(in)
}
// SetPersistentStorage is a setter for the persistent storage field in
// LeaseCache
func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) {
c.ps = storageIn
}
// checkCacheForRequest checks the cache for a particular request based on its
// computed ID. It returns a non-nil *SendResponse if an entry is found.
func (c *LeaseCache) checkCacheForRequest(id string) (*SendResponse, error) {
index, err := c.db.Get(cachememdb.IndexNameID, id)
if err != nil {
return nil, err
}
if index == nil {
return nil, nil
}
// Cached request is found, deserialize the response
reader := bufio.NewReader(bytes.NewReader(index.Response))
resp, err := http.ReadResponse(reader, nil)
if err != nil {
c.logger.Error("failed to deserialize response", "error", err)
return nil, err
}
sendResp, err := NewSendResponse(&api.Response{Response: resp}, index.Response)
if err != nil {
c.logger.Error("failed to create new send response", "error", err)
return nil, err
}
sendResp.CacheMeta.Hit = true
respTime, err := http.ParseTime(resp.Header.Get("Date"))
if err != nil {
c.logger.Error("failed to parse cached response date", "error", err)
return nil, err
}
sendResp.CacheMeta.Age = time.Since(respTime)
return sendResp, nil
}
// Send performs a cache lookup on the incoming request. If it's a cache hit,
// it will return the cached response, otherwise it will delegate to the
// underlying Proxier and cache the received response.
func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
// Compute the index ID
id, err := computeIndexID(req)
if err != nil {
c.logger.Error("failed to compute cache key", "error", err)
return nil, err
}
// Check the inflight cache to see if there are other inflight requests
// of the same kind, based on the computed ID. If so, we increment a counter
var inflight *inflightRequest
defer func() {
// Cleanup on the cache if there are no remaining inflight requests.
// This is the last step, so we defer the call first
if inflight != nil && inflight.remaining.Load() == 0 {
c.inflightCache.Delete(id)
}
}()
idLock := locksutil.LockForKey(c.idLocks, id)
// Briefly grab an ID-based lock in here to emulate a load-or-store behavior
// and prevent concurrent cacheable requests from being proxied twice if
// they both miss the cache due to it being clean when peeking the cache
// entry.
idLock.Lock()
inflightRaw, found := c.inflightCache.Get(id)
if found {
idLock.Unlock()
inflight = inflightRaw.(*inflightRequest)
inflight.remaining.Inc()
defer inflight.remaining.Dec()
// If found it means that there's an inflight request being processed.
// We wait until that's finished before proceeding further.
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-inflight.ch:
}
} else {
inflight = newInflightRequest()
inflight.remaining.Inc()
defer inflight.remaining.Dec()
c.inflightCache.Set(id, inflight, gocache.NoExpiration)
idLock.Unlock()
// Signal that the processing request is done
defer close(inflight.ch)
}
// Check if the response for this request is already in the cache
cachedResp, err := c.checkCacheForRequest(id)
if err != nil {
return nil, err
}
if cachedResp != nil {
c.logger.Debug("returning cached response", "path", req.Request.URL.Path)
return cachedResp, nil
}
c.logger.Debug("forwarding request", "method", req.Request.Method, "path", req.Request.URL.Path)
// Pass the request down and get a response
resp, err := c.proxier.Send(ctx, req)
if err != nil {
return resp, err
}
// If this is a non-2xx or if the returned response does not contain JSON payload,
// we skip caching
if resp.Response.StatusCode >= 300 || resp.Response.Header.Get("Content-Type") != "application/json" {
return resp, err
}
// Get the namespace from the request header
namespace := req.Request.Header.Get(consts.NamespaceHeaderName)
// We need to populate an empty value since go-memdb will skip over indexes
// that contain empty values.
if namespace == "" {
namespace = "root/"
}
// Build the index to cache based on the response received
index := &cachememdb.Index{
ID: id,
Namespace: namespace,
RequestPath: req.Request.URL.Path,
LastRenewed: time.Now().UTC(),
}
secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody))
if err != nil {
c.logger.Error("failed to parse response as secret", "error", err)
return nil, err
}
isRevocation, err := c.handleRevocationRequest(ctx, req, resp)
if err != nil {
c.logger.Error("failed to process the response", "error", err)
return nil, err
}
// If this is a revocation request, do not go through cache logic.
if isRevocation {
return resp, nil
}
// Fast path for responses with no secrets
if secret == nil {
c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path)
return resp, nil
}
// Short-circuit if the secret is not renewable
tokenRenewable, err := secret.TokenIsRenewable()
if err != nil {
c.logger.Error("failed to parse renewable param", "error", err)
return nil, err
}
if !secret.Renewable && !tokenRenewable {
c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path)
return resp, nil
}
var renewCtxInfo *cachememdb.ContextInfo
switch {
case secret.LeaseID != "":
c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path)
entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token)
if err != nil {
return nil, err
}
// If the lease belongs to a token that is not managed by the agent,
// return the response without caching it.
if entry == nil {
c.logger.Debug("pass-through lease response; token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path)
return resp, nil
}
// Derive a context for renewal using the token's context
renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx)
index.Lease = secret.LeaseID
index.LeaseToken = req.Token
index.Type = cacheboltdb.LeaseType
case secret.Auth != nil:
c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path)
// Check if this token creation request resulted in a non-orphan token, and if so
// correctly set the parentCtx to the request's token context.
var parentCtx context.Context
if !secret.Auth.Orphan {
entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token)
if err != nil {
return nil, err
}
// If parent token is not managed by the agent, child shouldn't be
// either.
if entry == nil {
c.logger.Debug("pass-through auth response; parent token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path)
return resp, nil
}
c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path)
parentCtx = entry.RenewCtxInfo.Ctx
index.TokenParent = req.Token
}
renewCtxInfo = c.createCtxInfo(parentCtx)
index.Token = secret.Auth.ClientToken
index.TokenAccessor = secret.Auth.Accessor
index.Type = cacheboltdb.LeaseType
default:
// We shouldn't be hitting this, but will err on the side of caution and
// simply proxy.
c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path)
return resp, nil
}
// Serialize the response to store it in the cached index
var respBytes bytes.Buffer
err = resp.Response.Write(&respBytes)
if err != nil {
c.logger.Error("failed to serialize response", "error", err)
return nil, err
}
// Reset the response body for upper layers to read
if resp.Response.Body != nil {
resp.Response.Body.Close()
}
resp.Response.Body = ioutil.NopCloser(bytes.NewReader(resp.ResponseBody))
// Set the index's Response
index.Response = respBytes.Bytes()
// Store the index ID in the lifetime watcher context
renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID)
// Store the lifetime watcher context in the index
index.RenewCtxInfo = &cachememdb.ContextInfo{
Ctx: renewCtx,
CancelFunc: renewCtxInfo.CancelFunc,
DoneCh: renewCtxInfo.DoneCh,
}
// Add extra information necessary for restoring from persisted cache
index.RequestMethod = req.Request.Method
index.RequestToken = req.Token
index.RequestHeader = req.Request.Header
// Store the index in the cache
c.logger.Debug("storing response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path)
err = c.Set(ctx, index)
if err != nil {
c.logger.Error("failed to cache the proxied response", "error", err)
return nil, err
}
// Start renewing the secret in the response
go c.startRenewing(renewCtx, index, req, secret)
return resp, nil
}
func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo {
if ctx == nil {
c.l.RLock()
ctx = c.baseCtxInfo.Ctx
c.l.RUnlock()
}
return cachememdb.NewContextInfo(ctx)
}
func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) {
defer func() {
id := ctx.Value(contextIndexID).(string)
if c.shuttingDown.Load() {
c.logger.Trace("not evicting index from cache during shutdown", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path)
return
}
c.logger.Debug("evicting index from cache", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path)
err := c.Evict(index)
if err != nil {
c.logger.Error("failed to evict index", "id", id, "error", err)
return
}
}()
client, err := c.client.Clone()
if err != nil {
c.logger.Error("failed to create API client in the lifetime watcher", "error", err)
return
}
client.SetToken(req.Token)
client.SetHeaders(req.Request.Header)
watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{
Secret: secret,
})
if err != nil {
c.logger.Error("failed to create secret lifetime watcher", "error", err)
return
}
c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path)
go watcher.Start()
defer watcher.Stop()
for {
select {
case <-ctx.Done():
// This is the case which captures context cancellations from token
// and leases. Since all the contexts are derived from the agent's
// context, this will also cover the shutdown scenario.
c.logger.Debug("context cancelled; stopping lifetime watcher", "path", req.Request.URL.Path)
return
case err := <-watcher.DoneCh():
// This case covers renewal completion and renewal errors
if err != nil {
c.logger.Error("failed to renew secret", "error", err)
return
}
c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path)
return
case <-watcher.RenewCh():
c.logger.Debug("secret renewed", "path", req.Request.URL.Path)
if c.ps != nil {
if err := c.updateLastRenewed(ctx, index, time.Now().UTC()); err != nil {
c.logger.Warn("not able to update lastRenewed time for cached index", "id", index.ID)
}
}
case <-index.RenewCtxInfo.DoneCh:
// This case signals the renewal process to shut down and evict
// the cache entry. This is triggered when a specific secret
// renewal needs to be killed without affecting any of the derived
// context renewals.
c.logger.Debug("done channel closed")
return
}
}
}
func (c *LeaseCache) updateLastRenewed(ctx context.Context, index *cachememdb.Index, t time.Time) error {
idLock := locksutil.LockForKey(c.idLocks, index.ID)
idLock.Lock()
defer idLock.Unlock()
getIndex, err := c.db.Get(cachememdb.IndexNameID, index.ID)
if err != nil {
return err
}
index.LastRenewed = t
if err := c.Set(ctx, getIndex); err != nil {
return err
}
return nil
}
// computeIndexID results in a value that uniquely identifies a request
// received by the agent. It does so by hashing (BLAKE2b-256) the serialized request
// object containing the request path, query parameters and body parameters.
func computeIndexID(req *SendRequest) (string, error) {
var b bytes.Buffer
cloned := req.Request.Clone(context.Background())
cloned.Header.Del(vaulthttp.VaultIndexHeaderName)
cloned.Header.Del(vaulthttp.VaultForwardHeaderName)
cloned.Header.Del(vaulthttp.VaultInconsistentHeaderName)
// Serialize the request
if err := cloned.Write(&b); err != nil {
return "", fmt.Errorf("failed to serialize request: %v", err)
}
// Reset the request body after it has been closed by Write
req.Request.Body = ioutil.NopCloser(bytes.NewReader(req.RequestBody))
// Append req.Token into the byte slice. This is needed since auto-auth'ed
// requests set the token directly into SendRequest.Token
if _, err := b.Write([]byte(req.Token)); err != nil {
return "", fmt.Errorf("failed to write token to hash input: %w", err)
}
return hex.EncodeToString(cryptoutil.Blake2b256Hash(string(b.Bytes()))), nil
}
// HandleCacheClear returns a handlerFunc that can perform cache clearing operations.
func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Only handle POST/PUT requests
switch r.Method {
case http.MethodPost:
case http.MethodPut:
default:
return
}
req := new(cacheClearRequest)
if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil {
if err == io.EOF {
err = errors.New("empty JSON provided")
}
logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err))
return
}
c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value)
in, err := parseCacheClearInput(req)
if err != nil {
c.logger.Error("unable to parse clear input", "error", err)
logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err))
return
}
if err := c.handleCacheClear(ctx, in); err != nil {
// Default to 500 on error, unless the user provided an invalid type,
// which would then be a 400.
httpStatus := http.StatusInternalServerError
if err == errInvalidType {
httpStatus = http.StatusBadRequest
}
logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err))
return
}
return
})
}
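// The function below is an illustrative sketch and not part of the original
// handler wiring: it shows how a client might call the endpoint served by
// HandleCacheClear. The listener address and the "/agent/v1/cache-clear"
// path are assumptions; adjust them to the agent's actual cache listener
// configuration.
func exampleCacheClearCall() error {
	// "all" cancels the base context, which stops every lifetime watcher and
	// flushes both the in-memory and (if enabled) persistent caches.
	body := bytes.NewBufferString(`{"type": "all"}`)
	resp, err := http.Post("http://127.0.0.1:8100/agent/v1/cache-clear", "application/json", body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status from cache-clear: %s", resp.Status)
	}
	return nil
}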
func (c *LeaseCache) handleCacheClear(ctx context.Context, in *cacheClearInput) error {
if in == nil {
return errors.New("no value(s) provided to clear corresponding cache entries")
}
switch in.Type {
case "request_path":
// For this particular case, we need to ensure that there are 2 provided
// indexers for the proper lookup.
if in.RequestPath == "" {
return errors.New("request path not provided")
}
// The first value provided for this case will be the namespace, but if it's
// an empty value we need to overwrite it with "root/" to ensure proper
// cache lookup.
if in.Namespace == "" {
in.Namespace = "root/"
}
// Find all the cached entries which have the given request path and
// cancel the contexts of all the respective lifetime watchers
indexes, err := c.db.GetByPrefix(cachememdb.IndexNameRequestPath, in.Namespace, in.RequestPath)
if err != nil {
return err
}
for _, index := range indexes {
index.RenewCtxInfo.CancelFunc()
}
case "token":
if in.Token == "" {
return errors.New("token not provided")
}
// Get the context for the given token and cancel its context
index, err := c.db.Get(cachememdb.IndexNameToken, in.Token)
if err != nil {
return err
}
if index == nil {
return nil
}
c.logger.Debug("canceling context of index attached to token")
index.RenewCtxInfo.CancelFunc()
case "token_accessor":
if in.TokenAccessor == "" {
return errors.New("token accessor not provided")
}
// Get the cached index and cancel the corresponding lifetime watcher
// context
index, err := c.db.Get(cachememdb.IndexNameTokenAccessor, in.TokenAccessor)
if err != nil {
return err
}
if index == nil {
return nil
}
c.logger.Debug("canceling context of index attached to accessor")
index.RenewCtxInfo.CancelFunc()
case "lease":
if in.Lease == "" {
return errors.New("lease not provided")
}
// Get the cached index and cancel the corresponding lifetime watcher
// context
index, err := c.db.Get(cachememdb.IndexNameLease, in.Lease)
if err != nil {
return err
}
if index == nil {
return nil
}
c.logger.Debug("canceling context of index attached to accessor")
index.RenewCtxInfo.CancelFunc()
case "all":
// Cancel the base context which triggers all the goroutines to
// stop and evict entries from cache.
c.logger.Debug("canceling base context")
c.l.Lock()
c.baseCtxInfo.CancelFunc()
// Reset the base context
baseCtx, baseCancel := context.WithCancel(ctx)
c.baseCtxInfo = &cachememdb.ContextInfo{
Ctx: baseCtx,
CancelFunc: baseCancel,
}
c.l.Unlock()
// Reset the memdb instance (and persistent storage if enabled)
if err := c.Flush(); err != nil {
return err
}
default:
return errInvalidType
}
c.logger.Debug("successfully cleared matching cache entries")
return nil
}
// handleRevocationRequest checks whether the originating request is a
// revocation request, and if so performs applicable cache cleanups.
// Returns true if this is a revocation request.
func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) {
// Lease and token revocations return 204's on success. Fast-path if that's
// not the case.
if resp.Response.StatusCode != http.StatusNoContent {
return false, nil
}
_, path := deriveNamespaceAndRevocationPath(req)
switch {
case path == vaultPathTokenRevoke:
// Get the token from the request body
jsonBody := map[string]interface{}{}
if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
return false, err
}
tokenRaw, ok := jsonBody["token"]
if !ok {
return false, fmt.Errorf("failed to get token from request body")
}
token, ok := tokenRaw.(string)
if !ok {
return false, fmt.Errorf("expected token in the request body to be string")
}
// Clear the cache entry associated with the token and all the other
// entries belonging to the leases derived from this token.
in := &cacheClearInput{
Type: "token",
Token: token,
}
if err := c.handleCacheClear(ctx, in); err != nil {
return false, err
}
case path == vaultPathTokenRevokeSelf:
// Clear the cache entry associated with the token and all the other
// entries belonging to the leases derived from this token.
in := &cacheClearInput{
Type: "token",
Token: req.Token,
}
if err := c.handleCacheClear(ctx, in); err != nil {
return false, err
}
case path == vaultPathTokenRevokeAccessor:
jsonBody := map[string]interface{}{}
if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
return false, err
}
accessorRaw, ok := jsonBody["accessor"]
if !ok {
return false, fmt.Errorf("failed to get accessor from request body")
}
accessor, ok := accessorRaw.(string)
if !ok {
return false, fmt.Errorf("expected accessor in the request body to be string")
}
in := &cacheClearInput{
Type: "token_accessor",
TokenAccessor: accessor,
}
if err := c.handleCacheClear(ctx, in); err != nil {
return false, err
}
case path == vaultPathTokenRevokeOrphan:
jsonBody := map[string]interface{}{}
if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
return false, err
}
tokenRaw, ok := jsonBody["token"]
if !ok {
return false, fmt.Errorf("failed to get token from request body")
}
token, ok := tokenRaw.(string)
if !ok {
return false, fmt.Errorf("expected token in the request body to be string")
}
// Kill the lifetime watchers of all the leases attached to the revoked
// token
indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token)
if err != nil {
return false, err
}
for _, index := range indexes {
index.RenewCtxInfo.CancelFunc()
}
// Kill the lifetime watchers of the revoked token
index, err := c.db.Get(cachememdb.IndexNameToken, token)
if err != nil {
return false, err
}
if index == nil {
return true, nil
}
// Indicate the lifetime watcher goroutine for this index to return.
// This will not affect the child tokens because the context is not
// getting cancelled.
close(index.RenewCtxInfo.DoneCh)
// Clear the parent references of the revoked token in the entries
// belonging to the child tokens of the revoked token.
indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token)
if err != nil {
return false, err
}
for _, index := range indexes {
index.TokenParent = ""
err = c.db.Set(index)
if err != nil {
c.logger.Error("failed to persist index", "error", err)
return false, err
}
}
case path == vaultPathLeaseRevoke:
// TODO: Should lease present in the URL itself be considered here?
// Get the lease from the request body
jsonBody := map[string]interface{}{}
if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
return false, err
}
leaseIDRaw, ok := jsonBody["lease_id"]
if !ok {
return false, fmt.Errorf("failed to get lease_id from request body")
}
leaseID, ok := leaseIDRaw.(string)
if !ok {
return false, fmt.Errorf("expected lease_id the request body to be string")
}
in := &cacheClearInput{
Type: "lease",
Lease: leaseID,
}
if err := c.handleCacheClear(ctx, in); err != nil {
return false, err
}
case strings.HasPrefix(path, vaultPathLeaseRevokeForce):
// Trim the URL path to get the request path prefix
prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce)
// Get all the cache indexes that use the request path containing the
// prefix and cancel the lifetime watcher context of each.
indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
if err != nil {
return false, err
}
_, tokenNSID := namespace.SplitIDFromString(req.Token)
for _, index := range indexes {
_, leaseNSID := namespace.SplitIDFromString(index.Lease)
// Only evict leases that match the token's namespace
if tokenNSID == leaseNSID {
index.RenewCtxInfo.CancelFunc()
}
}
case strings.HasPrefix(path, vaultPathLeaseRevokePrefix):
// Trim the URL path to get the request path prefix
prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix)
// Get all the cache indexes that use the request path containing the
// prefix and cancel the lifetime watcher context of each.
indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
if err != nil {
return false, err
}
_, tokenNSID := namespace.SplitIDFromString(req.Token)
for _, index := range indexes {
_, leaseNSID := namespace.SplitIDFromString(index.Lease)
// Only evict leases that match the token's namespace
if tokenNSID == leaseNSID {
index.RenewCtxInfo.CancelFunc()
}
}
default:
return false, nil
}
c.logger.Debug("triggered caching eviction from revocation request")
return true, nil
}
// Set stores the index in the cachememdb, and also stores it in the persistent
// cache (if enabled)
func (c *LeaseCache) Set(ctx context.Context, index *cachememdb.Index) error {
if err := c.db.Set(index); err != nil {
return err
}
if c.ps != nil {
plaintext, err := index.Serialize()
if err != nil {
return err
}
if err := c.ps.Set(ctx, index.ID, plaintext, index.Type); err != nil {
return err
}
c.logger.Trace("set entry in persistent storage", "type", index.Type, "path", index.RequestPath, "id", index.ID)
}
return nil
}
// Evict removes an Index from the cachememdb, and also removes it from the
// persistent cache (if enabled)
func (c *LeaseCache) Evict(index *cachememdb.Index) error {
if err := c.db.Evict(cachememdb.IndexNameID, index.ID); err != nil {
return err
}
if c.ps != nil {
if err := c.ps.Delete(index.ID, index.Type); err != nil {
return err
}
c.logger.Trace("deleted item from persistent storage", "id", index.ID)
}
return nil
}
// Flush the cachememdb and persistent cache (if enabled)
func (c *LeaseCache) Flush() error {
if err := c.db.Flush(); err != nil {
return err
}
if c.ps != nil {
c.logger.Trace("clearing persistent storage")
return c.ps.Clear()
}
return nil
}
// Restore loads the cachememdb from the persistent storage passed in. Loads
// tokens first, since restoring a lease's renewal context and watcher requires
// looking up the token in the cachememdb.
func (c *LeaseCache) Restore(ctx context.Context, storage *cacheboltdb.BoltStorage) error {
var errs *multierror.Error
// Process tokens first
tokens, err := storage.GetByType(ctx, cacheboltdb.TokenType)
if err != nil {
errs = multierror.Append(errs, err)
} else {
if err := c.restoreTokens(tokens); err != nil {
errs = multierror.Append(errs, err)
}
}
// Then process leases
leases, err := storage.GetByType(ctx, cacheboltdb.LeaseType)
if err != nil {
errs = multierror.Append(errs, err)
} else {
for _, lease := range leases {
newIndex, err := cachememdb.Deserialize(lease)
if err != nil {
errs = multierror.Append(errs, err)
continue
}
c.logger.Trace("restoring lease", "id", newIndex.ID, "path", newIndex.RequestPath)
// Check if this lease has already expired
expired, err := c.hasExpired(time.Now().UTC(), newIndex)
if err != nil {
c.logger.Warn("failed to check if lease is expired", "id", newIndex.ID, "error", err)
}
if expired {
continue
}
if err := c.restoreLeaseRenewCtx(newIndex); err != nil {
errs = multierror.Append(errs, err)
continue
}
if err := c.db.Set(newIndex); err != nil {
errs = multierror.Append(errs, err)
continue
}
c.logger.Trace("restored lease", "id", newIndex.ID, "path", newIndex.RequestPath)
}
}
return errs.ErrorOrNil()
}
func (c *LeaseCache) restoreTokens(tokens [][]byte) error {
var errors *multierror.Error
for _, token := range tokens {
newIndex, err := cachememdb.Deserialize(token)
if err != nil {
errors = multierror.Append(errors, err)
continue
}
newIndex.RenewCtxInfo = c.createCtxInfo(nil)
if err := c.db.Set(newIndex); err != nil {
errors = multierror.Append(errors, err)
continue
}
c.logger.Trace("restored token", "id", newIndex.ID)
}
return errors.ErrorOrNil()
}
// restoreLeaseRenewCtx re-creates a RenewCtx for an index object and starts
// the watcher goroutine
func (c *LeaseCache) restoreLeaseRenewCtx(index *cachememdb.Index) error {
if index.Response == nil {
return fmt.Errorf("cached response was nil for %s", index.ID)
}
// Parse the secret to determine which type it is
reader := bufio.NewReader(bytes.NewReader(index.Response))
resp, err := http.ReadResponse(reader, nil)
if err != nil {
c.logger.Error("failed to deserialize response", "error", err)
return err
}
secret, err := api.ParseSecret(resp.Body)
if err != nil {
c.logger.Error("failed to parse response as secret", "error", err)
return err
}
var renewCtxInfo *cachememdb.ContextInfo
switch {
case secret.LeaseID != "":
entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken)
if err != nil {
return err
}
if entry == nil {
return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath)
}
// Derive a context for renewal using the token's context
renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx)
case secret.Auth != nil:
var parentCtx context.Context
if !secret.Auth.Orphan {
entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken)
if err != nil {
return err
}
// If parent token is not managed by the agent, child shouldn't be
// either.
if entry == nil {
return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath)
}
c.logger.Debug("setting parent context", "method", index.RequestMethod, "path", index.RequestPath)
parentCtx = entry.RenewCtxInfo.Ctx
}
renewCtxInfo = c.createCtxInfo(parentCtx)
default:
return fmt.Errorf("unknown cached index item: %s", index.ID)
}
renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID)
index.RenewCtxInfo = &cachememdb.ContextInfo{
Ctx: renewCtx,
CancelFunc: renewCtxInfo.CancelFunc,
DoneCh: renewCtxInfo.DoneCh,
}
sendReq := &SendRequest{
Token: index.RequestToken,
Request: &http.Request{
Header: index.RequestHeader,
Method: index.RequestMethod,
URL: &url.URL{
Path: index.RequestPath,
},
},
}
go c.startRenewing(renewCtx, index, sendReq, secret)
return nil
}
// deriveNamespaceAndRevocationPath returns the namespace and relative path for
// revocation paths.
//
// If the path contains a namespace, but it's not a revocation path, it will be
// returned as-is, since there's no way to tell where the namespace ends and
// where the request path begins purely based off a string.
//
// Case 1: /v1/ns1/leases/revoke -> ns1/, /v1/leases/revoke
// Case 2: ns1/ /v1/leases/revoke -> ns1/, /v1/leases/revoke
// Case 3: /v1/ns1/foo/bar -> root/, /v1/ns1/foo/bar
// Case 4: ns1/ /v1/foo/bar -> ns1/, /v1/foo/bar
func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) {
namespace := "root/"
nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName)
if nsHeader != "" {
namespace = nsHeader
}
fullPath := req.Request.URL.Path
nonVersionedPath := strings.TrimPrefix(fullPath, "/v1")
for _, pathToCheck := range revocationPaths {
// We use strings.Contains here for paths that can contain
// vars in the path, e.g. /v1/lease/revoke-prefix/:prefix
i := strings.Index(nonVersionedPath, pathToCheck)
// If there's no match, move on to the next check
if i == -1 {
continue
}
// If the index is 0, this is a relative path with no namespace prepended,
// so we can break early
if i == 0 {
break
}
// We need to turn /ns1 into ns1/, this makes it easy
namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i])
// If it's root, we replace, otherwise we join
if namespace == "root/" {
namespace = namespaceInPath
} else {
namespace = namespace + namespaceInPath
}
return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:])
}
return namespace, fmt.Sprintf("/v1%s", nonVersionedPath)
}
// RegisterAutoAuthToken adds the provided auto-token into the cache. This is
// primarily used to register the auto-auth token and should only be called
// within a sink's WriteToken func.
func (c *LeaseCache) RegisterAutoAuthToken(token string) error {
// Get the token from the cache
oldIndex, err := c.db.Get(cachememdb.IndexNameToken, token)
if err != nil {
return err
}
// If the index is found, just keep it in the cache and ignore the incoming
// token (since they're the same)
if oldIndex != nil {
c.logger.Trace("auto-auth token already exists in cache; no need to store it again")
return nil
}
// The following randomly generated values are required for the index stored by
// the cache, but are not actually used. We use random values to prevent
// accidental access.
id, err := base62.Random(5)
if err != nil {
return err
}
namespace, err := base62.Random(5)
if err != nil {
return err
}
requestPath, err := base62.Random(5)
if err != nil {
return err
}
index := &cachememdb.Index{
ID: id,
Token: token,
Namespace: namespace,
RequestPath: requestPath,
Type: cacheboltdb.TokenType,
}
// Derive a context off of the lease cache's base context
ctxInfo := c.createCtxInfo(nil)
index.RenewCtxInfo = &cachememdb.ContextInfo{
Ctx: ctxInfo.Ctx,
CancelFunc: ctxInfo.CancelFunc,
DoneCh: ctxInfo.DoneCh,
}
// Store the index in the cache
c.logger.Debug("storing auto-auth token into the cache")
err = c.Set(c.baseCtxInfo.Ctx, index)
if err != nil {
c.logger.Error("failed to cache the auto-auth token", "error", err)
return err
}
return nil
}
type cacheClearInput struct {
Type string
RequestPath string
Namespace string
Token string
TokenAccessor string
Lease string
}
func parseCacheClearInput(req *cacheClearRequest) (*cacheClearInput, error) {
if req == nil {
return nil, errors.New("nil request options provided")
}
if req.Type == "" {
return nil, errors.New("no type provided")
}
in := &cacheClearInput{
Type: req.Type,
Namespace: req.Namespace,
}
switch req.Type {
case "request_path":
in.RequestPath = req.Value
case "token":
in.Token = req.Value
case "token_accessor":
in.TokenAccessor = req.Value
case "lease":
in.Lease = req.Value
}
return in, nil
}
func (c *LeaseCache) hasExpired(currentTime time.Time, index *cachememdb.Index) (bool, error) {
reader := bufio.NewReader(bytes.NewReader(index.Response))
resp, err := http.ReadResponse(reader, nil)
if err != nil {
return false, fmt.Errorf("failed to deserialize response: %w", err)
}
secret, err := api.ParseSecret(resp.Body)
if err != nil {
return false, fmt.Errorf("failed to parse response as secret: %w", err)
}
elapsed := currentTime.Sub(index.LastRenewed)
var leaseDuration int
switch {
case secret.LeaseID != "":
leaseDuration = secret.LeaseDuration
case secret.Auth != nil:
leaseDuration = secret.Auth.LeaseDuration
default:
return false, errors.New("secret without lease encountered in expiration check")
}
if int(elapsed.Seconds()) > leaseDuration {
c.logger.Trace("secret has expired", "id", index.ID, "elapsed", elapsed, "lease duration", leaseDuration)
return true, nil
}
return false, nil
}
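// exampleNewLeaseCache is an illustrative sketch and not part of the original
// file: it shows a minimal way to construct a LeaseCache, assuming the caller
// already has a Proxier (in the agent this is the API proxy that forwards
// cache misses to the Vault server). Persistent storage is optional and left
// nil here.
func exampleNewLeaseCache(proxier Proxier) (*LeaseCache, error) {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		return nil, err
	}
	return NewLeaseCache(&LeaseCacheConfig{
		Client:      client,
		BaseContext: context.Background(),
		Proxier:     proxier,
		Logger:      hclog.NewNullLogger(),
	})
}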
| command/agent/cache/lease_cache.go | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0006877348059788346,
0.0001752076204866171,
0.0001590408937772736,
0.00017111604392994195,
0.000045492281060433015
] |
{
"id": 0,
"code_window": [
"\t})\n",
"}\n",
"\n",
"func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"latest\", \"vaulttestdb\")\n",
"\tdefer cleanup()\n",
"\n",
"\tuc := userCreator(func(t *testing.T, username, password string) {\n",
"\t\ttestCreateDBUser(t, connURL, \"vaulttestdb\", username, password)\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"5.0.10\", \"vaulttestdb\")\n"
],
"file_path": "builtin/logical/database/rotation_test.go",
"type": "replace",
"edit_start_line_idx": 710
} | {{! template-lint-disable quotes }}
<WizardContent @headerText="Tools" @glyph="tour">
<WizardSection
@headerText="Lookup wrapped data"
@docText="API: Lookup Data"
@docPath="/api/system/wrapping-lookup.html"
@instructions='Paste the token that you copied and click "Lookup Token".'
>
<p>
Lookup lets you see information about your token without unwrapping it or changing it. Paste your token here and click
"Lookup". If you find that your data didn't copy for some reason, you can always go back and do it again.
</p>
</WizardSection>
</WizardContent> | ui/app/templates/components/wizard/tools-lookup.hbs | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017534568905830383,
0.00017470559396315366,
0.0001740654988680035,
0.00017470559396315366,
6.400950951501727e-7
] |
{
"id": 0,
"code_window": [
"\t})\n",
"}\n",
"\n",
"func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"latest\", \"vaulttestdb\")\n",
"\tdefer cleanup()\n",
"\n",
"\tuc := userCreator(func(t *testing.T, username, password string) {\n",
"\t\ttestCreateDBUser(t, connURL, \"vaulttestdb\", username, password)\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, \"5.0.10\", \"vaulttestdb\")\n"
],
"file_path": "builtin/logical/database/rotation_test.go",
"type": "replace",
"edit_start_line_idx": 710
} | <form onsubmit={{action "doSubmit"}}>
{{#if (eq this.selectedAction "hash")}}
{{! template-lint-disable no-passed-in-event-handlers }}
<ToolHash
@onClear={{action "onClear"}}
@sum={{this.sum}}
@algorithm={{this.algorithm}}
@errors={{this.errors}}
@format={{this.format}}
@input={{input}}
/>
{{! template-lint-enable no-passed-in-event-handlers }}
{{else if (eq this.selectedAction "random")}}
<ToolRandom
@onClear={{action "onClear"}}
@random_bytes={{this.random_bytes}}
@errors={{this.errors}}
@format={{this.format}}
@bytes={{this.bytes}}
/>
{{else if (eq this.selectedAction "rewrap")}}
<ToolRewrap
@onClear={{action "onClear"}}
@rewrap_token={{this.rewrap_token}}
@selectedAction={{this.selectedAction}}
@errors={{this.errors}}
@token={{this.token}}
@bytes={{this.bytes}}
/>
{{else if (eq this.selectedAction "unwrap")}}
<ToolUnwrap
@onClear={{action "onClear"}}
@unwrap_data={{this.unwrap_data}}
@unwrapActiveTab={{this.unwrapActiveTab}}
@details={{this.details}}
@errors={{this.errors}}
@token={{this.token}}
/>
{{else if (eq this.selectedAction "lookup")}}
<ToolLookup
@creation_time={{this.creation_time}}
@creation_ttl={{this.creation_ttl}}
@creation_path={{this.creation_path}}
@expirationDate={{this.expirationDate}}
@selectedAction={{this.selectedAction}}
@token={{this.token}}
@onClear={{action "onClear"}}
@errors={{this.errors}}
/>
{{else if (eq this.selectedAction "wrap")}}
<ToolWrap
@token={{this.token}}
@selectedAction={{this.selectedAction}}
@onClear={{action "onClear"}}
@codemirrorUpdated={{action "codemirrorUpdated"}}
@updateTtl={{action "updateTtl"}}
@buttonDisabled={{this.buttonDisabled}}
@errors={{this.errors}}
/>
{{else}}
<EmptyState @title="Tool not available" />
{{/if}}
</form> | ui/app/templates/components/tool-actions-form.hbs | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0001775574346538633,
0.00017616059631109238,
0.00017410932923667133,
0.000176366520463489,
0.0000010301254178557429
] |
{
"id": 1,
"code_window": [
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 59
} | package mongodb
import (
"context"
"fmt"
"log"
"strings"
"sync"
"testing"
logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mitchellh/mapstructure"
)
var testImagePull sync.Once
func TestBackend_config_connection(t *testing.T) {
var resp *logical.Response
var err error
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
configData := map[string]interface{}{
"uri": "sample_connection_uri",
"verify_connection": false,
}
configReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/connection",
Storage: config.StorageView,
Data: configData,
}
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
configReq.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
}
func TestBackend_basic(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadCreds("web"),
},
})
}
func TestBackend_roleCrud(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadRole("web", testDb, testMongoDBRoles),
testAccStepDeleteRole("web"),
testAccStepReadRole("web", "", ""),
},
})
}
func TestBackend_leaseWriteRead(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepWriteLease(),
testAccStepReadLease(),
},
})
}
func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/connection",
Data: d,
ErrorOk: true,
Check: func(resp *logical.Response) error {
if expectError {
if resp.Data == nil {
return fmt.Errorf("data is nil")
}
var e struct {
Error string `mapstructure:"error"`
}
if err := mapstructure.Decode(resp.Data, &e); err != nil {
return err
}
if len(e.Error) == 0 {
return fmt.Errorf("expected error, but write succeeded")
}
return nil
} else if resp != nil && resp.IsError() {
return fmt.Errorf("got an error response: %v", resp.Error())
}
return nil
},
}
}
func testAccStepRole() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/web",
Data: map[string]interface{}{
"db": testDb,
"roles": testMongoDBRoles,
},
}
}
func testAccStepDeleteRole(n string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.DeleteOperation,
Path: "roles/" + n,
}
}
func testAccStepReadCreds(name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "creds/" + name,
Check: func(resp *logical.Response) error {
var d struct {
DB string `mapstructure:"db"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB == "" {
return fmt.Errorf("bad: %#v", resp)
}
if d.Username == "" {
return fmt.Errorf("bad: %#v", resp)
}
if !strings.HasPrefix(d.Username, "vault-root-") {
return fmt.Errorf("bad: %#v", resp)
}
if d.Password == "" {
return fmt.Errorf("bad: %#v", resp)
}
log.Printf("[WARN] Generated credentials: %v", d)
return nil
},
}
}
func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "roles/" + name,
Check: func(resp *logical.Response) error {
if resp == nil {
if db == "" && mongoDBRoles == "" {
return nil
}
return fmt.Errorf("bad: %#v", resp)
}
var d struct {
DB string `mapstructure:"db"`
MongoDBRoles string `mapstructure:"roles"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB != db {
return fmt.Errorf("bad: %#v", resp)
}
if d.MongoDBRoles != mongoDBRoles {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
func testAccStepWriteLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/lease",
Data: map[string]interface{}{
"ttl": "1h5m",
"max_ttl": "24h",
},
}
}
func testAccStepReadLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "config/lease",
Check: func(resp *logical.Response) error {
if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
const (
testDb = "foo"
testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]`
)
| builtin/logical/mongodb/backend_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9978231191635132,
0.2215941697359085,
0.00016152671014424413,
0.00017614419630263,
0.4139450490474701
] |
{
"id": 1,
"code_window": [
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 59
} | ---
layout: docs
page_title: Upgrading to Vault 1.3.10 - Guides
description: |-
This page contains the list of deprecations and important or breaking changes
for Vault 1.3.10. Please read it carefully.
---
# Overview
This page contains the list of deprecations and important or breaking changes
for Vault 1.3.10 compared to 1.3.9. Please read it carefully.
@include 'aws-invalid-header-fix.mdx'
| website/content/docs/upgrading/upgrade-to-1.3.10.mdx | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017514917999505997,
0.00017124404257629067,
0.00016733890515752137,
0.00017124404257629067,
0.0000039051374187693
] |
{
"id": 1,
"code_window": [
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 59
} | - `token_bound_cidrs` `(array: [] or comma-delimited string: "")` - List of
CIDR blocks; if set, specifies blocks of IP addresses which can authenticate
successfully, and ties the resulting token to these blocks as well.
- `token_explicit_max_ttl` `(integer: 0 or string: "")` - If set, will encode
an [explicit max
TTL](/docs/concepts/tokens#token-time-to-live-periodic-tokens-and-explicit-max-ttls)
onto the token. This is a hard cap even if `token_ttl` and `token_max_ttl`
would otherwise allow a renewal.
- `token_no_default_policy` `(bool: false)` - If set, the `default` policy will
not be set on generated tokens; otherwise it will be added to the policies set
in `token_policies`.
- `token_num_uses` `(integer: 0)` - The maximum number of times a generated
token may be used (within its lifetime); 0 means unlimited.
If you require the token to have the ability to create child tokens,
you will need to set this value to 0.
- `token_period` `(integer: 0 or string: "")` - The
[period](/docs/concepts/tokens#token-time-to-live-periodic-tokens-and-explicit-max-ttls),
if any, to set on the token.
- `token_type` `(string: "")` - The type of token that should be generated. Can
be `service`, `batch`, or `default` to use the mount's tuned default (which
unless changed will be `service` tokens). For token store roles, there are two
additional possibilities: `default-service` and `default-batch` which specify
the type to return unless the client requests a different type at generation
time.
| website/content/partials/tokenstorefields.mdx | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00020247974316589534,
0.00018297600036021322,
0.00017199577996507287,
0.00017445247794967145,
0.0000138276491270517
] |
{
"id": 1,
"code_window": [
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 59
} | <!-- Please reserve GitHub issues for bug reports and feature requests.
For questions, the best place to get answers is on our [mailing list](https://groups.google.com/forum/#!forum/vault-tool), as they will get more visibility from experienced users than the issue tracker.
Please note: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, please responsibly disclose by contacting us at [email protected]. Our PGP key is available at [our security page](https://www.hashicorp.com/security/).
-->
<!-- Uncomment this section if this is a feature request. Include or exclude other sections as deemed appropriate.
**Feature Request:**
-->
**Environment:**
<!-- The version can be retrieved with `vault version`. -->
* Vault Version:
* Operating System/Architecture:
**Vault Config File:**
<!-- Configuration file used for the vault server. -->
```hcl
# Paste your Vault config here.
# Be sure to scrub any sensitive values
```
**Startup Log Output:**
<!-- Logs from vault's output on startup, if available. -->
```text
# Paste your log output here
```
**Expected Behavior:**
<!-- What should have happened? -->
**Actual Behavior:**
<!-- What actually happened? -->
**Steps to Reproduce:**
<!-- List the steps required to reproduce the issue. -->
**Important Factoids:**
<!-- Describe any atypical environment setup, if any. -->
**References:**
<!-- Link to any references, such as GitHub issues or pull requests. -->
| .github/ISSUE_TEMPLATE.md | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017491119797341526,
0.00017072459741029888,
0.0001682442962191999,
0.00016980535292532295,
0.0000022721314962836914
] |
{
"id": 2,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 83
} | package mongodb
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
func TestMongoDB_Initialize(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
config := map[string]interface{}{
"connection_url": connURL,
}
// Make a copy since the original map could be modified by the Initialize call
expectedConfig := copyConfig(config)
req := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
resp := dbtesting.AssertInitialize(t, db, req)
if !reflect.DeepEqual(resp.Config, expectedConfig) {
t.Fatalf("Actual config: %#v\nExpected config: %#v", resp.Config, expectedConfig)
}
if !db.Initialized {
t.Fatal("Database should be initialized")
}
}
func TestNewUser_usernameTemplate(t *testing.T) {
type testCase struct {
usernameTemplate string
newUserReq dbplugin.NewUserRequest
expectedUsernameRegex string
}
tests := map[string]testCase{
"default username template": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"default username template with invalid chars": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "a.bad.account",
RoleName: "a.bad.role",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-a-bad-account-a-bad-role-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"custom username template": {
usernameTemplate: "{{random 2 | uppercase}}_{{unix_time}}_{{.RoleName | uppercase}}_{{.DisplayName | uppercase}}",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"username_template": test.usernameTemplate,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
ctx := context.Background()
newUserResp, err := db.NewUser(ctx, test.newUserReq)
require.NoError(t, err)
require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
assertCredsExist(t, newUserResp.Username, test.newUserReq.Password, connURL)
})
}
}
func TestMongoDB_CreateUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_CreateUser_writeConcern(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"write_concern": `{ "wmode": "majority", "wtimeout": 5000 }`,
},
VerifyConnection: true,
}
db := new()
defer dbtesting.AssertClose(t, db)
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_DeleteUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
// Test default revocation statement
delReq := dbplugin.DeleteUserRequest{
Username: createResp.Username,
}
dbtesting.AssertDeleteUser(t, db, delReq)
assertCredsDoNotExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_UpdateUser_Password(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
// The docker test method PrepareTestContainer defaults to a database "test"
// if none is provided
connURL = connURL + "/test"
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
// create the database user in advance, and test the connection
dbUser := "testmongouser"
startingPassword := "password"
createDBUser(t, connURL, "test", dbUser, startingPassword)
newPassword := "myreallysecurecredentials"
updateReq := dbplugin.UpdateUserRequest{
Username: dbUser,
Password: &dbplugin.ChangePassword{
NewPassword: newPassword,
},
}
dbtesting.AssertUpdateUser(t, db, updateReq)
assertCredsExist(t, dbUser, newPassword, connURL)
}
func TestGetTLSAuth(t *testing.T) {
ca := certhelpers.NewCert(t,
certhelpers.CommonName("certificate authority"),
certhelpers.IsCA(true),
certhelpers.SelfSign(),
)
cert := certhelpers.NewCert(t,
certhelpers.CommonName("test cert"),
certhelpers.Parent(ca),
)
type testCase struct {
username string
tlsCAData []byte
tlsKeyData []byte
expectOpts *options.ClientOptions
expectErr bool
}
tests := map[string]testCase{
"no TLS data set": {
expectOpts: nil,
expectErr: false,
},
"bad CA": {
tlsCAData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"bad key": {
tlsKeyData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"good ca": {
tlsCAData: cert.Pem,
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
RootCAs: appendToCertPool(t, x509.NewCertPool(), cert.Pem),
},
),
expectErr: false,
},
"good key": {
username: "unittest",
tlsKeyData: cert.CombinedPEM(),
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
Certificates: []tls.Certificate{cert.TLSCert},
},
).
SetAuth(options.Credential{
AuthMechanism: "MONGODB-X509",
Username: "unittest",
}),
expectErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
c := new()
c.Username = test.username
c.TLSCAData = test.tlsCAData
c.TLSCertificateKeyData = test.tlsKeyData
actual, err := c.getTLSAuth()
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
assertDeepEqual(t, test.expectOpts, actual)
})
}
}
func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.CertPool {
t.Helper()
ok := pool.AppendCertsFromPEM(caPem)
if !ok {
t.Fatalf("Unable to append cert to cert pool")
}
return pool
}
var cmpClientOptionsOpts = cmp.Options{
cmp.AllowUnexported(options.ClientOptions{}),
cmp.AllowUnexported(tls.Config{}),
cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}),
// 'lazyCerts' has a func field which can't be compared.
cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
cmp.AllowUnexported(x509.CertPool{}),
}
// Need a special comparison for ClientOptions because reflect.DeepEqual won't work in Go 1.16.
// See: https://github.com/golang/go/issues/45891
func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) {
t.Helper()
if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
func createDBUser(t testing.TB, connURL, db, username, password string) {
t.Helper()
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatal(err)
}
createUserCmd := &createUserCommand{
Username: username,
Password: password,
Roles: []interface{}{},
}
result := client.Database(db).RunCommand(ctx, createUserCmd, nil)
if result.Err() != nil {
t.Fatalf("failed to create user in mongodb: %s", result.Err())
}
assertCredsExist(t, username, password, connURL)
}
func assertCredsExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatalf("Failed to connect to mongo: %s", err)
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
t.Fatalf("Failed to ping mongo with user %q: %s", username, err)
}
}
func assertCredsDoNotExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
return // Creds don't exist as expected
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
return // Creds don't exist as expected
}
t.Fatalf("User %q exists and was able to authenticate", username)
}
func copyConfig(config map[string]interface{}) map[string]interface{} {
newConfig := map[string]interface{}{}
for k, v := range config {
newConfig[k] = v
}
return newConfig
}
| plugins/database/mongodb/mongodb_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9903440475463867,
0.18415804207324982,
0.00016015669098123908,
0.00020085740834474564,
0.37460410594940186
] |
{
"id": 2,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 83
} | {{#if (eq this.model.httpStatus 404)}}
<NotFound @model={{this.model}} />
{{else}}
<PageHeader as |p|>
<p.levelLeft>
<h1 class="title is-3 has-text-grey">
{{#if (eq this.model.httpStatus 403)}}
Not authorized
{{else}}
Error
{{/if}}
</h1>
</p.levelLeft>
</PageHeader>
<div class="box is-sideless has-background-white-bis has-text-grey has-text-centered">
{{#if (and (eq this.model.httpStatus 403) (eq this.model.policyPath "sys/capabilities-self"))}}
<p>
Your auth token does not have access to
<code>{{this.model.policyPath}}</code>. Vault Enterprise uses this endpoint to determine what actions are allowed in
the interface.
</p>
<p>
Make sure the policy for the path
<code>{{this.model.policyPath}}</code>
includes
<code>capabilities = ['update']</code>.
</p>
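{{!--
  For reference, a minimal policy stanza that satisfies the requirement described above
  (illustrative sketch only; it is not rendered, and any surrounding policy content is
  left to the operator):

  path "sys/capabilities-self" {
    capabilities = ["update"]
  }
--}}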
{{else}}
{{#if this.model.message}}
<p>{{this.model.message}}</p>
{{/if}}
{{#each this.model.errors as |error|}}
<p>
{{error}}
</p>
{{/each}}
{{/if}}
</div>
{{/if}} | ui/app/templates/vault/cluster/error.hbs | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017762789502739906,
0.00017530549666844308,
0.00017131635104306042,
0.0001761388557497412,
0.000002501969902368728
] |
{
"id": 2,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 83
} | ```release-note:feature
**MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine
```
| changelog/11532.txt | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0001695279061095789,
0.0001695279061095789,
0.0001695279061095789,
0.0001695279061095789,
0
] |
{
"id": 2,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 83
} | //go:build !race
package command
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"testing"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/pgpkeys"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/cli"
)
func testOperatorInitCommand(tb testing.TB) (*cli.MockUi, *OperatorInitCommand) {
tb.Helper()
ui := cli.NewMockUi()
return ui, &OperatorInitCommand{
BaseCommand: &BaseCommand{
UI: ui,
},
}
}
func TestOperatorInitCommand_Run(t *testing.T) {
t.Parallel()
cases := []struct {
name string
args []string
out string
code int
}{
{
"too_many_args",
[]string{"foo"},
"Too many arguments",
1,
},
{
"pgp_keys_multi",
[]string{
"-pgp-keys", "keybase:hashicorp",
"-pgp-keys", "keybase:jefferai",
},
"can only be specified once",
1,
},
{
"root_token_pgp_key_multi",
[]string{
"-root-token-pgp-key", "keybase:hashicorp",
"-root-token-pgp-key", "keybase:jefferai",
},
"can only be specified once",
1,
},
{
"root_token_pgp_key_multi_inline",
[]string{
"-root-token-pgp-key", "keybase:hashicorp,keybase:jefferai",
},
"can only specify one pgp key",
1,
},
{
"recovery_pgp_keys_multi",
[]string{
"-recovery-pgp-keys", "keybase:hashicorp",
"-recovery-pgp-keys", "keybase:jefferai",
},
"can only be specified once",
1,
},
{
"key_shares_pgp_less",
[]string{
"-key-shares", "10",
"-pgp-keys", "keybase:jefferai,keybase:sethvargo",
},
"incorrect number",
2,
},
{
"key_shares_pgp_more",
[]string{
"-key-shares", "1",
"-pgp-keys", "keybase:jefferai,keybase:sethvargo",
},
"incorrect number",
2,
},
}
t.Run("validations", func(t *testing.T) {
t.Parallel()
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
client, closer := testVaultServer(t)
defer closer()
ui, cmd := testOperatorInitCommand(t)
cmd.client = client
code := cmd.Run(tc.args)
if code != tc.code {
t.Errorf("expected %d to be %d", code, tc.code)
}
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
if !strings.Contains(combined, tc.out) {
t.Errorf("expected %q to contain %q", combined, tc.out)
}
})
}
})
t.Run("status", func(t *testing.T) {
t.Parallel()
client, closer := testVaultServerUninit(t)
defer closer()
ui, cmd := testOperatorInitCommand(t)
cmd.client = client
// Verify the non-init response code
code := cmd.Run([]string{
"-status",
})
if exp := 2; code != exp {
t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
}
// Now init to verify the init response code
if _, err := client.Sys().Init(&api.InitRequest{
SecretShares: 1,
SecretThreshold: 1,
}); err != nil {
t.Fatal(err)
}
// Verify the init response code
ui, cmd = testOperatorInitCommand(t)
cmd.client = client
code = cmd.Run([]string{
"-status",
})
if exp := 0; code != exp {
t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
}
})
t.Run("default", func(t *testing.T) {
t.Parallel()
client, closer := testVaultServerUninit(t)
defer closer()
ui, cmd := testOperatorInitCommand(t)
cmd.client = client
code := cmd.Run([]string{})
if exp := 0; code != exp {
t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
}
init, err := client.Sys().InitStatus()
if err != nil {
t.Fatal(err)
}
if !init {
t.Error("expected initialized")
}
re := regexp.MustCompile(`Unseal Key \d+: (.+)`)
output := ui.OutputWriter.String()
match := re.FindAllStringSubmatch(output, -1)
if len(match) < 5 || len(match[0]) < 2 {
t.Fatalf("no match: %#v", match)
}
keys := make([]string, len(match))
for i := range match {
keys[i] = match[i][1]
}
// Try unsealing with those keys - only use 3, which is the default
// threshold.
for i, key := range keys[:3] {
resp, err := client.Sys().Unseal(key)
if err != nil {
t.Fatal(err)
}
exp := (i + 1) % 3 // 1, 2, 0
if resp.Progress != exp {
t.Errorf("expected %d to be %d", resp.Progress, exp)
}
}
status, err := client.Sys().SealStatus()
if err != nil {
t.Fatal(err)
}
if status.Sealed {
t.Errorf("expected vault to be unsealed: %#v", status)
}
})
t.Run("custom_shares_threshold", func(t *testing.T) {
t.Parallel()
keyShares, keyThreshold := 20, 15
client, closer := testVaultServerUninit(t)
defer closer()
ui, cmd := testOperatorInitCommand(t)
cmd.client = client
code := cmd.Run([]string{
"-key-shares", strconv.Itoa(keyShares),
"-key-threshold", strconv.Itoa(keyThreshold),
})
if exp := 0; code != exp {
t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
}
init, err := client.Sys().InitStatus()
if err != nil {
t.Fatal(err)
}
if !init {
t.Error("expected initialized")
}
re := regexp.MustCompile(`Unseal Key \d+: (.+)`)
output := ui.OutputWriter.String()
match := re.FindAllStringSubmatch(output, -1)
if len(match) < keyShares || len(match[0]) < 2 {
t.Fatalf("no match: %#v", match)
}
keys := make([]string, len(match))
for i := range match {
keys[i] = match[i][1]
}
// Try unsealing with those keys - only use keyThreshold of them, which is the
// threshold configured above.
for i, key := range keys[:keyThreshold] {
resp, err := client.Sys().Unseal(key)
if err != nil {
t.Fatal(err)
}
exp := (i + 1) % keyThreshold
if resp.Progress != exp {
t.Errorf("expected %d to be %d", resp.Progress, exp)
}
}
status, err := client.Sys().SealStatus()
if err != nil {
t.Fatal(err)
}
if status.Sealed {
t.Errorf("expected vault to be unsealed: %#v", status)
}
})
t.Run("pgp", func(t *testing.T) {
t.Parallel()
tempDir, pubFiles, err := getPubKeyFiles(t)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
client, closer := testVaultServerUninit(t)
defer closer()
ui, cmd := testOperatorInitCommand(t)
cmd.client = client
code := cmd.Run([]string{
"-key-shares", "4",
"-key-threshold", "2",
"-pgp-keys", fmt.Sprintf("%s,@%s, %s, %s ",
pubFiles[0], pubFiles[1], pubFiles[2], pubFiles[3]),
"-root-token-pgp-key", pubFiles[0],
})
if exp := 0; code != exp {
t.Fatalf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String())
}
re := regexp.MustCompile(`Unseal Key \d+: (.+)`)
output := ui.OutputWriter.String()
match := re.FindAllStringSubmatch(output, -1)
if len(match) < 4 || len(match[0]) < 2 {
t.Fatalf("no match: %#v", match)
}
keys := make([]string, len(match))
for i := range match {
keys[i] = match[i][1]
}
// Try unsealing with one key
decryptedKey := testPGPDecrypt(t, pgpkeys.TestPrivKey1, keys[0])
if _, err := client.Sys().Unseal(decryptedKey); err != nil {
t.Fatal(err)
}
// Decrypt the root token
reToken := regexp.MustCompile(`Root Token: (.+)`)
match = reToken.FindAllStringSubmatch(output, -1)
if len(match) < 1 || len(match[0]) < 2 {
t.Fatalf("no match")
}
root := match[0][1]
decryptedRoot := testPGPDecrypt(t, pgpkeys.TestPrivKey1, root)
if l, exp := len(decryptedRoot), vault.TokenLength+vault.TokenPrefixLength; l != exp {
t.Errorf("expected %d to be %d", l, exp)
}
})
t.Run("communication_failure", func(t *testing.T) {
t.Parallel()
client, closer := testVaultServerBad(t)
defer closer()
ui, cmd := testOperatorInitCommand(t)
cmd.client = client
code := cmd.Run([]string{
"-key-shares=1",
"-key-threshold=1",
})
if exp := 2; code != exp {
t.Errorf("expected %d to be %d", code, exp)
}
expected := "Error making API request"
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
if !strings.Contains(combined, expected) {
t.Errorf("expected %q to contain %q", combined, expected)
}
})
t.Run("no_tabs", func(t *testing.T) {
t.Parallel()
_, cmd := testOperatorInitCommand(t)
assertNoTabs(t, cmd)
})
}
| command/operator_init_test.go | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0005934592336416245,
0.00019466104276943952,
0.0001631308696232736,
0.00017183262389153242,
0.00009425976895727217
] |
{
"id": 3,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 109
} | package mongodb
import (
"context"
"fmt"
"log"
"strings"
"sync"
"testing"
logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mitchellh/mapstructure"
)
var testImagePull sync.Once
func TestBackend_config_connection(t *testing.T) {
var resp *logical.Response
var err error
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
configData := map[string]interface{}{
"uri": "sample_connection_uri",
"verify_connection": false,
}
configReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/connection",
Storage: config.StorageView,
Data: configData,
}
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
configReq.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
}
func TestBackend_basic(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadCreds("web"),
},
})
}
func TestBackend_roleCrud(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadRole("web", testDb, testMongoDBRoles),
testAccStepDeleteRole("web"),
testAccStepReadRole("web", "", ""),
},
})
}
func TestBackend_leaseWriteRead(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepWriteLease(),
testAccStepReadLease(),
},
})
}
func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/connection",
Data: d,
ErrorOk: true,
Check: func(resp *logical.Response) error {
if expectError {
if resp.Data == nil {
return fmt.Errorf("data is nil")
}
var e struct {
Error string `mapstructure:"error"`
}
if err := mapstructure.Decode(resp.Data, &e); err != nil {
return err
}
if len(e.Error) == 0 {
return fmt.Errorf("expected error, but write succeeded")
}
return nil
} else if resp != nil && resp.IsError() {
return fmt.Errorf("got an error response: %v", resp.Error())
}
return nil
},
}
}
func testAccStepRole() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/web",
Data: map[string]interface{}{
"db": testDb,
"roles": testMongoDBRoles,
},
}
}
func testAccStepDeleteRole(n string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.DeleteOperation,
Path: "roles/" + n,
}
}
func testAccStepReadCreds(name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "creds/" + name,
Check: func(resp *logical.Response) error {
var d struct {
DB string `mapstructure:"db"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB == "" {
return fmt.Errorf("bad: %#v", resp)
}
if d.Username == "" {
return fmt.Errorf("bad: %#v", resp)
}
if !strings.HasPrefix(d.Username, "vault-root-") {
return fmt.Errorf("bad: %#v", resp)
}
if d.Password == "" {
return fmt.Errorf("bad: %#v", resp)
}
log.Printf("[WARN] Generated credentials: %v", d)
return nil
},
}
}
func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "roles/" + name,
Check: func(resp *logical.Response) error {
if resp == nil {
if db == "" && mongoDBRoles == "" {
return nil
}
return fmt.Errorf("bad: %#v", resp)
}
var d struct {
DB string `mapstructure:"db"`
MongoDBRoles string `mapstructure:"roles"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB != db {
return fmt.Errorf("bad: %#v", resp)
}
if d.MongoDBRoles != mongoDBRoles {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
func testAccStepWriteLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/lease",
Data: map[string]interface{}{
"ttl": "1h5m",
"max_ttl": "24h",
},
}
}
func testAccStepReadLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "config/lease",
Check: func(resp *logical.Response) error {
if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
const (
testDb = "foo"
testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]`
)
| builtin/logical/mongodb/backend_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9979895353317261,
0.2758634388446808,
0.00016462533676531166,
0.0001743335888022557,
0.4302690327167511
] |
{
"id": 3,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 109
} | import { helper as buildHelper } from '@ember/component/helper';
export function jsonify([target]) {
return JSON.parse(target);
}
export default buildHelper(jsonify);
| ui/app/helpers/jsonify.js | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017646745254751295,
0.00017646745254751295,
0.00017646745254751295,
0.00017646745254751295,
0
] |
{
"id": 3,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 109
} | <WizardSection
@headerText="AppRole"
@headerIcon="cpu"
@docText="Docs: AppRole Authentication"
@docPath="/docs/auth/approle.html"
>
<p>
The AppRole Auth Method allows machines or apps to authenticate with Vault-defined roles. The open design of AppRole
enables a varied set of workflows and configurations to handle large numbers of apps. This Auth Method is oriented to
automated workflows (machines and services), and is less useful for human operators.
</p>
</WizardSection> | ui/app/templates/components/wizard/approle-method.hbs | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017528634634800255,
0.0001691855868557468,
0.00016308482736349106,
0.0001691855868557468,
0.000006100759492255747
] |
{
"id": 3,
"code_window": [
"\tb, err := Factory(context.Background(), config)\n",
"\tif err != nil {\n",
"\t\tt.Fatal(err)\n",
"\t}\n",
"\n",
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\tconnData := map[string]interface{}{\n",
"\t\t\"uri\": connURI,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURI := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "builtin/logical/mongodb/backend_test.go",
"type": "replace",
"edit_start_line_idx": 109
} | <svg width="16" height="16" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg"><title>Group 4</title><path d="M1.778 1.778l12.541 12.541m-.097-12.541L1.681 14.319" stroke="#FFF" stroke-width="2" fill="none" stroke-linecap="square"/></svg> | website/public/img/icons/close-icon.svg | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0001757628342602402,
0.0001757628342602402,
0.0001757628342602402,
0.0001757628342602402,
0
] |
{
"id": 4,
"code_window": [
")\n",
"\n",
"const mongoAdminRole = `{ \"db\": \"admin\", \"roles\": [ { \"role\": \"readWrite\" } ] }`\n",
"\n",
"func TestMongoDB_Initialize(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 29
} | package mongodb
import (
"context"
"fmt"
"log"
"strings"
"sync"
"testing"
logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mitchellh/mapstructure"
)
var testImagePull sync.Once
func TestBackend_config_connection(t *testing.T) {
var resp *logical.Response
var err error
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
configData := map[string]interface{}{
"uri": "sample_connection_uri",
"verify_connection": false,
}
configReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/connection",
Storage: config.StorageView,
Data: configData,
}
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
configReq.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
}
func TestBackend_basic(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadCreds("web"),
},
})
}
func TestBackend_roleCrud(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadRole("web", testDb, testMongoDBRoles),
testAccStepDeleteRole("web"),
testAccStepReadRole("web", "", ""),
},
})
}
func TestBackend_leaseWriteRead(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepWriteLease(),
testAccStepReadLease(),
},
})
}
func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/connection",
Data: d,
ErrorOk: true,
Check: func(resp *logical.Response) error {
if expectError {
if resp.Data == nil {
return fmt.Errorf("data is nil")
}
var e struct {
Error string `mapstructure:"error"`
}
if err := mapstructure.Decode(resp.Data, &e); err != nil {
return err
}
if len(e.Error) == 0 {
return fmt.Errorf("expected error, but write succeeded")
}
return nil
} else if resp != nil && resp.IsError() {
return fmt.Errorf("got an error response: %v", resp.Error())
}
return nil
},
}
}
func testAccStepRole() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/web",
Data: map[string]interface{}{
"db": testDb,
"roles": testMongoDBRoles,
},
}
}
func testAccStepDeleteRole(n string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.DeleteOperation,
Path: "roles/" + n,
}
}
func testAccStepReadCreds(name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "creds/" + name,
Check: func(resp *logical.Response) error {
var d struct {
DB string `mapstructure:"db"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB == "" {
return fmt.Errorf("bad: %#v", resp)
}
if d.Username == "" {
return fmt.Errorf("bad: %#v", resp)
}
if !strings.HasPrefix(d.Username, "vault-root-") {
return fmt.Errorf("bad: %#v", resp)
}
if d.Password == "" {
return fmt.Errorf("bad: %#v", resp)
}
log.Printf("[WARN] Generated credentials: %v", d)
return nil
},
}
}
func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "roles/" + name,
Check: func(resp *logical.Response) error {
if resp == nil {
if db == "" && mongoDBRoles == "" {
return nil
}
return fmt.Errorf("bad: %#v", resp)
}
var d struct {
DB string `mapstructure:"db"`
MongoDBRoles string `mapstructure:"roles"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB != db {
return fmt.Errorf("bad: %#v", resp)
}
if d.MongoDBRoles != mongoDBRoles {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
func testAccStepWriteLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/lease",
Data: map[string]interface{}{
"ttl": "1h5m",
"max_ttl": "24h",
},
}
}
func testAccStepReadLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "config/lease",
Check: func(resp *logical.Response) error {
if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
const (
testDb = "foo"
testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]`
)
| builtin/logical/mongodb/backend_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9701362252235413,
0.11220207065343857,
0.00016689432959537953,
0.0006864405586384237,
0.2670958936214447
] |
{
"id": 4,
"code_window": [
")\n",
"\n",
"const mongoAdminRole = `{ \"db\": \"admin\", \"roles\": [ { \"role\": \"readWrite\" } ] }`\n",
"\n",
"func TestMongoDB_Initialize(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 29
} | ---
layout: docs
page_title: plugin - Command
description: |-
The "plugin" command groups subcommands for interacting with
Vault's plugins and the plugin catalog.
---
# plugin
The `plugin` command groups subcommands for interacting with Vault's plugins and
the plugin catalog
## Examples
List all available plugins in the catalog:
```shell-session
$ vault plugin list
Plugins
-------
my-custom-plugin
# ...
```
Register a new plugin to the catalog:
```shell-session
$ vault plugin register \
-sha256=d3f0a8be02f6c074cf38c9c99d4d04c9c6466249 \
my-custom-plugin
Success! Registered plugin: my-custom-plugin
```
Get information about a plugin in the catalog:
```shell-session
$ vault plugin info my-custom-plugin
Key Value
--- -----
command my-custom-plugin
name my-custom-plugin
sha256 d3f0a8be02f6c074cf38c9c99d4d04c9c6466249
```
## Usage
```text
Usage: vault plugin <subcommand> [options] [args]
# ...
Subcommands:
deregister Deregister an existing plugin in the catalog
info Read information about a plugin in the catalog
list Lists available plugins
register Registers a new plugin in the catalog
reload Reload mounted plugin backend
```
For more information, examples, and usage about a subcommand, click on the name
of the subcommand in the sidebar.
| website/content/docs/commands/plugin/index.mdx | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017271938850171864,
0.00016866365331225097,
0.00016145124391186982,
0.0001711362856440246,
0.000004500886007008376
] |
{
"id": 4,
"code_window": [
")\n",
"\n",
"const mongoAdminRole = `{ \"db\": \"admin\", \"roles\": [ { \"role\": \"readWrite\" } ] }`\n",
"\n",
"func TestMongoDB_Initialize(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 29
} | import { helper } from '@ember/component/helper';
export function numberToWord(number, capitalize) {
const word =
{
0: 'zero',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
}[number] || number;
return capitalize && typeof word === 'string' ? `${word.charAt(0).toUpperCase()}${word.slice(1)}` : word;
}
export default helper(function ([number], { capitalize }) {
return numberToWord(number, capitalize);
});
| ui/app/helpers/number-to-word.js | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017403712263330817,
0.00017336015298496932,
0.00017207270138897002,
0.00017397059127688408,
9.107606047109584e-7
] |
{
"id": 4,
"code_window": [
")\n",
"\n",
"const mongoAdminRole = `{ \"db\": \"admin\", \"roles\": [ { \"role\": \"readWrite\" } ] }`\n",
"\n",
"func TestMongoDB_Initialize(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 29
} | ---
layout: docs
page_title: policy - Command
description: |-
The "policy" command groups subcommands for interacting with policies. Users
can write, read, and list policies in Vault.
---
# policy
The `policy` command groups subcommands for interacting with policies. Users can
write, read, and list policies in Vault.
For more information, please see the [policy
documentation](/docs/concepts/policies).
## Examples
List all enabled policies:
```shell-session
$ vault policy list
```
Create a policy named "my-policy" from contents on local disk:
```shell-session
$ vault policy write my-policy ./my-policy.hcl
```
Delete the policy named my-policy:
```shell-session
$ vault policy delete my-policy
```
## Usage
```text
Usage: vault policy <subcommand> [options] [args]
# ...
Subcommands:
delete Deletes a policy by name
list Lists the installed policies
read Prints the contents of a policy
write Uploads a named policy from a file
```
For more information, examples, and usage about a subcommand, click on the name
of the subcommand in the sidebar.
| website/content/docs/commands/policy/index.mdx | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0001711364311631769,
0.0001671831269050017,
0.0001617651287233457,
0.00016803538892418146,
0.000003250583631597692
] |
{
"id": 5,
"code_window": [
"\n",
"\tfor name, test := range tests {\n",
"\t\tt.Run(name, func(t *testing.T) {\n",
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tdb := new()\n",
"\t\t\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 122
} | package mongodb
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
func TestMongoDB_Initialize(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
config := map[string]interface{}{
"connection_url": connURL,
}
// Make a copy since the original map could be modified by the Initialize call
expectedConfig := copyConfig(config)
req := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
resp := dbtesting.AssertInitialize(t, db, req)
if !reflect.DeepEqual(resp.Config, expectedConfig) {
t.Fatalf("Actual config: %#v\nExpected config: %#v", resp.Config, expectedConfig)
}
if !db.Initialized {
t.Fatal("Database should be initialized")
}
}
func TestNewUser_usernameTemplate(t *testing.T) {
type testCase struct {
usernameTemplate string
newUserReq dbplugin.NewUserRequest
expectedUsernameRegex string
}
tests := map[string]testCase{
"default username template": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"default username template with invalid chars": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "a.bad.account",
RoleName: "a.bad.role",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-a-bad-account-a-bad-role-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"custom username template": {
usernameTemplate: "{{random 2 | uppercase}}_{{unix_time}}_{{.RoleName | uppercase}}_{{.DisplayName | uppercase}}",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"username_template": test.usernameTemplate,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
ctx := context.Background()
newUserResp, err := db.NewUser(ctx, test.newUserReq)
require.NoError(t, err)
require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
assertCredsExist(t, newUserResp.Username, test.newUserReq.Password, connURL)
})
}
}
func TestMongoDB_CreateUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_CreateUser_writeConcern(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"write_concern": `{ "wmode": "majority", "wtimeout": 5000 }`,
},
VerifyConnection: true,
}
db := new()
defer dbtesting.AssertClose(t, db)
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_DeleteUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
// Test default revocation statement
delReq := dbplugin.DeleteUserRequest{
Username: createResp.Username,
}
dbtesting.AssertDeleteUser(t, db, delReq)
assertCredsDoNotExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_UpdateUser_Password(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
// The docker test method PrepareTestContainer defaults to a database "test"
// if none is provided
connURL = connURL + "/test"
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
// create the database user in advance, and test the connection
dbUser := "testmongouser"
startingPassword := "password"
createDBUser(t, connURL, "test", dbUser, startingPassword)
newPassword := "myreallysecurecredentials"
updateReq := dbplugin.UpdateUserRequest{
Username: dbUser,
Password: &dbplugin.ChangePassword{
NewPassword: newPassword,
},
}
dbtesting.AssertUpdateUser(t, db, updateReq)
assertCredsExist(t, dbUser, newPassword, connURL)
}
func TestGetTLSAuth(t *testing.T) {
ca := certhelpers.NewCert(t,
certhelpers.CommonName("certificate authority"),
certhelpers.IsCA(true),
certhelpers.SelfSign(),
)
cert := certhelpers.NewCert(t,
certhelpers.CommonName("test cert"),
certhelpers.Parent(ca),
)
type testCase struct {
username string
tlsCAData []byte
tlsKeyData []byte
expectOpts *options.ClientOptions
expectErr bool
}
tests := map[string]testCase{
"no TLS data set": {
expectOpts: nil,
expectErr: false,
},
"bad CA": {
tlsCAData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"bad key": {
tlsKeyData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"good ca": {
tlsCAData: cert.Pem,
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
RootCAs: appendToCertPool(t, x509.NewCertPool(), cert.Pem),
},
),
expectErr: false,
},
"good key": {
username: "unittest",
tlsKeyData: cert.CombinedPEM(),
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
Certificates: []tls.Certificate{cert.TLSCert},
},
).
SetAuth(options.Credential{
AuthMechanism: "MONGODB-X509",
Username: "unittest",
}),
expectErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
c := new()
c.Username = test.username
c.TLSCAData = test.tlsCAData
c.TLSCertificateKeyData = test.tlsKeyData
actual, err := c.getTLSAuth()
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
assertDeepEqual(t, test.expectOpts, actual)
})
}
}
func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.CertPool {
t.Helper()
ok := pool.AppendCertsFromPEM(caPem)
if !ok {
t.Fatalf("Unable to append cert to cert pool")
}
return pool
}
var cmpClientOptionsOpts = cmp.Options{
cmp.AllowUnexported(options.ClientOptions{}),
cmp.AllowUnexported(tls.Config{}),
cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}),
// 'lazyCerts' has a func field which can't be compared.
cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
cmp.AllowUnexported(x509.CertPool{}),
}
// Need a special comparison for ClientOptions because reflect.DeepEquals won't work in Go 1.16.
// See: https://github.com/golang/go/issues/45891
func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) {
t.Helper()
if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
func createDBUser(t testing.TB, connURL, db, username, password string) {
t.Helper()
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatal(err)
}
createUserCmd := &createUserCommand{
Username: username,
Password: password,
Roles: []interface{}{},
}
result := client.Database(db).RunCommand(ctx, createUserCmd, nil)
if result.Err() != nil {
t.Fatalf("failed to create user in mongodb: %s", result.Err())
}
assertCredsExist(t, username, password, connURL)
}
func assertCredsExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatalf("Failed to connect to mongo: %s", err)
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
t.Fatalf("Failed to ping mongo with user %q: %s", username, err)
}
}
func assertCredsDoNotExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
return // Creds don't exist as expected
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
return // Creds don't exist as expected
}
t.Fatalf("User %q exists and was able to authenticate", username)
}
func copyConfig(config map[string]interface{}) map[string]interface{} {
newConfig := map[string]interface{}{}
for k, v := range config {
newConfig[k] = v
}
return newConfig
}
| plugins/database/mongodb/mongodb_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9986482262611389,
0.5037407875061035,
0.0001655411469982937,
0.7449002265930176,
0.48723700642585754
] |
{
"id": 5,
"code_window": [
"\n",
"\tfor name, test := range tests {\n",
"\t\tt.Run(name, func(t *testing.T) {\n",
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tdb := new()\n",
"\t\t\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 122
} | ```release-note:bug
secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown.
```
| changelog/12087.txt | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017042254330590367,
0.00017042254330590367,
0.00017042254330590367,
0.00017042254330590367,
0
] |
{
"id": 5,
"code_window": [
"\n",
"\tfor name, test := range tests {\n",
"\t\tt.Run(name, func(t *testing.T) {\n",
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tdb := new()\n",
"\t\t\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 122
} | ---
description: >
Ensure the right version of Go is installed and set GOPATH to $HOME/go.
parameters:
GOPROXY:
description: >
Set GOPROXY. By default this is set to "off" meaning you have to have all modules pre-downloaded.
type: string
default: "off"
GOPRIVATE:
description: Set GOPRIVATE, defaults to github.com/hashicorp/*
type: string
default: github.com/hashicorp/*
steps:
- run:
working_directory: ~/
name: Setup Go
command: |
[ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; }
# Install Go
curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz"
sudo rm -rf /usr/local/go
sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz"
rm -f "go${GO_VERSION}.linux-amd64.tar.gz"
GOPATH="/home/circleci/go"
mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; }
mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; }
echo "export GOPATH='$GOPATH'" >> "$BASH_ENV"
echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV"
echo "export GOPROXY=<<parameters.GOPROXY>>" >> "$BASH_ENV"
echo "export GOPRIVATE=<<parameters.GOPRIVATE>>" >> "$BASH_ENV"
echo "$ go version"
go version
| .circleci/config/commands/setup-go.yml | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017797765030991286,
0.00017475380445830524,
0.00017362210201099515,
0.00017370772548019886,
0.0000018623112509885686
] |
{
"id": 5,
"code_window": [
"\n",
"\tfor name, test := range tests {\n",
"\t\tt.Run(name, func(t *testing.T) {\n",
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\t\t\tdefer cleanup()\n",
"\n",
"\t\t\tdb := new()\n",
"\t\t\tdefer dbtesting.AssertClose(t, db)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 122
} | Key Value
lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4
lease_duration 279359999
lease_renewable false
certificate -----BEGIN CERTIFICATE-----
MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
-----END CERTIFICATE-----
issuing_ca -----BEGIN CERTIFICATE-----
MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
zehNe5dFTjFpylg1o6b8Ow==
-----END CERTIFICATE-----
private_key -----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
-----END RSA PRIVATE KEY-----
private_key_type rsa
| command/agent/auth/cert/test-fixtures/keys/pkioutput | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0011951751075685024,
0.0004350047674961388,
0.00016383465845137835,
0.0003479046863503754,
0.00032306500361301005
] |
{
"id": 6,
"code_window": [
"\t\t})\n",
"\t}\n",
"}\n",
"\n",
"func TestMongoDB_CreateUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 148
} | package mongodb
import (
"context"
"fmt"
"log"
"strings"
"sync"
"testing"
logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
"github.com/hashicorp/vault/sdk/logical"
"github.com/mitchellh/mapstructure"
)
var testImagePull sync.Once
func TestBackend_config_connection(t *testing.T) {
var resp *logical.Response
var err error
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
configData := map[string]interface{}{
"uri": "sample_connection_uri",
"verify_connection": false,
}
configReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/connection",
Storage: config.StorageView,
Data: configData,
}
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
configReq.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
}
func TestBackend_basic(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadCreds("web"),
},
})
}
func TestBackend_roleCrud(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepRole(),
testAccStepReadRole("web", testDb, testMongoDBRoles),
testAccStepDeleteRole("web"),
testAccStepReadRole("web", "", ""),
},
})
}
func TestBackend_leaseWriteRead(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, connURI := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
connData := map[string]interface{}{
"uri": connURI,
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalBackend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(connData, false),
testAccStepWriteLease(),
testAccStepReadLease(),
},
})
}
func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/connection",
Data: d,
ErrorOk: true,
Check: func(resp *logical.Response) error {
if expectError {
if resp.Data == nil {
return fmt.Errorf("data is nil")
}
var e struct {
Error string `mapstructure:"error"`
}
if err := mapstructure.Decode(resp.Data, &e); err != nil {
return err
}
if len(e.Error) == 0 {
return fmt.Errorf("expected error, but write succeeded")
}
return nil
} else if resp != nil && resp.IsError() {
return fmt.Errorf("got an error response: %v", resp.Error())
}
return nil
},
}
}
func testAccStepRole() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/web",
Data: map[string]interface{}{
"db": testDb,
"roles": testMongoDBRoles,
},
}
}
func testAccStepDeleteRole(n string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.DeleteOperation,
Path: "roles/" + n,
}
}
func testAccStepReadCreds(name string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "creds/" + name,
Check: func(resp *logical.Response) error {
var d struct {
DB string `mapstructure:"db"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB == "" {
return fmt.Errorf("bad: %#v", resp)
}
if d.Username == "" {
return fmt.Errorf("bad: %#v", resp)
}
if !strings.HasPrefix(d.Username, "vault-root-") {
return fmt.Errorf("bad: %#v", resp)
}
if d.Password == "" {
return fmt.Errorf("bad: %#v", resp)
}
log.Printf("[WARN] Generated credentials: %v", d)
return nil
},
}
}
func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "roles/" + name,
Check: func(resp *logical.Response) error {
if resp == nil {
if db == "" && mongoDBRoles == "" {
return nil
}
return fmt.Errorf("bad: %#v", resp)
}
var d struct {
DB string `mapstructure:"db"`
MongoDBRoles string `mapstructure:"roles"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
if d.DB != db {
return fmt.Errorf("bad: %#v", resp)
}
if d.MongoDBRoles != mongoDBRoles {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
func testAccStepWriteLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/lease",
Data: map[string]interface{}{
"ttl": "1h5m",
"max_ttl": "24h",
},
}
}
func testAccStepReadLease() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "config/lease",
Check: func(resp *logical.Response) error {
if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 {
return fmt.Errorf("bad: %#v", resp)
}
return nil
},
}
}
const (
testDb = "foo"
testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]`
)
| builtin/logical/mongodb/backend_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9824498891830444,
0.07612590491771698,
0.0001663103757891804,
0.0002884205023292452,
0.2557261884212494
] |
{
"id": 6,
"code_window": [
"\t\t})\n",
"\t}\n",
"}\n",
"\n",
"func TestMongoDB_CreateUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 148
} | export { default } from 'core/helpers/is-version';
| ui/lib/core/app/helpers/is-version.js | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017054512863978744,
0.00017054512863978744,
0.00017054512863978744,
0.00017054512863978744,
0
] |
{
"id": 6,
"code_window": [
"\t\t})\n",
"\t}\n",
"}\n",
"\n",
"func TestMongoDB_CreateUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 148
} | ---
layout: docs
page_title: Vault CSI Provider Configurations
description: This section documents the configuration options for the Vault CSI Provider.
---
# Command line arguments
The following command line arguments are supported by the Vault CSI provider.
Most settings can be set in any of the following ways, listed in ascending order of precedence:
- Environment variables
- Command line arguments
- Secret Provider Class parameters
If installing via the helm chart, they can be set using e.g.
`--set "csi.extraArgs={-debug=true}"`.
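The official `hashicorp/vault` helm chart also lets these flags live in a values file; the snippet below is a minimal sketch that assumes the chart's `csi.extraArgs` list field, and the flag value shown is only illustrative:
```yaml
# values.yaml (sketch): extra command line arguments passed to the
# Vault CSI provider container by the helm chart
csi:
  extraArgs:
    - -debug=true
```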
- `-debug` `(bool: false)` - Set to true to enable debug level logging.
- `-endpoint` `(string: "/tmp/vault.sock")` - Path to unix socket on which the
provider will listen for gRPC calls from the driver.
- `-health-addr` `(string: ":8080")` - (v0.3.0+) The address of the HTTP listener
for reporting health.
- `-vault-addr` `(string: "https://127.0.0.1:8200")` - (v0.3.0+) Default address
for connecting to Vault. Can also be specified via the `VAULT_ADDR` environment
variable.
- `-vault-mount` `(string: "kubernetes")` - (v0.3.0+) Default Vault mount path
for Kubernetes authentication. Can be overridden per Secret Provider Class
object.
- `-vault-namespace` `(string: "")` - (v1.1.0+) Default Vault namespace for Vault
requests. Can also be specified via the `VAULT_NAMESPACE` environment variable.
- `-vault-tls-ca-cert` `(string: "")` - (v1.1.0+) Path on disk to a single
  PEM-encoded CA certificate to trust for Vault. Takes precedence over
`-vault-tls-ca-directory`. Can also be specified via the `VAULT_CACERT`
environment variable.
- `-vault-tls-ca-directory` `(string: "")` - (v1.1.0+) Path on disk to a
directory of PEM-encoded CA certificates to trust for Vault. Can also be
specified via the `VAULT_CAPATH` environment variable.
- `-vault-tls-server-name` `(string: "")` - (v1.1.0+) Name to use as the SNI
host when connecting to Vault via TLS. Can also be specified via the
`VAULT_TLS_SERVER_NAME` environment variable.
- `-vault-tls-client-cert` `(string: "")` - (v1.1.0+) Path on disk to a
PEM-encoded client certificate for mTLS communication with Vault. If set,
also requires `-vault-tls-client-key`. Can also be specified via the
`VAULT_CLIENT_CERT` environment variable.
- `-vault-tls-client-key` `(string: "")` - (v1.1.0+) Path on disk to a
PEM-encoded client key for mTLS communication with Vault. If set, also
requires `-vault-tls-client-cert`. Can also be specified via the
`VAULT_CLIENT_KEY` environment variable.
- `-vault-tls-skip-verify` `(bool: false)` - (v1.1.0+) Disable verification of
TLS certificates. Can also be specified via the `VAULT_SKIP_VERIFY` environment
variable.
- `-version` `(bool: false)` - Print version information and exit.
# Secret Provider Class Parameters
The following parameters are supported by the Vault provider. Each parameter is
an entry under `spec.parameters` in a SecretProviderClass object. The full
structure is illustrated in the [examples](/docs/platform/k8s/csi/examples).
- `roleName` `(string: "")` - Name of the role to be used during login with Vault.
- `vaultAddress` `(string: "")` - The address of the Vault server.
- `vaultNamespace` `(string: "")` - The Vault [namespace](/docs/enterprise/namespaces) to use.
- `vaultSkipTLSVerify` `(string: "false")` - When set to true, skips verification of the Vault server
certificate. Setting this to true is not recommended for production.
- `vaultCACertPath` `(string: "")` - The path on disk where the Vault CA certificate can be found
when verifying the Vault server certificate.
- `vaultCADirectory` `(string: "")` - The directory on disk where the Vault CA certificate can be found
when verifying the Vault server certificate.
- `vaultTLSClientCertPath` `(string: "")` - The path on disk where the client certificate can be found
for mTLS communications with Vault.
- `vaultTLSClientKeyPath` `(string: "")` - The path on disk where the client key can be found
for mTLS communications with Vault.
- `vaultTLSServerName` `(string: "")` - The name to use as the SNI host when connecting via TLS.
- `vaultKubernetesMountPath` `(string: "kubernetes")` - The name of the auth mount used for login.
At this time only the Kubernetes auth method is supported.
- `audience` `(string: "")` - Specifies a custom audience for the requesting pod's service account token,
generated using the
[TokenRequest API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-request-v1/#TokenRequestSpec).
The resulting token is used to authenticate to Vault, so if you specify an
[audience](https://www.vaultproject.io/api-docs/auth/kubernetes#audience) for your Kubernetes auth
role, it must match the audience specified here. If not set, the token audiences will default to
the Kubernetes cluster's default API audiences.
- `objects` `(array)` - An array of secrets to retrieve from Vault.
- `objectName` `(string: "")` - The alias of the object which can be referenced within the secret provider class and
the name of the secret file.
- `method` `(string: "GET")` - The type of HTTP request. Supported values include "GET" and "PUT".
- `secretPath` `(string: "")` - The path in Vault where the secret is located.
For secrets that are retrieved via HTTP GET method, the `secretPath` can include optional URI parameters,
for example, the [version of the KV2 secret](https://www.vaultproject.io/api-docs/secret/kv/kv-v2#read-secret-version):
```yaml
objects: |
- objectName: "app-secret"
secretPath: "secret/data/test?version=1"
secretKey: "password"
```
- `secretKey` `(string: "")` - The key in the Vault secret to extract. If omitted, the whole response from Vault will be written as JSON.
- `filePermission` `(integer: 0o644)` - The file permissions to set for this secret's file.
- `secretArgs` `(map: {})` - Additional arguments to be sent to Vault for a specific secret. Arguments can vary
for different secret engines. For example:
```yaml
secretArgs:
common_name: 'test.example.com'
ttl: '24h'
```
~> `secretArgs` are sent as part of the HTTP request body. Therefore, they are only effective for HTTP PUT/POST requests, for instance,
the [request used to generate a new certificate](https://www.vaultproject.io/api-docs/secret/pki#generate-certificate).
To supply additional parameters for secrets retrieved via HTTP GET, include optional URI parameters in [`secretPath`](#secretpath).
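For quick reference, the parameters above typically come together in a single SecretProviderClass object. The manifest below is a hedged sketch rather than an official example: it assumes the `secrets-store.csi.x-k8s.io/v1` API version of the Secrets Store CSI Driver, and the metadata name, role name, Vault address, and secret path are illustrative placeholders; the examples page linked above remains the authoritative reference.
```yaml
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
  name: vault-app-secrets # illustrative name
spec:
  provider: vault
  parameters:
    roleName: 'app' # placeholder Vault Kubernetes auth role
    vaultAddress: 'https://vault.example.com:8200' # placeholder address
    vaultKubernetesMountPath: 'kubernetes'
    objects: |
      - objectName: "app-secret"
        secretPath: "secret/data/app" # placeholder KV v2 path
        secretKey: "password"
```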
| website/content/docs/platform/k8s/csi/configurations.mdx | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017228038632310927,
0.00016632430197205395,
0.00016204503481276333,
0.00016643204435240477,
0.000002620885652504512
] |
{
"id": 6,
"code_window": [
"\t\t})\n",
"\t}\n",
"}\n",
"\n",
"func TestMongoDB_CreateUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 148
} | <WizardSection
@headerText="Databases"
@headerIcon="database"
@docText="Docs: Database Secrets"
@docPath="/docs/secrets/databases/index.html"
>
<p>
The database Secrets Engine generates database credentials dynamically based on configured roles.
</p>
</WizardSection> | ui/app/templates/components/wizard/database-engine.hbs | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017930199101101607,
0.00017550484335515648,
0.0001717076956992969,
0.00017550484335515648,
0.0000037971476558595896
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_CreateUser_writeConcern(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tinitReq := dbplugin.InitializeRequest{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 180
} | package mongodb
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
func TestMongoDB_Initialize(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
config := map[string]interface{}{
"connection_url": connURL,
}
// Make a copy since the original map could be modified by the Initialize call
expectedConfig := copyConfig(config)
req := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
resp := dbtesting.AssertInitialize(t, db, req)
if !reflect.DeepEqual(resp.Config, expectedConfig) {
t.Fatalf("Actual config: %#v\nExpected config: %#v", resp.Config, expectedConfig)
}
if !db.Initialized {
t.Fatal("Database should be initialized")
}
}
func TestNewUser_usernameTemplate(t *testing.T) {
type testCase struct {
usernameTemplate string
newUserReq dbplugin.NewUserRequest
expectedUsernameRegex string
}
tests := map[string]testCase{
"default username template": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"default username template with invalid chars": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "a.bad.account",
RoleName: "a.bad.role",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-a-bad-account-a-bad-role-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"custom username template": {
usernameTemplate: "{{random 2 | uppercase}}_{{unix_time}}_{{.RoleName | uppercase}}_{{.DisplayName | uppercase}}",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"username_template": test.usernameTemplate,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
ctx := context.Background()
newUserResp, err := db.NewUser(ctx, test.newUserReq)
require.NoError(t, err)
require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
assertCredsExist(t, newUserResp.Username, test.newUserReq.Password, connURL)
})
}
}
func TestMongoDB_CreateUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_CreateUser_writeConcern(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"write_concern": `{ "wmode": "majority", "wtimeout": 5000 }`,
},
VerifyConnection: true,
}
db := new()
defer dbtesting.AssertClose(t, db)
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_DeleteUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
// Test default revocation statement
delReq := dbplugin.DeleteUserRequest{
Username: createResp.Username,
}
dbtesting.AssertDeleteUser(t, db, delReq)
assertCredsDoNotExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_UpdateUser_Password(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
// The docker test method PrepareTestContainer defaults to a database "test"
// if none is provided
connURL = connURL + "/test"
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
// create the database user in advance, and test the connection
dbUser := "testmongouser"
startingPassword := "password"
createDBUser(t, connURL, "test", dbUser, startingPassword)
newPassword := "myreallysecurecredentials"
updateReq := dbplugin.UpdateUserRequest{
Username: dbUser,
Password: &dbplugin.ChangePassword{
NewPassword: newPassword,
},
}
dbtesting.AssertUpdateUser(t, db, updateReq)
assertCredsExist(t, dbUser, newPassword, connURL)
}
func TestGetTLSAuth(t *testing.T) {
ca := certhelpers.NewCert(t,
certhelpers.CommonName("certificate authority"),
certhelpers.IsCA(true),
certhelpers.SelfSign(),
)
cert := certhelpers.NewCert(t,
certhelpers.CommonName("test cert"),
certhelpers.Parent(ca),
)
type testCase struct {
username string
tlsCAData []byte
tlsKeyData []byte
expectOpts *options.ClientOptions
expectErr bool
}
tests := map[string]testCase{
"no TLS data set": {
expectOpts: nil,
expectErr: false,
},
"bad CA": {
tlsCAData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"bad key": {
tlsKeyData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"good ca": {
tlsCAData: cert.Pem,
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
RootCAs: appendToCertPool(t, x509.NewCertPool(), cert.Pem),
},
),
expectErr: false,
},
"good key": {
username: "unittest",
tlsKeyData: cert.CombinedPEM(),
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
Certificates: []tls.Certificate{cert.TLSCert},
},
).
SetAuth(options.Credential{
AuthMechanism: "MONGODB-X509",
Username: "unittest",
}),
expectErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
c := new()
c.Username = test.username
c.TLSCAData = test.tlsCAData
c.TLSCertificateKeyData = test.tlsKeyData
actual, err := c.getTLSAuth()
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
assertDeepEqual(t, test.expectOpts, actual)
})
}
}
func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.CertPool {
t.Helper()
ok := pool.AppendCertsFromPEM(caPem)
if !ok {
t.Fatalf("Unable to append cert to cert pool")
}
return pool
}
var cmpClientOptionsOpts = cmp.Options{
cmp.AllowUnexported(options.ClientOptions{}),
cmp.AllowUnexported(tls.Config{}),
cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}),
// 'lazyCerts' has a func field which can't be compared.
cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
cmp.AllowUnexported(x509.CertPool{}),
}
// Need a special comparison for ClientOptions because reflect.DeepEqual won't work in Go 1.16.
// See: https://github.com/golang/go/issues/45891
func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) {
t.Helper()
if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
func createDBUser(t testing.TB, connURL, db, username, password string) {
t.Helper()
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatal(err)
}
createUserCmd := &createUserCommand{
Username: username,
Password: password,
Roles: []interface{}{},
}
result := client.Database(db).RunCommand(ctx, createUserCmd, nil)
if result.Err() != nil {
t.Fatalf("failed to create user in mongodb: %s", result.Err())
}
assertCredsExist(t, username, password, connURL)
}
func assertCredsExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatalf("Failed to connect to mongo: %s", err)
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
t.Fatalf("Failed to ping mongo with user %q: %s", username, err)
}
}
func assertCredsDoNotExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
return // Creds don't exist as expected
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
return // Creds don't exist as expected
}
t.Fatalf("User %q exists and was able to authenticate", username)
}
func copyConfig(config map[string]interface{}) map[string]interface{} {
newConfig := map[string]interface{}{}
for k, v := range config {
newConfig[k] = v
}
return newConfig
}
| plugins/database/mongodb/mongodb_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9988352656364441,
0.4570045471191406,
0.00016781890008132905,
0.02959507144987583,
0.4846658408641815
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_CreateUser_writeConcern(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tinitReq := dbplugin.InitializeRequest{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 180
} | ```release-note:improvement
raft: Improve raft batch size selection
```
| changelog/11907.txt | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017205481708515435,
0.00017205481708515435,
0.00017205481708515435,
0.00017205481708515435,
0
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_CreateUser_writeConcern(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tinitReq := dbplugin.InitializeRequest{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 180
} | /**
* @module OperationFieldDisplay
* OperationFieldDisplay components are used on KMIP role show pages to display the allowed operations on that model
*
* @example
* ```js
* <OperationFieldDisplay @model={{model}} />
* ```
*
* @param model {DS.Model} - model is the KMIP role model that needs to display its allowed operations
*
*/
import Component from '@ember/component';
import layout from '../templates/components/operation-field-display';
export default Component.extend({
layout,
tagName: '',
model: null,
trueOrFalseString(model, field, trueString, falseString) {
if (model.operationAll) {
return trueString;
}
if (model.operationNone) {
return falseString;
}
return model.get(field.name) ? trueString : falseString;
},
actions: {
iconClass(model, field) {
return this.trueOrFalseString(model, field, 'icon-true', 'icon-false');
},
iconGlyph(model, field) {
return this.trueOrFalseString(model, field, 'check-circle-outline', 'cancel-square-outline');
},
},
});
| ui/lib/kmip/addon/components/operation-field-display.js | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017577641119714826,
0.00017273015691898763,
0.0001707581104710698,
0.00017219306027982384,
0.000002006070417337469
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_CreateUser_writeConcern(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tinitReq := dbplugin.InitializeRequest{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 180
} | ---
layout: docs
page_title: 1.6.0
description: |-
This page contains release notes for Vault 1.6.0.
---
# Vault 1.6.0
## Vault 1.6 Release Highlights
**Transform: Tokenization Tech Preview (Enterprise ADP Module Only)**: Vault 1.6 introduces a new transformation method
for tokenizing sensitive data stored in untrusted or semi-trusted systems. Tokenization is available as part of the
“Advanced Data Protection” module in Vault Enterprise. Tokenization provides non-reversible data protection pursuant to
requirements for data irreversibility (PCI-DSS, GDPR, etc.). This feature is being released in a Tech Preview.
**Integrated Storage Enhancements**: Continuing with the enhancements made to Vault’s Integrated storage, we are adding
the following new features:
- **Cloud auto-join** provides support for Vault nodes to automatically discover and join a cluster via specified cloud
metadata. This is particularly useful when IP addresses are not static.
- **Automated snapshots** adds built-in functionality for taking snapshots of the Vault servers' state and saving them
  locally, or pushing them to an optional remote storage service.
**UI improvements**
- **New UI for Transform secrets engine (Enterprise ADP Module Only)**
- To configure FPE and Masking transformations, including custom alphabets and patterns for FPE.
- To create and manage roles and patterns for templated use in current and future FPE and masking transformations.
- **Improvements to Vault Usage UI** to show new metrics for “active clients”, “unique entities” and “active direct
tokens” that help with understanding Vault usage
**Support for seal migration** across all combinations of unseal interfaces, including migrating from one auto-unseal mechanism to another of the same type
**Key Management Secrets Engine in Tech Preview (Enterprise ADP Module Only)** - A new Key Management Secrets Engine to
help manage and securely distribute keys to various cloud KMS services. This feature is being released in Tech Preview
to be used in conjunction with Microsoft’s Azure Key Vault.
**Database secrets engine improvements**:
- Extending the newly released Password Policy to the combined Database Secrets Engine, with support for all databases
- Add Couchbase support to the combined Database Secrets Engine to manage static and dynamic credentials for Couchbase
- Add static credential rotation to Cassandra, InfluxDB and Elasticsearch
- Add MongoDB Atlas root credential rotation
- Added support for root credential & static credential rotation for HanaDB
## What’s Changed
- Vault 1.6 will use Go 1.15, which has dropped support for 32-bit binaries for [Darwin](https://golang.org/doc/go1.15#darwin),
so we will no longer be issuing `darwin_386` builds of Vault.
For more detailed information, please refer to the [Changelog](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md#160).
| website/content/docs/release-notes/1.6.0.mdx | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017169100465252995,
0.00016622827388346195,
0.00016230560140684247,
0.0001655163650866598,
0.0000028382685286487686
] |
{
"id": 8,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_DeleteUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 214
} | package mongodb
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
func TestMongoDB_Initialize(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
config := map[string]interface{}{
"connection_url": connURL,
}
// Make a copy since the original map could be modified by the Initialize call
expectedConfig := copyConfig(config)
req := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
resp := dbtesting.AssertInitialize(t, db, req)
if !reflect.DeepEqual(resp.Config, expectedConfig) {
t.Fatalf("Actual config: %#v\nExpected config: %#v", resp.Config, expectedConfig)
}
if !db.Initialized {
t.Fatal("Database should be initialized")
}
}
func TestNewUser_usernameTemplate(t *testing.T) {
type testCase struct {
usernameTemplate string
newUserReq dbplugin.NewUserRequest
expectedUsernameRegex string
}
tests := map[string]testCase{
"default username template": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"default username template with invalid chars": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "a.bad.account",
RoleName: "a.bad.role",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-a-bad-account-a-bad-role-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"custom username template": {
usernameTemplate: "{{random 2 | uppercase}}_{{unix_time}}_{{.RoleName | uppercase}}_{{.DisplayName | uppercase}}",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"username_template": test.usernameTemplate,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
ctx := context.Background()
newUserResp, err := db.NewUser(ctx, test.newUserReq)
require.NoError(t, err)
require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
assertCredsExist(t, newUserResp.Username, test.newUserReq.Password, connURL)
})
}
}
func TestMongoDB_CreateUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_CreateUser_writeConcern(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"write_concern": `{ "wmode": "majority", "wtimeout": 5000 }`,
},
VerifyConnection: true,
}
db := new()
defer dbtesting.AssertClose(t, db)
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_DeleteUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
// Test default revocation statement
delReq := dbplugin.DeleteUserRequest{
Username: createResp.Username,
}
dbtesting.AssertDeleteUser(t, db, delReq)
assertCredsDoNotExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_UpdateUser_Password(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
// The docker test method PrepareTestContainer defaults to a database "test"
// if none is provided
connURL = connURL + "/test"
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
// create the database user in advance, and test the connection
dbUser := "testmongouser"
startingPassword := "password"
createDBUser(t, connURL, "test", dbUser, startingPassword)
newPassword := "myreallysecurecredentials"
updateReq := dbplugin.UpdateUserRequest{
Username: dbUser,
Password: &dbplugin.ChangePassword{
NewPassword: newPassword,
},
}
dbtesting.AssertUpdateUser(t, db, updateReq)
assertCredsExist(t, dbUser, newPassword, connURL)
}
func TestGetTLSAuth(t *testing.T) {
ca := certhelpers.NewCert(t,
certhelpers.CommonName("certificate authority"),
certhelpers.IsCA(true),
certhelpers.SelfSign(),
)
cert := certhelpers.NewCert(t,
certhelpers.CommonName("test cert"),
certhelpers.Parent(ca),
)
type testCase struct {
username string
tlsCAData []byte
tlsKeyData []byte
expectOpts *options.ClientOptions
expectErr bool
}
tests := map[string]testCase{
"no TLS data set": {
expectOpts: nil,
expectErr: false,
},
"bad CA": {
tlsCAData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"bad key": {
tlsKeyData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"good ca": {
tlsCAData: cert.Pem,
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
RootCAs: appendToCertPool(t, x509.NewCertPool(), cert.Pem),
},
),
expectErr: false,
},
"good key": {
username: "unittest",
tlsKeyData: cert.CombinedPEM(),
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
Certificates: []tls.Certificate{cert.TLSCert},
},
).
SetAuth(options.Credential{
AuthMechanism: "MONGODB-X509",
Username: "unittest",
}),
expectErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
c := new()
c.Username = test.username
c.TLSCAData = test.tlsCAData
c.TLSCertificateKeyData = test.tlsKeyData
actual, err := c.getTLSAuth()
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
assertDeepEqual(t, test.expectOpts, actual)
})
}
}
func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.CertPool {
t.Helper()
ok := pool.AppendCertsFromPEM(caPem)
if !ok {
t.Fatalf("Unable to append cert to cert pool")
}
return pool
}
var cmpClientOptionsOpts = cmp.Options{
cmp.AllowUnexported(options.ClientOptions{}),
cmp.AllowUnexported(tls.Config{}),
cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}),
// 'lazyCerts' has a func field which can't be compared.
cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
cmp.AllowUnexported(x509.CertPool{}),
}
// Need a special comparison for ClientOptions because reflect.DeepEqual won't work in Go 1.16.
// See: https://github.com/golang/go/issues/45891
func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) {
t.Helper()
if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
func createDBUser(t testing.TB, connURL, db, username, password string) {
t.Helper()
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatal(err)
}
createUserCmd := &createUserCommand{
Username: username,
Password: password,
Roles: []interface{}{},
}
result := client.Database(db).RunCommand(ctx, createUserCmd, nil)
if result.Err() != nil {
t.Fatalf("failed to create user in mongodb: %s", result.Err())
}
assertCredsExist(t, username, password, connURL)
}
func assertCredsExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatalf("Failed to connect to mongo: %s", err)
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
t.Fatalf("Failed to ping mongo with user %q: %s", username, err)
}
}
func assertCredsDoNotExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
return // Creds don't exist as expected
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
return // Creds don't exist as expected
}
t.Fatalf("User %q exists and was able to authenticate", username)
}
func copyConfig(config map[string]interface{}) map[string]interface{} {
newConfig := map[string]interface{}{}
for k, v := range config {
newConfig[k] = v
}
return newConfig
}
| plugins/database/mongodb/mongodb_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9985197186470032,
0.4855983853340149,
0.00016644092102069408,
0.21269501745700836,
0.4838831126689911
] |
{
"id": 8,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_DeleteUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 214
} | package misc
import (
"testing"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
// Tests the regression in
// https://github.com/hashicorp/vault/pull/6920
func TestRecoverFromPanic(t *testing.T) {
logger := hclog.New(nil)
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"noop": vault.NoopBackendFactory,
},
EnableRaw: true,
Logger: logger,
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
defer cluster.Cleanup()
core := cluster.Cores[0]
vault.TestWaitActive(t, core.Core)
client := core.Client
err := client.Sys().Mount("noop", &api.MountInput{
Type: "noop",
})
if err != nil {
t.Fatal(err)
}
_, err = client.Logical().Read("noop/panic")
if err == nil {
t.Fatal("expected error")
}
// This will deadlock the test if we hit the condition
cluster.EnsureCoresSealed(t)
}
| vault/external_tests/misc/recover_from_panic_test.go | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0002224248310085386,
0.0001925680844578892,
0.00017041165847331285,
0.0001782762265065685,
0.000023627055270480923
] |
{
"id": 8,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_DeleteUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 214
} | package testing
import (
"context"
"crypto/tls"
"fmt"
"os"
"reflect"
"sort"
"testing"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/sdk/physical/inmem"
"github.com/hashicorp/vault/vault"
)
// TestEnvVar must be set to a non-empty value for acceptance tests to run.
const TestEnvVar = "VAULT_ACC"
// TestCase is a single set of tests to run for a backend. A TestCase
// should generally map 1:1 to each test method for your acceptance
// tests.
type TestCase struct {
// Precheck, if non-nil, will be called once before the test case
// runs at all. This can be used for some validation prior to the
// test running.
PreCheck func()
// LogicalBackend is the backend that will be mounted.
LogicalBackend logical.Backend
// LogicalFactory can be used instead of LogicalBackend if the
// backend requires more construction
LogicalFactory logical.Factory
// CredentialBackend is the backend that will be mounted.
CredentialBackend logical.Backend
// CredentialFactory can be used instead of CredentialBackend if the
// backend requires more construction
CredentialFactory logical.Factory
// Steps are the set of operations that are run for this test case.
Steps []TestStep
// Teardown will be called before the test case is over regardless
// of if the test succeeded or failed. This should return an error
// in the case that the test can't guarantee all resources were
// properly cleaned up.
Teardown TestTeardownFunc
// AcceptanceTest, if set, the test case will be run only if
// the environment variable VAULT_ACC is set. If not this test case
// will be run as a unit test.
AcceptanceTest bool
}
// TestStep is a single step within a TestCase.
type TestStep struct {
// Operation is the operation to execute
Operation logical.Operation
// Path is the request path. The mount prefix will be automatically added.
Path string
// Arguments to pass in
Data map[string]interface{}
// Check is called after this step is executed in order to test that
// the step executed successfully. If this is not set, then the next
// step will be called
Check TestCheckFunc
// PreFlight is called directly before execution of the request, allowing
// modification of the request parameters (e.g. Path) with dynamic values.
PreFlight PreFlightFunc
// ErrorOk, if true, will let erroneous responses through to the check
ErrorOk bool
// Unauthenticated, if true, will make the request unauthenticated.
Unauthenticated bool
// RemoteAddr, if set, will set the remote addr on the request.
RemoteAddr string
// ConnState, if set, will set the tls connection state
ConnState *tls.ConnectionState
}
// TestCheckFunc is the callback used for Check in TestStep.
type TestCheckFunc func(*logical.Response) error
// PreFlightFunc is used to modify request parameters directly before execution
// in each TestStep.
type PreFlightFunc func(*logical.Request) error
// TestTeardownFunc is the callback used for Teardown in TestCase.
type TestTeardownFunc func() error
// Test performs an acceptance test on a backend with the given test case.
//
// Tests are not run unless an environmental variable "VAULT_ACC" is
// set to some non-empty value. This is to avoid test cases surprising
// a user by creating real resources.
//
// Tests will fail unless the verbose flag (`go test -v`, or explicitly
// the "-test.v" flag) is set. Because some acceptance tests take quite
// long, we require the verbose flag so users are able to see progress
// output.
func Test(tt TestT, c TestCase) {
// We only run acceptance tests if an env var is set because they're
// slow and generally require some outside configuration.
if c.AcceptanceTest && os.Getenv(TestEnvVar) == "" {
tt.Skip(fmt.Sprintf(
"Acceptance tests skipped unless env %q set",
TestEnvVar))
return
}
// We require verbose mode so that the user knows what is going on.
if c.AcceptanceTest && !testTesting && !testing.Verbose() {
tt.Fatal("Acceptance tests must be run with the -v flag on tests")
return
}
// Run the PreCheck if we have it
if c.PreCheck != nil {
c.PreCheck()
}
// Defer on the teardown, regardless of pass/fail at this point
if c.Teardown != nil {
defer c.Teardown()
}
// Check that something is provided
if c.LogicalBackend == nil && c.LogicalFactory == nil {
if c.CredentialBackend == nil && c.CredentialFactory == nil {
tt.Fatal("Must provide either Backend or Factory")
return
}
}
// We currently only support doing one logical OR one credential test at a time.
if (c.LogicalFactory != nil || c.LogicalBackend != nil) && (c.CredentialFactory != nil || c.CredentialBackend != nil) {
tt.Fatal("Must provide only one backend or factory")
return
}
// Create an in-memory Vault core
logger := logging.NewVaultLogger(log.Trace)
phys, err := inmem.NewInmem(nil, logger)
if err != nil {
tt.Fatal(err)
return
}
config := &vault.CoreConfig{
Physical: phys,
DisableMlock: true,
BuiltinRegistry: vault.NewMockBuiltinRegistry(),
}
if c.LogicalBackend != nil || c.LogicalFactory != nil {
config.LogicalBackends = map[string]logical.Factory{
"test": func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
if c.LogicalBackend != nil {
return c.LogicalBackend, nil
}
return c.LogicalFactory(ctx, conf)
},
}
}
if c.CredentialBackend != nil || c.CredentialFactory != nil {
config.CredentialBackends = map[string]logical.Factory{
"test": func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
if c.CredentialBackend != nil {
return c.CredentialBackend, nil
}
return c.CredentialFactory(ctx, conf)
},
}
}
core, err := vault.NewCore(config)
if err != nil {
tt.Fatal("error initializing core: ", err)
return
}
// Initialize the core
init, err := core.Initialize(context.Background(), &vault.InitParams{
BarrierConfig: &vault.SealConfig{
SecretShares: 1,
SecretThreshold: 1,
},
RecoveryConfig: nil,
})
if err != nil {
tt.Fatal("error initializing core: ", err)
return
}
// Unseal the core
if unsealed, err := core.Unseal(init.SecretShares[0]); err != nil {
tt.Fatal("error unsealing core: ", err)
return
} else if !unsealed {
tt.Fatal("vault shouldn't be sealed")
return
}
// Create an HTTP API server and client
ln, addr := http.TestServer(nil, core)
defer ln.Close()
clientConfig := api.DefaultConfig()
clientConfig.Address = addr
client, err := api.NewClient(clientConfig)
if err != nil {
tt.Fatal("error initializing HTTP client: ", err)
return
}
// Set the token so we're authenticated
client.SetToken(init.RootToken)
prefix := "mnt"
if c.LogicalBackend != nil || c.LogicalFactory != nil {
// Mount the backend
mountInfo := &api.MountInput{
Type: "test",
Description: "acceptance test",
}
if err := client.Sys().Mount(prefix, mountInfo); err != nil {
tt.Fatal("error mounting backend: ", err)
return
}
}
isAuthBackend := false
if c.CredentialBackend != nil || c.CredentialFactory != nil {
isAuthBackend = true
// Enable the test auth method
opts := &api.EnableAuthOptions{
Type: "test",
}
if err := client.Sys().EnableAuthWithOptions(prefix, opts); err != nil {
tt.Fatal("error enabling backend: ", err)
return
}
}
tokenInfo, err := client.Auth().Token().LookupSelf()
if err != nil {
tt.Fatal("error looking up token: ", err)
return
}
var tokenPolicies []string
if tokenPoliciesRaw, ok := tokenInfo.Data["policies"]; ok {
if tokenPoliciesSliceRaw, ok := tokenPoliciesRaw.([]interface{}); ok {
for _, p := range tokenPoliciesSliceRaw {
tokenPolicies = append(tokenPolicies, p.(string))
}
}
}
// Make requests
var revoke []*logical.Request
for i, s := range c.Steps {
if logger.IsWarn() {
logger.Warn("Executing test step", "step_number", i+1)
}
// Create the request
req := &logical.Request{
Operation: s.Operation,
Path: s.Path,
Data: s.Data,
}
if !s.Unauthenticated {
req.ClientToken = client.Token()
req.SetTokenEntry(&logical.TokenEntry{
ID: req.ClientToken,
NamespaceID: namespace.RootNamespaceID,
Policies: tokenPolicies,
DisplayName: tokenInfo.Data["display_name"].(string),
})
}
req.Connection = &logical.Connection{RemoteAddr: s.RemoteAddr}
if s.ConnState != nil {
req.Connection.ConnState = s.ConnState
}
if s.PreFlight != nil {
ct := req.ClientToken
req.ClientToken = ""
if err := s.PreFlight(req); err != nil {
tt.Error(fmt.Sprintf("Failed preflight for step %d: %s", i+1, err))
break
}
req.ClientToken = ct
}
// Make sure to prefix the path with where we mounted the thing
req.Path = fmt.Sprintf("%s/%s", prefix, req.Path)
if isAuthBackend {
// Prepend the path with "auth"
req.Path = "auth/" + req.Path
}
// Make the request
resp, err := core.HandleRequest(namespace.RootContext(nil), req)
if resp != nil && resp.Secret != nil {
// Revoke this secret later
revoke = append(revoke, &logical.Request{
Operation: logical.UpdateOperation,
Path: "sys/revoke/" + resp.Secret.LeaseID,
})
}
// Test step returned an error.
if err != nil {
// But if an error is expected, do not fail the test step,
// regardless of whether the error is a 'logical.ErrorResponse'
// or not. Set the err to nil. If the error is a logical.ErrorResponse,
// it will be handled later.
if s.ErrorOk {
err = nil
} else {
// If the error is not expected, fail right away.
tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err))
break
}
}
// If the error is a 'logical.ErrorResponse' and if error was not expected,
// set the error so that this can be caught below.
if resp.IsError() && !s.ErrorOk {
err = fmt.Errorf("erroneous response:\n\n%#v", resp)
}
		// At this point 'err' is either nil, or an error was expected and 'err'
		// was reset to nil. Call the 'Check' function if there is one.
		//
		// TODO: This works for now, but it would be better if the 'Check'
		// function took in both the response object and the error, and decided
		// on the action on its own.
if err == nil && s.Check != nil {
// Call the test method
err = s.Check(resp)
}
if err != nil {
tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err))
break
}
}
// Revoke any secrets we might have.
var failedRevokes []*logical.Secret
for _, req := range revoke {
if logger.IsWarn() {
logger.Warn("Revoking secret", "secret", fmt.Sprintf("%#v", req))
}
req.ClientToken = client.Token()
resp, err := core.HandleRequest(namespace.RootContext(nil), req)
if err == nil && resp.IsError() {
err = fmt.Errorf("erroneous response:\n\n%#v", resp)
}
if err != nil {
failedRevokes = append(failedRevokes, req.Secret)
tt.Error(fmt.Sprintf("Revoke error: %s", err))
}
}
// Perform any rollbacks. This should no-op if there aren't any.
// We set the "immediate" flag here that any backend can pick up on
// to do all rollbacks immediately even if the WAL entries are new.
logger.Warn("Requesting RollbackOperation")
rollbackPath := prefix + "/"
if c.CredentialFactory != nil || c.CredentialBackend != nil {
rollbackPath = "auth/" + rollbackPath
}
req := logical.RollbackRequest(rollbackPath)
req.Data["immediate"] = true
req.ClientToken = client.Token()
resp, err := core.HandleRequest(namespace.RootContext(nil), req)
if err == nil && resp.IsError() {
err = fmt.Errorf("erroneous response:\n\n%#v", resp)
}
if err != nil {
if !errwrap.Contains(err, logical.ErrUnsupportedOperation.Error()) {
tt.Error(fmt.Sprintf("[ERR] Rollback error: %s", err))
}
}
// If we have any failed revokes, log it.
if len(failedRevokes) > 0 {
for _, s := range failedRevokes {
tt.Error(fmt.Sprintf(
"WARNING: Revoking the following secret failed. It may\n"+
"still exist. Please verify:\n\n%#v",
s))
}
}
}
// TestCheckMulti is a helper to have multiple checks.
func TestCheckMulti(fs ...TestCheckFunc) TestCheckFunc {
return func(resp *logical.Response) error {
for _, f := range fs {
if err := f(resp); err != nil {
return err
}
}
return nil
}
}
// TestCheckAuth is a helper to check that a request generated an
// auth token with the proper policies.
func TestCheckAuth(policies []string) TestCheckFunc {
return func(resp *logical.Response) error {
if resp == nil || resp.Auth == nil {
return fmt.Errorf("no auth in response")
}
expected := make([]string, len(policies))
copy(expected, policies)
sort.Strings(expected)
ret := make([]string, len(resp.Auth.Policies))
copy(ret, resp.Auth.Policies)
sort.Strings(ret)
if !reflect.DeepEqual(ret, expected) {
return fmt.Errorf("invalid policies: expected %#v, got %#v", expected, ret)
}
return nil
}
}
// TestCheckAuthEntityId is a helper to check that a request generated an
// auth token with the expected entity_id.
func TestCheckAuthEntityId(entity_id *string) TestCheckFunc {
return func(resp *logical.Response) error {
if resp == nil || resp.Auth == nil {
return fmt.Errorf("no auth in response")
}
if *entity_id == "" {
// If we don't know what the entity_id should be, just save it
*entity_id = resp.Auth.EntityID
} else if resp.Auth.EntityID != *entity_id {
return fmt.Errorf("entity_id %s does not match the expected value of %s", resp.Auth.EntityID, *entity_id)
}
return nil
}
}
// TestCheckAuthEntityAliasMetadataName is a helper to check that a request generated an
// auth token with the expected alias metadata.
func TestCheckAuthEntityAliasMetadataName(key string, value string) TestCheckFunc {
return func(resp *logical.Response) error {
if resp == nil || resp.Auth == nil {
return fmt.Errorf("no auth in response")
}
if key == "" || value == "" {
return fmt.Errorf("alias metadata key and value required")
}
name, ok := resp.Auth.Alias.Metadata[key]
if !ok {
return fmt.Errorf("metadata key %s does not exist, it should", key)
}
if name != value {
return fmt.Errorf("expected map value %s, got %s", value, name)
}
return nil
}
}
// TestCheckAuthDisplayName is a helper to check that a request generated a
// valid display name.
func TestCheckAuthDisplayName(n string) TestCheckFunc {
return func(resp *logical.Response) error {
if resp.Auth == nil {
return fmt.Errorf("no auth in response")
}
if n != "" && resp.Auth.DisplayName != "mnt-"+n {
return fmt.Errorf("invalid display name: %#v", resp.Auth.DisplayName)
}
return nil
}
}
// TestCheckError is a helper to check that a response is an error.
func TestCheckError() TestCheckFunc {
return func(resp *logical.Response) error {
if !resp.IsError() {
return fmt.Errorf("response should be error")
}
return nil
}
}
// TestT is the interface used to handle the test lifecycle of a test.
//
// Users should just use a *testing.T object, which implements this.
type TestT interface {
Error(args ...interface{})
Fatal(args ...interface{})
Skip(args ...interface{})
}
var testTesting = false
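// Illustrative sketch, not part of the original file: the TestCheck* helpers
// above are meant to be composed into a step's Check field, for example
//
//	Check: TestCheckMulti(
//		TestCheckAuth([]string{"default", "my-policy"}),
//		TestCheckAuthDisplayName("demo"),
//	),
//
// where "my-policy" and "demo" are hypothetical values used only for the example.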
| helper/testhelpers/logical/testing.go | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.0037471060641109943,
0.0002537488180678338,
0.00016551588487345725,
0.0001709404750727117,
0.00048532828805036843
] |
{
"id": 8,
"code_window": [
"}\n",
"\n",
"func TestMongoDB_DeleteUser(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\tdb := new()\n",
"\tdefer dbtesting.AssertClose(t, db)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 214
} | rules:
- id: hmac-needs-new
patterns:
- pattern-either:
- pattern: |
$H := $HASH.New()
...
$FUNC := func() hash.Hash { return $H }
...
hmac.New($FUNC, ...)
- pattern: |
$H := $HASH.New()
...
hmac.New(func() hash.Hash { return $H }, ...)
- pattern: |
hmac.New(func() hash.Hash { return ( $H : hash.Hash) }, ...)
message: "calling hmac.New with unchanging hash.New"
languages: [go]
severity: ERROR
| tools/semgrep/ci/hmac-hash.yml | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017372063302900642,
0.00017102005949709564,
0.00016858972958289087,
0.00017074980132747442,
0.000002103381575579988
] |
{
"id": 9,
"code_window": [
"\n",
"\tassertCredsDoNotExist(t, createResp.Username, password, connURL)\n",
"}\n",
"\n",
"func TestMongoDB_UpdateUser_Password(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\t// The docker test method PrepareTestContainer defaults to a database \"test\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 254
} | package mongodb
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/helper/testhelpers/mongodb"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
func TestMongoDB_Initialize(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
config := map[string]interface{}{
"connection_url": connURL,
}
// Make a copy since the original map could be modified by the Initialize call
expectedConfig := copyConfig(config)
req := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
resp := dbtesting.AssertInitialize(t, db, req)
if !reflect.DeepEqual(resp.Config, expectedConfig) {
t.Fatalf("Actual config: %#v\nExpected config: %#v", resp.Config, expectedConfig)
}
if !db.Initialized {
t.Fatal("Database should be initialized")
}
}
func TestNewUser_usernameTemplate(t *testing.T) {
type testCase struct {
usernameTemplate string
newUserReq dbplugin.NewUserRequest
expectedUsernameRegex string
}
tests := map[string]testCase{
"default username template": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"default username template with invalid chars": {
usernameTemplate: "",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "a.bad.account",
RoleName: "a.bad.role",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^v-a-bad-account-a-bad-role-[a-zA-Z0-9]{20}-[0-9]{10}$",
},
"custom username template": {
usernameTemplate: "{{random 2 | uppercase}}_{{unix_time}}_{{.RoleName | uppercase}}_{{.DisplayName | uppercase}}",
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "testrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: "98yq3thgnakjsfhjkl",
Expiration: time.Now().Add(time.Minute),
},
expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"username_template": test.usernameTemplate,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
ctx := context.Background()
newUserResp, err := db.NewUser(ctx, test.newUserReq)
require.NoError(t, err)
require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
assertCredsExist(t, newUserResp.Username, test.newUserReq.Password, connURL)
})
}
}
func TestMongoDB_CreateUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_CreateUser_writeConcern(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
"write_concern": `{ "wmode": "majority", "wtimeout": 5000 }`,
},
VerifyConnection: true,
}
db := new()
defer dbtesting.AssertClose(t, db)
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_DeleteUser(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{mongoAdminRole},
},
Password: password,
Expiration: time.Now().Add(time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCredsExist(t, createResp.Username, password, connURL)
// Test default revocation statement
delReq := dbplugin.DeleteUserRequest{
Username: createResp.Username,
}
dbtesting.AssertDeleteUser(t, db, delReq)
assertCredsDoNotExist(t, createResp.Username, password, connURL)
}
func TestMongoDB_UpdateUser_Password(t *testing.T) {
cleanup, connURL := mongodb.PrepareTestContainer(t, "latest")
defer cleanup()
// The docker test method PrepareTestContainer defaults to a database "test"
// if none is provided
connURL = connURL + "/test"
db := new()
defer dbtesting.AssertClose(t, db)
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"connection_url": connURL,
},
VerifyConnection: true,
}
dbtesting.AssertInitialize(t, db, initReq)
// create the database user in advance, and test the connection
dbUser := "testmongouser"
startingPassword := "password"
createDBUser(t, connURL, "test", dbUser, startingPassword)
newPassword := "myreallysecurecredentials"
updateReq := dbplugin.UpdateUserRequest{
Username: dbUser,
Password: &dbplugin.ChangePassword{
NewPassword: newPassword,
},
}
dbtesting.AssertUpdateUser(t, db, updateReq)
assertCredsExist(t, dbUser, newPassword, connURL)
}
func TestGetTLSAuth(t *testing.T) {
ca := certhelpers.NewCert(t,
certhelpers.CommonName("certificate authority"),
certhelpers.IsCA(true),
certhelpers.SelfSign(),
)
cert := certhelpers.NewCert(t,
certhelpers.CommonName("test cert"),
certhelpers.Parent(ca),
)
type testCase struct {
username string
tlsCAData []byte
tlsKeyData []byte
expectOpts *options.ClientOptions
expectErr bool
}
tests := map[string]testCase{
"no TLS data set": {
expectOpts: nil,
expectErr: false,
},
"bad CA": {
tlsCAData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"bad key": {
tlsKeyData: []byte("foobar"),
expectOpts: nil,
expectErr: true,
},
"good ca": {
tlsCAData: cert.Pem,
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
RootCAs: appendToCertPool(t, x509.NewCertPool(), cert.Pem),
},
),
expectErr: false,
},
"good key": {
username: "unittest",
tlsKeyData: cert.CombinedPEM(),
expectOpts: options.Client().
SetTLSConfig(
&tls.Config{
Certificates: []tls.Certificate{cert.TLSCert},
},
).
SetAuth(options.Credential{
AuthMechanism: "MONGODB-X509",
Username: "unittest",
}),
expectErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
c := new()
c.Username = test.username
c.TLSCAData = test.tlsCAData
c.TLSCertificateKeyData = test.tlsKeyData
actual, err := c.getTLSAuth()
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
assertDeepEqual(t, test.expectOpts, actual)
})
}
}
func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.CertPool {
t.Helper()
ok := pool.AppendCertsFromPEM(caPem)
if !ok {
t.Fatalf("Unable to append cert to cert pool")
}
return pool
}
var cmpClientOptionsOpts = cmp.Options{
cmp.AllowUnexported(options.ClientOptions{}),
cmp.AllowUnexported(tls.Config{}),
cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}),
// 'lazyCerts' has a func field which can't be compared.
cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
cmp.AllowUnexported(x509.CertPool{}),
}
// Need a special comparison for ClientOptions because reflect.DeepEquals won't work in Go 1.16.
// See: https://github.com/golang/go/issues/45891
func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) {
t.Helper()
if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
func createDBUser(t testing.TB, connURL, db, username, password string) {
t.Helper()
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatal(err)
}
createUserCmd := &createUserCommand{
Username: username,
Password: password,
Roles: []interface{}{},
}
result := client.Database(db).RunCommand(ctx, createUserCmd, nil)
if result.Err() != nil {
t.Fatalf("failed to create user in mongodb: %s", result.Err())
}
assertCredsExist(t, username, password, connURL)
}
func assertCredsExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
t.Fatalf("Failed to connect to mongo: %s", err)
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
t.Fatalf("Failed to ping mongo with user %q: %s", username, err)
}
}
func assertCredsDoNotExist(t testing.TB, username, password, connURL string) {
t.Helper()
connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
if err != nil {
return // Creds don't exist as expected
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
return // Creds don't exist as expected
}
t.Fatalf("User %q exists and was able to authenticate", username)
}
func copyConfig(config map[string]interface{}) map[string]interface{} {
newConfig := map[string]interface{}{}
for k, v := range config {
newConfig[k] = v
}
return newConfig
}
| plugins/database/mongodb/mongodb_test.go | 1 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.9978243112564087,
0.31378620862960815,
0.00016612412582617253,
0.0018875414971262217,
0.4378969371318817
] |
{
"id": 9,
"code_window": [
"\n",
"\tassertCredsDoNotExist(t, createResp.Username, password, connURL)\n",
"}\n",
"\n",
"func TestMongoDB_UpdateUser_Password(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\t// The docker test method PrepareTestContainer defaults to a database \"test\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 254
} | ```release-note:improvement
secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs
```
| changelog/13927.txt | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00016887224046513438,
0.00016887224046513438,
0.00016887224046513438,
0.00016887224046513438,
0
] |
{
"id": 9,
"code_window": [
"\n",
"\tassertCredsDoNotExist(t, createResp.Username, password, connURL)\n",
"}\n",
"\n",
"func TestMongoDB_UpdateUser_Password(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\t// The docker test method PrepareTestContainer defaults to a database \"test\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 254
} | import Component from '@glimmer/component';
import { tracked } from '@glimmer/tracking';
/**
* @module HoverCopyButton
* The `HoverCopyButton` is used on dark backgrounds to show a copy button.
*
* @example ```js
* <HoverCopyButton @copyValue={{stringify this.model.id}} @alwaysShow={{true}} />```
*
* @param {string} copyValue - The value to be copied.
* @param {boolean} [alwaysShow] - Boolean that affects the class.
*/
export default class HoverCopyButton extends Component {
get alwaysShow() {
return this.args.alwaysShow || false;
}
get copyValue() {
return this.args.copyValue || false;
}
@tracked tooltipText = 'Copy';
}
| ui/app/components/hover-copy-button.js | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017722779011819512,
0.00017534730432089418,
0.0001733232056722045,
0.00017549088806845248,
0.0000015972703977240599
] |
{
"id": 9,
"code_window": [
"\n",
"\tassertCredsDoNotExist(t, createResp.Username, password, connURL)\n",
"}\n",
"\n",
"func TestMongoDB_UpdateUser_Password(t *testing.T) {\n",
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"latest\")\n",
"\tdefer cleanup()\n",
"\n",
"\t// The docker test method PrepareTestContainer defaults to a database \"test\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcleanup, connURL := mongodb.PrepareTestContainer(t, \"5.0.10\")\n"
],
"file_path": "plugins/database/mongodb/mongodb_test.go",
"type": "replace",
"edit_start_line_idx": 254
} | ```release-note:change
storage/etcd: Remove support for v2.
```
| changelog/14193.txt | 0 | https://github.com/hashicorp/vault/commit/247a019be0ace89bfa3cdc54c0294829bf390ef0 | [
0.00017076462972909212,
0.00017076462972909212,
0.00017076462972909212,
0.00017076462972909212,
0
] |
{
"id": 0,
"code_window": [
"\tif v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {\n",
"\t\treturn ErrKnownBlock\n",
"\t}\n",
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n",
"\t// Header validity is known at this point, check the uncles and transactions\n",
"\theader := block.Header()\n",
"\tif err := v.engine.VerifyUncles(v.bc, block); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/block_validator.go",
"type": "replace",
"edit_start_line_idx": 55
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"fmt"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
// BlockValidator is responsible for validating block headers, uncles and
// processed state.
//
// BlockValidator implements Validator.
type BlockValidator struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
engine consensus.Engine // Consensus engine used for validating
}
// NewBlockValidator returns a new block validator which is safe for re-use
func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
validator := &BlockValidator{
config: config,
engine: engine,
bc: blockchain,
}
return validator
}
// ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
func (v *BlockValidator) ValidateBody(block *types.Block) error {
// Check whether the block's known, and if not, that it's linkable
if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
return ErrKnownBlock
}
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
}
return consensus.ErrPrunedAncestor
}
// Header validity is known at this point, check the uncles and transactions
header := block.Header()
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
}
if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
return nil
}
// ValidateState validates the various changes that happen after a state
// transition, such as amount of used gas, the receipt roots and the state root
// itself. ValidateState returns nil if the validation succeeds, otherwise it
// returns an error describing the first check that failed.
func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
header := block.Header()
if block.GasUsed() != usedGas {
return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
}
// Validate the received block's bloom with the one derived from the generated receipts.
// For valid blocks this should always validate to true.
rbloom := types.CreateBloom(receipts)
if rbloom != header.Bloom {
return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
}
	// The receipt Trie's root (R = (Tr [[H1, R1], ..., [Hn, Rn]]))
receiptSha := types.DeriveSha(receipts)
if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
}
return nil
}
// CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas above the provided floor, and increase it towards the
// ceil if the blocks are full. If the ceil is exceeded, it will always decrease
// the gas allowance.
func CalcGasLimit(parent *types.Block, gasFloor, gasCeil uint64) uint64 {
// contrib = (parentGasUsed * 3 / 2) / 1024
contrib := (parent.GasUsed() + parent.GasUsed()/2) / params.GasLimitBoundDivisor
// decay = parentGasLimit / 1024 -1
decay := parent.GasLimit()/params.GasLimitBoundDivisor - 1
/*
		Strategy: the gasLimit of the block to mine is set based on the parent's
		gasUsed value. If parentGasUsed > parentGasLimit * (2/3), then we increase
		it; otherwise we lower it (or leave it unchanged if it's right at that
		usage). The amount increased or decreased depends on how far parentGasUsed
		is from parentGasLimit * (2/3).
*/
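	// Worked example (illustrative numbers, not taken from the source): with
	// parentGasLimit = 8,000,000 and parentGasUsed = 6,000,000 (above the 2/3
	// mark of ~5,333,333), contrib = 9,000,000/1024 = 8789 and
	// decay = 8,000,000/1024 - 1 = 7811, so the next limit drifts up to
	// 8,000,000 - 7811 + 8789 = 8,000,978 before the floor/ceil clamping below.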
limit := parent.GasLimit() - decay + contrib
if limit < params.MinGasLimit {
limit = params.MinGasLimit
}
// If we're outside our allowed gas range, we try to hone towards them
if limit < gasFloor {
limit = parent.GasLimit() + decay
if limit > gasFloor {
limit = gasFloor
}
} else if limit > gasCeil {
limit = parent.GasLimit() - decay
if limit < gasCeil {
limit = gasCeil
}
}
return limit
}
| core/block_validator.go | 1 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.9977948665618896,
0.13610422611236572,
0.00017259294691029936,
0.00021044386085122824,
0.3209884464740753
] |
{
"id": 0,
"code_window": [
"\tif v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {\n",
"\t\treturn ErrKnownBlock\n",
"\t}\n",
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n",
"\t// Header validity is known at this point, check the uncles and transactions\n",
"\theader := block.Header()\n",
"\tif err := v.engine.VerifyUncles(v.bc, block); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/block_validator.go",
"type": "replace",
"edit_start_line_idx": 55
} | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vm
import (
"testing"
"github.com/ethereum/go-ethereum/crypto"
)
func TestJumpDestAnalysis(t *testing.T) {
tests := []struct {
code []byte
exp byte
which int
}{
{[]byte{byte(PUSH1), 0x01, 0x01, 0x01}, 0x40, 0},
{[]byte{byte(PUSH1), byte(PUSH1), byte(PUSH1), byte(PUSH1)}, 0x50, 0},
{[]byte{byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), 0x01, 0x01, 0x01}, 0x7F, 0},
{[]byte{byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x80, 1},
{[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), byte(PUSH2), byte(PUSH2), 0x01, 0x01, 0x01}, 0x03, 0},
{[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), 0x01, 0x01, 0x01, 0x01, 0x01}, 0x00, 1},
{[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x74, 0},
{[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x00, 1},
{[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x3F, 0},
{[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0xC0, 1},
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x7F, 0},
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0xFF, 1},
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x80, 2},
{[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0x7f, 0},
{[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0xA0, 1},
{[]byte{byte(PUSH32)}, 0x7F, 0},
{[]byte{byte(PUSH32)}, 0xFF, 1},
{[]byte{byte(PUSH32)}, 0xFF, 2},
}
for _, test := range tests {
ret := codeBitmap(test.code)
if ret[test.which] != test.exp {
t.Fatalf("expected %x, got %02x", test.exp, ret[test.which])
}
}
}
func BenchmarkJumpdestAnalysis_1200k(bench *testing.B) {
// 1.4 ms
code := make([]byte, 1200000)
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
codeBitmap(code)
}
bench.StopTimer()
}
func BenchmarkJumpdestHashing_1200k(bench *testing.B) {
// 4 ms
code := make([]byte, 1200000)
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
crypto.Keccak256Hash(code)
}
bench.StopTimer()
}
| core/vm/analysis_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0001789424568414688,
0.0001740817679092288,
0.00016788308857940137,
0.00017543429567012936,
0.0000036271151202527108
] |
{
"id": 0,
"code_window": [
"\tif v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {\n",
"\t\treturn ErrKnownBlock\n",
"\t}\n",
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n",
"\t// Header validity is known at this point, check the uncles and transactions\n",
"\theader := block.Header()\n",
"\tif err := v.engine.VerifyUncles(v.bc, block); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/block_validator.go",
"type": "replace",
"edit_start_line_idx": 55
} | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package packet implements parsing and serialization of OpenPGP packets, as
// specified in RFC 4880.
package packet // import "golang.org/x/crypto/openpgp/packet"
import (
"bufio"
"crypto/aes"
"crypto/cipher"
"crypto/des"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/openpgp/errors"
"io"
"math/big"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
// ErrUnexpectedEOF rather than EOF.
func readFull(r io.Reader, buf []byte) (n int, err error) {
n, err = io.ReadFull(r, buf)
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
func readLength(r io.Reader) (length int64, isPartial bool, err error) {
var buf [4]byte
_, err = readFull(r, buf[:1])
if err != nil {
return
}
switch {
case buf[0] < 192:
length = int64(buf[0])
case buf[0] < 224:
length = int64(buf[0]-192) << 8
_, err = readFull(r, buf[0:1])
if err != nil {
return
}
length += int64(buf[0]) + 192
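		// For example (illustrative): the two octets 0xC5 0x2A decode to
		// (0xC5-192)<<8 + 0x2A + 192 = 1280 + 42 + 192 = 1514.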
case buf[0] < 255:
length = int64(1) << (buf[0] & 0x1f)
isPartial = true
default:
_, err = readFull(r, buf[0:4])
if err != nil {
return
}
length = int64(buf[0])<<24 |
int64(buf[1])<<16 |
int64(buf[2])<<8 |
int64(buf[3])
}
return
}
// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
// The continuation lengths are parsed and removed from the stream and EOF is
// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
type partialLengthReader struct {
r io.Reader
remaining int64
isPartial bool
}
func (r *partialLengthReader) Read(p []byte) (n int, err error) {
for r.remaining == 0 {
if !r.isPartial {
return 0, io.EOF
}
r.remaining, r.isPartial, err = readLength(r.r)
if err != nil {
return 0, err
}
}
toRead := int64(len(p))
if toRead > r.remaining {
toRead = r.remaining
}
n, err = r.r.Read(p[:int(toRead)])
r.remaining -= int64(n)
if n < int(toRead) && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
// See RFC 4880, section 4.2.2.4.
type partialLengthWriter struct {
w io.WriteCloser
lengthByte [1]byte
}
func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
for len(p) > 0 {
for power := uint(14); power < 32; power-- {
l := 1 << power
if len(p) >= l {
w.lengthByte[0] = 224 + uint8(power)
_, err = w.w.Write(w.lengthByte[:])
if err != nil {
return
}
var m int
m, err = w.w.Write(p[:l])
n += m
if err != nil {
return
}
p = p[l:]
break
}
}
}
return
}
func (w *partialLengthWriter) Close() error {
w.lengthByte[0] = 0
_, err := w.w.Write(w.lengthByte[:])
if err != nil {
return err
}
return w.w.Close()
}
// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
// underlying Reader returns EOF before the limit has been reached.
type spanReader struct {
r io.Reader
n int64
}
func (l *spanReader) Read(p []byte) (n int, err error) {
if l.n <= 0 {
return 0, io.EOF
}
if int64(len(p)) > l.n {
p = p[0:l.n]
}
n, err = l.r.Read(p)
l.n -= int64(n)
if l.n > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readHeader parses a packet header and returns an io.Reader which will return
// the contents of the packet. See RFC 4880, section 4.2.
func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
var buf [4]byte
_, err = io.ReadFull(r, buf[:1])
if err != nil {
return
}
if buf[0]&0x80 == 0 {
err = errors.StructuralError("tag byte does not have MSB set")
return
}
if buf[0]&0x40 == 0 {
// Old format packet
tag = packetType((buf[0] & 0x3f) >> 2)
lengthType := buf[0] & 3
if lengthType == 3 {
length = -1
contents = r
return
}
lengthBytes := 1 << lengthType
_, err = readFull(r, buf[0:lengthBytes])
if err != nil {
return
}
for i := 0; i < lengthBytes; i++ {
length <<= 8
length |= int64(buf[i])
}
contents = &spanReader{r, length}
return
}
// New format packet
tag = packetType(buf[0] & 0x3f)
length, isPartial, err := readLength(r)
if err != nil {
return
}
if isPartial {
contents = &partialLengthReader{
remaining: length,
isPartial: true,
r: r,
}
length = -1
} else {
contents = &spanReader{r, length}
}
return
}
// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
// 4.2.
func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
var buf [6]byte
var n int
buf[0] = 0x80 | 0x40 | byte(ptype)
if length < 192 {
buf[1] = byte(length)
n = 2
} else if length < 8384 {
length -= 192
buf[1] = 192 + byte(length>>8)
buf[2] = byte(length)
n = 3
} else {
buf[1] = 255
buf[2] = byte(length >> 24)
buf[3] = byte(length >> 16)
buf[4] = byte(length >> 8)
buf[5] = byte(length)
n = 6
}
_, err = w.Write(buf[:n])
return
}
// serializeStreamHeader writes an OpenPGP packet header to w where the
// length of the packet is unknown. It returns a io.WriteCloser which can be
// used to write the contents of the packet. See RFC 4880, section 4.2.
func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
var buf [1]byte
buf[0] = 0x80 | 0x40 | byte(ptype)
_, err = w.Write(buf[:])
if err != nil {
return
}
out = &partialLengthWriter{w: w}
return
}
// Packet represents an OpenPGP packet. Users are expected to try casting
// instances of this interface to specific packet types.
type Packet interface {
parse(io.Reader) error
}
// consumeAll reads from the given Reader until error, returning the number of
// bytes read.
func consumeAll(r io.Reader) (n int64, err error) {
var m int
var buf [1024]byte
for {
m, err = r.Read(buf[:])
n += int64(m)
if err == io.EOF {
err = nil
return
}
if err != nil {
return
}
}
}
// packetType represents the numeric ids of the different OpenPGP packet types. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
type packetType uint8
const (
packetTypeEncryptedKey packetType = 1
packetTypeSignature packetType = 2
packetTypeSymmetricKeyEncrypted packetType = 3
packetTypeOnePassSignature packetType = 4
packetTypePrivateKey packetType = 5
packetTypePublicKey packetType = 6
packetTypePrivateSubkey packetType = 7
packetTypeCompressed packetType = 8
packetTypeSymmetricallyEncrypted packetType = 9
packetTypeLiteralData packetType = 11
packetTypeUserId packetType = 13
packetTypePublicSubkey packetType = 14
packetTypeUserAttribute packetType = 17
packetTypeSymmetricallyEncryptedMDC packetType = 18
)
// peekVersion detects the version of a public key packet about to
// be read. A bufio.Reader at the original position of the io.Reader
// is returned.
func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
bufr = bufio.NewReader(r)
var verBuf []byte
if verBuf, err = bufr.Peek(1); err != nil {
return
}
ver = verBuf[0]
return
}
// Read reads a single OpenPGP packet from the given io.Reader. If there is an
// error parsing a packet, the whole packet is consumed from the input.
func Read(r io.Reader) (p Packet, err error) {
tag, _, contents, err := readHeader(r)
if err != nil {
return
}
switch tag {
case packetTypeEncryptedKey:
p = new(EncryptedKey)
case packetTypeSignature:
var version byte
// Detect signature version
if contents, version, err = peekVersion(contents); err != nil {
return
}
if version < 4 {
p = new(SignatureV3)
} else {
p = new(Signature)
}
case packetTypeSymmetricKeyEncrypted:
p = new(SymmetricKeyEncrypted)
case packetTypeOnePassSignature:
p = new(OnePassSignature)
case packetTypePrivateKey, packetTypePrivateSubkey:
pk := new(PrivateKey)
if tag == packetTypePrivateSubkey {
pk.IsSubkey = true
}
p = pk
case packetTypePublicKey, packetTypePublicSubkey:
var version byte
if contents, version, err = peekVersion(contents); err != nil {
return
}
isSubkey := tag == packetTypePublicSubkey
if version < 4 {
p = &PublicKeyV3{IsSubkey: isSubkey}
} else {
p = &PublicKey{IsSubkey: isSubkey}
}
case packetTypeCompressed:
p = new(Compressed)
case packetTypeSymmetricallyEncrypted:
p = new(SymmetricallyEncrypted)
case packetTypeLiteralData:
p = new(LiteralData)
case packetTypeUserId:
p = new(UserId)
case packetTypeUserAttribute:
p = new(UserAttribute)
case packetTypeSymmetricallyEncryptedMDC:
se := new(SymmetricallyEncrypted)
se.MDC = true
p = se
default:
err = errors.UnknownPacketTypeError(tag)
}
if p != nil {
err = p.parse(contents)
}
if err != nil {
consumeAll(contents)
}
return
}
// SignatureType represents the different semantic meanings of an OpenPGP
// signature. See RFC 4880, section 5.2.1.
type SignatureType uint8
const (
SigTypeBinary SignatureType = 0
SigTypeText = 1
SigTypeGenericCert = 0x10
SigTypePersonaCert = 0x11
SigTypeCasualCert = 0x12
SigTypePositiveCert = 0x13
SigTypeSubkeyBinding = 0x18
SigTypePrimaryKeyBinding = 0x19
SigTypeDirectSignature = 0x1F
SigTypeKeyRevocation = 0x20
SigTypeSubkeyRevocation = 0x28
)
// PublicKeyAlgorithm represents the different public key system specified for
// OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
type PublicKeyAlgorithm uint8
const (
PubKeyAlgoRSA PublicKeyAlgorithm = 1
PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
PubKeyAlgoElGamal PublicKeyAlgorithm = 16
PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
)
// CanEncrypt returns true if it's possible to encrypt a message to a public
// key of the given type.
func (pka PublicKeyAlgorithm) CanEncrypt() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
return true
}
return false
}
// CanSign returns true if it's possible for a public key of the given type to
// sign a message.
func (pka PublicKeyAlgorithm) CanSign() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
return true
}
return false
}
// CipherFunction represents the different block ciphers specified for OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
type CipherFunction uint8
const (
Cipher3DES CipherFunction = 2
CipherCAST5 CipherFunction = 3
CipherAES128 CipherFunction = 7
CipherAES192 CipherFunction = 8
CipherAES256 CipherFunction = 9
)
// KeySize returns the key size, in bytes, of cipher.
func (cipher CipherFunction) KeySize() int {
switch cipher {
case Cipher3DES:
return 24
case CipherCAST5:
return cast5.KeySize
case CipherAES128:
return 16
case CipherAES192:
return 24
case CipherAES256:
return 32
}
return 0
}
// blockSize returns the block size, in bytes, of cipher.
func (cipher CipherFunction) blockSize() int {
switch cipher {
case Cipher3DES:
return des.BlockSize
case CipherCAST5:
return 8
case CipherAES128, CipherAES192, CipherAES256:
return 16
}
return 0
}
// new returns a fresh instance of the given cipher.
func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
switch cipher {
case Cipher3DES:
block, _ = des.NewTripleDESCipher(key)
case CipherCAST5:
block, _ = cast5.NewCipher(key)
case CipherAES128, CipherAES192, CipherAES256:
block, _ = aes.NewCipher(key)
}
return
}
// readMPI reads a big integer from r. The bit length returned is the bit
// length that was specified in r. This is preserved so that the integer can be
// reserialized exactly.
func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
var buf [2]byte
_, err = readFull(r, buf[0:])
if err != nil {
return
}
bitLength = uint16(buf[0])<<8 | uint16(buf[1])
numBytes := (int(bitLength) + 7) / 8
mpi = make([]byte, numBytes)
_, err = readFull(r, mpi)
return
}
// mpiLength returns the length of the given *big.Int when serialized as an
// MPI.
func mpiLength(n *big.Int) (mpiLengthInBytes int) {
mpiLengthInBytes = 2 /* MPI length */
mpiLengthInBytes += (n.BitLen() + 7) / 8
return
}
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil {
_, err = w.Write(mpiBytes)
}
return
}
// writeBig serializes a *big.Int to w.
func writeBig(w io.Writer, i *big.Int) error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
// CompressionAlgo Represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
type CompressionAlgo uint8
const (
CompressionNone CompressionAlgo = 0
CompressionZIP CompressionAlgo = 1
CompressionZLIB CompressionAlgo = 2
)
| vendor/golang.org/x/crypto/openpgp/packet/packet.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0015203232178464532,
0.00021716809715144336,
0.0001603559503564611,
0.0001692222140263766,
0.00024396752996835858
] |
{
"id": 0,
"code_window": [
"\tif v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {\n",
"\t\treturn ErrKnownBlock\n",
"\t}\n",
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n",
"\t// Header validity is known at this point, check the uncles and transactions\n",
"\theader := block.Header()\n",
"\tif err := v.engine.VerifyUncles(v.bc, block); err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "core/block_validator.go",
"type": "replace",
"edit_start_line_idx": 55
} | // Copyright 2013 Julien Schmidt. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// Package httprouter is a trie based high performance HTTP request router.
//
// A trivial example is:
//
// package main
//
// import (
// "fmt"
// "github.com/julienschmidt/httprouter"
// "net/http"
// "log"
// )
//
// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
// fmt.Fprint(w, "Welcome!\n")
// }
//
// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
// }
//
// func main() {
// router := httprouter.New()
// router.GET("/", Index)
// router.GET("/hello/:name", Hello)
//
// log.Fatal(http.ListenAndServe(":8080", router))
// }
//
// The router matches incoming requests by the request method and the path.
// If a handle is registered for this path and method, the router delegates the
// request to that function.
// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to
// register handles, for all other methods router.Handle can be used.
//
// The registered path, against which the router matches incoming requests, can
// contain two types of parameters:
// Syntax Type
// :name named parameter
// *name catch-all parameter
//
// Named parameters are dynamic path segments. They match anything until the
// next '/' or the path end:
// Path: /blog/:category/:post
//
// Requests:
// /blog/go/request-routers match: category="go", post="request-routers"
// /blog/go/request-routers/ no match, but the router would redirect
// /blog/go/ no match
// /blog/go/request-routers/comments no match
//
// Catch-all parameters match anything until the path end, including the
// directory index (the '/' before the catch-all). Since they match anything
// until the end, catch-all parameters must always be the final path element.
// Path: /files/*filepath
//
// Requests:
// /files/ match: filepath="/"
// /files/LICENSE match: filepath="/LICENSE"
// /files/templates/article.html match: filepath="/templates/article.html"
// /files no match, but the router would redirect
//
// The value of parameters is saved as a slice of the Param struct, consisting
// each of a key and a value. The slice is passed to the Handle func as a third
// parameter.
// There are two ways to retrieve the value of a parameter:
// // by the name of the parameter
// user := ps.ByName("user") // defined by :user or *user
//
// // by the index of the parameter. This way you can also get the name (key)
// thirdKey := ps[2].Key // the name of the 3rd parameter
// thirdValue := ps[2].Value // the value of the 3rd parameter
package httprouter
import (
"net/http"
)
// Handle is a function that can be registered to a route to handle HTTP
// requests. Like http.HandlerFunc, but has a third parameter for the values of
// wildcards (variables).
type Handle func(http.ResponseWriter, *http.Request, Params)
// Param is a single URL parameter, consisting of a key and a value.
type Param struct {
Key string
Value string
}
// Params is a Param-slice, as returned by the router.
// The slice is ordered, the first URL parameter is also the first slice value.
// It is therefore safe to read values by the index.
type Params []Param
// ByName returns the value of the first Param which key matches the given name.
// If no matching Param is found, an empty string is returned.
func (ps Params) ByName(name string) string {
for i := range ps {
if ps[i].Key == name {
return ps[i].Value
}
}
return ""
}
// Router is a http.Handler which can be used to dispatch requests to different
// handler functions via configurable routes
type Router struct {
trees map[string]*node
// Enables automatic redirection if the current route can't be matched but a
// handler for the path with (without) the trailing slash exists.
// For example if /foo/ is requested but a route only exists for /foo, the
// client is redirected to /foo with http status code 301 for GET requests
// and 307 for all other request methods.
RedirectTrailingSlash bool
// If enabled, the router tries to fix the current request path, if no
// handle is registered for it.
// First superfluous path elements like ../ or // are removed.
// Afterwards the router does a case-insensitive lookup of the cleaned path.
// If a handle can be found for this route, the router makes a redirection
// to the corrected path with status code 301 for GET requests and 307 for
// all other request methods.
// For example /FOO and /..//Foo could be redirected to /foo.
// RedirectTrailingSlash is independent of this option.
RedirectFixedPath bool
// If enabled, the router checks if another method is allowed for the
// current route, if the current request can not be routed.
// If this is the case, the request is answered with 'Method Not Allowed'
// and HTTP status code 405.
// If no other Method is allowed, the request is delegated to the NotFound
// handler.
HandleMethodNotAllowed bool
// If enabled, the router automatically replies to OPTIONS requests.
// Custom OPTIONS handlers take priority over automatic replies.
HandleOPTIONS bool
// Configurable http.Handler which is called when no matching route is
// found. If it is not set, http.NotFound is used.
NotFound http.Handler
// Configurable http.Handler which is called when a request
// cannot be routed and HandleMethodNotAllowed is true.
// If it is not set, http.Error with http.StatusMethodNotAllowed is used.
// The "Allow" header with allowed request methods is set before the handler
// is called.
MethodNotAllowed http.Handler
// Function to handle panics recovered from http handlers.
// It should be used to generate a error page and return the http error code
// 500 (Internal Server Error).
// The handler can be used to keep your server from crashing because of
// unrecovered panics.
PanicHandler func(http.ResponseWriter, *http.Request, interface{})
}
// Make sure the Router conforms with the http.Handler interface
var _ http.Handler = New()
// New returns a new initialized Router.
// Path auto-correction, including trailing slashes, is enabled by default.
func New() *Router {
return &Router{
RedirectTrailingSlash: true,
RedirectFixedPath: true,
HandleMethodNotAllowed: true,
HandleOPTIONS: true,
}
}
// GET is a shortcut for router.Handle("GET", path, handle)
func (r *Router) GET(path string, handle Handle) {
r.Handle("GET", path, handle)
}
// HEAD is a shortcut for router.Handle("HEAD", path, handle)
func (r *Router) HEAD(path string, handle Handle) {
r.Handle("HEAD", path, handle)
}
// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle)
func (r *Router) OPTIONS(path string, handle Handle) {
r.Handle("OPTIONS", path, handle)
}
// POST is a shortcut for router.Handle("POST", path, handle)
func (r *Router) POST(path string, handle Handle) {
r.Handle("POST", path, handle)
}
// PUT is a shortcut for router.Handle("PUT", path, handle)
func (r *Router) PUT(path string, handle Handle) {
r.Handle("PUT", path, handle)
}
// PATCH is a shortcut for router.Handle("PATCH", path, handle)
func (r *Router) PATCH(path string, handle Handle) {
r.Handle("PATCH", path, handle)
}
// DELETE is a shortcut for router.Handle("DELETE", path, handle)
func (r *Router) DELETE(path string, handle Handle) {
r.Handle("DELETE", path, handle)
}
// Handle registers a new request handle with the given path and method.
//
// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
// functions can be used.
//
// This function is intended for bulk loading and to allow the usage of less
// frequently used, non-standardized or custom methods (e.g. for internal
// communication with a proxy).
func (r *Router) Handle(method, path string, handle Handle) {
if path[0] != '/' {
panic("path must begin with '/' in path '" + path + "'")
}
if r.trees == nil {
r.trees = make(map[string]*node)
}
root := r.trees[method]
if root == nil {
root = new(node)
r.trees[method] = root
}
root.addRoute(path, handle)
}
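// Illustrative note (added, not in the original source): because the method is
// passed as a plain string, callers can also register non-standard verbs, e.g.
//
//	router.Handle("PROPFIND", "/dav/*filepath", davHandle)
//
// where davHandle is a hypothetical Handle implementation.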
// Handler is an adapter which allows the usage of an http.Handler as a
// request handle.
func (r *Router) Handler(method, path string, handler http.Handler) {
r.Handle(method, path,
func(w http.ResponseWriter, req *http.Request, _ Params) {
handler.ServeHTTP(w, req)
},
)
}
// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a
// request handle.
func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) {
r.Handler(method, path, handler)
}
// ServeFiles serves files from the given file system root.
// The path must end with "/*filepath"; files are then served from the local
// path /defined/root/dir/*filepath.
// For example if root is "/etc" and *filepath is "passwd", the local file
// "/etc/passwd" would be served.
// Internally a http.FileServer is used, therefore http.NotFound is used instead
// of the Router's NotFound handler.
// To use the operating system's file system implementation,
// use http.Dir:
// router.ServeFiles("/src/*filepath", http.Dir("/var/www"))
func (r *Router) ServeFiles(path string, root http.FileSystem) {
if len(path) < 10 || path[len(path)-10:] != "/*filepath" {
panic("path must end with /*filepath in path '" + path + "'")
}
fileServer := http.FileServer(root)
r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) {
req.URL.Path = ps.ByName("filepath")
fileServer.ServeHTTP(w, req)
})
}
func (r *Router) recv(w http.ResponseWriter, req *http.Request) {
if rcv := recover(); rcv != nil {
r.PanicHandler(w, req, rcv)
}
}
// Lookup allows the manual lookup of a method + path combo.
// This is e.g. useful to build a framework around this router.
// If the path was found, it returns the handle function and the path parameter
// values. Otherwise the third return value indicates whether a redirection to
// the same path, with the trailing slash added or removed, should be performed.
func (r *Router) Lookup(method, path string) (Handle, Params, bool) {
if root := r.trees[method]; root != nil {
return root.getValue(path)
}
return nil, nil, false
}
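// Illustrative usage (added, not in the original source): a framework wrapping
// this router could resolve a request manually and keep its own fallback:
//
//	if handle, ps, _ := router.Lookup(req.Method, req.URL.Path); handle != nil {
//		handle(w, req, ps)
//	} else {
//		// hypothetical framework-specific 404 handling goes here
//	}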
func (r *Router) allowed(path, reqMethod string) (allow string) {
if path == "*" { // server-wide
for method := range r.trees {
if method == "OPTIONS" {
continue
}
// add request method to list of allowed methods
if len(allow) == 0 {
allow = method
} else {
allow += ", " + method
}
}
} else { // specific path
for method := range r.trees {
// Skip the requested method - we already tried this one
if method == reqMethod || method == "OPTIONS" {
continue
}
handle, _, _ := r.trees[method].getValue(path)
if handle != nil {
// add request method to list of allowed methods
if len(allow) == 0 {
allow = method
} else {
allow += ", " + method
}
}
}
}
if len(allow) > 0 {
allow += ", OPTIONS"
}
return
}
// ServeHTTP makes the router implement the http.Handler interface.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if r.PanicHandler != nil {
defer r.recv(w, req)
}
path := req.URL.Path
if root := r.trees[req.Method]; root != nil {
if handle, ps, tsr := root.getValue(path); handle != nil {
handle(w, req, ps)
return
} else if req.Method != "CONNECT" && path != "/" {
code := 301 // Permanent redirect, request with GET method
if req.Method != "GET" {
// Temporary redirect, request with same method
// As of Go 1.3, Go does not support status code 308.
code = 307
}
if tsr && r.RedirectTrailingSlash {
if len(path) > 1 && path[len(path)-1] == '/' {
req.URL.Path = path[:len(path)-1]
} else {
req.URL.Path = path + "/"
}
http.Redirect(w, req, req.URL.String(), code)
return
}
// Try to fix the request path
if r.RedirectFixedPath {
fixedPath, found := root.findCaseInsensitivePath(
CleanPath(path),
r.RedirectTrailingSlash,
)
if found {
req.URL.Path = string(fixedPath)
http.Redirect(w, req, req.URL.String(), code)
return
}
}
}
}
if req.Method == "OPTIONS" {
// Handle OPTIONS requests
if r.HandleOPTIONS {
if allow := r.allowed(path, req.Method); len(allow) > 0 {
w.Header().Set("Allow", allow)
return
}
}
} else {
// Handle 405
if r.HandleMethodNotAllowed {
if allow := r.allowed(path, req.Method); len(allow) > 0 {
w.Header().Set("Allow", allow)
if r.MethodNotAllowed != nil {
r.MethodNotAllowed.ServeHTTP(w, req)
} else {
http.Error(w,
http.StatusText(http.StatusMethodNotAllowed),
http.StatusMethodNotAllowed,
)
}
return
}
}
}
// Handle 404
if r.NotFound != nil {
r.NotFound.ServeHTTP(w, req)
} else {
http.NotFound(w, req)
}
}
| vendor/github.com/julienschmidt/httprouter/router.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0009736212086863816,
0.0002074780932161957,
0.00016046286327764392,
0.00017033804033417255,
0.00016924610827118158
] |
{
"id": 1,
"code_window": [
"\tif hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {\n",
"\t\treturn fmt.Errorf(\"transaction root hash mismatch: have %x, want %x\", hash, header.TxHash)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// ValidateState validates the various changes that happen after a state\n",
"// transition, such as amount of used gas, the receipt roots and the state root\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n"
],
"file_path": "core/block_validator.go",
"type": "add",
"edit_start_line_idx": 72
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package downloader contains the manual full chain synchronisation.
package downloader
import (
"errors"
"fmt"
"math/big"
"sync"
"sync/atomic"
"time"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
)
var (
MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request
MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly
MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request
MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
MaxStateFetch = 384 // Amount of node state values to allow fetching per request
MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation
rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1 // Worst confidence factor in our estimated RTT value
ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
qosTuningPeers = 5 // Number of peers to tune based on (best peers)
qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence
qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value
maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
maxResultsProcess = 2048 // Number of content download results to import at once into the chain
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync
)
var (
errBusy = errors.New("busy")
errUnknownPeer = errors.New("peer is unknown or unhealthy")
errBadPeer = errors.New("action from bad peer ignored")
errStallingPeer = errors.New("peer is stalling")
errNoPeers = errors.New("no peers to keep download active")
errTimeout = errors.New("timeout")
errEmptyHeaderSet = errors.New("empty header set by peer")
errPeersUnavailable = errors.New("no peers available or all tried for download")
errInvalidAncestor = errors.New("retrieved ancestor is invalid")
errInvalidChain = errors.New("retrieved hash chain is invalid")
errInvalidBlock = errors.New("retrieved block is invalid")
errInvalidBody = errors.New("retrieved block body is invalid")
errInvalidReceipt = errors.New("retrieved receipt is invalid")
errCancelBlockFetch = errors.New("block download canceled (requested)")
errCancelHeaderFetch = errors.New("block header download canceled (requested)")
errCancelBodyFetch = errors.New("block body download canceled (requested)")
errCancelReceiptFetch = errors.New("receipt download canceled (requested)")
errCancelStateFetch = errors.New("state data download canceled (requested)")
errCancelHeaderProcessing = errors.New("header processing canceled (requested)")
errCancelContentProcessing = errors.New("content processing canceled (requested)")
errNoSyncActive = errors.New("no sync active")
errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)
type Downloader struct {
mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle)
mux *event.TypeMux // Event multiplexer to announce sync operation events
queue *queue // Scheduler for selecting the hashes to download
peers *peerSet // Set of active peers from which download can proceed
stateDB ethdb.Database
rttEstimate uint64 // Round trip time to target for download requests
rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)
// Statistics
syncStatsChainOrigin uint64 // Origin block number where syncing started at
syncStatsChainHeight uint64 // Highest block number known when syncing started
syncStatsState stateSyncStats
syncStatsLock sync.RWMutex // Lock protecting the sync stats fields
lightchain LightChain
blockchain BlockChain
// Callbacks
dropPeer peerDropFn // Drops a peer for misbehaving
// Status
synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
synchronising int32
notified int32
committed int32
// Channels
headerCh chan dataPack // [eth/62] Channel receiving inbound block headers
bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies
receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts
bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks
receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks
headerProcCh chan []*types.Header // [eth/62] Channel to feed the header processor new tasks
// for stateFetcher
stateSyncStart chan *stateSync
trackStateReq chan *stateReq
stateCh chan dataPack // [eth/63] Channel receiving inbound node state data
// Cancellation and termination
cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop)
cancelCh chan struct{} // Channel to cancel mid-flight syncs
cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers
cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited.
quitCh chan struct{} // Quit channel to signal termination
quitLock sync.RWMutex // Lock to prevent double closes
// Testing hooks
syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run
bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch
receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}
// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
// HasHeader verifies a header's presence in the local chain.
HasHeader(common.Hash, uint64) bool
// GetHeaderByHash retrieves a header from the local chain.
GetHeaderByHash(common.Hash) *types.Header
// CurrentHeader retrieves the head header from the local chain.
CurrentHeader() *types.Header
// GetTd returns the total difficulty of a local block.
GetTd(common.Hash, uint64) *big.Int
// InsertHeaderChain inserts a batch of headers into the local chain.
InsertHeaderChain([]*types.Header, int) (int, error)
// Rollback removes a few recently added elements from the local chain.
Rollback([]common.Hash)
}
// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
type BlockChain interface {
LightChain
// HasBlock verifies a block's presence in the local chain.
HasBlock(common.Hash, uint64) bool
// GetBlockByHash retrieves a block from the local chain.
GetBlockByHash(common.Hash) *types.Block
// CurrentBlock retrieves the head block from the local chain.
CurrentBlock() *types.Block
// CurrentFastBlock retrieves the head fast block from the local chain.
CurrentFastBlock() *types.Block
// FastSyncCommitHead directly commits the head block to a certain entity.
FastSyncCommitHead(common.Hash) error
// InsertChain inserts a batch of blocks into the local chain.
InsertChain(types.Blocks) (int, error)
// InsertReceiptChain inserts a batch of receipts into the local chain.
InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
if lightchain == nil {
lightchain = chain
}
dl := &Downloader{
mode: mode,
stateDB: stateDb,
mux: mux,
queue: newQueue(),
peers: newPeerSet(),
rttEstimate: uint64(rttMaxEstimate),
rttConfidence: uint64(1000000),
blockchain: chain,
lightchain: lightchain,
dropPeer: dropPeer,
headerCh: make(chan dataPack, 1),
bodyCh: make(chan dataPack, 1),
receiptCh: make(chan dataPack, 1),
bodyWakeCh: make(chan bool, 1),
receiptWakeCh: make(chan bool, 1),
headerProcCh: make(chan []*types.Header, 1),
quitCh: make(chan struct{}),
stateCh: make(chan dataPack),
stateSyncStart: make(chan *stateSync),
syncStatsState: stateSyncStats{
processed: rawdb.ReadFastTrieProgress(stateDb),
},
trackStateReq: make(chan *stateReq),
}
go dl.qosTuner()
go dl.stateFetcher()
return dl
}
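// Illustrative construction sketch (added, not part of the original file): the
// eth protocol manager wires a downloader up roughly along these lines, where
// chainDb, eventMux, blockchain and removePeer stand in for the caller's own
// database, event mux, chain implementation and peer-drop callback:
//
//	dl := downloader.New(downloader.FastSync, chainDb, eventMux, blockchain, nil, removePeer)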
// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
// In addition, during the state download phase of fast synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() ethereum.SyncProgress {
// Lock the current stats and return the progress
d.syncStatsLock.RLock()
defer d.syncStatsLock.RUnlock()
current := uint64(0)
switch d.mode {
case FullSync:
current = d.blockchain.CurrentBlock().NumberU64()
case FastSync:
current = d.blockchain.CurrentFastBlock().NumberU64()
case LightSync:
current = d.lightchain.CurrentHeader().Number.Uint64()
}
return ethereum.SyncProgress{
StartingBlock: d.syncStatsChainOrigin,
CurrentBlock: current,
HighestBlock: d.syncStatsChainHeight,
PulledStates: d.syncStatsState.processed,
KnownStates: d.syncStatsState.processed + d.syncStatsState.pending,
}
}
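// Illustrative usage (added, not in the original file): callers such as the
// eth_syncing RPC handler read these boundaries to report progress, e.g.
//
//	p := d.Progress()
//	syncing := p.CurrentBlock < p.HighestBlock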
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
return atomic.LoadInt32(&d.synchronising) > 0
}
// RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
logger := log.New("peer", id)
logger.Trace("Registering sync peer")
if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
logger.Error("Failed to register sync peer", "err", err)
return err
}
d.qosReduceConfidence()
return nil
}
// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}
// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
func (d *Downloader) UnregisterPeer(id string) error {
// Unregister the peer from the active peer set and revoke any fetch tasks
logger := log.New("peer", id)
logger.Trace("Unregistering sync peer")
if err := d.peers.Unregister(id); err != nil {
logger.Error("Failed to unregister sync peer", "err", err)
return err
}
d.queue.Revoke(id)
// If this peer was the master peer, abort sync immediately
d.cancelLock.RLock()
master := id == d.cancelPeer
d.cancelLock.RUnlock()
if master {
d.cancel()
}
return nil
}
// Synchronise tries to sync up our local block chain with a remote peer,
// adding various sanity checks and wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
err := d.synchronise(id, head, td, mode)
switch err {
case nil:
case errBusy:
case errTimeout, errBadPeer, errStallingPeer,
errEmptyHeaderSet, errPeersUnavailable, errTooOld,
errInvalidAncestor, errInvalidChain:
log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
if d.dropPeer == nil {
// The dropPeer method is nil when `--copydb` is used for a local copy.
// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
} else {
d.dropPeer(id)
}
default:
log.Warn("Synchronisation failed, retrying", "err", err)
}
return err
}
// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
// checks fail, an error will be returned. This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
// Mock out the synchronisation if testing
if d.synchroniseMock != nil {
return d.synchroniseMock(id, hash)
}
// Make sure only one goroutine is ever allowed past this point at once
if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
return errBusy
}
defer atomic.StoreInt32(&d.synchronising, 0)
// Post a user notification of the sync (only once per session)
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
log.Info("Block synchronisation started")
}
// Reset the queue, peer set and wake channels to clean any internal leftover state
d.queue.Reset()
d.peers.Reset()
for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
select {
case <-ch:
default:
}
}
for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
for empty := false; !empty; {
select {
case <-ch:
default:
empty = true
}
}
}
for empty := false; !empty; {
select {
case <-d.headerProcCh:
default:
empty = true
}
}
// Create cancel channel for aborting mid-flight and mark the master peer
d.cancelLock.Lock()
d.cancelCh = make(chan struct{})
d.cancelPeer = id
d.cancelLock.Unlock()
defer d.Cancel() // No matter what, we can't leave the cancel channel open
// Set the requested sync mode, unless it's forbidden
d.mode = mode
// Retrieve the origin peer and initiate the downloading process
p := d.peers.Peer(id)
if p == nil {
return errUnknownPeer
}
return d.syncWithPeer(p, hash, td)
}
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
d.mux.Post(StartEvent{})
defer func() {
// reset on error
if err != nil {
d.mux.Post(FailedEvent{err})
} else {
d.mux.Post(DoneEvent{})
}
}()
if p.version < 62 {
return errTooOld
}
log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
defer func(start time.Time) {
log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
}(time.Now())
// Look up the sync boundaries: the common ancestor and the target block
latest, err := d.fetchHeight(p)
if err != nil {
return err
}
height := latest.Number.Uint64()
origin, err := d.findAncestor(p, height)
if err != nil {
return err
}
d.syncStatsLock.Lock()
if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
d.syncStatsChainOrigin = origin
}
d.syncStatsChainHeight = height
d.syncStatsLock.Unlock()
// Ensure our origin point is below any fast sync pivot point
pivot := uint64(0)
if d.mode == FastSync {
if height <= uint64(fsMinFullBlocks) {
origin = 0
} else {
pivot = height - uint64(fsMinFullBlocks)
if pivot <= origin {
origin = pivot - 1
}
}
}
d.committed = 1
if d.mode == FastSync && pivot != 0 {
d.committed = 0
}
// Initiate the sync using a concurrent header and content retrieval algorithm
d.queue.Prepare(origin+1, d.mode)
if d.syncInitHook != nil {
d.syncInitHook(origin, height)
}
fetchers := []func() error{
func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
func() error { return d.processHeaders(origin+1, pivot, td) },
}
if d.mode == FastSync {
fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
} else if d.mode == FullSync {
fetchers = append(fetchers, d.processFullSyncContent)
}
return d.spawnSync(fetchers)
}
// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers []func() error) error {
errc := make(chan error, len(fetchers))
d.cancelWg.Add(len(fetchers))
for _, fn := range fetchers {
fn := fn
go func() { defer d.cancelWg.Done(); errc <- fn() }()
}
// Wait for the first error, then terminate the others.
var err error
for i := 0; i < len(fetchers); i++ {
if i == len(fetchers)-1 {
// Close the queue when all fetchers have exited.
// This will cause the block processor to end when
// it has processed the queue.
d.queue.Close()
}
if err = <-errc; err != nil {
break
}
}
d.queue.Close()
d.Cancel()
return err
}
// cancel aborts all of the operations and resets the queue. However, cancel does
// not wait for the running download goroutines to finish. This method should be
// used when cancelling the downloads from inside the downloader.
func (d *Downloader) cancel() {
// Close the current cancel channel
d.cancelLock.Lock()
if d.cancelCh != nil {
select {
case <-d.cancelCh:
// Channel was already closed
default:
close(d.cancelCh)
}
}
d.cancelLock.Unlock()
}
// Cancel aborts all of the operations and waits for all download goroutines to
// finish before returning.
func (d *Downloader) Cancel() {
d.cancel()
d.cancelWg.Wait()
}
// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
// Close the termination channel (make sure double close is allowed)
d.quitLock.Lock()
select {
case <-d.quitCh:
default:
close(d.quitCh)
}
d.quitLock.Unlock()
// Cancel any pending download requests
d.Cancel()
}
// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
p.log.Debug("Retrieving remote chain height")
// Request the advertised remote head block and wait for the response
head, _ := p.peer.Head()
go p.peer.RequestHeadersByHash(head, 1, 0, false)
ttl := d.requestTTL()
timeout := time.After(ttl)
for {
select {
case <-d.cancelCh:
return nil, errCancelBlockFetch
case packet := <-d.headerCh:
// Discard anything not from the origin peer
if packet.PeerId() != p.id {
log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
break
}
// Make sure the peer actually gave something valid
headers := packet.(*headerPack).headers
if len(headers) != 1 {
p.log.Debug("Multiple headers for single request", "headers", len(headers))
return nil, errBadPeer
}
head := headers[0]
p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
return head, nil
case <-timeout:
p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
return nil, errTimeout
case <-d.bodyCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
}
}
}
// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
// Figure out the valid ancestor range to prevent rewrite attacks
floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
if d.mode == FullSync {
ceil = d.blockchain.CurrentBlock().NumberU64()
} else if d.mode == FastSync {
ceil = d.blockchain.CurrentFastBlock().NumberU64()
}
if ceil >= MaxForkAncestry {
floor = int64(ceil - MaxForkAncestry)
}
p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
// Request the topmost blocks to short circuit binary ancestor lookup
head := ceil
if head > height {
head = height
}
from := int64(head) - int64(MaxHeaderFetch)
if from < 0 {
from = 0
}
// Span out with 15 block gaps into the future to catch bad head reports
limit := 2 * MaxHeaderFetch / 16
count := 1 + int((int64(ceil)-from)/16)
if count > limit {
count = limit
}
go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)
// Wait for the remote response to the head fetch
number, hash := uint64(0), common.Hash{}
ttl := d.requestTTL()
timeout := time.After(ttl)
for finished := false; !finished; {
select {
case <-d.cancelCh:
return 0, errCancelHeaderFetch
case packet := <-d.headerCh:
// Discard anything not from the origin peer
if packet.PeerId() != p.id {
log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
break
}
// Make sure the peer actually gave something valid
headers := packet.(*headerPack).headers
if len(headers) == 0 {
p.log.Warn("Empty head header set")
return 0, errEmptyHeaderSet
}
// Make sure the peer's reply conforms to the request
for i := 0; i < len(headers); i++ {
if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
return 0, errInvalidChain
}
}
// Check if a common ancestor was found
finished = true
for i := len(headers) - 1; i >= 0; i-- {
// Skip any headers that underflow/overflow our requested set
if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
continue
}
// Otherwise check if we already know the header or not
h := headers[i].Hash()
n := headers[i].Number.Uint64()
if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
number, hash = n, h
// If every header is known, even future ones, the peer straight out lied about its head
if number > height && i == limit-1 {
p.log.Warn("Lied about chain head", "reported", height, "found", number)
return 0, errStallingPeer
}
break
}
}
case <-timeout:
p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
return 0, errTimeout
case <-d.bodyCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
}
}
// If the head fetch already found an ancestor, return
if hash != (common.Hash{}) {
if int64(number) <= floor {
p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
return 0, errInvalidAncestor
}
p.log.Debug("Found common ancestor", "number", number, "hash", hash)
return number, nil
}
// Ancestor not found, we need to binary search over our chain
start, end := uint64(0), head
if floor > 0 {
start = uint64(floor)
}
for start+1 < end {
// Split our chain interval in two, and request the hash to cross check
check := (start + end) / 2
ttl := d.requestTTL()
timeout := time.After(ttl)
go p.peer.RequestHeadersByNumber(check, 1, 0, false)
// Wait until a reply arrives to this request
for arrived := false; !arrived; {
select {
case <-d.cancelCh:
return 0, errCancelHeaderFetch
case packer := <-d.headerCh:
// Discard anything not from the origin peer
if packer.PeerId() != p.id {
log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
break
}
// Make sure the peer actually gave something valid
headers := packer.(*headerPack).headers
if len(headers) != 1 {
p.log.Debug("Multiple headers for single request", "headers", len(headers))
return 0, errBadPeer
}
arrived = true
// Modify the search interval based on the response
h := headers[0].Hash()
n := headers[0].Number.Uint64()
if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) {
end = check
break
}
header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
if header.Number.Uint64() != check {
p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
return 0, errBadPeer
}
start = check
case <-timeout:
p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
return 0, errTimeout
case <-d.bodyCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
}
}
}
// Ensure valid ancestry and return
if int64(start) <= floor {
p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
return 0, errInvalidAncestor
}
p.log.Debug("Found common ancestor", "number", start, "hash", hash)
return start, nil
}
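// Worked example (added for illustration, derived from the constants above):
// with MaxHeaderFetch = 192, a local full head at block 990 and a remote
// height of 1000, the probe requests headers from 990-192 = 798 upwards with
// a skip of 15, i.e. numbers 798, 814, 830, ..., 990. The newest of these that
// is already known locally becomes the common ancestor; only if none match
// does the binary search between floor and head run.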
// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
p.log.Debug("Directing header downloads", "origin", from)
defer p.log.Debug("Header download terminated")
// Create a timeout timer, and the associated header fetcher
skeleton := true // Skeleton assembly phase or finishing up
request := time.Now() // time of the last skeleton fetch request
timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
<-timeout.C // timeout channel should be initially empty
defer timeout.Stop()
var ttl time.Duration
getHeaders := func(from uint64) {
request = time.Now()
ttl = d.requestTTL()
timeout.Reset(ttl)
if skeleton {
p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
} else {
p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
}
}
// Start pulling the header chain skeleton until all is done
getHeaders(from)
for {
select {
case <-d.cancelCh:
return errCancelHeaderFetch
case packet := <-d.headerCh:
// Make sure the active peer is giving us the skeleton headers
if packet.PeerId() != p.id {
log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
break
}
headerReqTimer.UpdateSince(request)
timeout.Stop()
// If the skeleton's finished, pull any remaining head headers directly from the origin
if packet.Items() == 0 && skeleton {
skeleton = false
getHeaders(from)
continue
}
// If no more headers are inbound, notify the content fetchers and return
if packet.Items() == 0 {
// Don't abort header fetches while the pivot is downloading
if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
p.log.Debug("No headers, waiting for pivot commit")
select {
case <-time.After(fsHeaderContCheck):
getHeaders(from)
continue
case <-d.cancelCh:
return errCancelHeaderFetch
}
}
// Pivot done (or not in fast sync) and no more headers, terminate the process
p.log.Debug("No more headers available")
select {
case d.headerProcCh <- nil:
return nil
case <-d.cancelCh:
return errCancelHeaderFetch
}
}
headers := packet.(*headerPack).headers
// If we received a skeleton batch, resolve internals concurrently
if skeleton {
filled, proced, err := d.fillHeaderSkeleton(from, headers)
if err != nil {
p.log.Debug("Skeleton chain invalid", "err", err)
return errInvalidChain
}
headers = filled[proced:]
from += uint64(proced)
} else {
// If we're closing in on the chain head, but haven't yet reached it, delay
// the last few headers so mini reorgs on the head don't cause invalid hash
// chain errors.
if n := len(headers); n > 0 {
// Retrieve the current head we're at
head := uint64(0)
if d.mode == LightSync {
head = d.lightchain.CurrentHeader().Number.Uint64()
} else {
head = d.blockchain.CurrentFastBlock().NumberU64()
if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
head = full
}
}
// If the head is way older than this batch, delay the last few headers
if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
delay := reorgProtHeaderDelay
if delay > n {
delay = n
}
headers = headers[:n-delay]
}
}
}
// Insert all the new headers and fetch the next batch
if len(headers) > 0 {
p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
select {
case d.headerProcCh <- headers:
case <-d.cancelCh:
return errCancelHeaderFetch
}
from += uint64(len(headers))
getHeaders(from)
} else {
// No headers delivered, or all of them being delayed, sleep a bit and retry
p.log.Trace("All headers delayed, waiting")
select {
case <-time.After(fsHeaderContCheck):
getHeaders(from)
continue
case <-d.cancelCh:
return errCancelHeaderFetch
}
}
case <-timeout.C:
if d.dropPeer == nil {
// The dropPeer method is nil when `--copydb` is used for a local copy.
// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
break
}
// Header retrieval timed out, consider the peer bad and drop
p.log.Debug("Header request timed out", "elapsed", ttl)
headerTimeoutMeter.Mark(1)
d.dropPeer(p.id)
// Finish the sync gracefully instead of dumping the gathered data though
for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
select {
case ch <- false:
case <-d.cancelCh:
}
}
select {
case d.headerProcCh <- nil:
case <-d.cancelCh:
}
return errBadPeer
}
}
}
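// Worked example (added for illustration, derived from the constants above):
// a single skeleton request asks for MaxSkeletonSize = 128 headers spaced
// MaxHeaderFetch = 192 apart, i.e. numbers from+191, from+383 and so on,
// anchoring a span of 128*192 = 24576 headers whose gaps are then filled in
// from other peers by fillHeaderSkeleton below.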
// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
log.Debug("Filling up skeleton", "from", from)
d.queue.ScheduleSkeleton(from, skeleton)
var (
deliver = func(packet dataPack) (int, error) {
pack := packet.(*headerPack)
return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
}
expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
throttle = func() bool { return false }
reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
return d.queue.ReserveHeaders(p, count), false, nil
}
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
)
err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
log.Debug("Skeleton fill terminated", "err", err)
filled, proced := d.queue.RetrieveHeaders()
return filled, proced, err
}
// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64) error {
log.Debug("Downloading block bodies", "origin", from)
var (
deliver = func(packet dataPack) (int, error) {
pack := packet.(*bodyPack)
return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
}
expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
)
err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
log.Debug("Block body download terminated", "err", err)
return err
}
// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64) error {
log.Debug("Downloading transaction receipts", "origin", from)
var (
deliver = func(packet dataPack) (int, error) {
pack := packet.(*receiptPack)
return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
}
expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
)
err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
log.Debug("Transaction receipt download terminated", "err", err)
return err
}
// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
//
// As the scheduling/timeout logic mostly is the same for all downloaded data
// types, this method is used by each for data gathering and is instrumented with
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
// - fetch: network callback to actually send a particular download request to a physical remote peer
// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
// - kind: textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {
// Create a ticker to detect expired retrieval tasks
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
update := make(chan struct{}, 1)
// Prepare the queue and fetch block parts until the block header fetcher's done
finished := false
for {
select {
case <-d.cancelCh:
return errCancel
case packet := <-deliveryCh:
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of data and check chain validity
accepted, err := deliver(packet)
if err == errInvalidChain {
return err
}
				// Unless a peer delivered something completely different from what was requested (usually
// caused by a timed out request which came through in the end), set it to
// idle. If the delivery's stale, the peer should have already been idled.
if err != errStaleDelivery {
setIdle(peer, accepted)
}
// Issue a log to the user to see what's going on
switch {
case err == nil && packet.Items() == 0:
peer.log.Trace("Requested data not delivered", "type", kind)
case err == nil:
peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
default:
peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
}
}
// Blocks assembled, try to update the progress
select {
case update <- struct{}{}:
default:
}
case cont := <-wakeCh:
// The header fetcher sent a continuation flag, check if it's done
if !cont {
finished = true
}
// Headers arrive, try to update the progress
select {
case update <- struct{}{}:
default:
}
case <-ticker.C:
			// Ticker fired, update the progress as a sanity check
select {
case update <- struct{}{}:
default:
}
case <-update:
// Short circuit if we lost all our peers
if d.peers.Len() == 0 {
return errNoPeers
}
// Check for fetch request timeouts and demote the responsible peers
for pid, fails := range expire() {
if peer := d.peers.Peer(pid); peer != nil {
// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
					// ourselves. Only reset to minimal throughput, but don't drop just yet. If even the minimal
					// request times out, then sync-wise we need to get rid of the peer.
					//
					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
					// how response times react, so it always requests one more than the minimum (i.e. min 2).
if fails > 2 {
peer.log.Trace("Data delivery timed out", "type", kind)
setIdle(peer, 0)
} else {
peer.log.Debug("Stalling delivery, dropping", "type", kind)
if d.dropPeer == nil {
// The dropPeer method is nil when `--copydb` is used for a local copy.
// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
} else {
d.dropPeer(pid)
}
}
}
}
// If there's nothing more to fetch, wait or terminate
if pending() == 0 {
if !inFlight() && finished {
log.Debug("Data fetching completed", "type", kind)
return nil
}
break
}
// Send a download request to all idle peers, until throttled
progressed, throttled, running := false, false, inFlight()
idles, total := idle()
for _, peer := range idles {
// Short circuit if throttling activated
if throttle() {
throttled = true
break
}
// Short circuit if there is no more available task.
if pending() == 0 {
break
}
// Reserve a chunk of fetches for a peer. A nil can mean either that
// no more headers are available, or that the peer is known not to
// have them.
request, progress, err := reserve(peer, capacity(peer))
if err != nil {
return err
}
if progress {
progressed = true
}
if request == nil {
continue
}
if request.From > 0 {
peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
} else {
peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
}
// Fetch the chunk and make sure any errors return the hashes to the queue
if fetchHook != nil {
fetchHook(request.Headers)
}
if err := fetch(peer, request); err != nil {
// Although we could try and make an attempt to fix this, this error really
// means that we've double allocated a fetch task to a peer. If that is the
// case, the internal state of the downloader and the queue is very wrong so
// better hard crash and note the error instead of silently accumulating into
// a much bigger issue.
panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
}
running = true
}
// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed, throw an error.
if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
return errPeersUnavailable
}
}
}
}
// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
// Keep a count of uncertain headers to roll back
rollback := []*types.Header{}
defer func() {
if len(rollback) > 0 {
// Flatten the headers and roll them back
hashes := make([]common.Hash, len(rollback))
for i, header := range rollback {
hashes[i] = header.Hash()
}
lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
if d.mode != LightSync {
lastFastBlock = d.blockchain.CurrentFastBlock().Number()
lastBlock = d.blockchain.CurrentBlock().Number()
}
d.lightchain.Rollback(hashes)
curFastBlock, curBlock := common.Big0, common.Big0
if d.mode != LightSync {
curFastBlock = d.blockchain.CurrentFastBlock().Number()
curBlock = d.blockchain.CurrentBlock().Number()
}
log.Warn("Rolled back headers", "count", len(hashes),
"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
}
}()
// Wait for batches of headers to process
gotHeaders := false
for {
select {
case <-d.cancelCh:
return errCancelHeaderProcessing
case headers := <-d.headerProcCh:
// Terminate header processing if we synced up
if len(headers) == 0 {
// Notify everyone that headers are fully processed
for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
select {
case ch <- false:
case <-d.cancelCh:
}
}
// If no headers were retrieved at all, the peer violated its TD promise that it had a
// better chain compared to ours. The only exception is if its promised blocks were
// already imported by other means (e.g. fetcher):
//
// R <remote peer>, L <local node>: Both at block 10
// R: Mine block 11, and propagate it to L
// L: Queue block 11 for import
// L: Notice that R's head and TD increased compared to ours, start sync
// L: Import of block 11 finishes
// L: Sync begins, and finds common ancestor at 11
// L: Request new headers up from 11 (R's TD was higher, it must have something)
// R: Nothing to give
if d.mode != LightSync {
head := d.blockchain.CurrentBlock()
if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
return errStallingPeer
}
}
// If fast or light syncing, ensure promised headers are indeed delivered. This is
// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
// of delivering the post-pivot blocks that would flag the invalid content.
//
// This check cannot be executed "as is" for full imports, since blocks may still be
// queued for processing when the header download completes. However, as long as the
// peer gave us something useful, we're already happy/progressed (above check).
if d.mode == FastSync || d.mode == LightSync {
head := d.lightchain.CurrentHeader()
if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
return errStallingPeer
}
}
// Disable any rollback and return
rollback = nil
return nil
}
// Otherwise split the chunk of headers into batches and process them
gotHeaders = true
for len(headers) > 0 {
// Terminate if something failed in between processing chunks
select {
case <-d.cancelCh:
return errCancelHeaderProcessing
default:
}
// Select the next chunk of headers to import
limit := maxHeadersProcess
if limit > len(headers) {
limit = len(headers)
}
chunk := headers[:limit]
// In case of header only syncing, validate the chunk immediately
if d.mode == FastSync || d.mode == LightSync {
// Collect the yet unknown headers to mark them as uncertain
unknown := make([]*types.Header, 0, len(headers))
for _, header := range chunk {
if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
unknown = append(unknown, header)
}
}
// If we're importing pure headers, verify based on their recentness
frequency := fsHeaderCheckFrequency
if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
frequency = 1
}
if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
// If some headers were inserted, add them too to the rollback list
if n > 0 {
rollback = append(rollback, chunk[:n]...)
}
log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
return errInvalidChain
}
// All verifications passed, store newly found uncertain headers
rollback = append(rollback, unknown...)
if len(rollback) > fsHeaderSafetyNet {
rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
}
}
// Unless we're doing light chains, schedule the headers for associated content retrieval
if d.mode == FullSync || d.mode == FastSync {
// If we've reached the allowed number of pending headers, stall a bit
for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
select {
case <-d.cancelCh:
return errCancelHeaderProcessing
case <-time.After(time.Second):
}
}
// Otherwise insert the headers for content retrieval
inserts := d.queue.Schedule(chunk, origin)
if len(inserts) != len(chunk) {
log.Debug("Stale headers")
return errBadPeer
}
}
headers = headers[limit:]
origin += uint64(limit)
}
// Update the highest block number we know if a higher one is found.
d.syncStatsLock.Lock()
if d.syncStatsChainHeight < origin {
d.syncStatsChainHeight = origin - 1
}
d.syncStatsLock.Unlock()
			// Signal the content downloaders of the availability of new tasks
for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
select {
case ch <- true:
default:
}
}
}
}
}
// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent() error {
for {
results := d.queue.Results(true)
if len(results) == 0 {
return nil
}
if d.chainInsertHook != nil {
d.chainInsertHook(results)
}
if err := d.importBlockResults(results); err != nil {
return err
}
}
}
func (d *Downloader) importBlockResults(results []*fetchResult) error {
// Check for any early termination requests
if len(results) == 0 {
return nil
}
select {
case <-d.quitCh:
return errCancelContentProcessing
default:
}
	// Retrieve a batch of results to import
first, last := results[0].Header, results[len(results)-1].Header
log.Debug("Inserting downloaded chain", "items", len(results),
"firstnum", first.Number, "firsthash", first.Hash(),
"lastnum", last.Number, "lasthash", last.Hash(),
)
blocks := make([]*types.Block, len(results))
for i, result := range results {
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
}
if index, err := d.blockchain.InsertChain(blocks); err != nil {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
return errInvalidChain
}
return nil
}
// processFastSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
// Start syncing state of the reported head block. This should get us most of
// the state of the pivot block.
stateSync := d.syncState(latest.Root)
defer stateSync.Cancel()
go func() {
if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
d.queue.Close() // wake up Results
}
}()
	// Figure out the ideal pivot block. Note that this goalpost may move if the
// sync takes long enough for the chain head to move significantly.
pivot := uint64(0)
if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
pivot = height - uint64(fsMinFullBlocks)
}
// To cater for moving pivot points, track the pivot block and subsequently
// accumulated download results separately.
var (
oldPivot *fetchResult // Locked in pivot block, might change eventually
oldTail []*fetchResult // Downloaded content after the pivot
)
for {
// Wait for the next batch of downloaded data to be available, and if the pivot
// block became stale, move the goalpost
results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
if len(results) == 0 {
// If pivot sync is done, stop
if oldPivot == nil {
return stateSync.Cancel()
}
// If sync failed, stop
select {
case <-d.cancelCh:
return stateSync.Cancel()
default:
}
}
if d.chainInsertHook != nil {
d.chainInsertHook(results)
}
if oldPivot != nil {
results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
}
// Split around the pivot block and process the two sides via fast/full sync
if atomic.LoadInt32(&d.committed) == 0 {
latest = results[len(results)-1].Header
if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
pivot = height - uint64(fsMinFullBlocks)
}
}
P, beforeP, afterP := splitAroundPivot(pivot, results)
if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
return err
}
if P != nil {
// If new pivot block found, cancel old state retrieval and restart
if oldPivot != P {
stateSync.Cancel()
stateSync = d.syncState(P.Header.Root)
defer stateSync.Cancel()
go func() {
if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
d.queue.Close() // wake up Results
}
}()
oldPivot = P
}
// Wait for completion, occasionally checking for pivot staleness
select {
case <-stateSync.done:
if stateSync.err != nil {
return stateSync.err
}
if err := d.commitPivotBlock(P); err != nil {
return err
}
oldPivot = nil
case <-time.After(time.Second):
oldTail = afterP
continue
}
}
// Fast sync done, pivot commit done, full import
if err := d.importBlockResults(afterP); err != nil {
return err
}
}
}
func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
for _, result := range results {
num := result.Header.Number.Uint64()
switch {
case num < pivot:
before = append(before, result)
case num == pivot:
p = result
default:
after = append(after, result)
}
}
return p, before, after
}
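// Illustrative sketch (not part of the original file): a worked example of the
// split above. With a pivot of 100 and results covering blocks 98..102, the
// split yields 98-99 as before (committed as fast-sync data with receipts),
// 100 as p (the pivot itself) and 101-102 as after (imported via full block
// processing). It assumes math/big is imported as elsewhere in this package.
func exampleSplitAroundPivot() (p *fetchResult, before, after []*fetchResult) {
	var results []*fetchResult
	for n := int64(98); n <= 102; n++ {
		results = append(results, &fetchResult{Header: &types.Header{Number: big.NewInt(n)}})
	}
	return splitAroundPivot(100, results)
}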
func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
// Check for any early termination requests
if len(results) == 0 {
return nil
}
select {
case <-d.quitCh:
return errCancelContentProcessing
case <-stateSync.done:
if err := stateSync.Wait(); err != nil {
return err
}
default:
}
	// Retrieve a batch of results to import
first, last := results[0].Header, results[len(results)-1].Header
log.Debug("Inserting fast-sync blocks", "items", len(results),
"firstnum", first.Number, "firsthash", first.Hash(),
"lastnumn", last.Number, "lasthash", last.Hash(),
)
blocks := make([]*types.Block, len(results))
receipts := make([]types.Receipts, len(results))
for i, result := range results {
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
receipts[i] = result.Receipts
}
if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
return errInvalidChain
}
return nil
}
func (d *Downloader) commitPivotBlock(result *fetchResult) error {
block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
return err
}
if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
return err
}
atomic.StoreInt32(&d.committed, 1)
return nil
}
// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
}
// DeliverBodies injects a new batch of block bodies received from a remote node.
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
}
// DeliverReceipts injects a new batch of receipts received from a remote node.
func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
}
// DeliverNodeData injects a new batch of node state data received from a remote node.
func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
}
// deliver injects a new batch of data received from a remote node.
func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
// Update the delivery metrics for both good and failed deliveries
inMeter.Mark(int64(packet.Items()))
defer func() {
if err != nil {
dropMeter.Mark(int64(packet.Items()))
}
}()
// Deliver or abort if the sync is canceled while queuing
d.cancelLock.RLock()
cancel := d.cancelCh
d.cancelLock.RUnlock()
if cancel == nil {
return errNoSyncActive
}
select {
case destCh <- packet:
return nil
case <-cancel:
return errNoSyncActive
}
}
// qosTuner is the quality of service tuning loop that occasionally gathers the
// peer latency statistics and updates the estimated request round trip time.
func (d *Downloader) qosTuner() {
for {
		// Retrieve the current median RTT and integrate into the previous target RTT
rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
// A new RTT cycle passed, increase our confidence in the estimated RTT
conf := atomic.LoadUint64(&d.rttConfidence)
conf = conf + (1000000-conf)/2
atomic.StoreUint64(&d.rttConfidence, conf)
// Log the new QoS values and sleep until the next RTT
log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
select {
case <-d.quitCh:
return
case <-time.After(rtt):
}
}
}
// qosReduceConfidence is meant to be called when a new peer joins the downloader's
// peer set, needing to reduce the confidence we have in our QoS estimates.
func (d *Downloader) qosReduceConfidence() {
// If we have a single peer, confidence is always 1
peers := uint64(d.peers.Len())
if peers == 0 {
// Ensure peer connectivity races don't catch us off guard
return
}
if peers == 1 {
atomic.StoreUint64(&d.rttConfidence, 1000000)
return
}
	// If we have a ton of peers, don't drop confidence
if peers >= uint64(qosConfidenceCap) {
return
}
// Otherwise drop the confidence factor
conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
if float64(conf)/1000000 < rttMinConfidence {
conf = uint64(rttMinConfidence * 1000000)
}
atomic.StoreUint64(&d.rttConfidence, conf)
rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
}
// requestRTT returns the current target round trip time for a download request
// to complete in.
//
// Note, the returned RTT is 0.9 of the actual estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
// be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
}
// requestTTL returns the current timeout allowance for a single download request
// to finish under.
func (d *Downloader) requestTTL() time.Duration {
var (
rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate))
conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
)
ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
if ttl > ttlLimit {
ttl = ttlLimit
}
return ttl
}
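// Illustrative sketch (not part of the original file): how the two knobs above
// relate. With an estimated RTT of 500ms, requestRTT targets 450ms (0.9 * RTT);
// with a confidence of 0.5, requestTTL stretches the allowance to
// ttlScaling * 500ms / 0.5, capped at ttlLimit.
func exampleQoSTargets(d *Downloader) (target, timeout time.Duration) {
	return d.requestRTT(), d.requestTTL()
}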
| eth/downloader/downloader.go | 1 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.009155549108982086,
0.0005871463217772543,
0.00016230373876169324,
0.00017159215349238366,
0.0013735981192439795
] |
{
"id": 1,
"code_window": [
"\tif hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {\n",
"\t\treturn fmt.Errorf(\"transaction root hash mismatch: have %x, want %x\", hash, header.TxHash)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// ValidateState validates the various changes that happen after a state\n",
"// transition, such as amount of used gas, the receipt roots and the state root\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n"
],
"file_path": "core/block_validator.go",
"type": "add",
"edit_start_line_idx": 72
} | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bmt provides a binary merkle tree implementation used for swarm chunk hashing
package bmt
import (
"fmt"
"hash"
"strings"
"sync"
"sync/atomic"
)
/*
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size.
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g., keccak 256 SHA3).
Chunks with data shorter than the fixed size are hashed as if they had zero padding.
BMT hash is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset.
The size of the underlying segments is fixed to the size of the base hash (called the resolution
of the BMT hash). Using Keccak256 SHA3, segments are 32 bytes, the EVM word size, which optimizes
for on-chain BMT verification as well as for inclusion proofs in the merkle tree of the swarm hash.
Two implementations are provided:
* RefHasher is optimized for code simplicity and meant as a reference implementation
that is simple to understand
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
control structure to coordinate the concurrent routines
BMT Hasher implements the following interfaces
* standard golang hash.Hash - synchronous, reusable
* SwarmHash - SumWithSpan provided
* io.Writer - synchronous left-to-right datawriter
* AsyncWriter - concurrent section writes and asynchronous Sum call
*/
const (
// PoolSize is the maximum number of bmt trees used by the hashers, i.e,
// the maximum number of concurrent BMT hashing operations performed by the same hasher
PoolSize = 8
)
// BaseHasherFunc is a hash.Hash constructor function used for the base hash of the BMT.
// implemented by Keccak256 SHA3 sha3.NewKeccak256
type BaseHasherFunc func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// - implements the hash.Hash interface
// - reuses a pool of trees for amortised memory allocation and resource control
// - supports order-agnostic concurrent segment writes and section (double segment) writes
// as well as sequential read and write
// - the same hasher instance must not be called concurrently on more than one chunk
// - the same hasher instance is synchronously reusable
// - Sum gives back the tree to the pool and guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk
// - generates and verifies segment inclusion proofs (TODO:)
type Hasher struct {
pool *TreePool // BMT resource pool
bmt *tree // prebuilt BMT resource for flowcontrol and proofs
}
// New creates a reusable BMT Hasher that
// pulls a new tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
return &Hasher{
pool: p,
}
}
// TreePool provides a pool of trees used as resources by the BMT Hasher.
// A tree popped from the pool is guaranteed to have a clean state ready
// for hashing a new chunk.
type TreePool struct {
lock sync.Mutex
c chan *tree // the channel to obtain a resource from the pool
hasher BaseHasherFunc // base hasher to use for the BMT levels
SegmentSize int // size of leaf segments, stipulated to be = hash size
SegmentCount int // the number of segments on the base level of the BMT
Capacity int // pool capacity, controls concurrency
Depth int // depth of the bmt trees = int(log2(segmentCount))+1
Size int // the total length of the data (count * size)
count int // current count of (ever) allocated resources
zerohashes [][]byte // lookup table for predictable padding subtrees for all levels
}
// NewTreePool creates a tree pool with hasher, segment size, segment count and capacity
// on Hasher.getTree it reuses free trees or creates a new one if capacity is not reached
func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool {
// initialises the zerohashes lookup table
depth := calculateDepthFor(segmentCount)
segmentSize := hasher().Size()
zerohashes := make([][]byte, depth+1)
zeros := make([]byte, segmentSize)
zerohashes[0] = zeros
h := hasher()
for i := 1; i < depth+1; i++ {
zeros = doSum(h, nil, zeros, zeros)
zerohashes[i] = zeros
}
return &TreePool{
c: make(chan *tree, capacity),
hasher: hasher,
SegmentSize: segmentSize,
SegmentCount: segmentCount,
Capacity: capacity,
Size: segmentCount * segmentSize,
Depth: depth,
zerohashes: zerohashes,
}
}
// Drain drains the pool until it has no more than n resources
func (p *TreePool) Drain(n int) {
p.lock.Lock()
defer p.lock.Unlock()
for len(p.c) > n {
<-p.c
p.count--
}
}
// reserve is blocking until it returns an available tree
// it reuses free trees or creates a new one if capacity is not reached
// TODO: should use a context here
func (p *TreePool) reserve() *tree {
p.lock.Lock()
defer p.lock.Unlock()
var t *tree
if p.count == p.Capacity {
return <-p.c
}
select {
case t = <-p.c:
default:
t = newTree(p.SegmentSize, p.Depth, p.hasher)
p.count++
}
return t
}
// release gives back a tree to the pool.
// this tree is guaranteed to be in reusable state
func (p *TreePool) release(t *tree) {
p.c <- t // can never fail ...
}
// tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to obtain a tree for each chunk hash
// the tree is 'locked' while not in the pool
type tree struct {
leaves []*node // leaf nodes of the tree, other nodes accessible via parent links
cursor int // index of rightmost currently open segment
offset int // offset (cursor position) within currently open segment
section []byte // the rightmost open section (double segment)
result chan []byte // result channel
span []byte // The span of the data subsumed under the chunk
}
// node is a reusable segment hasher representing a node in a BMT
type node struct {
isLeft bool // whether it is left side of the parent double segment
parent *node // pointer to parent node in the BMT
state int32 // atomic increment impl concurrent boolean toggle
left, right []byte // this is where the two children sections are written
hasher hash.Hash // preconstructed hasher on nodes
}
// newNode constructs a segment hasher node in the BMT (used by newTree)
func newNode(index int, parent *node, hasher hash.Hash) *node {
return &node{
parent: parent,
isLeft: index%2 == 0,
hasher: hasher,
}
}
// draw draws the BMT (badly)
func (t *tree) draw(hash []byte) string {
var left, right []string
var anc []*node
for i, n := range t.leaves {
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
if i%2 == 0 {
anc = append(anc, n.parent)
}
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
}
anc = t.leaves
var hashes [][]string
for l := 0; len(anc) > 0; l++ {
var nodes []*node
hash := []string{""}
for i, n := range anc {
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
if i%2 == 0 && n.parent != nil {
nodes = append(nodes, n.parent)
}
}
hash = append(hash, "")
hashes = append(hashes, hash)
anc = nodes
}
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
total := 60
del := " "
var rows []string
for i := len(hashes) - 1; i >= 0; i-- {
var textlen int
hash := hashes[i]
for _, s := range hash {
textlen += len(s)
}
if total < textlen {
total = textlen + len(hash)
}
delsize := (total - textlen) / (len(hash) - 1)
if delsize > len(del) {
delsize = len(del)
}
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
rows = append(rows, row)
}
rows = append(rows, strings.Join(left, " "))
rows = append(rows, strings.Join(right, " "))
return strings.Join(rows, "\n") + "\n"
}
// newTree initialises a tree by building up the nodes of a BMT
// - segment size is stipulated to be the size of the hash
func newTree(segmentSize, depth int, hashfunc func() hash.Hash) *tree {
n := newNode(0, nil, hashfunc())
prevlevel := []*node{n}
	// iterate over the levels and create 2^(depth-level) nodes at each
	// the 0 level operates on double segment sections, so we start at depth - 2
count := 2
for level := depth - 2; level >= 0; level-- {
nodes := make([]*node, count)
for i := 0; i < count; i++ {
parent := prevlevel[i/2]
var hasher hash.Hash
if level == 0 {
hasher = hashfunc()
}
nodes[i] = newNode(i, parent, hasher)
}
prevlevel = nodes
count *= 2
}
// the datanode level is the nodes on the last level
return &tree{
leaves: prevlevel,
result: make(chan []byte),
section: make([]byte, 2*segmentSize),
}
}
// methods needed to implement hash.Hash
// Size returns the size
func (h *Hasher) Size() int {
return h.pool.SegmentSize
}
// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
return 2 * h.pool.SegmentSize
}
// Sum returns the BMT root hash of the buffer
// using Sum presupposes sequential synchronous writes (io.Writer interface)
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
// caller must make sure Sum is not called concurrently with Write, writeSection
func (h *Hasher) Sum(b []byte) (s []byte) {
t := h.getTree()
// write the last section with final flag set to true
go h.writeSection(t.cursor, t.section, true, true)
// wait for the result
s = <-t.result
span := t.span
// release the tree resource back to the pool
h.releaseTree()
// b + sha3(span + BMT(pure_chunk))
if len(span) == 0 {
return append(b, s...)
}
return doSum(h.pool.hasher(), b, span, s)
}
// methods needed to implement the SwarmHash and the io.Writer interfaces
// Write sequentially adds to the buffer to be hashed,
// calling writeSection in a goroutine for every full section
func (h *Hasher) Write(b []byte) (int, error) {
l := len(b)
if l == 0 || l > h.pool.Size {
return 0, nil
}
t := h.getTree()
secsize := 2 * h.pool.SegmentSize
// calculate length of missing bit to complete current open section
smax := secsize - t.offset
// if at the beginning of chunk or middle of the section
if t.offset < secsize {
// fill up current segment from buffer
copy(t.section[t.offset:], b)
// if input buffer consumed and open section not complete, then
// advance offset and return
if smax == 0 {
smax = secsize
}
if l <= smax {
t.offset += l
return l, nil
}
} else {
// if end of a section
if t.cursor == h.pool.SegmentCount*2 {
return 0, nil
}
}
// read full sections and the last possibly partial section from the input buffer
for smax < l {
// section complete; push to tree asynchronously
go h.writeSection(t.cursor, t.section, true, false)
// reset section
t.section = make([]byte, secsize)
// copy from input buffer at smax to right half of section
copy(t.section, b[smax:])
// advance cursor
t.cursor++
// smax here represents successive offsets in the input buffer
smax += secsize
}
t.offset = l - smax + secsize
return l, nil
}
// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
h.releaseTree()
}
// methods needed to implement the SwarmHash interface
// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash, i.e., span
func (h *Hasher) ResetWithLength(span []byte) {
h.Reset()
h.getTree().span = span
}
// releaseTree gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (h *Hasher) releaseTree() {
t := h.bmt
if t == nil {
return
}
h.bmt = nil
go func() {
t.cursor = 0
t.offset = 0
t.span = nil
t.section = make([]byte, h.pool.SegmentSize*2)
select {
case <-t.result:
default:
}
h.pool.release(t)
}()
}
// NewAsyncWriter extends Hasher with an interface for concurrent segment/section writes
func (h *Hasher) NewAsyncWriter(double bool) *AsyncHasher {
secsize := h.pool.SegmentSize
if double {
secsize *= 2
}
write := func(i int, section []byte, final bool) {
h.writeSection(i, section, double, final)
}
return &AsyncHasher{
Hasher: h,
double: double,
secsize: secsize,
write: write,
}
}
// SectionWriter is an asynchronous segment/section writer interface
type SectionWriter interface {
Reset() // standard init to be called before reuse
Write(index int, data []byte) // write into section of index
Sum(b []byte, length int, span []byte) []byte // returns the hash of the buffer
SectionSize() int // size of the async section unit to use
}
// AsyncHasher extends BMT Hasher with an asynchronous segment/section writer interface
// AsyncHasher is unsafe and does not check indexes and section data lengths
// it must be used with the right indexes and length and the right number of sections
//
// behaviour is undefined if
// * non-final sections are shorter or longer than secsize
// * if final section does not match length
// * write a section with index that is higher than length/secsize
// * set length in Sum call when length/secsize < maxsec
//
// * if Sum() is not called on a Hasher that is fully written
// a process will block, can be terminated with Reset
// * it will not leak processes if not all sections are written but it blocks
// and keeps the resource which can be released calling Reset()
type AsyncHasher struct {
*Hasher // extends the Hasher
mtx sync.Mutex // to lock the cursor access
double bool // whether to use double segments (call Hasher.writeSection)
secsize int // size of base section (size of hash or double)
write func(i int, section []byte, final bool)
}
// methods needed to implement AsyncWriter
// SectionSize returns the size of async section unit to use
func (sw *AsyncHasher) SectionSize() int {
return sw.secsize
}
// Write writes the i-th section of the BMT base
// this function can and is meant to be called concurrently
// it sets the max segment in a thread-safe manner
func (sw *AsyncHasher) Write(i int, section []byte) {
sw.mtx.Lock()
defer sw.mtx.Unlock()
t := sw.getTree()
// cursor keeps track of the rightmost section written so far
// if index is lower than cursor then just write non-final section as is
if i < t.cursor {
// if index is not the rightmost, safe to write section
go sw.write(i, section, false)
return
}
// if there is a previous rightmost section safe to write section
if t.offset > 0 {
if i == t.cursor {
// i==cursor implies cursor was set by Hash call so we can write section as final one
// since it can be shorter, first we copy it to the padded buffer
t.section = make([]byte, sw.secsize)
copy(t.section, section)
go sw.write(i, t.section, true)
return
}
// the rightmost section just changed, so we write the previous one as non-final
go sw.write(t.cursor, t.section, false)
}
	// set i as the index of the rightmost section written so far
// set t.offset to cursor*secsize+1
t.cursor = i
t.offset = i*sw.secsize + 1
t.section = make([]byte, sw.secsize)
copy(t.section, section)
}
// Sum can be called any time once the length and the span is known
// potentially even before all segments have been written
// in such cases Sum will block until all segments are present and
// the hash for the length can be calculated.
//
// b: digest is appended to b
// length: known length of the input (unsafe; undefined if out of range)
// meta: metadata to hash together with BMT root for the final digest
// e.g., span for protection against existential forgery
func (sw *AsyncHasher) Sum(b []byte, length int, meta []byte) (s []byte) {
sw.mtx.Lock()
t := sw.getTree()
if length == 0 {
sw.mtx.Unlock()
s = sw.pool.zerohashes[sw.pool.Depth]
} else {
// for non-zero input the rightmost section is written to the tree asynchronously
// if the actual last section has been written (t.cursor == length/t.secsize)
maxsec := (length - 1) / sw.secsize
if t.offset > 0 {
go sw.write(t.cursor, t.section, maxsec == t.cursor)
}
// set cursor to maxsec so final section is written when it arrives
t.cursor = maxsec
t.offset = length
result := t.result
sw.mtx.Unlock()
// wait for the result or reset
s = <-result
}
	// release the tree back to the pool
sw.releaseTree()
// if no meta is given just append digest to b
if len(meta) == 0 {
return append(b, s...)
}
// hash together meta and BMT root hash using the pools
return doSum(sw.pool.hasher(), b, meta, s)
}
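// Illustrative usage sketch (not part of the original file): driving the
// asynchronous interface described above. The sections are assumed to already
// be SectionSize() long (except possibly the last one), h is assumed to be
// fresh or Reset, and length/span are supplied by the caller; sections may be
// written in any order before Sum is called.
func exampleAsyncUsage(h *Hasher, sections [][]byte, length int, span []byte) []byte {
	aw := h.NewAsyncWriter(false)
	for i, section := range sections {
		aw.Write(i, section)
	}
	return aw.Sum(nil, length, span)
}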
// writeSection writes the hash of i-th section into level 1 node of the BMT tree
func (h *Hasher) writeSection(i int, section []byte, double bool, final bool) {
// select the leaf node for the section
var n *node
var isLeft bool
var hasher hash.Hash
var level int
t := h.getTree()
if double {
level++
n = t.leaves[i]
hasher = n.hasher
isLeft = n.isLeft
n = n.parent
// hash the section
section = doSum(hasher, nil, section)
} else {
n = t.leaves[i/2]
hasher = n.hasher
isLeft = i%2 == 0
}
// write hash into parent node
if final {
// for the last segment use writeFinalNode
h.writeFinalNode(level, n, hasher, isLeft, section)
} else {
h.writeNode(n, hasher, isLeft, section)
}
}
// writeNode pushes the data to the node
// if it is the first of 2 sisters written, the routine terminates
// if it is the second, it calculates the hash and writes it
// to the parent node recursively
// since hashing the parent is synchronous the same hasher can be used
func (h *Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) {
level := 1
for {
// at the root of the bmt just write the result to the result channel
if n == nil {
h.getTree().result <- s
return
}
// otherwise assign child hash to left or right segment
if isLeft {
n.left = s
} else {
n.right = s
}
// the child-thread first arriving will terminate
if n.toggle() {
return
}
// the thread coming second now can be sure both left and right children are written
// so it calculates the hash of left|right and pushes it to the parent
s = doSum(bh, nil, n.left, n.right)
isLeft = n.isLeft
n = n.parent
level++
}
}
// writeFinalNode is following the path starting from the final datasegment to the
// BMT root via parents
// for unbalanced trees it fills in the missing right sister nodes using
// the pool's lookup table for BMT subtree root hashes for all-zero sections
// otherwise it behaves like `writeNode`
func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s []byte) {
for {
// at the root of the bmt just write the result to the result channel
if n == nil {
if s != nil {
h.getTree().result <- s
}
return
}
var noHash bool
if isLeft {
// coming from left sister branch
// when the final section's path is going via left child node
// we include an all-zero subtree hash for the right level and toggle the node.
n.right = h.pool.zerohashes[level]
if s != nil {
n.left = s
				// if a left final node carries a hash, it must be the first (and only) thread
				// so the toggle is already in passive state and no toggle call is needed
				// yet the thread needs to carry on pushing the hash to the parent
noHash = false
} else {
// if again first thread then propagate nil and calculate no hash
noHash = n.toggle()
}
} else {
// right sister branch
if s != nil {
				// if hash was pushed from the right child node, write right segment and change state
n.right = s
// if toggle is true, we arrived first so no hashing just push nil to parent
noHash = n.toggle()
} else {
// if s is nil, then thread arrived first at previous node and here there will be two,
// so no need to do anything and keep s = nil for parent
noHash = true
}
}
// the child-thread first arriving will just continue resetting s to nil
// the second thread now can be sure both left and right children are written
// it calculates the hash of left|right and pushes it to the parent
if noHash {
s = nil
} else {
s = doSum(bh, nil, n.left, n.right)
}
// iterate to parent
isLeft = n.isLeft
n = n.parent
level++
}
}
// getTree obtains a BMT resource by reserving one from the pool and assigns it to the bmt field
func (h *Hasher) getTree() *tree {
if h.bmt != nil {
return h.bmt
}
t := h.pool.reserve()
h.bmt = t
return t
}
// atomic bool toggle implementing a concurrent reusable 2-state object
// atomic AddInt32 with %2 implements the atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (n *node) toggle() bool {
return atomic.AddInt32(&n.state, 1)%2 == 1
}
// calculates the hash of the data using hash.Hash
func doSum(h hash.Hash, b []byte, data ...[]byte) []byte {
h.Reset()
for _, v := range data {
h.Write(v)
}
return h.Sum(b)
}
// hashstr is a pretty printer for bytes used in tree.draw
func hashstr(b []byte) string {
end := len(b)
if end > 4 {
end = 4
}
return fmt.Sprintf("%x", b[:end])
}
// calculateDepthFor calculates the depth (number of levels) in the BMT tree
func calculateDepthFor(n int) (d int) {
c := 2
for ; c < n; c *= 2 {
d++
}
return d + 1
}
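// Illustrative usage sketch (not part of the original file): the synchronous
// lifecycle described in the package comment above. The base hash function,
// segment count and chunk/span contents are chosen by the caller; swarm itself
// uses Keccak256 with 128 segments per chunk.
func exampleHasherUsage(base BaseHasherFunc, chunk, span []byte) []byte {
	pool := NewTreePool(base, 128, PoolSize)
	h := New(pool)
	h.ResetWithLength(span) // span is the length metadata hashed in with the BMT root
	h.Write(chunk)          // chunk must not exceed pool.Size bytes
	return h.Sum(nil)
}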
| swarm/bmt/bmt.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0017545860027894378,
0.00020004184625577182,
0.000162404976435937,
0.00016968855925370008,
0.00018898025155067444
] |
{
"id": 1,
"code_window": [
"\tif hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {\n",
"\t\treturn fmt.Errorf(\"transaction root hash mismatch: have %x, want %x\", hash, header.TxHash)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// ValidateState validates the various changes that happen after a state\n",
"// transition, such as amount of used gas, the receipt roots and the state root\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n"
],
"file_path": "core/block_validator.go",
"type": "add",
"edit_start_line_idx": 72
} | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/log"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
"github.com/mattn/go-colorable"
)
func init() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUp(false, t)
}
func TestCLISwarmUpRecursive(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUpRecursive(false, t)
}
// TestCLISwarmUpEncrypted tests that running 'swarm up --encrypt' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUpEncrypted(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUp(true, t)
}
func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUpRecursive(true, t)
}
func testCLISwarmUp(toEncrypt bool, t *testing.T) {
log.Info("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer tmp.Close()
defer os.Remove(tmp.Name())
// write data to file
data := "notsorandomdata"
_, err = io.WriteString(tmp, data)
if err != nil {
t.Fatal(err)
}
hashRegexp := `[a-f\d]{64}`
flags := []string{
"--bzzapi", cluster.Nodes[0].URL,
"up",
tmp.Name()}
if toEncrypt {
hashRegexp = `[a-f\d]{128}`
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"up",
"--encrypt",
tmp.Name()}
}
// upload the file with 'swarm up' and expect a hash
log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
up := runSwarm(t, flags...)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
log.Info("file uploaded", "hash", hash)
// get the file from the HTTP API of each node
for _, node := range cluster.Nodes {
log.Info("getting file from node", "node", node.Name)
res, err := http.Get(node.URL + "/bzz:/" + hash)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
reply, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("expected HTTP status 200, got %s", res.Status)
}
if string(reply) != data {
t.Fatalf("expected HTTP body %q, got %q", data, reply)
}
log.Debug("verifying uploaded file using `swarm down`")
//try to get the content with `swarm down`
tmpDownload, err := ioutil.TempDir("", "swarm-test")
tmpDownload = path.Join(tmpDownload, "tmpfile.tmp")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDownload)
bzzLocator := "bzz:/" + hash
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"down",
bzzLocator,
tmpDownload,
}
down := runSwarm(t, flags...)
down.ExpectExit()
fi, err := os.Stat(tmpDownload)
if err != nil {
t.Fatalf("could not stat path: %v", err)
}
switch mode := fi.Mode(); {
case mode.IsRegular():
downloadedBytes, err := ioutil.ReadFile(tmpDownload)
if err != nil {
t.Fatalf("had an error reading the downloaded file: %v", err)
}
if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) {
t.Fatalf("retrieved data and posted data not equal!")
}
default:
t.Fatalf("expected to download regular file, got %s", fi.Mode())
}
}
timeout := time.Duration(2 * time.Second)
httpClient := http.Client{
Timeout: timeout,
}
	// try to squeeze a timeout by getting a non-existent hash from each node
for _, node := range cluster.Nodes {
_, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340")
		// we're speeding up the timeout here since netstore has a 60 second timeout on a request
if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
t.Fatal(err)
}
// this is disabled since it takes 60s due to netstore timeout
// if res.StatusCode != 404 {
// t.Fatalf("expected HTTP status 404, got %s", res.Status)
// }
}
}
func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
fmt.Println("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpUploadDir)
// create tmp files
data := "notsorandomdata"
for _, path := range []string{"tmp1", "tmp2"} {
if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
t.Fatal(err)
}
}
hashRegexp := `[a-f\d]{64}`
flags := []string{
"--bzzapi", cluster.Nodes[0].URL,
"--recursive",
"up",
tmpUploadDir}
if toEncrypt {
hashRegexp = `[a-f\d]{128}`
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"--recursive",
"up",
"--encrypt",
tmpUploadDir}
}
// upload the file with 'swarm up' and expect a hash
log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
up := runSwarm(t, flags...)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
log.Info("dir uploaded", "hash", hash)
// get the file from the HTTP API of each node
for _, node := range cluster.Nodes {
log.Info("getting file from node", "node", node.Name)
//try to get the content with `swarm down`
tmpDownload, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDownload)
bzzLocator := "bzz:/" + hash
flagss := []string{}
flagss = []string{
"--bzzapi", cluster.Nodes[0].URL,
"down",
"--recursive",
bzzLocator,
tmpDownload,
}
fmt.Println("downloading from swarm with recursive")
down := runSwarm(t, flagss...)
down.ExpectExit()
files, err := ioutil.ReadDir(tmpDownload)
for _, v := range files {
fi, err := os.Stat(path.Join(tmpDownload, v.Name()))
if err != nil {
t.Fatalf("got an error: %v", err)
}
switch mode := fi.Mode(); {
case mode.IsRegular():
if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
} else {
ff := make([]byte, len(data))
io.ReadFull(file, ff)
buf := bytes.NewBufferString(data)
if !bytes.Equal(ff, buf.Bytes()) {
t.Fatalf("retrieved data and posted data not equal!")
}
}
default:
t.Fatalf("this shouldnt happen")
}
}
if err != nil {
t.Fatalf("could not list files at: %v", files)
}
}
}
// TestCLISwarmUpDefaultPath tests swarm recursive upload with relative and absolute
// default paths and with encryption.
func TestCLISwarmUpDefaultPath(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
testCLISwarmUpDefaultPath(false, false, t)
testCLISwarmUpDefaultPath(false, true, t)
testCLISwarmUpDefaultPath(true, false, t)
testCLISwarmUpDefaultPath(true, true, t)
}
func testCLISwarmUpDefaultPath(toEncrypt bool, absDefaultPath bool, t *testing.T) {
srv := testutil.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
tmp, err := ioutil.TempDir("", "swarm-defaultpath-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
err = ioutil.WriteFile(filepath.Join(tmp, "index.html"), []byte("<h1>Test</h1>"), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(tmp, "robots.txt"), []byte("Disallow: /"), 0666)
if err != nil {
t.Fatal(err)
}
defaultPath := "index.html"
if absDefaultPath {
defaultPath = filepath.Join(tmp, defaultPath)
}
args := []string{
"--bzzapi",
srv.URL,
"--recursive",
"--defaultpath",
defaultPath,
"up",
tmp,
}
if toEncrypt {
args = append(args, "--encrypt")
}
up := runSwarm(t, args...)
hashRegexp := `[a-f\d]{64,128}`
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
client := swarm.NewClient(srv.URL)
m, isEncrypted, err := client.DownloadManifest(hash)
if err != nil {
t.Fatal(err)
}
if toEncrypt != isEncrypted {
t.Error("downloaded manifest is not encrypted")
}
var found bool
var entriesCount int
for _, e := range m.Entries {
entriesCount++
if e.Path == "" {
found = true
}
}
if !found {
t.Error("manifest default entry was not found")
}
if entriesCount != 3 {
t.Errorf("manifest contains %v entries, expected %v", entriesCount, 3)
}
}
| cmd/swarm/upload_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.00097760371863842,
0.00021397533419076353,
0.0001633014326216653,
0.00017334072617813945,
0.00017183263844344765
] |
{
"id": 1,
"code_window": [
"\tif hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {\n",
"\t\treturn fmt.Errorf(\"transaction root hash mismatch: have %x, want %x\", hash, header.TxHash)\n",
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// ValidateState validates the various changes that happen after a state\n",
"// transition, such as amount of used gas, the receipt roots and the state root\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\tif !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {\n",
"\t\t\treturn consensus.ErrUnknownAncestor\n",
"\t\t}\n",
"\t\treturn consensus.ErrPrunedAncestor\n",
"\t}\n"
],
"file_path": "core/block_validator.go",
"type": "add",
"edit_start_line_idx": 72
} | package fuse
import (
"runtime"
)
func stack() string {
buf := make([]byte, 1024)
return string(buf[:runtime.Stack(buf, false)])
}
func nop(msg interface{}) {}
// Debug is called to output debug messages, including protocol
// traces. The default behavior is to do nothing.
//
// The messages have human-friendly string representations and are
// safe to marshal to JSON.
//
// Implementations must not retain msg.
var Debug func(msg interface{}) = nop
| vendor/bazil.org/fuse/debug.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0001887702674139291,
0.00017410109285265207,
0.00016333833627868444,
0.00017019470396917313,
0.000010743703569460195
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\treturn 0, errBadPeer\n",
"\t\t\t\t}\n",
"\t\t\t\tstart = check\n",
"\n",
"\t\t\tcase <-timeout:\n",
"\t\t\t\tp.log.Debug(\"Waiting for search header timed out\", \"elapsed\", ttl)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\thash = h\n"
],
"file_path": "eth/downloader/downloader.go",
"type": "add",
"edit_start_line_idx": 742
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"fmt"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
// BlockValidator is responsible for validating block headers, uncles and
// processed state.
//
// BlockValidator implements Validator.
type BlockValidator struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
engine consensus.Engine // Consensus engine used for validating
}
// NewBlockValidator returns a new block validator which is safe for re-use
func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
validator := &BlockValidator{
config: config,
engine: engine,
bc: blockchain,
}
return validator
}
// ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
func (v *BlockValidator) ValidateBody(block *types.Block) error {
// Check whether the block's known, and if not, that it's linkable
if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
return ErrKnownBlock
}
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
}
return consensus.ErrPrunedAncestor
}
// Header validity is known at this point, check the uncles and transactions
header := block.Header()
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
}
if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
return nil
}
// ValidateState validates the various changes that happen after a state
// transition, such as amount of used gas, the receipt roots and the state root
// itself. ValidateState returns an error if the validation failed, otherwise nil.
func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
header := block.Header()
if block.GasUsed() != usedGas {
return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
}
// Validate the received block's bloom with the one derived from the generated receipts.
// For valid blocks this should always validate to true.
rbloom := types.CreateBloom(receipts)
if rbloom != header.Bloom {
return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
}
	// The receipt Trie's root (R = (Tr [[H1, R1], ..., [Hn, Rn]]))
receiptSha := types.DeriveSha(receipts)
if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
}
return nil
}
// CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas above the provided floor, and increase it towards the
// ceil if the blocks are full. If the ceil is exceeded, it will always decrease
// the gas allowance.
func CalcGasLimit(parent *types.Block, gasFloor, gasCeil uint64) uint64 {
// contrib = (parentGasUsed * 3 / 2) / 1024
contrib := (parent.GasUsed() + parent.GasUsed()/2) / params.GasLimitBoundDivisor
// decay = parentGasLimit / 1024 -1
decay := parent.GasLimit()/params.GasLimitBoundDivisor - 1
/*
strategy: gasLimit of block-to-mine is set based on parent's
gasUsed value. if parentGasUsed > parentGasLimit * (2/3) then we
increase it, otherwise lower it (or leave it unchanged if it's right
at that usage) the amount increased/decreased depends on how far away
from parentGasLimit * (2/3) parentGasUsed is.
*/
limit := parent.GasLimit() - decay + contrib
if limit < params.MinGasLimit {
limit = params.MinGasLimit
}
	// If we're outside our allowed gas range, we try to move back towards them
if limit < gasFloor {
limit = parent.GasLimit() + decay
if limit > gasFloor {
limit = gasFloor
}
} else if limit > gasCeil {
limit = parent.GasLimit() - decay
if limit < gasCeil {
limit = gasCeil
}
}
return limit
}
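// Illustrative sketch (not part of the original file): a worked example of the
// strategy above. A parent with an 8,000,000 gas limit and 6,000,000 gas used
// sits above the 2/3 target, so contrib (9,000,000/1024 = 8789) exceeds decay
// (8,000,000/1024 - 1 = 7811) and the child limit becomes 8,000,978, which
// here already lies within the caller-supplied floor/ceil bounds.
func exampleCalcGasLimit() uint64 {
	parent := types.NewBlockWithHeader(&types.Header{GasLimit: 8000000, GasUsed: 6000000})
	return CalcGasLimit(parent, 7000000, 9000000) // = 8000978
}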
| core/block_validator.go | 1 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0003222533268854022,
0.00018888400518335402,
0.00016586281708441675,
0.00017419952200725675,
0.00003929689773940481
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\treturn 0, errBadPeer\n",
"\t\t\t\t}\n",
"\t\t\t\tstart = check\n",
"\n",
"\t\t\tcase <-timeout:\n",
"\t\t\t\tp.log.Debug(\"Waiting for search header timed out\", \"elapsed\", ttl)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\thash = h\n"
],
"file_path": "eth/downloader/downloader.go",
"type": "add",
"edit_start_line_idx": 742
} | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors contains common error types for the OpenPGP packages.
package errors // import "golang.org/x/crypto/openpgp/errors"
import (
"strconv"
)
// A StructuralError is returned when OpenPGP data is found to be syntactically
// invalid.
type StructuralError string
func (s StructuralError) Error() string {
return "openpgp: invalid data: " + string(s)
}
// UnsupportedError indicates that, although the OpenPGP data is valid, it
// makes use of currently unimplemented features.
type UnsupportedError string
func (s UnsupportedError) Error() string {
return "openpgp: unsupported feature: " + string(s)
}
// InvalidArgumentError indicates that the caller is in error and passed an
// incorrect value.
type InvalidArgumentError string
func (i InvalidArgumentError) Error() string {
return "openpgp: invalid argument: " + string(i)
}
// SignatureError indicates that a syntactically valid signature failed to
// validate.
type SignatureError string
func (b SignatureError) Error() string {
return "openpgp: invalid signature: " + string(b)
}
type keyIncorrectError int
func (ki keyIncorrectError) Error() string {
return "openpgp: incorrect key"
}
var ErrKeyIncorrect error = keyIncorrectError(0)
type unknownIssuerError int
func (unknownIssuerError) Error() string {
return "openpgp: signature made by unknown entity"
}
var ErrUnknownIssuer error = unknownIssuerError(0)
type keyRevokedError int
func (keyRevokedError) Error() string {
return "openpgp: signature made by revoked key"
}
var ErrKeyRevoked error = keyRevokedError(0)
type UnknownPacketTypeError uint8
func (upte UnknownPacketTypeError) Error() string {
return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
}
| vendor/golang.org/x/crypto/openpgp/errors/errors.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0004613764467649162,
0.0003042733878828585,
0.00016631765174679458,
0.0002830586163327098,
0.00011600348079809919
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\treturn 0, errBadPeer\n",
"\t\t\t\t}\n",
"\t\t\t\tstart = check\n",
"\n",
"\t\t\tcase <-timeout:\n",
"\t\t\t\tp.log.Debug(\"Waiting for search header timed out\", \"elapsed\", ttl)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\thash = h\n"
],
"file_path": "eth/downloader/downloader.go",
"type": "add",
"edit_start_line_idx": 742
} | package bn256
import (
"bytes"
"crypto/rand"
"testing"
)
func TestG1Marshal(t *testing.T) {
_, Ga, err := RandomG1(rand.Reader)
if err != nil {
t.Fatal(err)
}
ma := Ga.Marshal()
Gb := new(G1)
_, err = Gb.Unmarshal(ma)
if err != nil {
t.Fatal(err)
}
mb := Gb.Marshal()
if !bytes.Equal(ma, mb) {
t.Fatal("bytes are different")
}
}
func TestG2Marshal(t *testing.T) {
_, Ga, err := RandomG2(rand.Reader)
if err != nil {
t.Fatal(err)
}
ma := Ga.Marshal()
Gb := new(G2)
_, err = Gb.Unmarshal(ma)
if err != nil {
t.Fatal(err)
}
mb := Gb.Marshal()
if !bytes.Equal(ma, mb) {
t.Fatal("bytes are different")
}
}
func TestBilinearity(t *testing.T) {
for i := 0; i < 2; i++ {
a, p1, _ := RandomG1(rand.Reader)
b, p2, _ := RandomG2(rand.Reader)
e1 := Pair(p1, p2)
e2 := Pair(&G1{curveGen}, &G2{twistGen})
e2.ScalarMult(e2, a)
e2.ScalarMult(e2, b)
if *e1.p != *e2.p {
t.Fatalf("bad pairing result: %s", e1)
}
}
}
func TestTripartiteDiffieHellman(t *testing.T) {
a, _ := rand.Int(rand.Reader, Order)
b, _ := rand.Int(rand.Reader, Order)
c, _ := rand.Int(rand.Reader, Order)
pa, pb, pc := new(G1), new(G1), new(G1)
qa, qb, qc := new(G2), new(G2), new(G2)
pa.Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
qa.Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
pb.Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
qb.Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
pc.Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
qc.Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
k1 := Pair(pb, qc)
k1.ScalarMult(k1, a)
k1Bytes := k1.Marshal()
k2 := Pair(pc, qa)
k2.ScalarMult(k2, b)
k2Bytes := k2.Marshal()
k3 := Pair(pa, qb)
k3.ScalarMult(k3, c)
k3Bytes := k3.Marshal()
if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
t.Errorf("keys didn't agree")
}
}
func BenchmarkG1(b *testing.B) {
x, _ := rand.Int(rand.Reader, Order)
b.ResetTimer()
for i := 0; i < b.N; i++ {
new(G1).ScalarBaseMult(x)
}
}
func BenchmarkG2(b *testing.B) {
x, _ := rand.Int(rand.Reader, Order)
b.ResetTimer()
for i := 0; i < b.N; i++ {
new(G2).ScalarBaseMult(x)
}
}
func BenchmarkPairing(b *testing.B) {
for i := 0; i < b.N; i++ {
Pair(&G1{curveGen}, &G2{twistGen})
}
}
| crypto/bn256/cloudflare/bn256_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.0001737186248647049,
0.00016936480824369937,
0.0001635423395782709,
0.0001686055911704898,
0.0000034473689538572216
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\treturn 0, errBadPeer\n",
"\t\t\t\t}\n",
"\t\t\t\tstart = check\n",
"\n",
"\t\t\tcase <-timeout:\n",
"\t\t\t\tp.log.Debug(\"Waiting for search header timed out\", \"elapsed\", ttl)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\thash = h\n"
],
"file_path": "eth/downloader/downloader.go",
"type": "add",
"edit_start_line_idx": 742
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
//go:generate mimegen --types=./../../cmd/swarm/mimegen/mime.types --package=api --out=gen_mime.go
//go:generate gofmt -s -w gen_mime.go
import (
"archive/tar"
"context"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"io"
"math/big"
"net/http"
"path"
"strings"
"bytes"
"mime"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/contracts/ens"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
opentracing "github.com/opentracing/opentracing-go"
)
var (
ErrNotFound = errors.New("not found")
)
var (
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
apiPutCount = metrics.NewRegisteredCounter("api.put.count", nil)
apiPutFail = metrics.NewRegisteredCounter("api.put.fail", nil)
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil)
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil)
apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
apiManifestUpdateCount = metrics.NewRegisteredCounter("api.manifestupdate.count", nil)
apiManifestUpdateFail = metrics.NewRegisteredCounter("api.manifestupdate.fail", nil)
apiManifestListCount = metrics.NewRegisteredCounter("api.manifestlist.count", nil)
apiManifestListFail = metrics.NewRegisteredCounter("api.manifestlist.fail", nil)
apiDeleteCount = metrics.NewRegisteredCounter("api.delete.count", nil)
apiDeleteFail = metrics.NewRegisteredCounter("api.delete.fail", nil)
apiGetTarCount = metrics.NewRegisteredCounter("api.gettar.count", nil)
apiGetTarFail = metrics.NewRegisteredCounter("api.gettar.fail", nil)
apiUploadTarCount = metrics.NewRegisteredCounter("api.uploadtar.count", nil)
apiUploadTarFail = metrics.NewRegisteredCounter("api.uploadtar.fail", nil)
apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil)
apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil)
apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil)
apiAddFileFail = metrics.NewRegisteredCounter("api.addfile.fail", nil)
apiRmFileCount = metrics.NewRegisteredCounter("api.removefile.count", nil)
apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil)
apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil)
apiAppendFileFail = metrics.NewRegisteredCounter("api.appendfile.fail", nil)
apiGetInvalid = metrics.NewRegisteredCounter("api.get.invalid", nil)
)
// Resolver interface resolves a domain name to a hash using ENS
type Resolver interface {
Resolve(string) (common.Hash, error)
}
// ResolveValidator is used to validate the contained Resolver
type ResolveValidator interface {
Resolver
Owner(node [32]byte) (common.Address, error)
HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
}
// NoResolverError is returned by MultiResolver.Resolve if no resolver
// can be found for the address.
type NoResolverError struct {
TLD string
}
// NewNoResolverError creates a NoResolverError for the given top level domain
func NewNoResolverError(tld string) *NoResolverError {
return &NoResolverError{TLD: tld}
}
// Error implements the error interface for NoResolverError
func (e *NoResolverError) Error() string {
if e.TLD == "" {
return "no ENS resolver"
}
return fmt.Sprintf("no ENS endpoint configured to resolve .%s TLD names", e.TLD)
}
// MultiResolver is used to resolve URL addresses based on their TLDs.
// Each TLD can have multiple resolvers, and the resolution from the
// first one in the sequence will be returned.
type MultiResolver struct {
resolvers map[string][]ResolveValidator
nameHash func(string) common.Hash
}
// MultiResolverOption sets options for MultiResolver and is used as
// arguments for its constructor.
type MultiResolverOption func(*MultiResolver)
// MultiResolverOptionWithResolver adds a Resolver to a list of resolvers
// for a specific TLD. If TLD is an empty string, the resolver will be added
// to the list of default resolvers, the ones that will be used for resolution
// of addresses which do not have their TLD resolver specified.
func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption {
return func(m *MultiResolver) {
m.resolvers[tld] = append(m.resolvers[tld], r)
}
}
// MultiResolverOptionWithNameHash is unused at the time of this writing
func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
return func(m *MultiResolver) {
m.nameHash = nameHash
}
}
// NewMultiResolver creates a new instance of MultiResolver.
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
m = &MultiResolver{
resolvers: make(map[string][]ResolveValidator),
nameHash: ens.EnsNode,
}
for _, o := range opts {
o(m)
}
return m
}
// Resolve resolves an address by choosing a Resolver based on its TLD.
// If there are multiple Resolvers, either default ones or ones for a specific TLD,
// the Hash from the first one that does not return an error
// will be returned.
func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
rs, err := m.getResolveValidator(addr)
if err != nil {
return h, err
}
for _, r := range rs {
h, err = r.Resolve(addr)
if err == nil {
return
}
}
return
}
// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address
func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
rs, err := m.getResolveValidator(name)
if err != nil {
return false, err
}
var addr common.Address
for _, r := range rs {
addr, err = r.Owner(m.nameHash(name))
// we hide the error if it is not for the last resolver we check
if err == nil {
return addr == address, nil
}
}
return false, err
}
// HeaderByNumber uses the validator of the given domain name and retrieves the header for the given block number
func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
rs, err := m.getResolveValidator(name)
if err != nil {
return nil, err
}
for _, r := range rs {
var header *types.Header
header, err = r.HeaderByNumber(ctx, blockNr)
// we hide the error if it is not for the last resolver we check
if err == nil {
return header, nil
}
}
return nil, err
}
// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
rs := m.resolvers[""]
tld := path.Ext(name)
if tld != "" {
tld = tld[1:]
rstld, ok := m.resolvers[tld]
if ok {
return rstld, nil
}
}
if len(rs) == 0 {
return rs, NewNoResolverError(tld)
}
return rs, nil
}
// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
m.nameHash = nameHash
}
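// exampleComposeMultiResolver is an illustrative sketch (the helper and its
// arguments are hypothetical, supplied here only to show how per-TLD resolvers
// compose): defaultResolver handles names whose TLD has no dedicated resolver,
// while ethResolver handles *.eth names.
func exampleComposeMultiResolver(defaultResolver, ethResolver ResolveValidator) (common.Hash, error) {
	mr := NewMultiResolver(
		MultiResolverOptionWithResolver(defaultResolver, ""), // fallback list used for all other TLDs
		MultiResolverOptionWithResolver(ethResolver, "eth"),  // consulted only for names ending in .eth
	)
	// getResolveValidator picks the "eth" list here, so ethResolver is asked first
	return mr.Resolve("theswarm.eth")
}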
/*
API implements webserver/file system related content storage and retrieval
on top of the FileStore
it is the public interface of the FileStore which is included in the ethereum stack
*/
type API struct {
feed *feed.Handler
fileStore *storage.FileStore
dns Resolver
Decryptor func(context.Context, string) DecryptFunc
}
// NewAPI is the API constructor; it initialises a new API instance.
func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey) (self *API) {
self = &API{
fileStore: fileStore,
dns: dns,
feed: feedHandler,
Decryptor: func(ctx context.Context, credentials string) DecryptFunc {
return self.doDecrypt(ctx, credentials, pk)
},
}
return
}
// Retrieve FileStore reader API
func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) {
return a.fileStore.Retrieve(ctx, addr)
}
// Store wraps the Store API call of the embedded FileStore
func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(ctx context.Context) error, err error) {
log.Debug("api.store", "size", size)
return a.fileStore.Store(ctx, data, size, toEncrypt)
}
// ErrResolve is returned when a URI cannot be resolved from ENS.
type ErrResolve error
// Resolve a name into a content-addressed hash
// where address could be an ENS name, or a content addressed hash
func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
// if DNS is not configured, return an error
if a.dns == nil {
if hashMatcher.MatchString(address) {
return common.Hex2Bytes(address), nil
}
apiResolveFail.Inc(1)
return nil, fmt.Errorf("no DNS to resolve name: %q", address)
}
// try and resolve the address
resolved, err := a.dns.Resolve(address)
if err != nil {
if hashMatcher.MatchString(address) {
return common.Hex2Bytes(address), nil
}
return nil, err
}
return resolved[:], nil
}
// ResolveURI resolves a URI to an Address using the MultiResolver.
func (a *API) ResolveURI(ctx context.Context, uri *URI, credentials string) (storage.Address, error) {
apiResolveCount.Inc(1)
log.Trace("resolving", "uri", uri.Addr)
var sp opentracing.Span
ctx, sp = spancontext.StartSpan(
ctx,
"api.resolve")
defer sp.Finish()
// if the URI is immutable, check if the address looks like a hash
if uri.Immutable() {
key := uri.Address()
if key == nil {
return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr)
}
return key, nil
}
addr, err := a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, err
}
if uri.Path == "" {
return addr, nil
}
walker, err := a.NewManifestWalker(ctx, addr, a.Decryptor(ctx, credentials), nil)
if err != nil {
return nil, err
}
var entry *ManifestEntry
walker.Walk(func(e *ManifestEntry) error {
// if the entry matches the path, set entry and stop
// the walk
if e.Path == uri.Path {
entry = e
// return an error to cancel the walk
return errors.New("found")
}
// ignore non-manifest files
if e.ContentType != ManifestType {
return nil
}
// if the manifest's path is a prefix of the
// requested path, recurse into it by returning
// nil and continuing the walk
if strings.HasPrefix(uri.Path, e.Path) {
return nil
}
return ErrSkipManifest
})
if entry == nil {
return nil, errors.New("not found")
}
addr = storage.Address(common.Hex2Bytes(entry.Hash))
return addr, nil
}
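// exampleResolveURI is an illustrative sketch (hypothetical helper; apiInstance
// is assumed to be an already constructed *API): it resolves a bzz URI that
// carries both an ENS name and a path into the final content address.
func exampleResolveURI(ctx context.Context, apiInstance *API) (storage.Address, error) {
	uri, err := Parse("bzz:/theswarm.eth/path/to/entry")
	if err != nil {
		return nil, err
	}
	// the ENS name resolves to a manifest address, which is then walked for "path/to/entry"
	return apiInstance.ResolveURI(ctx, uri, EMPTY_CREDENTIALS)
}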
// Put provides singleton manifest creation on top of FileStore store
func (a *API) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
apiPutCount.Inc(1)
r := strings.NewReader(content)
key, waitContent, err := a.fileStore.Store(ctx, r, int64(len(content)), toEncrypt)
if err != nil {
apiPutFail.Inc(1)
return nil, nil, err
}
manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
r = strings.NewReader(manifest)
key, waitManifest, err := a.fileStore.Store(ctx, r, int64(len(manifest)), toEncrypt)
if err != nil {
apiPutFail.Inc(1)
return nil, nil, err
}
return key, func(ctx context.Context) error {
err := waitContent(ctx)
if err != nil {
return err
}
return waitManifest(ctx)
}, nil
}
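// examplePut is an illustrative sketch (hypothetical helper; apiInstance is
// assumed to be an already constructed *API): it stores a small piece of
// content behind a single-entry manifest and waits until both chunks persist.
func examplePut(ctx context.Context, apiInstance *API) (storage.Address, error) {
	addr, wait, err := apiInstance.Put(ctx, "hello swarm", "text/plain", false)
	if err != nil {
		return nil, err
	}
	// wait blocks until the content chunk and the manifest chunk are both stored
	if err := wait(ctx); err != nil {
		return nil, err
	}
	return addr, nil
}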
// Get uses iterative manifest retrieval and prefix matching
// to resolve basePath to content using FileStore retrieve
// it returns a section reader, mimeType, status, the key of the actual content and an error
func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) {
log.Debug("api.get", "key", manifestAddr, "path", path)
apiGetCount.Inc(1)
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
return nil, "", http.StatusNotFound, nil, err
}
log.Debug("trie getting entry", "key", manifestAddr, "path", path)
entry, _ := trie.getEntry(path)
if entry != nil {
log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash)
if entry.ContentType == ManifestType {
log.Debug("entry is manifest", "key", manifestAddr, "new key", entry.Hash)
adr, err := hex.DecodeString(entry.Hash)
if err != nil {
return nil, "", 0, nil, err
}
return a.Get(ctx, decrypt, adr, entry.Path)
}
// we need to do some extra work if this is a Swarm feed manifest
if entry.ContentType == FeedContentType {
if entry.Feed == nil {
return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest")
}
_, err := a.feed.Lookup(ctx, feed.NewQueryLatest(entry.Feed, lookup.NoClue))
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
log.Debug(fmt.Sprintf("get feed update content error: %v", err))
return reader, mimeType, status, nil, err
}
// get the data of the update
_, rsrcData, err := a.feed.GetContent(entry.Feed)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
log.Warn(fmt.Sprintf("get feed update content error: %v", err))
return reader, mimeType, status, nil, err
}
// extract multihash
decodedMultihash, err := multihash.FromMultihash(rsrcData)
if err != nil {
apiGetInvalid.Inc(1)
status = http.StatusUnprocessableEntity
log.Warn("invalid multihash in feed update", "err", err)
return reader, mimeType, status, nil, err
}
manifestAddr = storage.Address(decodedMultihash)
log.Trace("feed update contains multihash", "key", manifestAddr)
// get the manifest the multihash digest points to
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
log.Warn(fmt.Sprintf("loadManifestTrie (feed update multihash) error: %v", err))
return reader, mimeType, status, nil, err
}
// finally, get the manifest entry
// it will always be the entry on path ""
entry, _ = trie.getEntry(path)
if entry == nil {
status = http.StatusNotFound
apiGetNotFound.Inc(1)
err = fmt.Errorf("manifest (feed update multihash) entry for '%s' not found", path)
log.Trace("manifest (feed update multihash) entry not found", "key", manifestAddr, "path", path)
return reader, mimeType, status, nil, err
}
}
// regardless of feed update manifests or normal manifests we will converge at this point
// get the key the manifest entry points to and serve it if it's unambiguous
contentAddr = common.Hex2Bytes(entry.Hash)
status = entry.Status
if status == http.StatusMultipleChoices {
apiGetHTTP300.Inc(1)
return nil, entry.ContentType, status, contentAddr, err
}
mimeType = entry.ContentType
log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType)
reader, _ = a.fileStore.Retrieve(ctx, contentAddr)
} else {
// no entry found
status = http.StatusNotFound
apiGetNotFound.Inc(1)
err = fmt.Errorf("manifest entry for '%s' not found", path)
log.Trace("manifest entry not found", "key", contentAddr, "path", path)
}
return
}
func (a *API) Delete(ctx context.Context, addr string, path string) (storage.Address, error) {
apiDeleteCount.Inc(1)
uri, err := Parse("bzz:/" + addr)
if err != nil {
apiDeleteFail.Inc(1)
return nil, err
}
key, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS)
if err != nil {
return nil, err
}
newKey, err := a.UpdateManifest(ctx, key, func(mw *ManifestWriter) error {
log.Debug(fmt.Sprintf("removing %s from manifest %s", path, key.Log()))
return mw.RemoveEntry(path)
})
if err != nil {
apiDeleteFail.Inc(1)
return nil, err
}
return newKey, nil
}
// GetDirectoryTar fetches a requested directory as a tar stream.
// It returns an io.ReadCloser and an error. Do not forget to Close() the returned ReadCloser
func (a *API) GetDirectoryTar(ctx context.Context, decrypt DecryptFunc, uri *URI) (io.ReadCloser, error) {
apiGetTarCount.Inc(1)
addr, err := a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, err
}
walker, err := a.NewManifestWalker(ctx, addr, decrypt, nil)
if err != nil {
apiGetTarFail.Inc(1)
return nil, err
}
piper, pipew := io.Pipe()
tw := tar.NewWriter(pipew)
go func() {
err := walker.Walk(func(entry *ManifestEntry) error {
// ignore manifests (walk will recurse into them)
if entry.ContentType == ManifestType {
return nil
}
// retrieve the entry's key and size
reader, _ := a.Retrieve(ctx, storage.Address(common.Hex2Bytes(entry.Hash)))
size, err := reader.Size(ctx, nil)
if err != nil {
return err
}
// write a tar header for the entry
hdr := &tar.Header{
Name: entry.Path,
Mode: entry.Mode,
Size: size,
ModTime: entry.ModTime,
Xattrs: map[string]string{
"user.swarm.content-type": entry.ContentType,
},
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
// copy the file into the tar stream
n, err := io.Copy(tw, io.LimitReader(reader, hdr.Size))
if err != nil {
return err
} else if n != size {
return fmt.Errorf("error writing %s: expected %d bytes but sent %d", entry.Path, size, n)
}
return nil
})
// close tar writer before closing pipew
// to flush remaining data to pipew
// regardless of error value
tw.Close()
if err != nil {
apiGetTarFail.Inc(1)
pipew.CloseWithError(err)
} else {
pipew.Close()
}
}()
return piper, nil
}
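// exampleReadDirectoryTar is an illustrative sketch (hypothetical helper;
// apiInstance and uri are assumed to be prepared by the caller): it consumes
// the stream returned by GetDirectoryTar entry by entry.
func exampleReadDirectoryTar(ctx context.Context, apiInstance *API, uri *URI) error {
	rc, err := apiInstance.GetDirectoryTar(ctx, NOOPDecrypt, uri)
	if err != nil {
		return err
	}
	defer rc.Close()
	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of the archive
		}
		if err != nil {
			return err
		}
		log.Debug("tar entry", "name", hdr.Name, "size", hdr.Size)
	}
}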
// GetManifestList lists the manifest entries for the specified address and prefix
// and returns them as a ManifestList
func (a *API) GetManifestList(ctx context.Context, decryptor DecryptFunc, addr storage.Address, prefix string) (list ManifestList, err error) {
apiManifestListCount.Inc(1)
walker, err := a.NewManifestWalker(ctx, addr, decryptor, nil)
if err != nil {
apiManifestListFail.Inc(1)
return ManifestList{}, err
}
err = walker.Walk(func(entry *ManifestEntry) error {
// handle non-manifest files
if entry.ContentType != ManifestType {
// ignore the file if it doesn't have the specified prefix
if !strings.HasPrefix(entry.Path, prefix) {
return nil
}
// if the path after the prefix contains a slash, add a
// common prefix to the list, otherwise add the entry
suffix := strings.TrimPrefix(entry.Path, prefix)
if index := strings.Index(suffix, "/"); index > -1 {
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1])
return nil
}
if entry.Path == "" {
entry.Path = "/"
}
list.Entries = append(list.Entries, entry)
return nil
}
// if the manifest's path is a prefix of the specified prefix
// then just recurse into the manifest by returning nil and
// continuing the walk
if strings.HasPrefix(prefix, entry.Path) {
return nil
}
// if the manifest's path has the specified prefix, then if the
// path after the prefix contains a slash, add a common prefix
// to the list and skip the manifest, otherwise recurse into
// the manifest by returning nil and continuing the walk
if strings.HasPrefix(entry.Path, prefix) {
suffix := strings.TrimPrefix(entry.Path, prefix)
if index := strings.Index(suffix, "/"); index > -1 {
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1])
return ErrSkipManifest
}
return nil
}
// the manifest neither has the prefix nor needs recursing into,
// so just skip it
return ErrSkipManifest
})
if err != nil {
apiManifestListFail.Inc(1)
return ManifestList{}, err
}
return list, nil
}
func (a *API) UpdateManifest(ctx context.Context, addr storage.Address, update func(mw *ManifestWriter) error) (storage.Address, error) {
apiManifestUpdateCount.Inc(1)
mw, err := a.NewManifestWriter(ctx, addr, nil)
if err != nil {
apiManifestUpdateFail.Inc(1)
return nil, err
}
if err := update(mw); err != nil {
apiManifestUpdateFail.Inc(1)
return nil, err
}
addr, err = mw.Store()
if err != nil {
apiManifestUpdateFail.Inc(1)
return nil, err
}
log.Debug(fmt.Sprintf("generated manifest %s", addr))
return addr, nil
}
// Modify loads manifest and checks the content hash before recalculating and storing the manifest.
func (a *API) Modify(ctx context.Context, addr storage.Address, path, contentHash, contentType string) (storage.Address, error) {
apiModifyCount.Inc(1)
quitC := make(chan bool)
trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
if err != nil {
apiModifyFail.Inc(1)
return nil, err
}
if contentHash != "" {
entry := newManifestTrieEntry(&ManifestEntry{
Path: path,
ContentType: contentType,
}, nil)
entry.Hash = contentHash
trie.addEntry(entry, quitC)
} else {
trie.deleteEntry(path, quitC)
}
if err := trie.recalcAndStore(); err != nil {
apiModifyFail.Inc(1)
return nil, err
}
return trie.ref, nil
}
// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm.
func (a *API) AddFile(ctx context.Context, mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
apiAddFileCount.Inc(1)
uri, err := Parse("bzz:/" + mhash)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
mkey, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
// trim the root dir we added
if path[:1] == "/" {
path = path[1:]
}
entry := &ManifestEntry{
Path: filepath.Join(path, fname),
ContentType: mime.TypeByExtension(filepath.Ext(fname)),
Mode: 0700,
Size: int64(len(content)),
ModTime: time.Now(),
}
mw, err := a.NewManifestWriter(ctx, mkey, nil)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
fkey, err := mw.AddEntry(ctx, bytes.NewReader(content), entry)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
newMkey, err := mw.Store()
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
return fkey, newMkey.String(), nil
}
func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestPath, defaultPath string, mw *ManifestWriter) (storage.Address, error) {
apiUploadTarCount.Inc(1)
var contentKey storage.Address
tr := tar.NewReader(bodyReader)
defer bodyReader.Close()
var defaultPathFound bool
for {
hdr, err := tr.Next()
if err == io.EOF {
break
} else if err != nil {
apiUploadTarFail.Inc(1)
return nil, fmt.Errorf("error reading tar stream: %s", err)
}
// only store regular files
if !hdr.FileInfo().Mode().IsRegular() {
continue
}
// add the entry under the path from the request
manifestPath := path.Join(manifestPath, hdr.Name)
contentType := hdr.Xattrs["user.swarm.content-type"]
if contentType == "" {
contentType = mime.TypeByExtension(filepath.Ext(hdr.Name))
}
//DetectContentType("")
entry := &ManifestEntry{
Path: manifestPath,
ContentType: contentType,
Mode: hdr.Mode,
Size: hdr.Size,
ModTime: hdr.ModTime,
}
contentKey, err = mw.AddEntry(ctx, tr, entry)
if err != nil {
apiUploadTarFail.Inc(1)
return nil, fmt.Errorf("error adding manifest entry from tar stream: %s", err)
}
if hdr.Name == defaultPath {
contentType := hdr.Xattrs["user.swarm.content-type"]
if contentType == "" {
contentType = mime.TypeByExtension(filepath.Ext(hdr.Name))
}
entry := &ManifestEntry{
Hash: contentKey.Hex(),
Path: "", // default entry
ContentType: contentType,
Mode: hdr.Mode,
Size: hdr.Size,
ModTime: hdr.ModTime,
}
contentKey, err = mw.AddEntry(ctx, nil, entry)
if err != nil {
apiUploadTarFail.Inc(1)
return nil, fmt.Errorf("error adding default manifest entry from tar stream: %s", err)
}
defaultPathFound = true
}
}
if defaultPath != "" && !defaultPathFound {
return contentKey, fmt.Errorf("default path %q not found", defaultPath)
}
return contentKey, nil
}
// RemoveFile removes a file entry in a manifest.
func (a *API) RemoveFile(ctx context.Context, mhash string, path string, fname string, nameresolver bool) (string, error) {
apiRmFileCount.Inc(1)
uri, err := Parse("bzz:/" + mhash)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
mkey, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
// trim the root dir we added
if path[:1] == "/" {
path = path[1:]
}
mw, err := a.NewManifestWriter(ctx, mkey, nil)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
err = mw.RemoveEntry(filepath.Join(path, fname))
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
newMkey, err := mw.Store()
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
return newMkey.String(), nil
}
// AppendFile removes old manifest, appends file entry to new manifest and adds it to Swarm.
func (a *API) AppendFile(ctx context.Context, mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) {
apiAppendFileCount.Inc(1)
buffSize := offset + addSize
if buffSize < existingSize {
buffSize = existingSize
}
buf := make([]byte, buffSize)
oldReader, _ := a.Retrieve(ctx, oldAddr)
io.ReadAtLeast(oldReader, buf, int(offset))
newReader := bytes.NewReader(content)
io.ReadAtLeast(newReader, buf[offset:], int(addSize))
if buffSize < existingSize {
io.ReadAtLeast(oldReader, buf[addSize:], int(buffSize))
}
combinedReader := bytes.NewReader(buf)
totalSize := int64(len(buf))
// TODO(jmozah): to append using pyramid chunker when it is ready
//oldReader := a.Retrieve(oldKey)
//newReader := bytes.NewReader(content)
//combinedReader := io.MultiReader(oldReader, newReader)
uri, err := Parse("bzz:/" + mhash)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
mkey, err := a.ResolveURI(ctx, uri, EMPTY_CREDENTIALS)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
// trim the root dir we added
if path[:1] == "/" {
path = path[1:]
}
mw, err := a.NewManifestWriter(ctx, mkey, nil)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
err = mw.RemoveEntry(filepath.Join(path, fname))
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
entry := &ManifestEntry{
Path: filepath.Join(path, fname),
ContentType: mime.TypeByExtension(filepath.Ext(fname)),
Mode: 0700,
Size: totalSize,
ModTime: time.Now(),
}
fkey, err := mw.AddEntry(ctx, io.Reader(combinedReader), entry)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
newMkey, err := mw.Store()
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
return fkey, newMkey.String(), nil
}
// BuildDirectoryTree is used by swarmfs_unix
func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) {
uri, err := Parse("bzz:/" + mhash)
if err != nil {
return nil, nil, err
}
addr, err = a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, nil, err
}
quitC := make(chan bool)
rootTrie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
if err != nil {
return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err)
}
manifestEntryMap = map[string]*manifestTrieEntry{}
err = rootTrie.listWithPrefix(uri.Path, quitC, func(entry *manifestTrieEntry, suffix string) {
manifestEntryMap[suffix] = entry
})
if err != nil {
return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err)
}
return addr, manifestEntryMap, nil
}
// FeedsLookup finds Swarm feeds updates at specific points in time, or the latest update
func (a *API) FeedsLookup(ctx context.Context, query *feed.Query) ([]byte, error) {
_, err := a.feed.Lookup(ctx, query)
if err != nil {
return nil, err
}
var data []byte
_, data, err = a.feed.GetContent(&query.Feed)
if err != nil {
return nil, err
}
return data, nil
}
// FeedsNewRequest creates a Request object to update a specific feed
func (a *API) FeedsNewRequest(ctx context.Context, feed *feed.Feed) (*feed.Request, error) {
return a.feed.NewRequest(ctx, feed)
}
// FeedsUpdate publishes a new update on the given feed
func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.Address, error) {
return a.feed.Update(ctx, request)
}
// FeedsHashSize returns the size of the digest produced by Swarm feeds' hashing function
func (a *API) FeedsHashSize() int {
return a.feed.HashSize
}
// ErrCannotLoadFeedManifest is returned when looking up a feed manifest fails
var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")
// ErrNotAFeedManifest is returned when the address provided returned something other than a valid manifest
var ErrNotAFeedManifest = errors.New("Not a feed manifest")
// ResolveFeedManifest retrieves the Swarm feed manifest for the given address, and returns the referenced Feed.
func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feed.Feed, error) {
trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt)
if err != nil {
return nil, ErrCannotLoadFeedManifest
}
entry, _ := trie.getEntry("")
if entry.ContentType != FeedContentType {
return nil, ErrNotAFeedManifest
}
return entry.Feed, nil
}
// ErrCannotResolveFeedURI is returned when the ENS resolver is not able to translate a name to a Swarm feed
var ErrCannotResolveFeedURI = errors.New("Cannot resolve Feed URI")
// ErrCannotResolveFeed is returned when values provided are not enough or invalid to recreate a
// feed out of them.
var ErrCannotResolveFeed = errors.New("Cannot resolve Feed")
// ResolveFeed attempts to extract feed information out of the manifest, if provided
// If not, it attempts to extract the feed out of a set of key-value pairs
func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feed.Values) (*feed.Feed, error) {
var fd *feed.Feed
var err error
if uri.Addr != "" {
// resolve the content key.
manifestAddr := uri.Address()
if manifestAddr == nil {
manifestAddr, err = a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, ErrCannotResolveFeedURI
}
}
// get the Swarm feed from the manifest
fd, err = a.ResolveFeedManifest(ctx, manifestAddr)
if err != nil {
return nil, err
}
log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", fd.Hex())
} else {
var f feed.Feed
if err := f.FromValues(values); err != nil {
return nil, ErrCannotResolveFeed
}
fd = &f
}
return fd, nil
}
// MimeOctetStream is the default value of the http Content-Type header
const MimeOctetStream = "application/octet-stream"
// DetectContentType detects the content type by file extension, or falls back to content sniffing
func DetectContentType(fileName string, f io.ReadSeeker) (string, error) {
ctype := mime.TypeByExtension(filepath.Ext(fileName))
if ctype != "" {
return ctype, nil
}
	// save/rollback to get a content probe from the beginning of the file
currentPosition, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err)
}
// read a chunk to decide between utf-8 text and binary
var buf [512]byte
n, _ := f.Read(buf[:])
ctype = http.DetectContentType(buf[:n])
_, err = f.Seek(currentPosition, io.SeekStart) // rewind to output whole file
if err != nil {
return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err)
}
return ctype, nil
}
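// exampleDetectContentType is an illustrative sketch (hypothetical helper):
// bytes.Reader implements io.ReadSeeker, so DetectContentType can sniff the
// first bytes and rewind without consuming the caller's data.
func exampleDetectContentType(name string, data []byte) (string, error) {
	r := bytes.NewReader(data)
	ctype, err := DetectContentType(name, r)
	if err != nil {
		return MimeOctetStream, err
	}
	// r has been rewound to its original position and can still be read in full
	return ctype, nil
}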
| swarm/api/api.go | 0 | https://github.com/ethereum/go-ethereum/commit/eea3ae42a3d9bcbd33474c0e482754c5196a469f | [
0.00565599137917161,
0.0002904467983171344,
0.00016201937978621572,
0.00017013173783198,
0.000693409820087254
] |
{
"id": 0,
"code_window": [
"\tctx.Data[\"Title\"] = milestone.Name\n",
"\tctx.Data[\"Milestone\"] = milestone\n",
"\n",
"\tissues(ctx, milestoneID, util.OptionalBoolNone)\n",
"\n",
"\tctx.HTML(200, tplMilestoneIssues)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\tperm, err := models.GetUserRepoPermission(ctx.Repo.Repository, ctx.User)\n",
"\tif err != nil {\n",
"\t\tctx.ServerError(\"GetUserRepoPermission\", err)\n",
"\t\treturn\n",
"\t}\n",
"\tctx.Data[\"CanWriteIssues\"] = perm.CanWriteIssuesOrPulls(false)\n",
"\tctx.Data[\"CanWritePulls\"] = perm.CanWriteIssuesOrPulls(true)\n",
"\n"
],
"file_path": "routers/repo/milestone.go",
"type": "add",
"edit_start_line_idx": 262
} | // Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package routes
import (
"encoding/gob"
"fmt"
"net/http"
"os"
"path"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/gzip"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/metrics"
"code.gitea.io/gitea/modules/options"
"code.gitea.io/gitea/modules/public"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/templates"
"code.gitea.io/gitea/modules/validation"
"code.gitea.io/gitea/routers"
"code.gitea.io/gitea/routers/admin"
apiv1 "code.gitea.io/gitea/routers/api/v1"
"code.gitea.io/gitea/routers/dev"
"code.gitea.io/gitea/routers/org"
"code.gitea.io/gitea/routers/private"
"code.gitea.io/gitea/routers/repo"
"code.gitea.io/gitea/routers/user"
userSetting "code.gitea.io/gitea/routers/user/setting"
"github.com/go-macaron/binding"
"github.com/go-macaron/cache"
"github.com/go-macaron/captcha"
"github.com/go-macaron/csrf"
"github.com/go-macaron/i18n"
"github.com/go-macaron/session"
"github.com/go-macaron/toolbox"
"github.com/prometheus/client_golang/prometheus"
"github.com/tstranex/u2f"
macaron "gopkg.in/macaron.v1"
)
func giteaLogger(l *log.LoggerAsWriter) macaron.Handler {
return func(ctx *macaron.Context) {
start := time.Now()
l.Log(fmt.Sprintf("[Macaron] Started %s %s for %s", ctx.Req.Method, ctx.Req.RequestURI, ctx.RemoteAddr()))
ctx.Next()
rw := ctx.Resp.(macaron.ResponseWriter)
l.Log(fmt.Sprintf("[Macaron] Completed %s %s %v %s in %v", ctx.Req.Method, ctx.Req.RequestURI, rw.Status(), http.StatusText(rw.Status()), time.Since(start)))
}
}
// NewMacaron initializes Macaron instance.
func NewMacaron() *macaron.Macaron {
gob.Register(&u2f.Challenge{})
var m *macaron.Macaron
if setting.RedirectMacaronLog {
loggerAsWriter := log.NewLoggerAsWriter("INFO")
m = macaron.NewWithLogger(loggerAsWriter)
if !setting.DisableRouterLog {
m.Use(giteaLogger(loggerAsWriter))
}
} else {
m = macaron.New()
if !setting.DisableRouterLog {
m.Use(macaron.Logger())
}
}
m.Use(macaron.Recovery())
if setting.EnableGzip {
m.Use(gzip.Middleware())
}
if setting.Protocol == setting.FCGI {
m.SetURLPrefix(setting.AppSubURL)
}
m.Use(public.Custom(
&public.Options{
SkipLogging: setting.DisableRouterLog,
ExpiresAfter: time.Hour * 6,
},
))
m.Use(public.Static(
&public.Options{
Directory: path.Join(setting.StaticRootPath, "public"),
SkipLogging: setting.DisableRouterLog,
ExpiresAfter: time.Hour * 6,
},
))
m.Use(public.StaticHandler(
setting.AvatarUploadPath,
&public.Options{
Prefix: "avatars",
SkipLogging: setting.DisableRouterLog,
ExpiresAfter: time.Hour * 6,
},
))
m.Use(templates.HTMLRenderer())
models.InitMailRender(templates.Mailer())
localeNames, err := options.Dir("locale")
if err != nil {
log.Fatal(4, "Failed to list locale files: %v", err)
}
localFiles := make(map[string][]byte)
for _, name := range localeNames {
localFiles[name], err = options.Locale(name)
if err != nil {
log.Fatal(4, "Failed to load %s locale file. %v", name, err)
}
}
m.Use(i18n.I18n(i18n.Options{
SubURL: setting.AppSubURL,
Files: localFiles,
Langs: setting.Langs,
Names: setting.Names,
DefaultLang: "en-US",
Redirect: false,
}))
m.Use(cache.Cacher(cache.Options{
Adapter: setting.CacheService.Adapter,
AdapterConfig: setting.CacheService.Conn,
Interval: setting.CacheService.Interval,
}))
m.Use(captcha.Captchaer(captcha.Options{
SubURL: setting.AppSubURL,
}))
m.Use(session.Sessioner(setting.SessionConfig))
m.Use(csrf.Csrfer(csrf.Options{
Secret: setting.SecretKey,
Cookie: setting.CSRFCookieName,
SetCookie: true,
Secure: setting.SessionConfig.Secure,
CookieHttpOnly: true,
Header: "X-Csrf-Token",
CookiePath: setting.AppSubURL,
}))
m.Use(toolbox.Toolboxer(m, toolbox.Options{
HealthCheckFuncs: []*toolbox.HealthCheckFuncDesc{
{
Desc: "Database connection",
Func: models.Ping,
},
},
DisableDebug: !setting.EnablePprof,
}))
m.Use(context.Contexter())
// OK we are now set-up enough to allow us to create a nicer recovery than
// the default macaron recovery
m.Use(context.Recovery())
m.SetAutoHead(true)
return m
}
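// exampleStartWebServer is an illustrative sketch (hypothetical helper; the
// listen address is an example value): it shows how the two entry points of
// this package are wired together by the caller before serving HTTP.
func exampleStartWebServer() error {
	m := NewMacaron() // middleware chain: static assets, i18n, cache, session, CSRF, context, ...
	RegisterRoutes(m) // attach web, org, repo, admin and API routes
	return http.ListenAndServe("0.0.0.0:3000", m)
}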
// RegisterRoutes registers routes to Macaron
func RegisterRoutes(m *macaron.Macaron) {
reqSignIn := context.Toggle(&context.ToggleOptions{SignInRequired: true})
ignSignIn := context.Toggle(&context.ToggleOptions{SignInRequired: setting.Service.RequireSignInView})
ignSignInAndCsrf := context.Toggle(&context.ToggleOptions{DisableCSRF: true})
reqSignOut := context.Toggle(&context.ToggleOptions{SignOutRequired: true})
bindIgnErr := binding.BindIgnErr
validation.AddBindingRules()
openIDSignInEnabled := func(ctx *context.Context) {
if !setting.Service.EnableOpenIDSignIn {
ctx.Error(403)
return
}
}
openIDSignUpEnabled := func(ctx *context.Context) {
if !setting.Service.EnableOpenIDSignUp {
ctx.Error(403)
return
}
}
m.Use(user.GetNotificationCount)
	// FIXME: not all routes need to go through the same middlewares.
	// Especially for some AJAX requests, we can reduce the middleware count to improve performance.
// Routers.
// for health check
m.Head("/", func() string {
return ""
})
m.Get("/", routers.Home)
m.Group("/explore", func() {
m.Get("", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL + "/explore/repos")
})
m.Get("/repos", routers.ExploreRepos)
m.Get("/users", routers.ExploreUsers)
m.Get("/organizations", routers.ExploreOrganizations)
m.Get("/code", routers.ExploreCode)
}, ignSignIn)
m.Combo("/install", routers.InstallInit).Get(routers.Install).
Post(bindIgnErr(auth.InstallForm{}), routers.InstallPost)
m.Get("/^:type(issues|pulls)$", reqSignIn, user.Issues)
// ***** START: User *****
m.Group("/user", func() {
m.Get("/login", user.SignIn)
m.Post("/login", bindIgnErr(auth.SignInForm{}), user.SignInPost)
m.Group("", func() {
m.Combo("/login/openid").
Get(user.SignInOpenID).
Post(bindIgnErr(auth.SignInOpenIDForm{}), user.SignInOpenIDPost)
}, openIDSignInEnabled)
m.Group("/openid", func() {
m.Combo("/connect").
Get(user.ConnectOpenID).
Post(bindIgnErr(auth.ConnectOpenIDForm{}), user.ConnectOpenIDPost)
m.Group("/register", func() {
m.Combo("").
Get(user.RegisterOpenID, openIDSignUpEnabled).
Post(bindIgnErr(auth.SignUpOpenIDForm{}), user.RegisterOpenIDPost)
}, openIDSignUpEnabled)
}, openIDSignInEnabled)
m.Get("/sign_up", user.SignUp)
m.Post("/sign_up", bindIgnErr(auth.RegisterForm{}), user.SignUpPost)
m.Get("/reset_password", user.ResetPasswd)
m.Post("/reset_password", user.ResetPasswdPost)
m.Group("/oauth2", func() {
m.Get("/:provider", user.SignInOAuth)
m.Get("/:provider/callback", user.SignInOAuthCallback)
})
m.Get("/link_account", user.LinkAccount)
m.Post("/link_account_signin", bindIgnErr(auth.SignInForm{}), user.LinkAccountPostSignIn)
m.Post("/link_account_signup", bindIgnErr(auth.RegisterForm{}), user.LinkAccountPostRegister)
m.Group("/two_factor", func() {
m.Get("", user.TwoFactor)
m.Post("", bindIgnErr(auth.TwoFactorAuthForm{}), user.TwoFactorPost)
m.Get("/scratch", user.TwoFactorScratch)
m.Post("/scratch", bindIgnErr(auth.TwoFactorScratchAuthForm{}), user.TwoFactorScratchPost)
})
m.Group("/u2f", func() {
m.Get("", user.U2F)
m.Get("/challenge", user.U2FChallenge)
m.Post("/sign", bindIgnErr(u2f.SignResponse{}), user.U2FSign)
})
}, reqSignOut)
m.Group("/login/oauth", func() {
m.Get("/authorize", bindIgnErr(auth.AuthorizationForm{}), user.AuthorizeOAuth)
m.Post("/grant", bindIgnErr(auth.GrantApplicationForm{}), user.GrantApplicationOAuth)
// TODO manage redirection
m.Post("/authorize", bindIgnErr(auth.AuthorizationForm{}), user.AuthorizeOAuth)
}, ignSignInAndCsrf, reqSignIn)
m.Post("/login/oauth/access_token", bindIgnErr(auth.AccessTokenForm{}), ignSignInAndCsrf, user.AccessTokenOAuth)
m.Group("/user/settings", func() {
m.Get("", userSetting.Profile)
m.Post("", bindIgnErr(auth.UpdateProfileForm{}), userSetting.ProfilePost)
m.Get("/change_password", user.MustChangePassword)
m.Post("/change_password", bindIgnErr(auth.MustChangePasswordForm{}), user.MustChangePasswordPost)
m.Post("/avatar", binding.MultipartForm(auth.AvatarForm{}), userSetting.AvatarPost)
m.Post("/avatar/delete", userSetting.DeleteAvatar)
m.Group("/account", func() {
m.Combo("").Get(userSetting.Account).Post(bindIgnErr(auth.ChangePasswordForm{}), userSetting.AccountPost)
m.Post("/email", bindIgnErr(auth.AddEmailForm{}), userSetting.EmailPost)
m.Post("/email/delete", userSetting.DeleteEmail)
m.Post("/delete", userSetting.DeleteAccount)
m.Post("/theme", bindIgnErr(auth.UpdateThemeForm{}), userSetting.UpdateUIThemePost)
})
m.Group("/security", func() {
m.Get("", userSetting.Security)
m.Group("/two_factor", func() {
m.Post("/regenerate_scratch", userSetting.RegenerateScratchTwoFactor)
m.Post("/disable", userSetting.DisableTwoFactor)
m.Get("/enroll", userSetting.EnrollTwoFactor)
m.Post("/enroll", bindIgnErr(auth.TwoFactorAuthForm{}), userSetting.EnrollTwoFactorPost)
})
m.Group("/u2f", func() {
m.Post("/request_register", bindIgnErr(auth.U2FRegistrationForm{}), userSetting.U2FRegister)
m.Post("/register", bindIgnErr(u2f.RegisterResponse{}), userSetting.U2FRegisterPost)
m.Post("/delete", bindIgnErr(auth.U2FDeleteForm{}), userSetting.U2FDelete)
})
m.Group("/openid", func() {
m.Post("", bindIgnErr(auth.AddOpenIDForm{}), userSetting.OpenIDPost)
m.Post("/delete", userSetting.DeleteOpenID)
m.Post("/toggle_visibility", userSetting.ToggleOpenIDVisibility)
}, openIDSignInEnabled)
m.Post("/account_link", userSetting.DeleteAccountLink)
})
m.Group("/applications/oauth2", func() {
m.Get("/:id", userSetting.OAuth2ApplicationShow)
m.Post("/:id", bindIgnErr(auth.EditOAuth2ApplicationForm{}), userSetting.OAuthApplicationsEdit)
m.Post("/:id/regenerate_secret", userSetting.OAuthApplicationsRegenerateSecret)
m.Post("", bindIgnErr(auth.EditOAuth2ApplicationForm{}), userSetting.OAuthApplicationsPost)
m.Post("/delete", userSetting.DeleteOAuth2Application)
})
m.Combo("/applications").Get(userSetting.Applications).
Post(bindIgnErr(auth.NewAccessTokenForm{}), userSetting.ApplicationsPost)
m.Post("/applications/delete", userSetting.DeleteApplication)
m.Combo("/keys").Get(userSetting.Keys).
Post(bindIgnErr(auth.AddKeyForm{}), userSetting.KeysPost)
m.Post("/keys/delete", userSetting.DeleteKey)
m.Get("/organization", userSetting.Organization)
m.Get("/repos", userSetting.Repos)
// redirects from old settings urls to new ones
// TODO: can be removed on next major version
m.Get("/avatar", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL+"/user/settings", http.StatusMovedPermanently)
})
m.Get("/email", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL+"/user/settings/account", http.StatusMovedPermanently)
})
m.Get("/delete", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL+"/user/settings/account", http.StatusMovedPermanently)
})
m.Get("/openid", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL+"/user/settings/security", http.StatusMovedPermanently)
})
m.Get("/account_link", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL+"/user/settings/security", http.StatusMovedPermanently)
})
}, reqSignIn, func(ctx *context.Context) {
ctx.Data["PageIsUserSettings"] = true
ctx.Data["AllThemes"] = setting.UI.Themes
})
m.Group("/user", func() {
// r.Get("/feeds", binding.Bind(auth.FeedsForm{}), user.Feeds)
m.Any("/activate", user.Activate)
m.Any("/activate_email", user.ActivateEmail)
m.Get("/email2user", user.Email2User)
m.Get("/forgot_password", user.ForgotPasswd)
m.Post("/forgot_password", user.ForgotPasswdPost)
m.Get("/logout", user.SignOut)
})
// ***** END: User *****
adminReq := context.Toggle(&context.ToggleOptions{SignInRequired: true, AdminRequired: true})
// ***** START: Admin *****
m.Group("/admin", func() {
m.Get("", adminReq, admin.Dashboard)
m.Get("/config", admin.Config)
m.Post("/config/test_mail", admin.SendTestMail)
m.Get("/monitor", admin.Monitor)
m.Group("/users", func() {
m.Get("", admin.Users)
m.Combo("/new").Get(admin.NewUser).Post(bindIgnErr(auth.AdminCreateUserForm{}), admin.NewUserPost)
m.Combo("/:userid").Get(admin.EditUser).Post(bindIgnErr(auth.AdminEditUserForm{}), admin.EditUserPost)
m.Post("/:userid/delete", admin.DeleteUser)
})
m.Group("/orgs", func() {
m.Get("", admin.Organizations)
})
m.Group("/repos", func() {
m.Get("", admin.Repos)
m.Post("/delete", admin.DeleteRepo)
})
m.Group("/auths", func() {
m.Get("", admin.Authentications)
m.Combo("/new").Get(admin.NewAuthSource).Post(bindIgnErr(auth.AuthenticationForm{}), admin.NewAuthSourcePost)
m.Combo("/:authid").Get(admin.EditAuthSource).
Post(bindIgnErr(auth.AuthenticationForm{}), admin.EditAuthSourcePost)
m.Post("/:authid/delete", admin.DeleteAuthSource)
})
m.Group("/notices", func() {
m.Get("", admin.Notices)
m.Post("/delete", admin.DeleteNotices)
m.Get("/empty", admin.EmptyNotices)
})
}, adminReq)
// ***** END: Admin *****
m.Group("", func() {
m.Group("/:username", func() {
m.Get("", user.Profile)
m.Get("/followers", user.Followers)
m.Get("/following", user.Following)
})
m.Get("/attachments/:uuid", func(ctx *context.Context) {
attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
if err != nil {
if models.IsErrAttachmentNotExist(err) {
ctx.Error(404)
} else {
ctx.ServerError("GetAttachmentByUUID", err)
}
return
}
fr, err := os.Open(attach.LocalPath())
if err != nil {
ctx.ServerError("Open", err)
return
}
defer fr.Close()
if err := attach.IncreaseDownloadCount(); err != nil {
ctx.ServerError("Update", err)
return
}
if err = repo.ServeData(ctx, attach.Name, fr); err != nil {
ctx.ServerError("ServeData", err)
return
}
})
m.Post("/attachments", repo.UploadAttachment)
}, ignSignIn)
m.Group("/:username", func() {
m.Get("/action/:action", user.Action)
}, reqSignIn)
if macaron.Env == macaron.DEV {
m.Get("/template/*", dev.TemplatePreview)
}
reqRepoAdmin := context.RequireRepoAdmin()
reqRepoCodeWriter := context.RequireRepoWriter(models.UnitTypeCode)
reqRepoCodeReader := context.RequireRepoReader(models.UnitTypeCode)
reqRepoReleaseWriter := context.RequireRepoWriter(models.UnitTypeReleases)
reqRepoReleaseReader := context.RequireRepoReader(models.UnitTypeReleases)
reqRepoWikiWriter := context.RequireRepoWriter(models.UnitTypeWiki)
reqRepoIssueReader := context.RequireRepoReader(models.UnitTypeIssues)
reqRepoPullsWriter := context.RequireRepoWriter(models.UnitTypePullRequests)
reqRepoPullsReader := context.RequireRepoReader(models.UnitTypePullRequests)
reqRepoIssuesOrPullsWriter := context.RequireRepoWriterOr(models.UnitTypeIssues, models.UnitTypePullRequests)
reqRepoIssuesOrPullsReader := context.RequireRepoReaderOr(models.UnitTypeIssues, models.UnitTypePullRequests)
reqRepoIssueWriter := func(ctx *context.Context) {
if !ctx.Repo.CanWrite(models.UnitTypeIssues) {
ctx.Error(403)
return
}
}
// ***** START: Organization *****
m.Group("/org", func() {
m.Group("", func() {
m.Get("/create", org.Create)
m.Post("/create", bindIgnErr(auth.CreateOrgForm{}), org.CreatePost)
})
m.Group("/:org", func() {
m.Get("/dashboard", user.Dashboard)
m.Get("/^:type(issues|pulls)$", user.Issues)
m.Get("/members", org.Members)
m.Get("/members/action/:action", org.MembersAction)
m.Get("/teams", org.Teams)
}, context.OrgAssignment(true))
m.Group("/:org", func() {
m.Get("/teams/:team", org.TeamMembers)
m.Get("/teams/:team/repositories", org.TeamRepositories)
m.Route("/teams/:team/action/:action", "GET,POST", org.TeamsAction)
m.Route("/teams/:team/action/repo/:action", "GET,POST", org.TeamsRepoAction)
}, context.OrgAssignment(true, false, true))
m.Group("/:org", func() {
m.Get("/teams/new", org.NewTeam)
m.Post("/teams/new", bindIgnErr(auth.CreateTeamForm{}), org.NewTeamPost)
m.Get("/teams/:team/edit", org.EditTeam)
m.Post("/teams/:team/edit", bindIgnErr(auth.CreateTeamForm{}), org.EditTeamPost)
m.Post("/teams/:team/delete", org.DeleteTeam)
m.Group("/settings", func() {
m.Combo("").Get(org.Settings).
Post(bindIgnErr(auth.UpdateOrgSettingForm{}), org.SettingsPost)
m.Post("/avatar", binding.MultipartForm(auth.AvatarForm{}), org.SettingsAvatar)
m.Post("/avatar/delete", org.SettingsDeleteAvatar)
m.Group("/hooks", func() {
m.Get("", org.Webhooks)
m.Post("/delete", org.DeleteWebhook)
m.Get("/:type/new", repo.WebhooksNew)
m.Post("/gitea/new", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksNewPost)
m.Post("/gogs/new", bindIgnErr(auth.NewGogshookForm{}), repo.GogsHooksNewPost)
m.Post("/slack/new", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksNewPost)
m.Post("/discord/new", bindIgnErr(auth.NewDiscordHookForm{}), repo.DiscordHooksNewPost)
m.Post("/dingtalk/new", bindIgnErr(auth.NewDingtalkHookForm{}), repo.DingtalkHooksNewPost)
m.Get("/:id", repo.WebHooksEdit)
m.Post("/gitea/:id", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksEditPost)
m.Post("/gogs/:id", bindIgnErr(auth.NewGogshookForm{}), repo.GogsHooksEditPost)
m.Post("/slack/:id", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksEditPost)
m.Post("/discord/:id", bindIgnErr(auth.NewDiscordHookForm{}), repo.DiscordHooksEditPost)
m.Post("/dingtalk/:id", bindIgnErr(auth.NewDingtalkHookForm{}), repo.DingtalkHooksEditPost)
})
m.Route("/delete", "GET,POST", org.SettingsDelete)
})
}, context.OrgAssignment(true, true))
}, reqSignIn)
// ***** END: Organization *****
// ***** START: Repository *****
m.Group("/repo", func() {
m.Get("/create", repo.Create)
m.Post("/create", bindIgnErr(auth.CreateRepoForm{}), repo.CreatePost)
m.Get("/migrate", repo.Migrate)
m.Post("/migrate", bindIgnErr(auth.MigrateRepoForm{}), repo.MigratePost)
m.Group("/fork", func() {
m.Combo("/:repoid").Get(repo.Fork).
Post(bindIgnErr(auth.CreateRepoForm{}), repo.ForkPost)
}, context.RepoIDAssignment(), context.UnitTypes(), reqRepoCodeReader)
}, reqSignIn)
// ***** Release Attachment Download without Signin
m.Get("/:username/:reponame/releases/download/:vTag/:fileName", ignSignIn, context.RepoAssignment(), repo.MustBeNotEmpty, repo.RedirectDownload)
m.Group("/:username/:reponame", func() {
m.Group("/settings", func() {
m.Combo("").Get(repo.Settings).
Post(bindIgnErr(auth.RepoSettingForm{}), repo.SettingsPost)
m.Group("/collaboration", func() {
m.Combo("").Get(repo.Collaboration).Post(repo.CollaborationPost)
m.Post("/access_mode", repo.ChangeCollaborationAccessMode)
m.Post("/delete", repo.DeleteCollaboration)
})
m.Group("/branches", func() {
m.Combo("").Get(repo.ProtectedBranch).Post(repo.ProtectedBranchPost)
m.Combo("/*").Get(repo.SettingsProtectedBranch).
Post(bindIgnErr(auth.ProtectBranchForm{}), context.RepoMustNotBeArchived(), repo.SettingsProtectedBranchPost)
}, repo.MustBeNotEmpty)
m.Group("/hooks", func() {
m.Get("", repo.Webhooks)
m.Post("/delete", repo.DeleteWebhook)
m.Get("/:type/new", repo.WebhooksNew)
m.Post("/gitea/new", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksNewPost)
m.Post("/gogs/new", bindIgnErr(auth.NewGogshookForm{}), repo.GogsHooksNewPost)
m.Post("/slack/new", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksNewPost)
m.Post("/discord/new", bindIgnErr(auth.NewDiscordHookForm{}), repo.DiscordHooksNewPost)
m.Post("/dingtalk/new", bindIgnErr(auth.NewDingtalkHookForm{}), repo.DingtalkHooksNewPost)
m.Get("/:id", repo.WebHooksEdit)
m.Post("/:id/test", repo.TestWebhook)
m.Post("/gitea/:id", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksEditPost)
m.Post("/gogs/:id", bindIgnErr(auth.NewGogshookForm{}), repo.GogsHooksEditPost)
m.Post("/slack/:id", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksEditPost)
m.Post("/discord/:id", bindIgnErr(auth.NewDiscordHookForm{}), repo.DiscordHooksEditPost)
m.Post("/dingtalk/:id", bindIgnErr(auth.NewDingtalkHookForm{}), repo.DingtalkHooksEditPost)
m.Group("/git", func() {
m.Get("", repo.GitHooks)
m.Combo("/:name").Get(repo.GitHooksEdit).
Post(repo.GitHooksEditPost)
}, context.GitHookService())
})
m.Group("/keys", func() {
m.Combo("").Get(repo.DeployKeys).
Post(bindIgnErr(auth.AddKeyForm{}), repo.DeployKeysPost)
m.Post("/delete", repo.DeleteDeployKey)
})
}, func(ctx *context.Context) {
ctx.Data["PageIsSettings"] = true
})
}, reqSignIn, context.RepoAssignment(), reqRepoAdmin, context.UnitTypes(), context.RepoRef())
m.Get("/:username/:reponame/action/:action", reqSignIn, context.RepoAssignment(), context.UnitTypes(), context.RepoMustNotBeArchived(), repo.Action)
m.Group("/:username/:reponame", func() {
m.Group("/issues", func() {
m.Combo("/new").Get(context.RepoRef(), repo.NewIssue).
Post(bindIgnErr(auth.CreateIssueForm{}), repo.NewIssuePost)
}, context.RepoMustNotBeArchived(), reqRepoIssueReader)
		// FIXME: should use different URLs but mostly the same logic for comments of issues and pull requests.
// So they can apply their own enable/disable logic on routers.
m.Group("/issues", func() {
m.Group("/:index", func() {
m.Post("/title", repo.UpdateIssueTitle)
m.Post("/content", repo.UpdateIssueContent)
m.Post("/watch", repo.IssueWatch)
m.Group("/dependency", func() {
m.Post("/add", repo.AddDependency)
m.Post("/delete", repo.RemoveDependency)
})
m.Combo("/comments").Post(repo.MustAllowUserComment, bindIgnErr(auth.CreateCommentForm{}), repo.NewComment)
m.Group("/times", func() {
m.Post("/add", bindIgnErr(auth.AddTimeManuallyForm{}), repo.AddTimeManually)
m.Group("/stopwatch", func() {
m.Post("/toggle", repo.IssueStopwatch)
m.Post("/cancel", repo.CancelStopwatch)
})
})
m.Post("/reactions/:action", bindIgnErr(auth.ReactionForm{}), repo.ChangeIssueReaction)
m.Post("/lock", reqRepoIssueWriter, bindIgnErr(auth.IssueLockForm{}), repo.LockIssue)
m.Post("/unlock", reqRepoIssueWriter, repo.UnlockIssue)
}, context.RepoMustNotBeArchived())
m.Post("/labels", reqRepoIssuesOrPullsWriter, repo.UpdateIssueLabel)
m.Post("/milestone", reqRepoIssuesOrPullsWriter, repo.UpdateIssueMilestone)
m.Post("/assignee", reqRepoIssuesOrPullsWriter, repo.UpdateIssueAssignee)
m.Post("/status", reqRepoIssuesOrPullsWriter, repo.UpdateIssueStatus)
}, context.RepoMustNotBeArchived())
m.Group("/comments/:id", func() {
m.Post("", repo.UpdateCommentContent)
m.Post("/delete", repo.DeleteComment)
m.Post("/reactions/:action", bindIgnErr(auth.ReactionForm{}), repo.ChangeCommentReaction)
}, context.RepoMustNotBeArchived())
m.Group("/labels", func() {
m.Post("/new", bindIgnErr(auth.CreateLabelForm{}), repo.NewLabel)
m.Post("/edit", bindIgnErr(auth.CreateLabelForm{}), repo.UpdateLabel)
m.Post("/delete", repo.DeleteLabel)
m.Post("/initialize", bindIgnErr(auth.InitializeLabelsForm{}), repo.InitializeLabels)
}, context.RepoMustNotBeArchived(), reqRepoIssuesOrPullsWriter, context.RepoRef())
m.Group("/milestones", func() {
m.Combo("/new").Get(repo.NewMilestone).
Post(bindIgnErr(auth.CreateMilestoneForm{}), repo.NewMilestonePost)
m.Get("/:id/edit", repo.EditMilestone)
m.Post("/:id/edit", bindIgnErr(auth.CreateMilestoneForm{}), repo.EditMilestonePost)
m.Get("/:id/:action", repo.ChangeMilestonStatus)
m.Post("/delete", repo.DeleteMilestone)
}, context.RepoMustNotBeArchived(), reqRepoIssuesOrPullsWriter, context.RepoRef())
m.Group("/milestone", func() {
m.Get("/:id", repo.MilestoneIssuesAndPulls)
}, reqRepoIssuesOrPullsWriter, context.RepoRef())
m.Combo("/compare/*", context.RepoMustNotBeArchived(), reqRepoCodeReader, reqRepoPullsReader, repo.MustAllowPulls, repo.SetEditorconfigIfExists).
Get(repo.SetDiffViewStyle, repo.CompareAndPullRequest).
Post(bindIgnErr(auth.CreateIssueForm{}), repo.CompareAndPullRequestPost)
m.Group("", func() {
m.Group("", func() {
m.Combo("/_edit/*").Get(repo.EditFile).
Post(bindIgnErr(auth.EditRepoFileForm{}), repo.EditFilePost)
m.Combo("/_new/*").Get(repo.NewFile).
Post(bindIgnErr(auth.EditRepoFileForm{}), repo.NewFilePost)
m.Post("/_preview/*", bindIgnErr(auth.EditPreviewDiffForm{}), repo.DiffPreviewPost)
m.Combo("/_delete/*").Get(repo.DeleteFile).
Post(bindIgnErr(auth.DeleteRepoFileForm{}), repo.DeleteFilePost)
m.Combo("/_upload/*", repo.MustBeAbleToUpload).
Get(repo.UploadFile).
Post(bindIgnErr(auth.UploadRepoFileForm{}), repo.UploadFilePost)
}, context.RepoRefByType(context.RepoRefBranch), repo.MustBeEditable)
m.Group("", func() {
m.Post("/upload-file", repo.UploadFileToServer)
m.Post("/upload-remove", bindIgnErr(auth.RemoveUploadFileForm{}), repo.RemoveUploadFileFromServer)
}, context.RepoRef(), repo.MustBeEditable, repo.MustBeAbleToUpload)
}, context.RepoMustNotBeArchived(), reqRepoCodeWriter, repo.MustBeNotEmpty)
m.Group("/branches", func() {
m.Group("/_new/", func() {
m.Post("/branch/*", context.RepoRefByType(context.RepoRefBranch), repo.CreateBranch)
m.Post("/tag/*", context.RepoRefByType(context.RepoRefTag), repo.CreateBranch)
m.Post("/commit/*", context.RepoRefByType(context.RepoRefCommit), repo.CreateBranch)
}, bindIgnErr(auth.NewBranchForm{}))
m.Post("/delete", repo.DeleteBranchPost)
m.Post("/restore", repo.RestoreBranchPost)
}, context.RepoMustNotBeArchived(), reqRepoCodeWriter, repo.MustBeNotEmpty)
}, reqSignIn, context.RepoAssignment(), context.UnitTypes())
// Releases
m.Group("/:username/:reponame", func() {
m.Group("/releases", func() {
m.Get("/", repo.MustBeNotEmpty, repo.Releases)
}, repo.MustBeNotEmpty, context.RepoRef())
m.Group("/releases", func() {
m.Get("/new", repo.NewRelease)
m.Post("/new", bindIgnErr(auth.NewReleaseForm{}), repo.NewReleasePost)
m.Post("/delete", repo.DeleteRelease)
}, reqSignIn, repo.MustBeNotEmpty, context.RepoMustNotBeArchived(), reqRepoReleaseWriter, context.RepoRef())
m.Group("/releases", func() {
m.Get("/edit/*", repo.EditRelease)
m.Post("/edit/*", bindIgnErr(auth.EditReleaseForm{}), repo.EditReleasePost)
}, reqSignIn, repo.MustBeNotEmpty, context.RepoMustNotBeArchived(), reqRepoReleaseWriter, func(ctx *context.Context) {
var err error
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetBranchCommit(ctx.Repo.Repository.DefaultBranch)
if err != nil {
ctx.ServerError("GetBranchCommit", err)
return
}
ctx.Repo.CommitsCount, err = ctx.Repo.GetCommitsCount()
if err != nil {
ctx.ServerError("GetCommitsCount", err)
return
}
ctx.Data["CommitsCount"] = ctx.Repo.CommitsCount
})
}, ignSignIn, context.RepoAssignment(), context.UnitTypes(), reqRepoReleaseReader)
m.Group("/:username/:reponame", func() {
m.Post("/topics", repo.TopicsPost)
}, context.RepoAssignment(), context.RepoMustNotBeArchived(), reqRepoAdmin)
m.Group("/:username/:reponame", func() {
m.Group("", func() {
m.Get("/^:type(issues|pulls)$", repo.Issues)
m.Get("/^:type(issues|pulls)$/:index", repo.ViewIssue)
m.Get("/labels/", reqRepoIssuesOrPullsReader, repo.RetrieveLabels, repo.Labels)
m.Get("/milestones", reqRepoIssuesOrPullsReader, repo.Milestones)
}, context.RepoRef())
m.Group("/wiki", func() {
m.Get("/?:page", repo.Wiki)
m.Get("/_pages", repo.WikiPages)
m.Group("", func() {
m.Combo("/_new").Get(repo.NewWiki).
Post(bindIgnErr(auth.NewWikiForm{}), repo.NewWikiPost)
m.Combo("/:page/_edit").Get(repo.EditWiki).
Post(bindIgnErr(auth.NewWikiForm{}), repo.EditWikiPost)
m.Post("/:page/delete", repo.DeleteWikiPagePost)
}, context.RepoMustNotBeArchived(), reqSignIn, reqRepoWikiWriter)
}, repo.MustEnableWiki, context.RepoRef())
m.Group("/wiki", func() {
m.Get("/raw/*", repo.WikiRaw)
}, repo.MustEnableWiki)
m.Group("/activity", func() {
m.Get("", repo.Activity)
m.Get("/:period", repo.Activity)
}, context.RepoRef(), repo.MustBeNotEmpty, context.RequireRepoReaderOr(models.UnitTypePullRequests, models.UnitTypeIssues, models.UnitTypeReleases))
m.Get("/archive/*", repo.MustBeNotEmpty, reqRepoCodeReader, repo.Download)
m.Group("/branches", func() {
m.Get("", repo.Branches)
}, repo.MustBeNotEmpty, context.RepoRef(), reqRepoCodeReader)
m.Group("/pulls/:index", func() {
m.Get(".diff", repo.DownloadPullDiff)
m.Get(".patch", repo.DownloadPullPatch)
m.Get("/commits", context.RepoRef(), repo.ViewPullCommits)
m.Post("/merge", context.RepoMustNotBeArchived(), reqRepoPullsWriter, bindIgnErr(auth.MergePullRequestForm{}), repo.MergePullRequest)
m.Post("/cleanup", context.RepoMustNotBeArchived(), context.RepoRef(), repo.CleanUpPullRequest)
m.Group("/files", func() {
m.Get("", context.RepoRef(), repo.SetEditorconfigIfExists, repo.SetDiffViewStyle, repo.SetWhitespaceBehavior, repo.ViewPullFiles)
m.Group("/reviews", func() {
m.Post("/comments", bindIgnErr(auth.CodeCommentForm{}), repo.CreateCodeComment)
m.Post("/submit", bindIgnErr(auth.SubmitReviewForm{}), repo.SubmitReview)
}, context.RepoMustNotBeArchived())
})
}, repo.MustAllowPulls)
m.Group("/media", func() {
m.Get("/branch/*", context.RepoRefByType(context.RepoRefBranch), repo.SingleDownloadOrLFS)
m.Get("/tag/*", context.RepoRefByType(context.RepoRefTag), repo.SingleDownloadOrLFS)
m.Get("/commit/*", context.RepoRefByType(context.RepoRefCommit), repo.SingleDownloadOrLFS)
m.Get("/blob/:sha", context.RepoRefByType(context.RepoRefBlob), repo.DownloadByIDOrLFS)
// "/*" route is deprecated, and kept for backward compatibility
m.Get("/*", context.RepoRefByType(context.RepoRefLegacy), repo.SingleDownloadOrLFS)
}, repo.MustBeNotEmpty, reqRepoCodeReader)
m.Group("/raw", func() {
m.Get("/branch/*", context.RepoRefByType(context.RepoRefBranch), repo.SingleDownload)
m.Get("/tag/*", context.RepoRefByType(context.RepoRefTag), repo.SingleDownload)
m.Get("/commit/*", context.RepoRefByType(context.RepoRefCommit), repo.SingleDownload)
m.Get("/blob/:sha", context.RepoRefByType(context.RepoRefBlob), repo.DownloadByID)
// "/*" route is deprecated, and kept for backward compatibility
m.Get("/*", context.RepoRefByType(context.RepoRefLegacy), repo.SingleDownload)
}, repo.MustBeNotEmpty, reqRepoCodeReader)
m.Group("/commits", func() {
m.Get("/branch/*", context.RepoRefByType(context.RepoRefBranch), repo.RefCommits)
m.Get("/tag/*", context.RepoRefByType(context.RepoRefTag), repo.RefCommits)
m.Get("/commit/*", context.RepoRefByType(context.RepoRefCommit), repo.RefCommits)
// "/*" route is deprecated, and kept for backward compatibility
m.Get("/*", context.RepoRefByType(context.RepoRefLegacy), repo.RefCommits)
}, repo.MustBeNotEmpty, reqRepoCodeReader)
m.Group("", func() {
m.Get("/graph", repo.Graph)
m.Get("/commit/:sha([a-f0-9]{7,40})$", repo.SetEditorconfigIfExists, repo.SetDiffViewStyle, repo.Diff)
}, repo.MustBeNotEmpty, context.RepoRef(), reqRepoCodeReader)
m.Group("/src", func() {
m.Get("/branch/*", context.RepoRefByType(context.RepoRefBranch), repo.Home)
m.Get("/tag/*", context.RepoRefByType(context.RepoRefTag), repo.Home)
m.Get("/commit/*", context.RepoRefByType(context.RepoRefCommit), repo.Home)
// "/*" route is deprecated, and kept for backward compatibility
m.Get("/*", context.RepoRefByType(context.RepoRefLegacy), repo.Home)
}, repo.SetEditorconfigIfExists)
m.Group("", func() {
m.Get("/forks", repo.Forks)
}, context.RepoRef(), reqRepoCodeReader)
m.Get("/commit/:sha([a-f0-9]{7,40})\\.:ext(patch|diff)",
repo.MustBeNotEmpty, reqRepoCodeReader, repo.RawDiff)
m.Get("/compare/:before([a-z0-9]{40})\\.\\.\\.:after([a-z0-9]{40})", repo.SetEditorconfigIfExists,
repo.SetDiffViewStyle, repo.MustBeNotEmpty, reqRepoCodeReader, repo.CompareDiff)
}, ignSignIn, context.RepoAssignment(), context.UnitTypes())
m.Group("/:username/:reponame", func() {
m.Get("/stars", repo.Stars)
m.Get("/watchers", repo.Watchers)
m.Get("/search", reqRepoCodeReader, repo.Search)
}, ignSignIn, context.RepoAssignment(), context.RepoRef(), context.UnitTypes())
m.Group("/:username", func() {
m.Group("/:reponame", func() {
m.Get("", repo.SetEditorconfigIfExists, repo.Home)
m.Get("\\.git$", repo.SetEditorconfigIfExists, repo.Home)
}, ignSignIn, context.RepoAssignment(), context.RepoRef(), context.UnitTypes())
m.Group("/:reponame", func() {
m.Group("\\.git/info/lfs", func() {
m.Post("/objects/batch", lfs.BatchHandler)
m.Get("/objects/:oid/:filename", lfs.ObjectOidHandler)
m.Any("/objects/:oid", lfs.ObjectOidHandler)
m.Post("/objects", lfs.PostHandler)
m.Post("/verify", lfs.VerifyHandler)
m.Group("/locks", func() {
m.Get("/", lfs.GetListLockHandler)
m.Post("/", lfs.PostLockHandler)
m.Post("/verify", lfs.VerifyLockHandler)
m.Post("/:lid/unlock", lfs.UnLockHandler)
}, context.RepoAssignment())
m.Any("/*", func(ctx *context.Context) {
ctx.NotFound("", nil)
})
}, ignSignInAndCsrf)
m.Any("/*", ignSignInAndCsrf, repo.HTTP)
m.Head("/tasks/trigger", repo.TriggerTask)
})
})
// ***** END: Repository *****
m.Group("/notifications", func() {
m.Get("", user.Notifications)
m.Post("/status", user.NotificationStatusPost)
m.Post("/purge", user.NotificationPurgePost)
}, reqSignIn)
if setting.API.EnableSwagger {
m.Get("/swagger.v1.json", templates.JSONRenderer(), routers.SwaggerV1Json)
}
m.Group("/api", func() {
apiv1.RegisterRoutes(m)
}, ignSignIn)
m.Group("/api/internal", func() {
		// The package name internal would be ideal here, but Go does not allow it, so we use private as the package name.
private.RegisterRoutes(m)
})
// robots.txt
m.Get("/robots.txt", func(ctx *context.Context) {
if setting.HasRobotsTxt {
ctx.ServeFileContent(path.Join(setting.CustomPath, "robots.txt"))
} else {
ctx.NotFound("", nil)
}
})
// Progressive Web App
m.Get("/manifest.json", templates.JSONRenderer(), func(ctx *context.Context) {
ctx.HTML(200, "pwa/manifest_json")
})
m.Get("/serviceworker.js", templates.JSRenderer(), func(ctx *context.Context) {
ctx.HTML(200, "pwa/serviceworker_js")
})
// prometheus metrics endpoint
if setting.Metrics.Enabled {
c := metrics.NewCollector()
prometheus.MustRegister(c)
m.Get("/metrics", routers.Metrics)
}
// Not found handler.
m.NotFound(routers.NotFound)
}
| routers/routes/routes.go | 1 | https://github.com/go-gitea/gitea/commit/c55bdca562e8fffaaf4028cf9563443027d52970 | [
0.0005370582803152502,
0.00018111703684553504,
0.00016555060574319214,
0.00017403147649019957,
0.00004370655005914159
] |
{
"id": 0,
"code_window": [
"\tctx.Data[\"Title\"] = milestone.Name\n",
"\tctx.Data[\"Milestone\"] = milestone\n",
"\n",
"\tissues(ctx, milestoneID, util.OptionalBoolNone)\n",
"\n",
"\tctx.HTML(200, tplMilestoneIssues)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\tperm, err := models.GetUserRepoPermission(ctx.Repo.Repository, ctx.User)\n",
"\tif err != nil {\n",
"\t\tctx.ServerError(\"GetUserRepoPermission\", err)\n",
"\t\treturn\n",
"\t}\n",
"\tctx.Data[\"CanWriteIssues\"] = perm.CanWriteIssuesOrPulls(false)\n",
"\tctx.Data[\"CanWritePulls\"] = perm.CanWriteIssuesOrPulls(true)\n",
"\n"
],
"file_path": "routers/repo/milestone.go",
"type": "add",
"edit_start_line_idx": 262
} | // Copyright (c) 2016 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
// MultiGet is a helper function to retrieve multiple keys from a
// KVReader, and might be used by KVStore implementations that don't
// have a native multi-get facility.
func MultiGet(kvreader KVReader, keys [][]byte) ([][]byte, error) {
	vals := make([][]byte, len(keys))
for i, key := range keys {
val, err := kvreader.Get(key)
if err != nil {
return nil, err
}
vals[i] = val
}
return vals, nil
}
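
// exampleMultiGet is an illustrative sketch (not from the upstream source):
// it shows the intended call pattern for the MultiGet helper from a store
// that has no native batch read. The reader argument is assumed to be any
// concrete KVReader implementation from this package.
func exampleMultiGet(reader KVReader) ([][]byte, error) {
	keys := [][]byte{[]byte("a"), []byte("b")}
	// One Get per key; the helper stops at the first error.
	return MultiGet(reader, keys)
}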
| vendor/github.com/blevesearch/bleve/index/store/multiget.go | 0 | https://github.com/go-gitea/gitea/commit/c55bdca562e8fffaaf4028cf9563443027d52970 | [
0.00017813673184718937,
0.00017262581968680024,
0.00016583815158810467,
0.00017326422675978392,
0.000004531417289399542
] |
{
"id": 0,
"code_window": [
"\tctx.Data[\"Title\"] = milestone.Name\n",
"\tctx.Data[\"Milestone\"] = milestone\n",
"\n",
"\tissues(ctx, milestoneID, util.OptionalBoolNone)\n",
"\n",
"\tctx.HTML(200, tplMilestoneIssues)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\tperm, err := models.GetUserRepoPermission(ctx.Repo.Repository, ctx.User)\n",
"\tif err != nil {\n",
"\t\tctx.ServerError(\"GetUserRepoPermission\", err)\n",
"\t\treturn\n",
"\t}\n",
"\tctx.Data[\"CanWriteIssues\"] = perm.CanWriteIssuesOrPulls(false)\n",
"\tctx.Data[\"CanWritePulls\"] = perm.CanWriteIssuesOrPulls(true)\n",
"\n"
],
"file_path": "routers/repo/milestone.go",
"type": "add",
"edit_start_line_idx": 262
} | // Copyright 2016 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"bytes"
"fmt"
"html/template"
"path"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/mailer"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"gopkg.in/gomail.v2"
"gopkg.in/macaron.v1"
)
const (
mailAuthActivate base.TplName = "auth/activate"
mailAuthActivateEmail base.TplName = "auth/activate_email"
mailAuthResetPassword base.TplName = "auth/reset_passwd"
mailAuthRegisterNotify base.TplName = "auth/register_notify"
mailIssueComment base.TplName = "issue/comment"
mailIssueMention base.TplName = "issue/mention"
mailNotifyCollaborator base.TplName = "notify/collaborator"
)
var templates *template.Template
// InitMailRender initializes the macaron mail renderer
func InitMailRender(tmpls *template.Template) {
templates = tmpls
}
// SendTestMail sends a test mail
func SendTestMail(email string) error {
return gomail.Send(mailer.Sender, mailer.NewMessage([]string{email}, "Gitea Test Email!", "Gitea Test Email!").Message)
}
// SendUserMail sends a mail to the user
func SendUserMail(c *macaron.Context, u *User, tpl base.TplName, code, subject, info string) {
data := map[string]interface{}{
"Username": u.DisplayName(),
"ActiveCodeLives": base.MinutesToFriendly(setting.Service.ActiveCodeLives, c.Locale.Language()),
"ResetPwdCodeLives": base.MinutesToFriendly(setting.Service.ResetPwdCodeLives, c.Locale.Language()),
"Code": code,
}
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(tpl), data); err != nil {
log.Error(3, "Template: %v", err)
return
}
msg := mailer.NewMessage([]string{u.Email}, subject, content.String())
msg.Info = fmt.Sprintf("UID: %d, %s", u.ID, info)
mailer.SendAsync(msg)
}
// SendActivateAccountMail sends an activation mail to the user (new user registration)
func SendActivateAccountMail(c *macaron.Context, u *User) {
SendUserMail(c, u, mailAuthActivate, u.GenerateActivateCode(), c.Tr("mail.activate_account"), "activate account")
}
// SendResetPasswordMail sends a password reset mail to the user
func SendResetPasswordMail(c *macaron.Context, u *User) {
SendUserMail(c, u, mailAuthResetPassword, u.GenerateActivateCode(), c.Tr("mail.reset_password"), "reset password")
}
// SendActivateEmailMail sends confirmation email to confirm new email address
func SendActivateEmailMail(c *macaron.Context, u *User, email *EmailAddress) {
data := map[string]interface{}{
"Username": u.DisplayName(),
"ActiveCodeLives": base.MinutesToFriendly(setting.Service.ActiveCodeLives, c.Locale.Language()),
"Code": u.GenerateEmailActivateCode(email.Email),
"Email": email.Email,
}
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(mailAuthActivateEmail), data); err != nil {
log.Error(3, "Template: %v", err)
return
}
msg := mailer.NewMessage([]string{email.Email}, c.Tr("mail.activate_email"), content.String())
msg.Info = fmt.Sprintf("UID: %d, activate email", u.ID)
mailer.SendAsync(msg)
}
// SendRegisterNotifyMail sends a notification e-mail to a user whose account was created by an admin.
func SendRegisterNotifyMail(c *macaron.Context, u *User) {
data := map[string]interface{}{
"Username": u.DisplayName(),
}
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(mailAuthRegisterNotify), data); err != nil {
log.Error(3, "Template: %v", err)
return
}
msg := mailer.NewMessage([]string{u.Email}, c.Tr("mail.register_notify"), content.String())
msg.Info = fmt.Sprintf("UID: %d, registration notify", u.ID)
mailer.SendAsync(msg)
}
// SendCollaboratorMail sends mail notification to new collaborator.
func SendCollaboratorMail(u, doer *User, repo *Repository) {
repoName := path.Join(repo.Owner.Name, repo.Name)
subject := fmt.Sprintf("%s added you to %s", doer.DisplayName(), repoName)
data := map[string]interface{}{
"Subject": subject,
"RepoName": repoName,
"Link": repo.HTMLURL(),
}
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(mailNotifyCollaborator), data); err != nil {
log.Error(3, "Template: %v", err)
return
}
msg := mailer.NewMessage([]string{u.Email}, subject, content.String())
msg.Info = fmt.Sprintf("UID: %d, add collaborator", u.ID)
mailer.SendAsync(msg)
}
func composeTplData(subject, body, link string) map[string]interface{} {
data := make(map[string]interface{}, 10)
data["Subject"] = subject
data["Body"] = body
data["Link"] = link
return data
}
func composeIssueCommentMessage(issue *Issue, doer *User, content string, comment *Comment, tplName base.TplName, tos []string, info string) *mailer.Message {
subject := issue.mailSubject()
issue.LoadRepo()
body := string(markup.RenderByType(markdown.MarkupName, []byte(content), issue.Repo.HTMLURL(), issue.Repo.ComposeMetas()))
data := make(map[string]interface{}, 10)
if comment != nil {
data = composeTplData(subject, body, issue.HTMLURL()+"#"+comment.HashTag())
} else {
data = composeTplData(subject, body, issue.HTMLURL())
}
data["Doer"] = doer
var mailBody bytes.Buffer
if err := templates.ExecuteTemplate(&mailBody, string(tplName), data); err != nil {
log.Error(3, "Template: %v", err)
}
msg := mailer.NewMessageFrom(tos, doer.DisplayName(), setting.MailService.FromEmail, subject, mailBody.String())
msg.Info = fmt.Sprintf("Subject: %s, %s", subject, info)
return msg
}
// SendIssueCommentMail composes and sends issue comment emails to target receivers.
func SendIssueCommentMail(issue *Issue, doer *User, content string, comment *Comment, tos []string) {
if len(tos) == 0 {
return
}
mailer.SendAsync(composeIssueCommentMessage(issue, doer, content, comment, mailIssueComment, tos, "issue comment"))
}
// SendIssueMentionMail composes and sends issue mention emails to target receivers.
func SendIssueMentionMail(issue *Issue, doer *User, content string, comment *Comment, tos []string) {
if len(tos) == 0 {
return
}
mailer.SendAsync(composeIssueCommentMessage(issue, doer, content, comment, mailIssueMention, tos, "issue mention"))
}
| models/mail.go | 0 | https://github.com/go-gitea/gitea/commit/c55bdca562e8fffaaf4028cf9563443027d52970 | [
0.0001934166793944314,
0.0001713621459202841,
0.00016205089923460037,
0.00017047201981768012,
0.000006285862582444679
] |
{
"id": 0,
"code_window": [
"\tctx.Data[\"Title\"] = milestone.Name\n",
"\tctx.Data[\"Milestone\"] = milestone\n",
"\n",
"\tissues(ctx, milestoneID, util.OptionalBoolNone)\n",
"\n",
"\tctx.HTML(200, tplMilestoneIssues)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\tperm, err := models.GetUserRepoPermission(ctx.Repo.Repository, ctx.User)\n",
"\tif err != nil {\n",
"\t\tctx.ServerError(\"GetUserRepoPermission\", err)\n",
"\t\treturn\n",
"\t}\n",
"\tctx.Data[\"CanWriteIssues\"] = perm.CanWriteIssuesOrPulls(false)\n",
"\tctx.Data[\"CanWritePulls\"] = perm.CanWriteIssuesOrPulls(true)\n",
"\n"
],
"file_path": "routers/repo/milestone.go",
"type": "add",
"edit_start_line_idx": 262
} | package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
// var x []sql.NullInt64
//	db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
driver.Valuer
sql.Scanner
} {
switch a := a.(type) {
case []bool:
return (*BoolArray)(&a)
case []float64:
return (*Float64Array)(&a)
case []int64:
return (*Int64Array)(&a)
case []string:
return (*StringArray)(&a)
case *[]bool:
return (*BoolArray)(a)
case *[]float64:
return (*Float64Array)(a)
case *[]int64:
return (*Int64Array)(a)
case *[]string:
return (*StringArray)(a)
}
return GenericArray{a}
}
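
// exampleArrayRoundTrip is an illustrative sketch (not from the upstream
// source): it wraps a slice with Array for use as an ANY($1) parameter and
// wraps a destination slice for Scan, matching the doc comment above. The
// db handle and the table t are assumed to exist.
func exampleArrayRoundTrip(db *sql.DB) ([]int64, error) {
	var ids []int64
	err := db.QueryRow(
		`SELECT ARRAY(SELECT id FROM t WHERE id = ANY($1))`,
		Array([]int64{235, 401}),
	).Scan(Array(&ids))
	return ids, err
}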
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
// to override the array delimiter used by GenericArray.
type ArrayDelimiter interface {
// ArrayDelimiter returns the delimiter character(s) for this element's type.
ArrayDelimiter() string
}
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool
// Scan implements the sql.Scanner interface.
func (a *BoolArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}
func (a *BoolArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(BoolArray, len(elems))
for i, v := range elems {
if len(v) != 1 {
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
switch v[0] {
case 't':
b[i] = true
case 'f':
b[i] = false
default:
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a BoolArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be exactly two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1+2*n)
for i := 0; i < n; i++ {
b[2*i] = ','
if a[i] {
b[1+2*i] = 't'
} else {
b[1+2*i] = 'f'
}
}
b[0] = '{'
b[2*n] = '}'
return string(b), nil
}
return "{}", nil
}
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte
// Scan implements the sql.Scanner interface.
func (a *ByteaArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}
func (a *ByteaArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(ByteaArray, len(elems))
for i, v := range elems {
b[i], err = parseBytea(v)
if err != nil {
return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer.
func (a ByteaArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
size := 1 + 6*n
for _, x := range a {
size += hex.EncodedLen(len(x))
}
b := make([]byte, size)
for i, s := 0, b; i < n; i++ {
o := copy(s, `,"\\x`)
o += hex.Encode(s[o:], a[i])
s[o] = '"'
s = s[o+1:]
}
b[0] = '{'
b[size-1] = '}'
return string(b), nil
}
return "{}", nil
}
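
// exampleByteaValue is an illustrative sketch (not from the upstream
// source): each element is rendered in the hex bytea format noted above,
// so the two elements below encode to "\\xdead" and "\\xbeef" inside the
// braces of the array literal.
func exampleByteaValue() (driver.Value, error) {
	return ByteaArray{{0xde, 0xad}, {0xbe, 0xef}}.Value()
}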
// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64
// Scan implements the sql.Scanner interface.
func (a *Float64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}
func (a *Float64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Float64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
var assign func([]byte, reflect.Value) error
var del = ","
// TODO calculate the assign function for other types
// TODO repeat this section on the element type of arrays or slices (multidimensional)
{
if reflect.PtrTo(rt).Implements(typeSQLScanner) {
// dest is always addressable because it is an element of a slice.
assign = func(src []byte, dest reflect.Value) (err error) {
ss := dest.Addr().Interface().(sql.Scanner)
if src == nil {
err = ss.Scan(nil)
} else {
err = ss.Scan(src)
}
return
}
goto FoundType
}
assign = func([]byte, reflect.Value) error {
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
}
}
FoundType:
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
return rt, assign, del
}
// Scan implements the sql.Scanner interface.
func (a GenericArray) Scan(src interface{}) error {
dpv := reflect.ValueOf(a.A)
switch {
case dpv.Kind() != reflect.Ptr:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
case dpv.IsNil():
return fmt.Errorf("pq: destination %T is nil", a.A)
}
dv := dpv.Elem()
switch dv.Kind() {
case reflect.Slice:
case reflect.Array:
default:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
}
switch src := src.(type) {
case []byte:
return a.scanBytes(src, dv)
case string:
return a.scanBytes([]byte(src), dv)
case nil:
if dv.Kind() == reflect.Slice {
dv.Set(reflect.Zero(dv.Type()))
return nil
}
}
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
dims, elems, err := parseArray(src, []byte(del))
if err != nil {
return err
}
// TODO allow multidimensional
if len(dims) > 1 {
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
}
// Treat a zero-dimensional array like an array with a single dimension of zero.
if len(dims) == 0 {
dims = append(dims, 0)
}
for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
switch rt.Kind() {
case reflect.Slice:
case reflect.Array:
if rt.Len() != dims[i] {
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
}
default:
// TODO handle multidimensional
}
}
values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
for i, e := range elems {
if err := assign(e, values.Index(i)); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
// TODO handle multidimensional
switch dv.Kind() {
case reflect.Slice:
dv.Set(values.Slice(0, dims[0]))
case reflect.Array:
for i := 0; i < dims[0]; i++ {
dv.Index(i).Set(values.Index(i))
}
}
return nil
}
// Value implements the driver.Valuer interface.
func (a GenericArray) Value() (driver.Value, error) {
if a.A == nil {
return nil, nil
}
rv := reflect.ValueOf(a.A)
switch rv.Kind() {
case reflect.Slice:
if rv.IsNil() {
return nil, nil
}
case reflect.Array:
default:
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
}
if n := rv.Len(); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 0, 1+2*n)
b, _, err := appendArray(b, rv, n)
return string(b), err
}
return "{}", nil
}
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
type Int64Array []int64
// Scan implements the sql.Scanner interface.
func (a *Int64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
}
func (a *Int64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Int64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendInt(b, a[0], 10)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendInt(b, a[i], 10)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// StringArray represents a one-dimensional array of the PostgreSQL character types.
type StringArray []string
// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
}
func (a *StringArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(StringArray, len(elems))
for i, v := range elems {
if b[i] = string(v); v == nil {
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a StringArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+3*n)
b[0] = '{'
b = appendArrayQuotedBytes(b, []byte(a[0]))
for i := 1; i < n; i++ {
b = append(b, ',')
b = appendArrayQuotedBytes(b, []byte(a[i]))
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
var del string
var err error
b = append(b, '{')
if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
return b, del, err
}
for i := 1; i < n; i++ {
b = append(b, del...)
if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
return b, del, err
}
}
return append(b, '}'), del, nil
}
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
if n := rv.Len(); n > 0 {
return appendArray(b, rv, n)
}
return b, "", nil
}
}
var del = ","
var err error
var iv interface{} = rv.Interface()
if ad, ok := iv.(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
return b, del, err
}
switch v := iv.(type) {
case nil:
return append(b, "NULL"...), del, nil
case []byte:
return appendArrayQuotedBytes(b, v), del, nil
case string:
return appendArrayQuotedBytes(b, []byte(v)), del, nil
}
b, err = appendValue(b, iv)
return b, del, err
}
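
// exampleQuotedElement is an illustrative sketch (not from the upstream
// source): string elements are double-quoted with backslash escaping as
// described above, so a value containing a double quote is emitted as
// "he said \"hi\"" between the braces.
func exampleQuotedElement() (string, error) {
	b, _, err := appendArrayElement([]byte{'{'}, reflect.ValueOf(`he said "hi"`))
	return string(append(b, '}')), err
}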
func appendArrayQuotedBytes(b, v []byte) []byte {
b = append(b, '"')
for {
i := bytes.IndexAny(v, `"\`)
if i < 0 {
b = append(b, v...)
break
}
if i > 0 {
b = append(b, v[:i]...)
}
b = append(b, '\\', v[i])
v = v[i+1:]
}
return append(b, '"')
}
func appendValue(b []byte, v driver.Value) ([]byte, error) {
return append(b, encode(nil, v, 0)...), nil
}
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
var depth, i int
if len(src) < 1 || src[0] != '{' {
return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
}
Open:
for i < len(src) {
switch src[i] {
case '{':
depth++
i++
case '}':
elems = make([][]byte, 0)
goto Close
default:
break Open
}
}
dims = make([]int, i)
Element:
for i < len(src) {
switch src[i] {
case '{':
if depth == len(dims) {
break Element
}
depth++
dims[depth-1] = 0
i++
case '"':
var elem = []byte{}
var escape bool
for i++; i < len(src); i++ {
if escape {
elem = append(elem, src[i])
escape = false
} else {
switch src[i] {
default:
elem = append(elem, src[i])
case '\\':
escape = true
case '"':
elems = append(elems, elem)
i++
break Element
}
}
}
default:
for start := i; i < len(src); i++ {
if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
elem := src[start:i]
if len(elem) == 0 {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
if bytes.Equal(elem, []byte("NULL")) {
elem = nil
}
elems = append(elems, elem)
break Element
}
}
}
}
for i < len(src) {
if bytes.HasPrefix(src[i:], del) && depth > 0 {
dims[depth-1]++
i += len(del)
goto Element
} else if src[i] == '}' && depth > 0 {
dims[depth-1]++
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
Close:
for i < len(src) {
if src[i] == '}' && depth > 0 {
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
if depth > 0 {
err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
}
if err == nil {
for _, d := range dims {
if (len(elems) % d) != 0 {
err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
}
}
}
return
}
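
// exampleParseArray is an illustrative sketch (not from the upstream
// source): it feeds parseArray the backend text form of a one-dimensional
// integer array, which yields dims [3] and the raw elements 1, 2 and 3.
func exampleParseArray() ([]int, [][]byte, error) {
	return parseArray([]byte(`{1,2,3}`), []byte{','})
}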
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
dims, elems, err := parseArray(src, del)
if err != nil {
return nil, err
}
if len(dims) > 1 {
return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
}
return elems, err
}
| vendor/github.com/lib/pq/array.go | 0 | https://github.com/go-gitea/gitea/commit/c55bdca562e8fffaaf4028cf9563443027d52970 | [
0.00020852508896496147,
0.00017273453704547137,
0.00016463035717606544,
0.00017293599375989288,
0.0000054827360145282
] |