hunk
dict | file
stringlengths 0
11.8M
| file_path
stringlengths 2
234
| label
int64 0
1
| commit_url
stringlengths 74
103
| dependency_score
sequencelengths 5
5
|
---|---|---|---|---|---|
{
"id": 1,
"code_window": [
"\t\t\te.rowKeyCache[i] = rk\n",
"\t\t}\n",
"\t}\n",
"\tb, err := codec.EncodeValue(b, row.Data...)\n",
"\treturn b, errors.Trace(err)\n",
"}\n",
"\n",
"func (e *HashJoinExec) decodeRow(data []byte) (*Row, error) {\n",
"\trow := new(Row)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tloc := e.ctx.GetSessionVars().GetTimeZone()\n",
"\tfor _, datum := range row.Data {\n",
"\t\ttmp, err := tablecodec.EncodeValue(datum, loc)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n",
"\t\tb = append(b, tmp...)\n",
"\t}\n",
"\treturn b, nil\n"
],
"file_path": "executor/join.go",
"type": "replace",
"edit_start_line_idx": 279
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"fmt"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/tidb"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/types"
)
func (s *testSuite) TestNestedLoopJoin(c *C) {
bigExec := &MockExec{Rows: []*executor.Row{
{Data: types.MakeDatums(1)},
{Data: types.MakeDatums(2)},
{Data: types.MakeDatums(3)},
{Data: types.MakeDatums(4)},
{Data: types.MakeDatums(5)},
{Data: types.MakeDatums(6)},
}}
smallExec := &MockExec{Rows: []*executor.Row{
{Data: types.MakeDatums(1)},
{Data: types.MakeDatums(2)},
{Data: types.MakeDatums(3)},
{Data: types.MakeDatums(4)},
{Data: types.MakeDatums(5)},
{Data: types.MakeDatums(6)},
}}
col0 := &expression.Column{Index: 0, RetType: types.NewFieldType(mysql.TypeLong)}
col1 := &expression.Column{Index: 1, RetType: types.NewFieldType(mysql.TypeLong)}
con := &expression.Constant{Value: types.NewDatum(6), RetType: types.NewFieldType(mysql.TypeLong)}
bigFilter, _ := expression.NewFunction(mock.NewContext(), ast.LT, types.NewFieldType(mysql.TypeTiny), col0, con)
smallFilter := bigFilter.Clone()
otherFilter, _ := expression.NewFunction(mock.NewContext(), ast.EQ, types.NewFieldType(mysql.TypeTiny), col0, col1)
join := &executor.NestedLoopJoinExec{
BigExec: bigExec,
SmallExec: smallExec,
Ctx: mock.NewContext(),
BigFilter: []expression.Expression{bigFilter},
SmallFilter: []expression.Expression{smallFilter},
OtherFilter: []expression.Expression{otherFilter},
}
row, err := join.Next()
c.Check(err, IsNil)
c.Check(row, NotNil)
c.Check(fmt.Sprintf("%v %v", row.Data[0].GetValue(), row.Data[1].GetValue()), Equals, "1 1")
row, err = join.Next()
c.Check(err, IsNil)
c.Check(row, NotNil)
c.Check(fmt.Sprintf("%v %v", row.Data[0].GetValue(), row.Data[1].GetValue()), Equals, "2 2")
row, err = join.Next()
c.Check(err, IsNil)
c.Check(row, NotNil)
c.Check(fmt.Sprintf("%v %v", row.Data[0].GetValue(), row.Data[1].GetValue()), Equals, "3 3")
row, err = join.Next()
c.Check(err, IsNil)
c.Check(row, NotNil)
c.Check(fmt.Sprintf("%v %v", row.Data[0].GetValue(), row.Data[1].GetValue()), Equals, "4 4")
row, err = join.Next()
c.Check(err, IsNil)
c.Check(row, NotNil)
c.Check(fmt.Sprintf("%v %v", row.Data[0].GetValue(), row.Data[1].GetValue()), Equals, "5 5")
row, err = join.Next()
c.Check(err, IsNil)
c.Check(row, IsNil)
}
func (s *testSuite) TestJoinPanic(c *C) {
defer func() {
s.cleanEnv(c)
testleak.AfterTest(c)()
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists events")
tk.MustExec("create table events (clock int, source int)")
tk.MustQuery("SELECT * FROM events e JOIN (SELECT MAX(clock) AS clock FROM events e2 GROUP BY e2.source) e3 ON e3.clock=e.clock")
}
func (s *testSuite) TestJoin(c *C) {
defer func() {
s.cleanEnv(c)
testleak.AfterTest(c)()
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int)")
tk.MustExec("insert t values (1)")
tests := []struct {
sql string
result [][]interface{}
}{
{
"select 1 from t as a left join t as b on 0",
testkit.Rows("1"),
},
{
"select 1 from t as a join t as b on 1",
testkit.Rows("1"),
},
}
for _, tt := range tests {
result := tk.MustQuery(tt.sql)
result.Check(tt.result)
}
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(c1 int, c2 int)")
tk.MustExec("create table t1(c1 int, c2 int)")
tk.MustExec("insert into t values(1,1),(2,2)")
tk.MustExec("insert into t1 values(2,3),(4,4)")
result := tk.MustQuery("select * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
result.Check(testkit.Rows("1 1 <nil> <nil>"))
result = tk.MustQuery("select * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
result.Check(testkit.Rows("<nil> <nil> 1 1"))
result = tk.MustQuery("select * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1")
result.Check(testkit.Rows("1 1 <nil> <nil>", "2 2 2 3"))
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t1 (c1 int, c2 int)")
tk.MustExec("create table t2 (c1 int, c2 int)")
tk.MustExec("create table t3 (c1 int, c2 int)")
tk.MustExec("insert into t1 values (1,1), (2,2), (3,3)")
tk.MustExec("insert into t2 values (1,1), (3,3), (5,5)")
tk.MustExec("insert into t3 values (1,1), (5,5), (9,9)")
result = tk.MustQuery("select * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2;")
result.Check(testkit.Rows("<nil> <nil> <nil> <nil> 5 5", "<nil> <nil> <nil> <nil> 9 9", "1 1 1 1 1 1"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (c1 int)")
tk.MustExec("insert into t1 values (1), (1), (1)")
result = tk.MustQuery("select * from t1 a join t1 b on a.c1 = b.c1;")
result.Check(testkit.Rows("1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1"))
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(c1 int,c2 double)")
tk.MustExec("create table t1(c1 double,c2 int)")
tk.MustExec("insert into t values (1, 2), (1, NULL)")
tk.MustExec("insert into t1 values (1, 2), (1, NULL)")
result = tk.MustQuery("select * from t a , t1 b where (a.c1, a.c2) = (b.c1, b.c2);")
result.Check(testkit.Rows("1 2 1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(c1 int, index k(c1))")
tk.MustExec("create table t1(c1 int)")
tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7)")
tk.MustExec("insert into t1 values (1),(2),(3),(4),(5),(6),(7)")
result = tk.MustQuery("select a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1;")
result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7"))
// Test race.
result = tk.MustQuery("select a.c1 from t a , t1 b where a.c1 = b.c1 and a.c1 + b.c1 > 5 order by b.c1")
result.Check(testkit.Rows("3", "4", "5", "6", "7"))
result = tk.MustQuery("select a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1;")
result.Check(testkit.Rows("1", "2", "3"))
plan.AllowCartesianProduct = false
_, err := tk.Exec("select * from t, t1")
c.Check(plan.ErrCartesianProductUnsupported.Equal(err), IsTrue)
_, err = tk.Exec("select * from t left join t1 on 1")
c.Check(plan.ErrCartesianProductUnsupported.Equal(err), IsTrue)
_, err = tk.Exec("select * from t right join t1 on 1")
c.Check(plan.ErrCartesianProductUnsupported.Equal(err), IsTrue)
plan.AllowCartesianProduct = true
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(c1 int)")
tk.MustExec("create table t1(c1 int unsigned)")
tk.MustExec("insert into t values (1)")
tk.MustExec("insert into t1 values (1)")
result = tk.MustQuery("select t.c1 from t , t1 where t.c1 = t1.c1")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t,t2,t1")
tk.MustExec("create table t(c1 int)")
tk.MustExec("create table t1(c1 int, c2 int)")
tk.MustExec("create table t2(c1 int, c2 int)")
tk.MustExec("insert into t1 values(1,2),(2,3),(3,4)")
tk.MustExec("insert into t2 values(1,0),(2,0),(3,0)")
tk.MustExec("insert into t values(1),(2),(3)")
result = tk.MustQuery("select * from t1 , t2 where t2.c1 = t1.c1 and t2.c2 = 0 and t1.c2 in (select * from t)")
result.Sort().Check(testkit.Rows("1 2 1 0", "2 3 2 0"))
result = tk.MustQuery("select * from t1 , t2 where t2.c1 = t1.c1 and t2.c2 = 0 and t1.c1 = 1 order by t1.c2 limit 1")
result.Sort().Check(testkit.Rows("1 2 1 0"))
tk.MustExec("drop table if exists t, t1")
tk.MustExec("create table t(a int primary key, b int)")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("insert into t values(1, 1), (2, 2), (3, 3)")
tk.MustExec("insert into t1 values(1, 2), (1, 3), (3, 4), (4, 5)")
// The physical plans of the two sql are tested at physical_plan_test.go
tk.MustQuery("select /*+ TIDB_INLJ(t, t1) */ * from t join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "3 3 3 4"))
tk.MustQuery("select /*+ TIDB_INLJ(t1) */ * from t1 join t on t.a=t1.a and t.a < t1.b").Check(testkit.Rows("1 2 1 1", "1 3 1 1", "3 4 3 3"))
tk.MustQuery("select /*+ TIDB_INLJ(t, t1) */ * from t right outer join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "3 3 3 4", "<nil> <nil> 4 5"))
tk.MustQuery("select /*+ TIDB_INLJ(t, t1) */ avg(t.b) from t right outer join t1 on t.a=t1.a").Check(testkit.Rows("1.6667"))
// Test that two conflict hints will return error
_, err = tk.Exec("select /*+ TIDB_INLJ(t) TIDB_SMJ(t) */ * from t join t1 on t.a=t1.a")
c.Assert(err, NotNil)
}
func (s *testSuite) TestMultiJoin(c *C) {
defer func() {
s.cleanEnv(c)
testleak.AfterTest(c)()
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t35(a35 int primary key, b35 int, x35 int)")
tk.MustExec("create table t40(a40 int primary key, b40 int, x40 int)")
tk.MustExec("create table t14(a14 int primary key, b14 int, x14 int)")
tk.MustExec("create table t42(a42 int primary key, b42 int, x42 int)")
tk.MustExec("create table t15(a15 int primary key, b15 int, x15 int)")
tk.MustExec("create table t7(a7 int primary key, b7 int, x7 int)")
tk.MustExec("create table t64(a64 int primary key, b64 int, x64 int)")
tk.MustExec("create table t19(a19 int primary key, b19 int, x19 int)")
tk.MustExec("create table t9(a9 int primary key, b9 int, x9 int)")
tk.MustExec("create table t8(a8 int primary key, b8 int, x8 int)")
tk.MustExec("create table t57(a57 int primary key, b57 int, x57 int)")
tk.MustExec("create table t37(a37 int primary key, b37 int, x37 int)")
tk.MustExec("create table t44(a44 int primary key, b44 int, x44 int)")
tk.MustExec("create table t38(a38 int primary key, b38 int, x38 int)")
tk.MustExec("create table t18(a18 int primary key, b18 int, x18 int)")
tk.MustExec("create table t62(a62 int primary key, b62 int, x62 int)")
tk.MustExec("create table t4(a4 int primary key, b4 int, x4 int)")
tk.MustExec("create table t48(a48 int primary key, b48 int, x48 int)")
tk.MustExec("create table t31(a31 int primary key, b31 int, x31 int)")
tk.MustExec("create table t16(a16 int primary key, b16 int, x16 int)")
tk.MustExec("create table t12(a12 int primary key, b12 int, x12 int)")
tk.MustExec("insert into t35 values(1,1,1)")
tk.MustExec("insert into t40 values(1,1,1)")
tk.MustExec("insert into t14 values(1,1,1)")
tk.MustExec("insert into t42 values(1,1,1)")
tk.MustExec("insert into t15 values(1,1,1)")
tk.MustExec("insert into t7 values(1,1,1)")
tk.MustExec("insert into t64 values(1,1,1)")
tk.MustExec("insert into t19 values(1,1,1)")
tk.MustExec("insert into t9 values(1,1,1)")
tk.MustExec("insert into t8 values(1,1,1)")
tk.MustExec("insert into t57 values(1,1,1)")
tk.MustExec("insert into t37 values(1,1,1)")
tk.MustExec("insert into t44 values(1,1,1)")
tk.MustExec("insert into t38 values(1,1,1)")
tk.MustExec("insert into t18 values(1,1,1)")
tk.MustExec("insert into t62 values(1,1,1)")
tk.MustExec("insert into t4 values(1,1,1)")
tk.MustExec("insert into t48 values(1,1,1)")
tk.MustExec("insert into t31 values(1,1,1)")
tk.MustExec("insert into t16 values(1,1,1)")
tk.MustExec("insert into t12 values(1,1,1)")
tk.MustExec("insert into t35 values(7,7,7)")
tk.MustExec("insert into t40 values(7,7,7)")
tk.MustExec("insert into t14 values(7,7,7)")
tk.MustExec("insert into t42 values(7,7,7)")
tk.MustExec("insert into t15 values(7,7,7)")
tk.MustExec("insert into t7 values(7,7,7)")
tk.MustExec("insert into t64 values(7,7,7)")
tk.MustExec("insert into t19 values(7,7,7)")
tk.MustExec("insert into t9 values(7,7,7)")
tk.MustExec("insert into t8 values(7,7,7)")
tk.MustExec("insert into t57 values(7,7,7)")
tk.MustExec("insert into t37 values(7,7,7)")
tk.MustExec("insert into t44 values(7,7,7)")
tk.MustExec("insert into t38 values(7,7,7)")
tk.MustExec("insert into t18 values(7,7,7)")
tk.MustExec("insert into t62 values(7,7,7)")
tk.MustExec("insert into t4 values(7,7,7)")
tk.MustExec("insert into t48 values(7,7,7)")
tk.MustExec("insert into t31 values(7,7,7)")
tk.MustExec("insert into t16 values(7,7,7)")
tk.MustExec("insert into t12 values(7,7,7)")
result := tk.MustQuery(`SELECT x4,x8,x38,x44,x31,x9,x57,x48,x19,x40,x14,x12,x7,x64,x37,x18,x62,x35,x42,x15,x16 FROM
t35,t40,t14,t42,t15,t7,t64,t19,t9,t8,t57,t37,t44,t38,t18,t62,t4,t48,t31,t16,t12
WHERE b48=a57
AND a4=b19
AND a14=b16
AND b37=a48
AND a40=b42
AND a31=7
AND a15=b40
AND a38=b8
AND b15=a31
AND b64=a18
AND b12=a44
AND b7=a8
AND b35=a16
AND a12=b14
AND a64=b57
AND b62=a7
AND a35=b38
AND b9=a19
AND a62=b18
AND b4=a37
AND b44=a42`)
result.Check(testkit.Rows("7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7"))
}
func (s *testSuite) TestSubquerySameTable(c *C) {
defer func() {
s.cleanEnv(c)
testleak.AfterTest(c)()
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int)")
tk.MustExec("insert t values (1), (2)")
result := tk.MustQuery("select a from t where exists(select 1 from t as x where x.a < t.a)")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select a from t where not exists(select 1 from t as x where x.a < t.a)")
result.Check(testkit.Rows("1"))
}
func (s *testSuite) TestSubquery(c *C) {
plan.JoinConcurrency = 1
defer func() {
s.cleanEnv(c)
testleak.AfterTest(c)()
plan.JoinConcurrency = 5
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (2, 2)")
tk.MustExec("insert t values (3, 4)")
tk.MustExec("commit")
result := tk.MustQuery("select * from t where exists(select * from t k where t.c = k.c having sum(c) = 1)")
result.Check(testkit.Rows("1 1"))
result = tk.MustQuery("select * from t where exists(select k.c, k.d from t k, t p where t.c = k.d)")
result.Check(testkit.Rows("1 1", "2 2"))
result = tk.MustQuery("select 1 = (select count(*) from t where t.c = k.d) from t k")
result.Check(testkit.Rows("1", "1", "0"))
result = tk.MustQuery("select 1 = (select count(*) from t where exists( select * from t m where t.c = k.d)) from t k")
result.Check(testkit.Rows("1", "1", "0"))
result = tk.MustQuery("select t.c = any (select count(*) from t) from t")
result.Check(testkit.Rows("0", "0", "1"))
result = tk.MustQuery("select * from t where (t.c, 6) = any (select count(*), sum(t.c) from t)")
result.Check(testkit.Rows("3 4"))
result = tk.MustQuery("select t.c from t where (t.c) < all (select count(*) from t)")
result.Check(testkit.Rows("1", "2"))
result = tk.MustQuery("select t.c from t where (t.c, t.d) = any (select * from t)")
result.Check(testkit.Rows("1", "2", "3"))
result = tk.MustQuery("select t.c from t where (t.c, t.d) != all (select * from t)")
result.Check(testkit.Rows())
result = tk.MustQuery("select (select count(*) from t where t.c = k.d) from t k")
result.Check(testkit.Rows("1", "1", "0"))
result = tk.MustQuery("select t.c from t where (t.c, t.d) in (select * from t)")
result.Check(testkit.Rows("1", "2", "3"))
result = tk.MustQuery("select t.c from t where (t.c, t.d) not in (select * from t)")
result.Check(testkit.Rows())
// = all empty set is true
result = tk.MustQuery("select t.c from t where (t.c, t.d) != all (select * from t where d > 1000)")
result.Check(testkit.Rows("1", "2", "3"))
result = tk.MustQuery("select t.c from t where (t.c) < any (select c from t where d > 1000)")
result.Check(testkit.Rows())
tk.MustExec("insert t values (NULL, NULL)")
result = tk.MustQuery("select (t.c) < any (select c from t) from t")
result.Check(testkit.Rows("1", "1", "<nil>", "<nil>"))
result = tk.MustQuery("select (10) > all (select c from t) from t")
result.Check(testkit.Rows("<nil>", "<nil>", "<nil>", "<nil>"))
result = tk.MustQuery("select (c) > all (select c from t) from t")
result.Check(testkit.Rows("0", "0", "0", "<nil>"))
tk.MustExec("drop table if exists a")
tk.MustExec("create table a (c int, d int)")
tk.MustExec("insert a values (1, 2)")
tk.MustExec("drop table if exists b")
tk.MustExec("create table b (c int, d int)")
tk.MustExec("insert b values (2, 1)")
result = tk.MustQuery("select * from a b where c = (select d from b a where a.c = 2 and b.c = 1)")
result.Check(testkit.Rows("1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(c int)")
tk.MustExec("insert t values(10), (8), (7), (9), (11)")
result = tk.MustQuery("select * from t where 9 in (select c from t s where s.c < t.c limit 3)")
result.Check(testkit.Rows("10"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int, v int)")
tk.MustExec("insert into t values(1, 1), (2, 2), (3, 3)")
result = tk.MustQuery("select * from t where v=(select min(t1.v) from t t1, t t2, t t3 where t1.id=t2.id and t2.id=t3.id and t1.id=t.id)")
result.Check(testkit.Rows("1 1", "2 2", "3 3"))
result = tk.MustQuery("select exists (select t.id from t where s.id < 2 and t.id = s.id) from t s")
result.Check(testkit.Rows("1", "0", "0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(c int)")
result = tk.MustQuery("select exists(select count(*) from t)")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, v int)")
tk.MustExec("insert into t values(1, 1), (2, 2), (3, 3)")
result = tk.MustQuery("select (select t.id from t where s.id < 2 and t.id = s.id) from t s")
result.Check(testkit.Rows("1", "<nil>", "<nil>"))
rs, err := tk.Exec("select (select t.id from t where t.id = t.v and t.v != s.id) from t s")
c.Check(err, IsNil)
_, err = tidb.GetRows(rs)
c.Check(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists s")
tk.MustExec("create table t(id int)")
tk.MustExec("create table s(id int)")
tk.MustExec("insert into t values(1), (2)")
tk.MustExec("insert into s values(2), (2)")
result = tk.MustQuery("select id from t where(select count(*) from s where s.id = t.id) > 0")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select *, (select count(*) from s where id = t.id limit 1, 1) from t")
result.Check(testkit.Rows("1 <nil>", "2 <nil>"))
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists s")
tk.MustExec("create table t(id int primary key)")
tk.MustExec("create table s(id int)")
tk.MustExec("insert into t values(1), (2)")
tk.MustExec("insert into s values(2), (2)")
result = tk.MustQuery("select *, (select count(id) from s where id = t.id) from t")
result.Check(testkit.Rows("1 0", "2 2"))
result = tk.MustQuery("select *, 0 < any (select count(id) from s where id = t.id) from t")
result.Check(testkit.Rows("1 0", "2 1"))
result = tk.MustQuery("select (select count(*) from t k where t.id = id) from s, t where t.id = s.id limit 1")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t, s")
tk.MustExec("create table t(id int primary key)")
tk.MustExec("create table s(id int, index k(id))")
tk.MustExec("insert into t values(1), (2)")
tk.MustExec("insert into s values(2), (2)")
result = tk.MustQuery("select (select id from s where s.id = t.id order by s.id limit 1) from t")
result.Check(testkit.Rows("<nil>", "2"))
}
func (s *testSuite) TestInSubquery(c *C) {
defer func() {
s.cleanEnv(c)
testleak.AfterTest(c)()
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int)")
tk.MustExec("insert t values (1, 1), (2, 1)")
result := tk.MustQuery("select m1.a from t as m1 where m1.a in (select m2.b from t as m2)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select m1.a from t as m1 where (3, m1.b) not in (select * from t as m2)")
result.Check(testkit.Rows("1", "2"))
result = tk.MustQuery("select m1.a from t as m1 where m1.a in (select m2.b+? from t as m2)", 1)
result.Check(testkit.Rows("2"))
tk.MustExec(`prepare stmt1 from 'select m1.a from t as m1 where m1.a in (select m2.b+? from t as m2)'`)
tk.MustExec("set @a = 1")
result = tk.MustQuery(`execute stmt1 using @a;`)
result.Check(testkit.Rows("2"))
tk.MustExec("set @a = 0")
result = tk.MustQuery(`execute stmt1 using @a;`)
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select m1.a from t as m1 where m1.a in (1, 3, 5)")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a float)")
tk.MustExec("insert t1 values (281.37)")
tk.MustQuery("select a from t1 where (a in (select a from t1))").Check(testkit.Rows("281.37"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)")
result = tk.MustQuery("select a from t1 where (1,1) in (select * from t2 s , t2 t where t1.a = s.a and s.a = t.a limit 1)")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert into t1 values (1),(2)")
tk.MustExec("insert into t2 values (1),(2)")
tk.MustExec("set @@session.tidb_opt_insubquery_unfold = 1")
result = tk.MustQuery("select * from t1 where a in (select * from t2)")
result.Check(testkit.Rows("1", "2"))
result = tk.MustQuery("select * from t1 where a in (select * from t2 where false)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t1 where a not in (select * from t2 where false)")
result.Check(testkit.Rows("1", "2"))
tk.MustExec("set @@session.tidb_opt_insubquery_unfold = 0")
result = tk.MustQuery("select * from t1 where a in (select * from t2)")
result.Check(testkit.Rows("1", "2"))
result = tk.MustQuery("select * from t1 where a in (select * from t2 where false)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t1 where a not in (select * from t2 where false)")
result.Check(testkit.Rows("1", "2"))
}
func (s *testSuite) TestJoinLeak(c *C) {
savedConcurrency := plan.JoinConcurrency
plan.JoinConcurrency = 1
defer func() {
plan.JoinConcurrency = savedConcurrency
s.cleanEnv(c)
testleak.AfterTest(c)()
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (d int)")
tk.MustExec("begin")
for i := 0; i < 1002; i++ {
tk.MustExec("insert t values (1)")
}
tk.MustExec("commit")
rs, err := tk.Se.Execute("select * from t t1 left join (select 1) t2 on 1")
c.Assert(err, IsNil)
result := rs[0]
result.Next()
time.Sleep(100 * time.Millisecond)
result.Close()
}
| executor/join_test.go | 1 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.3053838014602661,
0.005666416138410568,
0.0001611829939065501,
0.00017305172514170408,
0.040414340794086456
] |
{
"id": 1,
"code_window": [
"\t\t\te.rowKeyCache[i] = rk\n",
"\t\t}\n",
"\t}\n",
"\tb, err := codec.EncodeValue(b, row.Data...)\n",
"\treturn b, errors.Trace(err)\n",
"}\n",
"\n",
"func (e *HashJoinExec) decodeRow(data []byte) (*Row, error) {\n",
"\trow := new(Row)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tloc := e.ctx.GetSessionVars().GetTimeZone()\n",
"\tfor _, datum := range row.Data {\n",
"\t\ttmp, err := tablecodec.EncodeValue(datum, loc)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n",
"\t\tb = append(b, tmp...)\n",
"\t}\n",
"\treturn b, nil\n"
],
"file_path": "executor/join.go",
"type": "replace",
"edit_start_line_idx": 279
} | // mksysnum_darwin.pl /usr/include/sys/syscall.h
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
// +build arm,darwin
package unix
const (
SYS_SYSCALL = 0
SYS_EXIT = 1
SYS_FORK = 2
SYS_READ = 3
SYS_WRITE = 4
SYS_OPEN = 5
SYS_CLOSE = 6
SYS_WAIT4 = 7
SYS_LINK = 9
SYS_UNLINK = 10
SYS_CHDIR = 12
SYS_FCHDIR = 13
SYS_MKNOD = 14
SYS_CHMOD = 15
SYS_CHOWN = 16
SYS_GETFSSTAT = 18
SYS_GETPID = 20
SYS_SETUID = 23
SYS_GETUID = 24
SYS_GETEUID = 25
SYS_PTRACE = 26
SYS_RECVMSG = 27
SYS_SENDMSG = 28
SYS_RECVFROM = 29
SYS_ACCEPT = 30
SYS_GETPEERNAME = 31
SYS_GETSOCKNAME = 32
SYS_ACCESS = 33
SYS_CHFLAGS = 34
SYS_FCHFLAGS = 35
SYS_SYNC = 36
SYS_KILL = 37
SYS_GETPPID = 39
SYS_DUP = 41
SYS_PIPE = 42
SYS_GETEGID = 43
SYS_SIGACTION = 46
SYS_GETGID = 47
SYS_SIGPROCMASK = 48
SYS_GETLOGIN = 49
SYS_SETLOGIN = 50
SYS_ACCT = 51
SYS_SIGPENDING = 52
SYS_SIGALTSTACK = 53
SYS_IOCTL = 54
SYS_REBOOT = 55
SYS_REVOKE = 56
SYS_SYMLINK = 57
SYS_READLINK = 58
SYS_EXECVE = 59
SYS_UMASK = 60
SYS_CHROOT = 61
SYS_MSYNC = 65
SYS_VFORK = 66
SYS_MUNMAP = 73
SYS_MPROTECT = 74
SYS_MADVISE = 75
SYS_MINCORE = 78
SYS_GETGROUPS = 79
SYS_SETGROUPS = 80
SYS_GETPGRP = 81
SYS_SETPGID = 82
SYS_SETITIMER = 83
SYS_SWAPON = 85
SYS_GETITIMER = 86
SYS_GETDTABLESIZE = 89
SYS_DUP2 = 90
SYS_FCNTL = 92
SYS_SELECT = 93
SYS_FSYNC = 95
SYS_SETPRIORITY = 96
SYS_SOCKET = 97
SYS_CONNECT = 98
SYS_GETPRIORITY = 100
SYS_BIND = 104
SYS_SETSOCKOPT = 105
SYS_LISTEN = 106
SYS_SIGSUSPEND = 111
SYS_GETTIMEOFDAY = 116
SYS_GETRUSAGE = 117
SYS_GETSOCKOPT = 118
SYS_READV = 120
SYS_WRITEV = 121
SYS_SETTIMEOFDAY = 122
SYS_FCHOWN = 123
SYS_FCHMOD = 124
SYS_SETREUID = 126
SYS_SETREGID = 127
SYS_RENAME = 128
SYS_FLOCK = 131
SYS_MKFIFO = 132
SYS_SENDTO = 133
SYS_SHUTDOWN = 134
SYS_SOCKETPAIR = 135
SYS_MKDIR = 136
SYS_RMDIR = 137
SYS_UTIMES = 138
SYS_FUTIMES = 139
SYS_ADJTIME = 140
SYS_GETHOSTUUID = 142
SYS_SETSID = 147
SYS_GETPGID = 151
SYS_SETPRIVEXEC = 152
SYS_PREAD = 153
SYS_PWRITE = 154
SYS_NFSSVC = 155
SYS_STATFS = 157
SYS_FSTATFS = 158
SYS_UNMOUNT = 159
SYS_GETFH = 161
SYS_QUOTACTL = 165
SYS_MOUNT = 167
SYS_CSOPS = 169
SYS_CSOPS_AUDITTOKEN = 170
SYS_WAITID = 173
SYS_KDEBUG_TRACE = 180
SYS_SETGID = 181
SYS_SETEGID = 182
SYS_SETEUID = 183
SYS_SIGRETURN = 184
SYS_CHUD = 185
SYS_FDATASYNC = 187
SYS_STAT = 188
SYS_FSTAT = 189
SYS_LSTAT = 190
SYS_PATHCONF = 191
SYS_FPATHCONF = 192
SYS_GETRLIMIT = 194
SYS_SETRLIMIT = 195
SYS_GETDIRENTRIES = 196
SYS_MMAP = 197
SYS_LSEEK = 199
SYS_TRUNCATE = 200
SYS_FTRUNCATE = 201
SYS___SYSCTL = 202
SYS_MLOCK = 203
SYS_MUNLOCK = 204
SYS_UNDELETE = 205
SYS_ATSOCKET = 206
SYS_ATGETMSG = 207
SYS_ATPUTMSG = 208
SYS_ATPSNDREQ = 209
SYS_ATPSNDRSP = 210
SYS_ATPGETREQ = 211
SYS_ATPGETRSP = 212
SYS_OPEN_DPROTECTED_NP = 216
SYS_GETATTRLIST = 220
SYS_SETATTRLIST = 221
SYS_GETDIRENTRIESATTR = 222
SYS_EXCHANGEDATA = 223
SYS_SEARCHFS = 225
SYS_DELETE = 226
SYS_COPYFILE = 227
SYS_FGETATTRLIST = 228
SYS_FSETATTRLIST = 229
SYS_POLL = 230
SYS_WATCHEVENT = 231
SYS_WAITEVENT = 232
SYS_MODWATCH = 233
SYS_GETXATTR = 234
SYS_FGETXATTR = 235
SYS_SETXATTR = 236
SYS_FSETXATTR = 237
SYS_REMOVEXATTR = 238
SYS_FREMOVEXATTR = 239
SYS_LISTXATTR = 240
SYS_FLISTXATTR = 241
SYS_FSCTL = 242
SYS_INITGROUPS = 243
SYS_POSIX_SPAWN = 244
SYS_FFSCTL = 245
SYS_NFSCLNT = 247
SYS_FHOPEN = 248
SYS_MINHERIT = 250
SYS_SEMSYS = 251
SYS_MSGSYS = 252
SYS_SHMSYS = 253
SYS_SEMCTL = 254
SYS_SEMGET = 255
SYS_SEMOP = 256
SYS_MSGCTL = 258
SYS_MSGGET = 259
SYS_MSGSND = 260
SYS_MSGRCV = 261
SYS_SHMAT = 262
SYS_SHMCTL = 263
SYS_SHMDT = 264
SYS_SHMGET = 265
SYS_SHM_OPEN = 266
SYS_SHM_UNLINK = 267
SYS_SEM_OPEN = 268
SYS_SEM_CLOSE = 269
SYS_SEM_UNLINK = 270
SYS_SEM_WAIT = 271
SYS_SEM_TRYWAIT = 272
SYS_SEM_POST = 273
SYS_SEM_GETVALUE = 274
SYS_SEM_INIT = 275
SYS_SEM_DESTROY = 276
SYS_OPEN_EXTENDED = 277
SYS_UMASK_EXTENDED = 278
SYS_STAT_EXTENDED = 279
SYS_LSTAT_EXTENDED = 280
SYS_FSTAT_EXTENDED = 281
SYS_CHMOD_EXTENDED = 282
SYS_FCHMOD_EXTENDED = 283
SYS_ACCESS_EXTENDED = 284
SYS_SETTID = 285
SYS_GETTID = 286
SYS_SETSGROUPS = 287
SYS_GETSGROUPS = 288
SYS_SETWGROUPS = 289
SYS_GETWGROUPS = 290
SYS_MKFIFO_EXTENDED = 291
SYS_MKDIR_EXTENDED = 292
SYS_IDENTITYSVC = 293
SYS_SHARED_REGION_CHECK_NP = 294
SYS_VM_PRESSURE_MONITOR = 296
SYS_PSYNCH_RW_LONGRDLOCK = 297
SYS_PSYNCH_RW_YIELDWRLOCK = 298
SYS_PSYNCH_RW_DOWNGRADE = 299
SYS_PSYNCH_RW_UPGRADE = 300
SYS_PSYNCH_MUTEXWAIT = 301
SYS_PSYNCH_MUTEXDROP = 302
SYS_PSYNCH_CVBROAD = 303
SYS_PSYNCH_CVSIGNAL = 304
SYS_PSYNCH_CVWAIT = 305
SYS_PSYNCH_RW_RDLOCK = 306
SYS_PSYNCH_RW_WRLOCK = 307
SYS_PSYNCH_RW_UNLOCK = 308
SYS_PSYNCH_RW_UNLOCK2 = 309
SYS_GETSID = 310
SYS_SETTID_WITH_PID = 311
SYS_PSYNCH_CVCLRPREPOST = 312
SYS_AIO_FSYNC = 313
SYS_AIO_RETURN = 314
SYS_AIO_SUSPEND = 315
SYS_AIO_CANCEL = 316
SYS_AIO_ERROR = 317
SYS_AIO_READ = 318
SYS_AIO_WRITE = 319
SYS_LIO_LISTIO = 320
SYS_IOPOLICYSYS = 322
SYS_PROCESS_POLICY = 323
SYS_MLOCKALL = 324
SYS_MUNLOCKALL = 325
SYS_ISSETUGID = 327
SYS___PTHREAD_KILL = 328
SYS___PTHREAD_SIGMASK = 329
SYS___SIGWAIT = 330
SYS___DISABLE_THREADSIGNAL = 331
SYS___PTHREAD_MARKCANCEL = 332
SYS___PTHREAD_CANCELED = 333
SYS___SEMWAIT_SIGNAL = 334
SYS_PROC_INFO = 336
SYS_SENDFILE = 337
SYS_STAT64 = 338
SYS_FSTAT64 = 339
SYS_LSTAT64 = 340
SYS_STAT64_EXTENDED = 341
SYS_LSTAT64_EXTENDED = 342
SYS_FSTAT64_EXTENDED = 343
SYS_GETDIRENTRIES64 = 344
SYS_STATFS64 = 345
SYS_FSTATFS64 = 346
SYS_GETFSSTAT64 = 347
SYS___PTHREAD_CHDIR = 348
SYS___PTHREAD_FCHDIR = 349
SYS_AUDIT = 350
SYS_AUDITON = 351
SYS_GETAUID = 353
SYS_SETAUID = 354
SYS_GETAUDIT_ADDR = 357
SYS_SETAUDIT_ADDR = 358
SYS_AUDITCTL = 359
SYS_BSDTHREAD_CREATE = 360
SYS_BSDTHREAD_TERMINATE = 361
SYS_KQUEUE = 362
SYS_KEVENT = 363
SYS_LCHOWN = 364
SYS_STACK_SNAPSHOT = 365
SYS_BSDTHREAD_REGISTER = 366
SYS_WORKQ_OPEN = 367
SYS_WORKQ_KERNRETURN = 368
SYS_KEVENT64 = 369
SYS___OLD_SEMWAIT_SIGNAL = 370
SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371
SYS_THREAD_SELFID = 372
SYS_LEDGER = 373
SYS___MAC_EXECVE = 380
SYS___MAC_SYSCALL = 381
SYS___MAC_GET_FILE = 382
SYS___MAC_SET_FILE = 383
SYS___MAC_GET_LINK = 384
SYS___MAC_SET_LINK = 385
SYS___MAC_GET_PROC = 386
SYS___MAC_SET_PROC = 387
SYS___MAC_GET_FD = 388
SYS___MAC_SET_FD = 389
SYS___MAC_GET_PID = 390
SYS___MAC_GET_LCID = 391
SYS___MAC_GET_LCTX = 392
SYS___MAC_SET_LCTX = 393
SYS_SETLCID = 394
SYS_GETLCID = 395
SYS_READ_NOCANCEL = 396
SYS_WRITE_NOCANCEL = 397
SYS_OPEN_NOCANCEL = 398
SYS_CLOSE_NOCANCEL = 399
SYS_WAIT4_NOCANCEL = 400
SYS_RECVMSG_NOCANCEL = 401
SYS_SENDMSG_NOCANCEL = 402
SYS_RECVFROM_NOCANCEL = 403
SYS_ACCEPT_NOCANCEL = 404
SYS_MSYNC_NOCANCEL = 405
SYS_FCNTL_NOCANCEL = 406
SYS_SELECT_NOCANCEL = 407
SYS_FSYNC_NOCANCEL = 408
SYS_CONNECT_NOCANCEL = 409
SYS_SIGSUSPEND_NOCANCEL = 410
SYS_READV_NOCANCEL = 411
SYS_WRITEV_NOCANCEL = 412
SYS_SENDTO_NOCANCEL = 413
SYS_PREAD_NOCANCEL = 414
SYS_PWRITE_NOCANCEL = 415
SYS_WAITID_NOCANCEL = 416
SYS_POLL_NOCANCEL = 417
SYS_MSGSND_NOCANCEL = 418
SYS_MSGRCV_NOCANCEL = 419
SYS_SEM_WAIT_NOCANCEL = 420
SYS_AIO_SUSPEND_NOCANCEL = 421
SYS___SIGWAIT_NOCANCEL = 422
SYS___SEMWAIT_SIGNAL_NOCANCEL = 423
SYS___MAC_MOUNT = 424
SYS___MAC_GET_MOUNT = 425
SYS___MAC_GETFSSTAT = 426
SYS_FSGETPATH = 427
SYS_AUDIT_SESSION_SELF = 428
SYS_AUDIT_SESSION_JOIN = 429
SYS_FILEPORT_MAKEPORT = 430
SYS_FILEPORT_MAKEFD = 431
SYS_AUDIT_SESSION_PORT = 432
SYS_PID_SUSPEND = 433
SYS_PID_RESUME = 434
SYS_PID_HIBERNATE = 435
SYS_PID_SHUTDOWN_SOCKETS = 436
SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
SYS_KAS_INFO = 439
SYS_MAXSYSCALL = 440
)
| _vendor/src/golang.org/x/sys/unix/zsysnum_darwin_arm.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.0002884677960537374,
0.0001828308595577255,
0.00016177007637452334,
0.00017387481057085097,
0.0000249391177931102
] |
{
"id": 1,
"code_window": [
"\t\t\te.rowKeyCache[i] = rk\n",
"\t\t}\n",
"\t}\n",
"\tb, err := codec.EncodeValue(b, row.Data...)\n",
"\treturn b, errors.Trace(err)\n",
"}\n",
"\n",
"func (e *HashJoinExec) decodeRow(data []byte) (*Row, error) {\n",
"\trow := new(Row)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tloc := e.ctx.GetSessionVars().GetTimeZone()\n",
"\tfor _, datum := range row.Data {\n",
"\t\ttmp, err := tablecodec.EncodeValue(datum, loc)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n",
"\t\tb = append(b, tmp...)\n",
"\t}\n",
"\treturn b, nil\n"
],
"file_path": "executor/join.go",
"type": "replace",
"edit_start_line_idx": 279
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tables
import (
"bytes"
"encoding/binary"
"io"
"github.com/juju/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/types"
)
func encodeHandle(h int64) []byte {
buf := &bytes.Buffer{}
err := binary.Write(buf, binary.BigEndian, h)
if err != nil {
panic(err)
}
return buf.Bytes()
}
func decodeHandle(data []byte) (int64, error) {
var h int64
buf := bytes.NewBuffer(data)
err := binary.Read(buf, binary.BigEndian, &h)
return h, errors.Trace(err)
}
// indexIter is for KV store index iterator.
type indexIter struct {
it kv.Iterator
idx *index
prefix kv.Key
}
// Close does the clean up works when KV store index iterator is closed.
func (c *indexIter) Close() {
if c.it != nil {
c.it.Close()
c.it = nil
}
}
// Next returns current key and moves iterator to the next step.
func (c *indexIter) Next() (val []types.Datum, h int64, err error) {
if !c.it.Valid() {
return nil, 0, errors.Trace(io.EOF)
}
if !c.it.Key().HasPrefix(c.prefix) {
return nil, 0, errors.Trace(io.EOF)
}
// get indexedValues
buf := c.it.Key()[len(c.prefix):]
vv, err := codec.Decode(buf, len(c.idx.idxInfo.Columns))
if err != nil {
return nil, 0, errors.Trace(err)
}
// if index is *not* unique, the handle is in keybuf
if !c.idx.idxInfo.Unique {
h = vv[len(vv)-1].GetInt64()
val = vv[0 : len(vv)-1]
} else {
// otherwise handle is value
h, err = decodeHandle(c.it.Value())
if err != nil {
return nil, 0, errors.Trace(err)
}
val = vv
}
// update new iter to next
err = c.it.Next()
if err != nil {
return nil, 0, errors.Trace(err)
}
return
}
// index is the data structure for index data in the KV store.
type index struct {
tblInfo *model.TableInfo
idxInfo *model.IndexInfo
prefix kv.Key
}
// NewIndex builds a new Index object.
func NewIndex(tableInfo *model.TableInfo, indexInfo *model.IndexInfo) table.Index {
index := &index{
tblInfo: tableInfo,
idxInfo: indexInfo,
prefix: kv.Key(tablecodec.EncodeTableIndexPrefix(tableInfo.ID, indexInfo.ID)),
}
return index
}
// Meta returns index info.
func (c *index) Meta() *model.IndexInfo {
return c.idxInfo
}
// GenIndexKey generates storage key for index values. Returned distinct indicates whether the
// indexed values should be distinct in storage (i.e. whether handle is encoded in the key).
func (c *index) GenIndexKey(indexedValues []types.Datum, h int64) (key []byte, distinct bool, err error) {
if c.idxInfo.Unique {
// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html
// A UNIQUE index creates a constraint such that all values in the index must be distinct.
// An error occurs if you try to add a new row with a key value that matches an existing row.
// For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL.
distinct = true
for _, cv := range indexedValues {
if cv.IsNull() {
distinct = false
break
}
}
}
// For string columns, indexes can be created that use only the leading part of column values,
// using col_name(length) syntax to specify an index prefix length.
for i := 0; i < len(indexedValues); i++ {
v := &indexedValues[i]
if v.Kind() == types.KindString || v.Kind() == types.KindBytes {
ic := c.idxInfo.Columns[i]
if ic.Length != types.UnspecifiedLength && len(v.GetBytes()) > ic.Length {
// truncate value and limit its length
v.SetBytes(v.GetBytes()[:ic.Length])
}
}
}
key = append(key, []byte(c.prefix)...)
if distinct {
key, err = codec.EncodeKey(key, indexedValues...)
} else {
key, err = codec.EncodeKey(key, append(indexedValues, types.NewDatum(h))...)
}
if err != nil {
return nil, false, errors.Trace(err)
}
return
}
// Create creates a new entry in the kvIndex data.
// If the index is unique and there is an existing entry with the same key,
// Create will return the existing entry's handle as the first return value, ErrKeyExists as the second return value.
func (c *index) Create(rm kv.RetrieverMutator, indexedValues []types.Datum, h int64) (int64, error) {
key, distinct, err := c.GenIndexKey(indexedValues, h)
if err != nil {
return 0, errors.Trace(err)
}
if !distinct {
// non-unique index doesn't need store value, write a '0' to reduce space
err = rm.Set(key, []byte{'0'})
return 0, errors.Trace(err)
}
value, err := rm.Get(key)
if kv.IsErrNotFound(err) {
err = rm.Set(key, encodeHandle(h))
return 0, errors.Trace(err)
}
handle, err := decodeHandle(value)
if err != nil {
return 0, errors.Trace(err)
}
return handle, errors.Trace(kv.ErrKeyExists)
}
// Delete removes the entry for handle h and indexdValues from KV index.
func (c *index) Delete(m kv.Mutator, indexedValues []types.Datum, h int64) error {
key, _, err := c.GenIndexKey(indexedValues, h)
if err != nil {
return errors.Trace(err)
}
err = m.Delete(key)
return errors.Trace(err)
}
// Drop removes the KV index from store.
func (c *index) Drop(rm kv.RetrieverMutator) error {
it, err := rm.Seek(c.prefix)
if err != nil {
return errors.Trace(err)
}
defer it.Close()
// remove all indices
for it.Valid() {
if !it.Key().HasPrefix(c.prefix) {
break
}
err := rm.Delete(it.Key())
if err != nil {
return errors.Trace(err)
}
err = it.Next()
if err != nil {
return errors.Trace(err)
}
}
return nil
}
// Seek searches KV index for the entry with indexedValues.
func (c *index) Seek(r kv.Retriever, indexedValues []types.Datum) (iter table.IndexIterator, hit bool, err error) {
key, _, err := c.GenIndexKey(indexedValues, 0)
if err != nil {
return nil, false, errors.Trace(err)
}
it, err := r.Seek(key)
if err != nil {
return nil, false, errors.Trace(err)
}
// check if hit
hit = false
if it.Valid() && it.Key().Cmp(key) == 0 {
hit = true
}
return &indexIter{it: it, idx: c, prefix: c.prefix}, hit, nil
}
// SeekFirst returns an iterator which points to the first entry of the KV index.
func (c *index) SeekFirst(r kv.Retriever) (iter table.IndexIterator, err error) {
it, err := r.Seek(c.prefix)
if err != nil {
return nil, errors.Trace(err)
}
return &indexIter{it: it, idx: c, prefix: c.prefix}, nil
}
func (c *index) Exist(rm kv.RetrieverMutator, indexedValues []types.Datum, h int64) (bool, int64, error) {
key, distinct, err := c.GenIndexKey(indexedValues, h)
if err != nil {
return false, 0, errors.Trace(err)
}
value, err := rm.Get(key)
if kv.IsErrNotFound(err) {
return false, 0, nil
}
if err != nil {
return false, 0, errors.Trace(err)
}
// For distinct index, the value of key is handle.
if distinct {
handle, err := decodeHandle(value)
if err != nil {
return false, 0, errors.Trace(err)
}
if handle != h {
return true, handle, errors.Trace(kv.ErrKeyExists)
}
return true, handle, nil
}
return true, h, nil
}
func (c *index) FetchValues(r []types.Datum) ([]types.Datum, error) {
vals := make([]types.Datum, len(c.idxInfo.Columns))
for i, ic := range c.idxInfo.Columns {
if ic.Offset < 0 || ic.Offset >= len(r) {
return nil, table.ErrIndexOutBound.Gen("Index column %s offset out of bound, offset: %d, row: %v",
ic.Name, ic.Offset, r)
}
vals[i] = r[ic.Offset]
}
return vals, nil
}
| table/tables/index.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.0008254973799921572,
0.00024207324895542115,
0.0001599492970854044,
0.00016780498845037073,
0.00017578559345565736
] |
{
"id": 1,
"code_window": [
"\t\t\te.rowKeyCache[i] = rk\n",
"\t\t}\n",
"\t}\n",
"\tb, err := codec.EncodeValue(b, row.Data...)\n",
"\treturn b, errors.Trace(err)\n",
"}\n",
"\n",
"func (e *HashJoinExec) decodeRow(data []byte) (*Row, error) {\n",
"\trow := new(Row)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tloc := e.ctx.GetSessionVars().GetTimeZone()\n",
"\tfor _, datum := range row.Data {\n",
"\t\ttmp, err := tablecodec.EncodeValue(datum, loc)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n",
"\t\tb = append(b, tmp...)\n",
"\t}\n",
"\treturn b, nil\n"
],
"file_path": "executor/join.go",
"type": "replace",
"edit_start_line_idx": 279
} | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package unix
import "syscall"
func Unsetenv(key string) error {
// This was added in Go 1.4.
return syscall.Unsetenv(key)
}
| _vendor/src/golang.org/x/sys/unix/env_unset.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.00017925087013281882,
0.00017333886353299022,
0.00016742685693316162,
0.00017333886353299022,
0.000005912006599828601
] |
{
"id": 2,
"code_window": [
"\ttime.Sleep(100 * time.Millisecond)\n",
"\tresult.Close()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (s *testSuite) TestHashJoinExecEncodeDecodeRow(c *C) {\n",
"\ttk := testkit.NewTestKit(c, s.store)\n",
"\ttk.MustExec(\"use test\")\n",
"\ttk.MustExec(\"drop table if exists t1\")\n",
"\ttk.MustExec(\"drop table if exists t2\")\n",
"\ttk.MustExec(\"create table t1 (id int)\")\n",
"\ttk.MustExec(\"create table t2 (id int, name varchar(255), ts timestamp)\")\n",
"\ttk.MustExec(\"insert into t1 values (1)\")\n",
"\ttk.MustExec(\"insert into t2 values (1, 'xxx', '2003-06-09 10:51:26')\")\n",
"\tresult := tk.MustQuery(\"select ts from t1 inner join t2 where t2.name = 'xxx'\")\n",
"\tresult.Check(testkit.Rows(\"2003-06-09 10:51:26\"))\n",
"}"
],
"file_path": "executor/join_test.go",
"type": "add",
"edit_start_line_idx": 553
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"sync"
"sync/atomic"
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/mvmap"
"github.com/pingcap/tidb/util/types"
)
var (
_ joinExec = &NestedLoopJoinExec{}
_ Executor = &HashJoinExec{}
_ joinExec = &HashSemiJoinExec{}
_ Executor = &ApplyJoinExec{}
)
// HashJoinExec implements the hash join algorithm.
type HashJoinExec struct {
hashTable *mvmap.MVMap
smallHashKey []*expression.Column
bigHashKey []*expression.Column
smallExec Executor
bigExec Executor
prepared bool
ctx context.Context
smallFilter expression.CNFExprs
bigFilter expression.CNFExprs
otherFilter expression.CNFExprs
schema *expression.Schema
outer bool
leftSmall bool
cursor int
defaultValues []types.Datum
// targetTypes means the target the type that both smallHashKey and bigHashKey should convert to.
targetTypes []*types.FieldType
finished atomic.Value
// wg is for sync multiple join workers.
wg sync.WaitGroup
// closeCh add a lock for closing executor.
closeCh chan struct{}
rows []*Row
// concurrency is number of concurrent channels.
concurrency int
bigTableResultCh []chan *execResult
hashJoinContexts []*hashJoinCtx
// Channels for output.
resultCh chan *execResult
// rowKeyCache is used to store the table and table name from a row.
// Because every row has the same table name and table, we can use a single row key cache.
rowKeyCache []*RowKeyEntry
}
// hashJoinCtx holds the variables needed to do a hash join in one of many concurrent goroutines.
type hashJoinCtx struct {
bigFilter expression.CNFExprs
otherFilter expression.CNFExprs
// datumBuffer is used for encode hash keys.
datumBuffer []types.Datum
hashKeyBuffer []byte
}
// Close implements the Executor Close interface.
func (e *HashJoinExec) Close() error {
e.finished.Store(true)
if e.prepared {
for range e.resultCh {
}
<-e.closeCh
}
e.prepared = false
e.cursor = 0
e.rows = nil
return e.smallExec.Close()
}
// makeJoinRow simply creates a new row that appends row b to row a.
func makeJoinRow(a *Row, b *Row) *Row {
ret := &Row{
RowKeys: make([]*RowKeyEntry, 0, len(a.RowKeys)+len(b.RowKeys)),
Data: make([]types.Datum, 0, len(a.Data)+len(b.Data)),
}
ret.RowKeys = append(ret.RowKeys, a.RowKeys...)
ret.RowKeys = append(ret.RowKeys, b.RowKeys...)
ret.Data = append(ret.Data, a.Data...)
ret.Data = append(ret.Data, b.Data...)
return ret
}
// getJoinKey gets the hash key when given a row and hash columns.
// It will return a boolean value representing if the hash key has null, a byte slice representing the result hash code.
func getJoinKey(sc *variable.StatementContext, cols []*expression.Column, row *Row, targetTypes []*types.FieldType,
vals []types.Datum, bytes []byte) (bool, []byte, error) {
var err error
for i, col := range cols {
vals[i], err = col.Eval(row.Data)
if err != nil {
return false, nil, errors.Trace(err)
}
if vals[i].IsNull() {
return true, nil, nil
}
vals[i], err = vals[i].ConvertTo(sc, targetTypes[i])
if err != nil {
return false, nil, errors.Trace(err)
}
}
if len(vals) == 0 {
return false, nil, nil
}
bytes, err = codec.EncodeValue(bytes, vals...)
return false, bytes, errors.Trace(err)
}
// Schema implements the Executor Schema interface.
func (e *HashJoinExec) Schema() *expression.Schema {
return e.schema
}
var batchSize = 128
// fetchBigExec fetches rows from the big table in a background goroutine
// and sends the rows to multiple channels which will be read by multiple join workers.
func (e *HashJoinExec) fetchBigExec() {
cnt := 0
defer func() {
for _, cn := range e.bigTableResultCh {
close(cn)
}
e.bigExec.Close()
e.wg.Done()
}()
curBatchSize := 1
result := &execResult{rows: make([]*Row, 0, curBatchSize)}
txnCtx := e.ctx.GoCtx()
for {
done := false
idx := cnt % e.concurrency
for i := 0; i < curBatchSize; i++ {
if e.finished.Load().(bool) {
return
}
row, err := e.bigExec.Next()
if err != nil {
result.err = errors.Trace(err)
e.bigTableResultCh[idx] <- result
done = true
break
}
if row == nil {
done = true
break
}
result.rows = append(result.rows, row)
if len(result.rows) >= curBatchSize {
select {
case <-txnCtx.Done():
return
case e.bigTableResultCh[idx] <- result:
result = &execResult{rows: make([]*Row, 0, curBatchSize)}
}
}
}
cnt++
if done {
if len(result.rows) > 0 {
select {
case <-txnCtx.Done():
return
case e.bigTableResultCh[idx] <- result:
}
}
break
}
if curBatchSize < batchSize {
curBatchSize *= 2
}
}
}
// prepare runs the first time when 'Next' is called, it starts one worker goroutine to fetch rows from the big table,
// and reads all data from the small table to build a hash table, then starts multiple join worker goroutines.
func (e *HashJoinExec) prepare() error {
e.closeCh = make(chan struct{})
e.finished.Store(false)
e.bigTableResultCh = make([]chan *execResult, e.concurrency)
e.wg = sync.WaitGroup{}
for i := 0; i < e.concurrency; i++ {
e.bigTableResultCh[i] = make(chan *execResult, e.concurrency)
}
// Start a worker to fetch big table rows.
e.wg.Add(1)
go e.fetchBigExec()
e.hashTable = mvmap.NewMVMap()
e.cursor = 0
sc := e.ctx.GetSessionVars().StmtCtx
var buffer []byte
for {
row, err := e.smallExec.Next()
if err != nil {
return errors.Trace(err)
}
if row == nil {
e.smallExec.Close()
break
}
matched, err := expression.EvalBool(e.smallFilter, row.Data, e.ctx)
if err != nil {
return errors.Trace(err)
}
if !matched {
continue
}
hasNull, joinKey, err := getJoinKey(sc, e.smallHashKey, row, e.targetTypes, e.hashJoinContexts[0].datumBuffer, nil)
if err != nil {
return errors.Trace(err)
}
if hasNull {
continue
}
buffer = buffer[:0]
buffer, err = e.encodeRow(buffer, row)
if err != nil {
return errors.Trace(err)
}
e.hashTable.Put(joinKey, buffer)
}
e.resultCh = make(chan *execResult, e.concurrency)
for i := 0; i < e.concurrency; i++ {
e.wg.Add(1)
go e.runJoinWorker(i)
}
go e.waitJoinWorkersAndCloseResultChan()
e.prepared = true
return nil
}
func (e *HashJoinExec) encodeRow(b []byte, row *Row) ([]byte, error) {
numRowKeys := int64(len(row.RowKeys))
b = codec.EncodeVarint(b, numRowKeys)
for _, rowKey := range row.RowKeys {
b = codec.EncodeVarint(b, rowKey.Handle)
}
if numRowKeys > 0 && e.rowKeyCache == nil {
e.rowKeyCache = make([]*RowKeyEntry, len(row.RowKeys))
for i := 0; i < len(row.RowKeys); i++ {
rk := new(RowKeyEntry)
rk.Tbl = row.RowKeys[i].Tbl
rk.TableName = row.RowKeys[i].TableName
e.rowKeyCache[i] = rk
}
}
b, err := codec.EncodeValue(b, row.Data...)
return b, errors.Trace(err)
}
func (e *HashJoinExec) decodeRow(data []byte) (*Row, error) {
row := new(Row)
data, entryLen, err := codec.DecodeVarint(data)
if err != nil {
return nil, errors.Trace(err)
}
for i := 0; i < int(entryLen); i++ {
entry := new(RowKeyEntry)
data, entry.Handle, err = codec.DecodeVarint(data)
if err != nil {
return nil, errors.Trace(err)
}
entry.Tbl = e.rowKeyCache[i].Tbl
entry.TableName = e.rowKeyCache[i].TableName
row.RowKeys = append(row.RowKeys, entry)
}
values := make([]types.Datum, e.smallExec.Schema().Len())
err = codec.SetRawValues(data, values)
if err != nil {
return nil, errors.Trace(err)
}
err = decodeRawValues(values, e.smallExec.Schema(), e.ctx.GetSessionVars().GetTimeZone())
if err != nil {
return nil, errors.Trace(err)
}
row.Data = values
return row, nil
}
func (e *HashJoinExec) waitJoinWorkersAndCloseResultChan() {
e.wg.Wait()
close(e.resultCh)
e.hashTable = nil
close(e.closeCh)
}
// runJoinWorker does join job in one goroutine.
func (e *HashJoinExec) runJoinWorker(idx int) {
maxRowsCnt := 1000
result := &execResult{rows: make([]*Row, 0, maxRowsCnt)}
txnCtx := e.ctx.GoCtx()
for {
var bigTableResult *execResult
var exit bool
select {
case <-txnCtx.Done():
exit = true
case tmp, ok := <-e.bigTableResultCh[idx]:
if !ok {
exit = true
}
bigTableResult = tmp
}
if exit || e.finished.Load().(bool) {
break
}
if bigTableResult.err != nil {
e.resultCh <- &execResult{err: errors.Trace(bigTableResult.err)}
break
}
for _, bigRow := range bigTableResult.rows {
succ := e.joinOneBigRow(e.hashJoinContexts[idx], bigRow, result)
if !succ {
break
}
if len(result.rows) >= maxRowsCnt {
e.resultCh <- result
result = &execResult{rows: make([]*Row, 0, maxRowsCnt)}
}
}
}
if len(result.rows) != 0 || result.err != nil {
e.resultCh <- result
}
e.wg.Done()
}
// joinOneBigRow creates result rows from a row in a big table and sends them to resultRows channel.
// Every matching row generates a result row.
// If there are no matching rows and it is outer join, a null filled result row is created.
func (e *HashJoinExec) joinOneBigRow(ctx *hashJoinCtx, bigRow *Row, result *execResult) bool {
var (
matchedRows []*Row
err error
)
bigMatched := true
bigMatched, err = expression.EvalBool(ctx.bigFilter, bigRow.Data, e.ctx)
if err != nil {
result.err = errors.Trace(err)
return false
}
if bigMatched {
matchedRows, err = e.constructMatchedRows(ctx, bigRow)
if err != nil {
result.err = errors.Trace(err)
return false
}
}
for _, r := range matchedRows {
result.rows = append(result.rows, r)
}
if len(matchedRows) == 0 && e.outer {
r := e.fillRowWithDefaultValues(bigRow)
result.rows = append(result.rows, r)
}
return true
}
// constructMatchedRows creates matching result rows from a row in the big table.
func (e *HashJoinExec) constructMatchedRows(ctx *hashJoinCtx, bigRow *Row) (matchedRows []*Row, err error) {
sc := e.ctx.GetSessionVars().StmtCtx
hasNull, joinKey, err := getJoinKey(sc, e.bigHashKey, bigRow, e.targetTypes, ctx.datumBuffer, ctx.hashKeyBuffer[0:0:cap(ctx.hashKeyBuffer)])
if err != nil {
return nil, errors.Trace(err)
}
if hasNull {
return
}
values := e.hashTable.Get(joinKey)
if len(values) == 0 {
return
}
// match eq condition
for _, value := range values {
var smallRow *Row
smallRow, err = e.decodeRow(value)
if err != nil {
return nil, errors.Trace(err)
}
var matchedRow *Row
if e.leftSmall {
matchedRow = makeJoinRow(smallRow, bigRow)
} else {
matchedRow = makeJoinRow(bigRow, smallRow)
}
otherMatched, err := expression.EvalBool(ctx.otherFilter, matchedRow.Data, e.ctx)
if err != nil {
return nil, errors.Trace(err)
}
if otherMatched {
matchedRows = append(matchedRows, matchedRow)
}
}
return matchedRows, nil
}
// fillRowWithDefaultValues creates a result row filled with default values from a row in the big table.
// It is used for outer join, when a row from outer table doesn't have any matching rows.
func (e *HashJoinExec) fillRowWithDefaultValues(bigRow *Row) (returnRow *Row) {
smallRow := &Row{
Data: make([]types.Datum, e.smallExec.Schema().Len()),
}
copy(smallRow.Data, e.defaultValues)
if e.leftSmall {
returnRow = makeJoinRow(smallRow, bigRow)
} else {
returnRow = makeJoinRow(bigRow, smallRow)
}
return returnRow
}
// Next implements the Executor Next interface.
func (e *HashJoinExec) Next() (*Row, error) {
if !e.prepared {
if err := e.prepare(); err != nil {
return nil, errors.Trace(err)
}
}
txnCtx := e.ctx.GoCtx()
if e.cursor >= len(e.rows) {
var result *execResult
select {
case tmp, ok := <-e.resultCh:
if !ok {
return nil, nil
}
result = tmp
if result.err != nil {
e.finished.Store(true)
return nil, errors.Trace(result.err)
}
case <-txnCtx.Done():
return nil, nil
}
if len(result.rows) == 0 {
return nil, nil
}
e.rows = result.rows
e.cursor = 0
}
row := e.rows[e.cursor]
e.cursor++
return row, nil
}
// joinExec is the common interface of join algorithm except for hash join.
type joinExec interface {
Executor
// fetchBigRow fetches a valid row from big Exec and returns a bool value that means if it is matched.
fetchBigRow() (*Row, bool, error)
// prepare reads all records from small Exec and stores them.
prepare() error
// doJoin fetches a row from big exec and a bool value that means if it's matched with big filter,
// then get all the rows matches the on condition.
doJoin(*Row, bool) ([]*Row, error)
}
// NestedLoopJoinExec implements nested-loop algorithm for join.
type NestedLoopJoinExec struct {
innerRows []*Row
cursor int
resultRows []*Row
SmallExec Executor
BigExec Executor
leftSmall bool
prepared bool
Ctx context.Context
SmallFilter expression.CNFExprs
BigFilter expression.CNFExprs
OtherFilter expression.CNFExprs
schema *expression.Schema
outer bool
defaultValues []types.Datum
}
// Schema implements Executor interface.
func (e *NestedLoopJoinExec) Schema() *expression.Schema {
return e.schema
}
// Close implements Executor interface.
func (e *NestedLoopJoinExec) Close() error {
e.resultRows = nil
e.innerRows = nil
e.cursor = 0
e.prepared = false
err := e.BigExec.Close()
if err != nil {
return errors.Trace(err)
}
return e.SmallExec.Close()
}
func (e *NestedLoopJoinExec) fetchBigRow() (*Row, bool, error) {
for {
bigRow, err := e.BigExec.Next()
if err != nil {
return nil, false, errors.Trace(err)
}
if bigRow == nil {
return nil, false, e.BigExec.Close()
}
matched, err := expression.EvalBool(e.BigFilter, bigRow.Data, e.Ctx)
if err != nil {
return nil, false, errors.Trace(err)
}
if matched {
return bigRow, true, nil
} else if e.outer {
return bigRow, false, nil
}
}
}
// prepare runs the first time when 'Next' is called and it reads all data from the small table and stores
// them in a slice.
func (e *NestedLoopJoinExec) prepare() error {
err := e.SmallExec.Close()
if err != nil {
return errors.Trace(err)
}
e.innerRows = e.innerRows[:0]
e.prepared = true
for {
row, err := e.SmallExec.Next()
if err != nil {
return errors.Trace(err)
}
if row == nil {
return e.SmallExec.Close()
}
matched, err := expression.EvalBool(e.SmallFilter, row.Data, e.Ctx)
if err != nil {
return errors.Trace(err)
}
if matched {
e.innerRows = append(e.innerRows, row)
}
}
}
func (e *NestedLoopJoinExec) fillRowWithDefaultValue(bigRow *Row) (returnRow *Row) {
smallRow := &Row{
Data: make([]types.Datum, e.SmallExec.Schema().Len()),
}
copy(smallRow.Data, e.defaultValues)
if e.leftSmall {
returnRow = makeJoinRow(smallRow, bigRow)
} else {
returnRow = makeJoinRow(bigRow, smallRow)
}
return returnRow
}
func (e *NestedLoopJoinExec) doJoin(bigRow *Row, match bool) ([]*Row, error) {
e.resultRows = e.resultRows[0:0]
if !match && e.outer {
row := e.fillRowWithDefaultValue(bigRow)
e.resultRows = append(e.resultRows, row)
return e.resultRows, nil
}
for _, row := range e.innerRows {
var mergedRow *Row
if e.leftSmall {
mergedRow = makeJoinRow(row, bigRow)
} else {
mergedRow = makeJoinRow(bigRow, row)
}
matched, err := expression.EvalBool(e.OtherFilter, mergedRow.Data, e.Ctx)
if err != nil {
return nil, errors.Trace(err)
}
if !matched {
continue
}
e.resultRows = append(e.resultRows, mergedRow)
}
if len(e.resultRows) == 0 && e.outer {
e.resultRows = append(e.resultRows, e.fillRowWithDefaultValue(bigRow))
}
return e.resultRows, nil
}
// Next implements the Executor interface.
func (e *NestedLoopJoinExec) Next() (*Row, error) {
if !e.prepared {
if err := e.prepare(); err != nil {
return nil, errors.Trace(err)
}
}
for {
if e.cursor < len(e.resultRows) {
retRow := e.resultRows[e.cursor]
e.cursor++
return retRow, nil
}
bigRow, match, err := e.fetchBigRow()
if bigRow == nil || err != nil {
return bigRow, errors.Trace(err)
}
e.resultRows, err = e.doJoin(bigRow, match)
if err != nil {
return nil, errors.Trace(err)
}
e.cursor = 0
}
}
// HashSemiJoinExec implements the hash join algorithm for semi join.
type HashSemiJoinExec struct {
hashTable map[string][]*Row
smallHashKey []*expression.Column
bigHashKey []*expression.Column
smallExec Executor
bigExec Executor
prepared bool
ctx context.Context
smallFilter expression.CNFExprs
bigFilter expression.CNFExprs
otherFilter expression.CNFExprs
schema *expression.Schema
resultRows []*Row
// auxMode is a mode that the result row always returns with an extra column which stores a boolean
// or NULL value to indicate if this row is matched.
auxMode bool
targetTypes []*types.FieldType
smallTableHasNull bool
// anti is true, semi join only output the unmatched row.
anti bool
}
// Close implements the Executor Close interface.
func (e *HashSemiJoinExec) Close() error {
e.prepared = false
e.hashTable = make(map[string][]*Row)
e.smallTableHasNull = false
e.resultRows = nil
err := e.smallExec.Close()
if err != nil {
return errors.Trace(err)
}
return e.bigExec.Close()
}
// Schema implements the Executor Schema interface.
func (e *HashSemiJoinExec) Schema() *expression.Schema {
return e.schema
}
// prepare runs the first time when 'Next' is called and it reads all data from the small table and stores
// them in a hash table.
func (e *HashSemiJoinExec) prepare() error {
err := e.smallExec.Close()
if err != nil {
return errors.Trace(err)
}
e.hashTable = make(map[string][]*Row)
sc := e.ctx.GetSessionVars().StmtCtx
e.resultRows = make([]*Row, 1)
for {
row, err := e.smallExec.Next()
if err != nil {
return errors.Trace(err)
}
if row == nil {
e.smallExec.Close()
break
}
matched, err := expression.EvalBool(e.smallFilter, row.Data, e.ctx)
if err != nil {
return errors.Trace(err)
}
if !matched {
continue
}
hasNull, hashcode, err := getJoinKey(sc, e.smallHashKey, row, e.targetTypes, make([]types.Datum, len(e.smallHashKey)), nil)
if err != nil {
return errors.Trace(err)
}
if hasNull {
e.smallTableHasNull = true
continue
}
if rows, ok := e.hashTable[string(hashcode)]; !ok {
e.hashTable[string(hashcode)] = []*Row{row}
} else {
e.hashTable[string(hashcode)] = append(rows, row)
}
}
e.prepared = true
return nil
}
func (e *HashSemiJoinExec) rowIsMatched(bigRow *Row) (matched bool, hasNull bool, err error) {
sc := e.ctx.GetSessionVars().StmtCtx
hasNull, hashcode, err := getJoinKey(sc, e.bigHashKey, bigRow, e.targetTypes, make([]types.Datum, len(e.smallHashKey)), nil)
if err != nil {
return false, false, errors.Trace(err)
}
if hasNull {
return false, true, nil
}
rows, ok := e.hashTable[string(hashcode)]
if !ok {
return
}
// match eq condition
for _, smallRow := range rows {
matchedRow := makeJoinRow(bigRow, smallRow)
matched, err = expression.EvalBool(e.otherFilter, matchedRow.Data, e.ctx)
if err != nil {
return false, false, errors.Trace(err)
}
if matched {
return
}
}
return
}
func (e *HashSemiJoinExec) fetchBigRow() (*Row, bool, error) {
for {
bigRow, err := e.bigExec.Next()
if err != nil {
return nil, false, errors.Trace(err)
}
if bigRow == nil {
return nil, false, errors.Trace(e.bigExec.Close())
}
matched, err := expression.EvalBool(e.bigFilter, bigRow.Data, e.ctx)
if err != nil {
return nil, false, errors.Trace(err)
}
if matched {
return bigRow, true, nil
} else if e.auxMode {
return bigRow, false, nil
}
}
}
func (e *HashSemiJoinExec) doJoin(bigRow *Row, match bool) ([]*Row, error) {
if e.auxMode && !match {
bigRow.Data = append(bigRow.Data, types.NewDatum(false))
e.resultRows[0] = bigRow
return e.resultRows, nil
}
matched, isNull, err := e.rowIsMatched(bigRow)
if err != nil {
return nil, errors.Trace(err)
}
if !matched && e.smallTableHasNull {
isNull = true
}
if e.anti && !isNull {
matched = !matched
}
// For the auxMode subquery, we return the row with a Datum indicating if it's a match,
// For the non-auxMode subquery, we return the matching row only.
if e.auxMode {
if isNull {
bigRow.Data = append(bigRow.Data, types.NewDatum(nil))
} else {
bigRow.Data = append(bigRow.Data, types.NewDatum(matched))
}
matched = true
}
if matched {
e.resultRows[0] = bigRow
return e.resultRows, nil
}
return nil, nil
}
// Next implements the Executor Next interface.
func (e *HashSemiJoinExec) Next() (*Row, error) {
if !e.prepared {
if err := e.prepare(); err != nil {
return nil, errors.Trace(err)
}
}
for {
bigRow, match, err := e.fetchBigRow()
if bigRow == nil || err != nil {
return bigRow, errors.Trace(err)
}
resultRows, err := e.doJoin(bigRow, match)
if err != nil {
return nil, errors.Trace(err)
}
if len(resultRows) > 0 {
return resultRows[0], nil
}
}
}
// ApplyJoinExec is the new logic of apply.
type ApplyJoinExec struct {
join joinExec
outerSchema []*expression.CorrelatedColumn
cursor int
resultRows []*Row
schema *expression.Schema
}
// Schema implements the Executor interface.
func (e *ApplyJoinExec) Schema() *expression.Schema {
return e.schema
}
// Close implements the Executor interface.
func (e *ApplyJoinExec) Close() error {
e.cursor = 0
e.resultRows = nil
return e.join.Close()
}
// Next implements the Executor interface.
func (e *ApplyJoinExec) Next() (*Row, error) {
for {
if e.cursor < len(e.resultRows) {
row := e.resultRows[e.cursor]
e.cursor++
return row, nil
}
bigRow, match, err := e.join.fetchBigRow()
if bigRow == nil || err != nil {
return nil, errors.Trace(err)
}
for _, col := range e.outerSchema {
*col.Data = bigRow.Data[col.Index]
}
err = e.join.prepare()
if err != nil {
return nil, errors.Trace(err)
}
e.resultRows, err = e.join.doJoin(bigRow, match)
if err != nil {
return nil, errors.Trace(err)
}
e.cursor = 0
}
}
| executor/join.go | 1 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.02505090832710266,
0.0010054289596155286,
0.00016575530753470957,
0.000173562511918135,
0.0031205182895064354
] |
{
"id": 2,
"code_window": [
"\ttime.Sleep(100 * time.Millisecond)\n",
"\tresult.Close()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (s *testSuite) TestHashJoinExecEncodeDecodeRow(c *C) {\n",
"\ttk := testkit.NewTestKit(c, s.store)\n",
"\ttk.MustExec(\"use test\")\n",
"\ttk.MustExec(\"drop table if exists t1\")\n",
"\ttk.MustExec(\"drop table if exists t2\")\n",
"\ttk.MustExec(\"create table t1 (id int)\")\n",
"\ttk.MustExec(\"create table t2 (id int, name varchar(255), ts timestamp)\")\n",
"\ttk.MustExec(\"insert into t1 values (1)\")\n",
"\ttk.MustExec(\"insert into t2 values (1, 'xxx', '2003-06-09 10:51:26')\")\n",
"\tresult := tk.MustQuery(\"select ts from t1 inner join t2 where t2.name = 'xxx'\")\n",
"\tresult.Check(testkit.Rows(\"2003-06-09 10:51:26\"))\n",
"}"
],
"file_path": "executor/join_test.go",
"type": "add",
"edit_start_line_idx": 553
} | // Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"io"
"github.com/golang/protobuf/proto"
)
// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
buffer, err := proto.Marshal(m)
if err != nil {
return 0, err
}
var buf [binary.MaxVarintLen32]byte
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength])
if err != nil {
return sync, err
}
n, err = w.Write(buffer)
return n + sync, err
}
| _vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.00022695724328514189,
0.00018237685435451567,
0.00016727286856621504,
0.00017365353414788842,
0.000022506348614115268
] |
{
"id": 2,
"code_window": [
"\ttime.Sleep(100 * time.Millisecond)\n",
"\tresult.Close()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (s *testSuite) TestHashJoinExecEncodeDecodeRow(c *C) {\n",
"\ttk := testkit.NewTestKit(c, s.store)\n",
"\ttk.MustExec(\"use test\")\n",
"\ttk.MustExec(\"drop table if exists t1\")\n",
"\ttk.MustExec(\"drop table if exists t2\")\n",
"\ttk.MustExec(\"create table t1 (id int)\")\n",
"\ttk.MustExec(\"create table t2 (id int, name varchar(255), ts timestamp)\")\n",
"\ttk.MustExec(\"insert into t1 values (1)\")\n",
"\ttk.MustExec(\"insert into t2 values (1, 'xxx', '2003-06-09 10:51:26')\")\n",
"\tresult := tk.MustQuery(\"select ts from t1 inner join t2 where t2.name = 'xxx'\")\n",
"\tresult.Check(testkit.Rows(\"2003-06-09 10:51:26\"))\n",
"}"
],
"file_path": "executor/join_test.go",
"type": "add",
"edit_start_line_idx": 553
} | // Copyright 2015 The parser Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run generate.go
//go:generate golex -o goscanner.go go.l
//go:generate golex -o scanner.go y.l
//go:generate go run generate.go -2
// Package parser implements a parser for yacc source files.
//
// Note: Rule.Body element's type
//
// int Eg. 65 represents literal 'A'
//
// string Eg. "Start" represents rule component Start
//
// *Action Mid rule action or rule semantic action
package parser
import (
"bytes"
"fmt"
"go/token"
"github.com/cznic/golex/lex"
)
const (
// ActionValueGo is used for a Go code fragment
ActionValueGo = iota
// ActionValueDlrDlr is used for $$.
ActionValueDlrDlr
// ActionValueDlrTagDlr is used for $<tag>$.
ActionValueDlrTagDlr
// ActionValueDlrNum is used for $num.
ActionValueDlrNum
// ActionValueDlrTagNum is used for $<tag>num.
ActionValueDlrTagNum
)
// ActionValue is an item of Action.Value
type ActionValue struct {
Num int // The number in $num.
Pos token.Pos // Position of the start of the ActionValue.
Src string // Source for this value.
Tag string // The tag in $<tag>$ or $<tag>num.
Type int // One of ActionValue{Go,DlrDlr,DlrTagDlr,DlrNum,DlrTagNum} constants.
}
// Token captures a lexem with position, value and comments, if any.
type Token struct {
Comments []string
Val string
File *token.File
lex.Char
}
// Pos retruns the token.Pos for t.
func (t *Token) Pos() token.Pos { return t.Char.Pos() }
// Position returns the token.Position for t
func (t *Token) Position() token.Position { return t.File.Position(t.Pos()) }
// Strings implements fmt.Stringer.
func (t *Token) String() string {
return fmt.Sprintf("%v: %v %q, Comments: %q", t.File.Position(t.Char.Pos()), yySymName(int(t.Char.Rune)), t.Val, t.Comments)
}
// Parse parses src as a single yacc source file fname and returns the
// corresponding Specification. If the source couldn't be read, the returned
// Specification is nil and the error indicates all of the specific failures.
func Parse(fset *token.FileSet, fname string, src []byte) (s *Specification, err error) {
r := bytes.NewBuffer(src)
file := fset.AddFile(fname, -1, len(src))
lx, err := newLexer(file, r)
if err != nil {
return nil, err
}
y := yyParse(lx)
n := len(lx.errors)
if y != 0 || n != 0 {
if n == 0 {
panic("internal error")
}
return nil, lx.errors
}
return lx.spec, nil
}
| _vendor/src/github.com/cznic/parser/yacc/api.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.14124539494514465,
0.014280836097896099,
0.00016578486247453839,
0.000172218686202541,
0.04232151806354523
] |
{
"id": 2,
"code_window": [
"\ttime.Sleep(100 * time.Millisecond)\n",
"\tresult.Close()\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (s *testSuite) TestHashJoinExecEncodeDecodeRow(c *C) {\n",
"\ttk := testkit.NewTestKit(c, s.store)\n",
"\ttk.MustExec(\"use test\")\n",
"\ttk.MustExec(\"drop table if exists t1\")\n",
"\ttk.MustExec(\"drop table if exists t2\")\n",
"\ttk.MustExec(\"create table t1 (id int)\")\n",
"\ttk.MustExec(\"create table t2 (id int, name varchar(255), ts timestamp)\")\n",
"\ttk.MustExec(\"insert into t1 values (1)\")\n",
"\ttk.MustExec(\"insert into t2 values (1, 'xxx', '2003-06-09 10:51:26')\")\n",
"\tresult := tk.MustQuery(\"select ts from t1 inner join t2 where t2.name = 'xxx'\")\n",
"\tresult.Check(testkit.Rows(\"2003-06-09 10:51:26\"))\n",
"}"
],
"file_path": "executor/join_test.go",
"type": "add",
"edit_start_line_idx": 553
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"fmt"
"math"
"math/rand"
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
goctx "golang.org/x/net/context"
)
const (
// NoJitter makes the backoff sequence strict exponential.
NoJitter = 1 + iota
// FullJitter applies random factors to strict exponential.
FullJitter
// EqualJitter is also randomized, but prevents very short sleeps.
EqualJitter
// DecorrJitter increases the maximum jitter based on the last random value.
DecorrJitter
)
// NewBackoffFn creates a backoff func which implements exponential backoff with
// optional jitters.
// See http://www.awsarchitectureblog.com/2015/03/backoff.html
func NewBackoffFn(base, cap, jitter int) func() int {
attempts := 0
lastSleep := base
return func() int {
var sleep int
switch jitter {
case NoJitter:
sleep = expo(base, cap, attempts)
case FullJitter:
v := expo(base, cap, attempts)
sleep = rand.Intn(v)
case EqualJitter:
v := expo(base, cap, attempts)
sleep = v/2 + rand.Intn(v/2)
case DecorrJitter:
sleep = int(math.Min(float64(cap), float64(base+rand.Intn(lastSleep*3-base))))
}
time.Sleep(time.Duration(sleep) * time.Millisecond)
attempts++
lastSleep = sleep
return lastSleep
}
}
func expo(base, cap, n int) int {
return int(math.Min(float64(cap), float64(base)*math.Pow(2.0, float64(n))))
}
type backoffType int
const (
boTiKVRPC backoffType = iota
boTxnLock
boTxnLockFast
boPDRPC
boRegionMiss
boServerBusy
)
func (t backoffType) createFn() func() int {
switch t {
case boTiKVRPC:
return NewBackoffFn(100, 2000, EqualJitter)
case boTxnLock:
return NewBackoffFn(200, 3000, EqualJitter)
case boTxnLockFast:
return NewBackoffFn(100, 3000, EqualJitter)
case boPDRPC:
return NewBackoffFn(500, 3000, EqualJitter)
case boRegionMiss:
return NewBackoffFn(100, 500, NoJitter)
case boServerBusy:
return NewBackoffFn(2000, 10000, EqualJitter)
}
return nil
}
func (t backoffType) String() string {
switch t {
case boTiKVRPC:
return "tikvRPC"
case boTxnLock:
return "txnLock"
case boTxnLockFast:
return "txnLockFast"
case boPDRPC:
return "pdRPC"
case boRegionMiss:
return "regionMiss"
case boServerBusy:
return "serverBusy"
}
return ""
}
// Maximum total sleep time(in ms) for kv/cop commands.
const (
copBuildTaskMaxBackoff = 5000
tsoMaxBackoff = 5000
scannerNextMaxBackoff = 15000
batchGetMaxBackoff = 15000
copNextMaxBackoff = 15000
getMaxBackoff = 15000
prewriteMaxBackoff = 15000
commitMaxBackoff = 15000
commitPrimaryMaxBackoff = -1
cleanupMaxBackoff = 15000
gcMaxBackoff = 100000
gcResolveLockMaxBackoff = 100000
rawkvMaxBackoff = 15000
)
// Backoffer is a utility for retrying queries.
type Backoffer struct {
fn map[backoffType]func() int
maxSleep int
totalSleep int
errors []error
ctx goctx.Context
types []backoffType
}
// NewBackoffer creates a Backoffer with maximum sleep time(in ms).
func NewBackoffer(maxSleep int, ctx goctx.Context) *Backoffer {
return &Backoffer{
maxSleep: maxSleep,
ctx: ctx,
}
}
// Backoff sleeps a while base on the backoffType and records the error message.
// It returns a retryable error if total sleep time exceeds maxSleep.
func (b *Backoffer) Backoff(typ backoffType, err error) error {
backoffCounter.WithLabelValues(typ.String()).Inc()
// Lazy initialize.
if b.fn == nil {
b.fn = make(map[backoffType]func() int)
}
f, ok := b.fn[typ]
if !ok {
f = typ.createFn()
b.fn[typ] = f
}
b.totalSleep += f()
b.types = append(b.types, typ)
log.Debugf("%v, retry later(totalSleep %dms, maxSleep %dms)", err, b.totalSleep, b.maxSleep)
b.errors = append(b.errors, err)
if b.maxSleep > 0 && b.totalSleep >= b.maxSleep {
errMsg := fmt.Sprintf("backoffer.maxSleep %dms is exceeded, errors:", b.maxSleep)
for i, err := range b.errors {
// Print only last 3 errors for non-DEBUG log levels.
if log.GetLogLevel() >= log.LOG_LEVEL_DEBUG || i >= len(b.errors)-3 {
errMsg += "\n" + err.Error()
}
}
return errors.Annotate(errors.New(errMsg), txnRetryableMark)
}
return nil
}
func (b *Backoffer) String() string {
if b.totalSleep == 0 {
return ""
}
return fmt.Sprintf(" backoff(%dms %s)", b.totalSleep, b.types)
}
// Fork creates a new Backoffer which keeps current Backoffer's sleep time and errors, and holds
// a child context of current Backoffer's context.
func (b *Backoffer) Fork() (*Backoffer, goctx.CancelFunc) {
ctx, cancel := goctx.WithCancel(b.ctx)
return &Backoffer{
maxSleep: b.maxSleep,
totalSleep: b.totalSleep,
errors: b.errors,
ctx: ctx,
}, cancel
}
| store/tikv/backoff.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.18995074927806854,
0.009917655028402805,
0.00016682845307514071,
0.00018141913460567594,
0.04028720781207085
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\tdata := row[colID]\n",
"\t\t\tft := distsql.FieldTypeFromPBColumn(col)\n",
"\t\t\t// TODO: Should use session's TimeZone instead of UTC.\n",
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, time.UTC)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t}\n",
"\t\t\tctx.eval.Row[colID] = datum\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, ctx.eval.TimeZone)\n"
],
"file_path": "store/localstore/local_region.go",
"type": "replace",
"edit_start_line_idx": 652
} | package localstore
import (
"bytes"
"container/heap"
"encoding/binary"
"sort"
"time"
"github.com/golang/protobuf/proto"
"github.com/juju/errors"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/distsql/xeval"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/types"
"github.com/pingcap/tipb/go-tipb"
)
// local region server.
type localRegion struct {
id int
store *dbStore
startKey []byte
endKey []byte
}
type regionRequest struct {
Tp int64
data []byte
startKey []byte
endKey []byte
ranges []kv.KeyRange
}
type regionResponse struct {
req *regionRequest
err error
data []byte
// If region missed some request key range, newStartKey and newEndKey is returned.
newStartKey []byte
newEndKey []byte
}
const chunkSize = 64
type sortRow struct {
key []types.Datum
meta tipb.RowMeta
data []byte
}
// topnSorter implements sort.Interface. When all rows have been processed, the topnSorter will sort the whole data in heap.
type topnSorter struct {
orderByItems []*tipb.ByItem
rows []*sortRow
err error
ctx *selectContext
}
func (t *topnSorter) Len() int {
return len(t.rows)
}
func (t *topnSorter) Swap(i, j int) {
t.rows[i], t.rows[j] = t.rows[j], t.rows[i]
}
func (t *topnSorter) Less(i, j int) bool {
for index, by := range t.orderByItems {
v1 := t.rows[i].key[index]
v2 := t.rows[j].key[index]
ret, err := v1.CompareDatum(t.ctx.sc, v2)
if err != nil {
t.err = errors.Trace(err)
return true
}
if by.Desc {
ret = -ret
}
if ret < 0 {
return true
} else if ret > 0 {
return false
}
}
return false
}
// topnHeap holds the top n elements using heap structure. It implements heap.Interface.
// When we insert a row, topnHeap will check if the row can become one of the top n element or not.
type topnHeap struct {
topnSorter
// totalCount is equal to the limit count, which means the max size of heap.
totalCount int
// heapSize means the current size of this heap.
heapSize int
}
func (t *topnHeap) Len() int {
return t.heapSize
}
func (t *topnHeap) Push(x interface{}) {
t.rows = append(t.rows, x.(*sortRow))
t.heapSize++
}
func (t *topnHeap) Pop() interface{} {
return nil
}
func (t *topnHeap) Less(i, j int) bool {
for index, by := range t.orderByItems {
v1 := t.rows[i].key[index]
v2 := t.rows[j].key[index]
ret, err := v1.CompareDatum(t.ctx.sc, v2)
if err != nil {
t.err = errors.Trace(err)
return true
}
if by.Desc {
ret = -ret
}
if ret > 0 {
return true
} else if ret < 0 {
return false
}
}
return false
}
// tryToAddRow tries to add a row to heap.
// When this row is not less than any rows in heap, it will never become the top n element.
// Then this function returns false.
func (t *topnHeap) tryToAddRow(row *sortRow) bool {
success := false
if t.heapSize == t.totalCount {
t.rows = append(t.rows, row)
// When this row is less than the top element, it will replace it and adjust the heap structure.
if t.Less(0, t.heapSize) {
t.Swap(0, t.heapSize)
heap.Fix(t, 0)
success = true
}
t.rows = t.rows[:t.heapSize]
} else {
heap.Push(t, row)
success = true
}
return success
}
type selectContext struct {
sel *tipb.SelectRequest
txn kv.Transaction
eval *xeval.Evaluator
whereColumns map[int64]*tipb.ColumnInfo
aggColumns map[int64]*tipb.ColumnInfo
topnColumns map[int64]*tipb.ColumnInfo
groups map[string]bool
groupKeys [][]byte
aggregates []*aggregateFuncExpr
topnHeap *topnHeap
keyRanges []kv.KeyRange
// TODO: Only one of these three flags can be true at the same time. We should set this as an enum var.
aggregate bool
descScan bool
topn bool
// Use for DecodeRow.
colTps map[int64]*types.FieldType
chunks []tipb.Chunk
sc *variable.StatementContext
}
func (rs *localRegion) Handle(req *regionRequest) (*regionResponse, error) {
resp := ®ionResponse{
req: req,
}
if req.Tp == kv.ReqTypeSelect || req.Tp == kv.ReqTypeIndex {
sel := new(tipb.SelectRequest)
err := proto.Unmarshal(req.data, sel)
if err != nil {
return nil, errors.Trace(err)
}
txn := newTxn(rs.store, kv.Version{Ver: uint64(sel.StartTs)})
ctx := &selectContext{
sel: sel,
txn: txn,
keyRanges: req.ranges,
sc: xeval.FlagsToStatementContext(sel.Flags),
}
loc := time.FixedZone("UTC", int(sel.TimeZoneOffset))
ctx.eval = xeval.NewEvaluator(ctx.sc, loc)
if sel.Where != nil {
ctx.whereColumns = make(map[int64]*tipb.ColumnInfo)
collectColumnsInExpr(sel.Where, ctx, ctx.whereColumns)
}
if len(sel.OrderBy) > 0 {
if sel.OrderBy[0].Expr == nil {
ctx.descScan = sel.OrderBy[0].Desc
} else {
if sel.Limit == nil {
return nil, errors.New("we don't support pushing down Sort without Limit")
}
ctx.topn = true
ctx.topnHeap = &topnHeap{
totalCount: int(*sel.Limit),
topnSorter: topnSorter{
orderByItems: sel.OrderBy,
ctx: ctx,
},
}
ctx.topnColumns = make(map[int64]*tipb.ColumnInfo)
for _, item := range sel.OrderBy {
collectColumnsInExpr(item.Expr, ctx, ctx.topnColumns)
}
for k := range ctx.whereColumns {
// It will be handled in where.
delete(ctx.topnColumns, k)
}
}
}
ctx.aggregate = len(sel.Aggregates) > 0 || len(sel.GetGroupBy()) > 0
if ctx.aggregate {
// compose aggregateFuncExpr
ctx.aggregates = make([]*aggregateFuncExpr, 0, len(sel.Aggregates))
ctx.aggColumns = make(map[int64]*tipb.ColumnInfo)
for _, agg := range sel.Aggregates {
aggExpr := &aggregateFuncExpr{expr: agg}
ctx.aggregates = append(ctx.aggregates, aggExpr)
collectColumnsInExpr(agg, ctx, ctx.aggColumns)
}
ctx.groups = make(map[string]bool)
ctx.groupKeys = make([][]byte, 0)
for _, item := range ctx.sel.GetGroupBy() {
collectColumnsInExpr(item.Expr, ctx, ctx.aggColumns)
}
for k := range ctx.whereColumns {
// It will be handled in where.
delete(ctx.aggColumns, k)
}
}
if req.Tp == kv.ReqTypeSelect {
err = rs.getRowsFromSelectReq(ctx)
} else {
// The PKHandle column info has been collected in ctx, so we can remove it in IndexInfo.
length := len(sel.IndexInfo.Columns)
if sel.IndexInfo.Columns[length-1].GetPkHandle() {
sel.IndexInfo.Columns = sel.IndexInfo.Columns[:length-1]
}
err = rs.getRowsFromIndexReq(ctx)
}
if ctx.topn {
rs.setTopNDataForCtx(ctx)
}
selResp := new(tipb.SelectResponse)
selResp.Error = toPBError(err)
selResp.Chunks = ctx.chunks
resp.err = err
data, err := proto.Marshal(selResp)
if err != nil {
return nil, errors.Trace(err)
}
resp.data = data
}
if bytes.Compare(rs.startKey, req.startKey) < 0 || bytes.Compare(rs.endKey, req.endKey) > 0 {
resp.newStartKey = rs.startKey
resp.newEndKey = rs.endKey
}
return resp, nil
}
func (rs *localRegion) setTopNDataForCtx(ctx *selectContext) {
sort.Sort(&ctx.topnHeap.topnSorter)
for _, row := range ctx.topnHeap.rows {
chunk := rs.getChunk(ctx)
chunk.RowsData = append(chunk.RowsData, row.data...)
chunk.RowsMeta = append(chunk.RowsMeta, row.meta)
}
}
func collectColumnsInExpr(expr *tipb.Expr, ctx *selectContext, collector map[int64]*tipb.ColumnInfo) error {
if expr == nil {
return nil
}
if expr.GetTp() == tipb.ExprType_ColumnRef {
_, i, err := codec.DecodeInt(expr.Val)
if err != nil {
return errors.Trace(err)
}
var columns []*tipb.ColumnInfo
if ctx.sel.TableInfo != nil {
columns = ctx.sel.TableInfo.Columns
} else {
columns = ctx.sel.IndexInfo.Columns
}
for _, c := range columns {
if c.GetColumnId() == i {
collector[i] = c
return nil
}
}
return xeval.ErrInvalid.Gen("column %d not found", i)
}
for _, child := range expr.Children {
err := collectColumnsInExpr(child, ctx, collector)
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (rs *localRegion) getRowsFromSelectReq(ctx *selectContext) error {
// Init ctx.colTps and use it to decode all the rows.
columns := ctx.sel.TableInfo.Columns
ctx.colTps = make(map[int64]*types.FieldType, len(columns))
for _, col := range columns {
if col.GetPkHandle() {
continue
}
ctx.colTps[col.GetColumnId()] = distsql.FieldTypeFromPBColumn(col)
}
kvRanges := rs.extractKVRanges(ctx)
limit := int64(-1)
if ctx.sel.Limit != nil {
limit = ctx.sel.GetLimit()
}
for _, ran := range kvRanges {
if limit == 0 {
break
}
count, err := rs.getRowsFromRange(ctx, ran, limit, ctx.descScan)
if err != nil {
return errors.Trace(err)
}
limit -= count
}
if ctx.aggregate {
return rs.getRowsFromAgg(ctx)
}
return nil
}
/*
* Convert aggregate partial result to rows.
* Data layout example:
* SQL: select count(c1), sum(c2), avg(c3) from t;
* Aggs: count(c1), sum(c2), avg(c3)
* Rows: groupKey1, count1, value2, count3, value3
* groupKey2, count1, value2, count3, value3
*/
func (rs *localRegion) getRowsFromAgg(ctx *selectContext) error {
for _, gk := range ctx.groupKeys {
chunk := rs.getChunk(ctx)
// Each aggregate partial result will be converted to one or two datums.
rowData := make([]types.Datum, 0, 1+2*len(ctx.aggregates))
// The first column is group key.
rowData = append(rowData, types.NewBytesDatum(gk))
for _, agg := range ctx.aggregates {
agg.currentGroup = gk
ds, err := agg.toDatums(ctx)
if err != nil {
return errors.Trace(err)
}
rowData = append(rowData, ds...)
}
var err error
beforeLen := len(chunk.RowsData)
chunk.RowsData, err = codec.EncodeValue(chunk.RowsData, rowData...)
if err != nil {
return errors.Trace(err)
}
var rowMeta tipb.RowMeta
rowMeta.Length = int64(len(chunk.RowsData) - beforeLen)
chunk.RowsMeta = append(chunk.RowsMeta, rowMeta)
}
return nil
}
// extractKVRanges extracts kv.KeyRanges slice from ctx.keyRanges, and also returns if it is in descending order.
func (rs *localRegion) extractKVRanges(ctx *selectContext) (kvRanges []kv.KeyRange) {
for _, kran := range ctx.keyRanges {
upperKey := kran.EndKey
if bytes.Compare(upperKey, rs.startKey) <= 0 {
continue
}
lowerKey := kran.StartKey
if bytes.Compare(lowerKey, rs.endKey) >= 0 {
break
}
var kvr kv.KeyRange
if bytes.Compare(lowerKey, rs.startKey) <= 0 {
kvr.StartKey = rs.startKey
} else {
kvr.StartKey = lowerKey
}
if bytes.Compare(upperKey, rs.endKey) <= 0 {
kvr.EndKey = upperKey
} else {
kvr.EndKey = rs.endKey
}
kvRanges = append(kvRanges, kvr)
}
if ctx.descScan {
reverseKVRanges(kvRanges)
}
return
}
func (rs *localRegion) getRowsFromRange(ctx *selectContext, ran kv.KeyRange, limit int64, desc bool) (count int64, err error) {
if limit == 0 {
return 0, nil
}
if ran.IsPoint() {
var value []byte
value, err = ctx.txn.Get(ran.StartKey)
if terror.ErrorEqual(err, kv.ErrNotExist) {
return 0, nil
} else if err != nil {
return 0, errors.Trace(err)
}
var h int64
h, err = tablecodec.DecodeRowKey(ran.StartKey)
if err != nil {
return 0, errors.Trace(err)
}
gotRow, err1 := rs.handleRowData(ctx, h, value)
if err1 != nil {
return 0, errors.Trace(err1)
}
if gotRow {
count++
}
return
}
var seekKey kv.Key
if desc {
seekKey = ran.EndKey
} else {
seekKey = ran.StartKey
}
for {
if limit == 0 {
break
}
var (
it kv.Iterator
err error
)
if desc {
it, err = ctx.txn.SeekReverse(seekKey)
} else {
it, err = ctx.txn.Seek(seekKey)
}
if err != nil {
return 0, errors.Trace(err)
}
if !it.Valid() {
break
}
if desc {
if it.Key().Cmp(ran.StartKey) < 0 {
break
}
seekKey = tablecodec.TruncateToRowKeyLen(it.Key())
} else {
if it.Key().Cmp(ran.EndKey) >= 0 {
break
}
seekKey = it.Key().PrefixNext()
}
h, err := tablecodec.DecodeRowKey(it.Key())
if err != nil {
return 0, errors.Trace(err)
}
gotRow, err := rs.handleRowData(ctx, h, it.Value())
if err != nil {
return 0, errors.Trace(err)
}
if gotRow {
limit--
count++
}
}
return count, nil
}
// handleRowData deals with raw row data:
// 1. Decodes row from raw byte slice.
// 2. Checks if it fit where condition.
// 3. Update aggregate functions.
// returns true if got a row.
func (rs *localRegion) handleRowData(ctx *selectContext, handle int64, value []byte) (bool, error) {
columns := ctx.sel.TableInfo.Columns
values, err := rs.getRowData(value, ctx.colTps)
if err != nil {
return false, errors.Trace(err)
}
// Fill handle and null columns.
for _, col := range columns {
if col.GetPkHandle() {
var handleDatum types.Datum
if mysql.HasUnsignedFlag(uint(col.Flag)) {
// PK column is Unsigned
handleDatum = types.NewUintDatum(uint64(handle))
} else {
handleDatum = types.NewIntDatum(handle)
}
handleData, err1 := codec.EncodeValue(nil, handleDatum)
if err1 != nil {
return false, errors.Trace(err1)
}
values[col.GetColumnId()] = handleData
} else {
_, ok := values[col.GetColumnId()]
if ok {
continue
}
if len(col.DefaultVal) > 0 {
values[col.GetColumnId()] = col.DefaultVal
continue
}
if mysql.HasNotNullFlag(uint(col.Flag)) {
return false, errors.New("Miss column")
}
values[col.GetColumnId()] = []byte{codec.NilFlag}
}
}
return rs.valuesToRow(ctx, handle, values)
}
// evalTopN evaluates the top n elements from the data. The input receives a record including its handle and data.
// And this function will check if this record can replace one of the old records.
func (rs *localRegion) evalTopN(ctx *selectContext, handle int64, values map[int64][]byte, columns []*tipb.ColumnInfo) error {
err := rs.setColumnValueToCtx(ctx, handle, values, ctx.topnColumns)
if err != nil {
return errors.Trace(err)
}
newRow := &sortRow{
meta: tipb.RowMeta{Handle: handle},
}
for _, item := range ctx.topnHeap.orderByItems {
result, err := ctx.eval.Eval(item.Expr)
if err != nil {
return errors.Trace(err)
}
newRow.key = append(newRow.key, result)
}
if ctx.topnHeap.tryToAddRow(newRow) {
for _, col := range columns {
val := values[col.GetColumnId()]
newRow.data = append(newRow.data, val...)
newRow.meta.Length += int64(len(val))
}
}
return errors.Trace(ctx.topnHeap.err)
}
func (rs *localRegion) valuesToRow(ctx *selectContext, handle int64, values map[int64][]byte) (bool, error) {
var columns []*tipb.ColumnInfo
if ctx.sel.TableInfo != nil {
columns = ctx.sel.TableInfo.Columns
} else {
columns = ctx.sel.IndexInfo.Columns
}
// Evaluate where
match, err := rs.evalWhereForRow(ctx, handle, values)
if err != nil {
return false, errors.Trace(err)
}
if !match {
return false, nil
}
if ctx.topn {
return false, errors.Trace(rs.evalTopN(ctx, handle, values, columns))
}
if ctx.aggregate {
// Update aggregate functions.
err = rs.aggregate(ctx, handle, values)
if err != nil {
return false, errors.Trace(err)
}
return false, nil
}
chunk := rs.getChunk(ctx)
var rowMeta tipb.RowMeta
rowMeta.Handle = handle
// If without aggregate functions, just return raw row data.
for _, col := range columns {
val := values[col.GetColumnId()]
rowMeta.Length += int64(len(val))
chunk.RowsData = append(chunk.RowsData, val...)
}
chunk.RowsMeta = append(chunk.RowsMeta, rowMeta)
return true, nil
}
func (rs *localRegion) getChunk(ctx *selectContext) *tipb.Chunk {
chunkLen := len(ctx.chunks)
if chunkLen == 0 || len(ctx.chunks[chunkLen-1].RowsMeta) >= chunkSize {
newChunk := tipb.Chunk{}
ctx.chunks = append(ctx.chunks, newChunk)
}
return &ctx.chunks[len(ctx.chunks)-1]
}
func (rs *localRegion) getRowData(value []byte, colTps map[int64]*types.FieldType) (map[int64][]byte, error) {
res, err := tablecodec.CutRow(value, colTps)
if err != nil {
return nil, errors.Trace(err)
}
if res == nil {
res = make(map[int64][]byte, len(colTps))
}
return res, nil
}
// Put column values into ctx, the values will be used for expr evaluation.
func (rs *localRegion) setColumnValueToCtx(ctx *selectContext, h int64, row map[int64][]byte, cols map[int64]*tipb.ColumnInfo) error {
for colID, col := range cols {
if col.GetPkHandle() {
if mysql.HasUnsignedFlag(uint(col.GetFlag())) {
ctx.eval.Row[colID] = types.NewUintDatum(uint64(h))
} else {
ctx.eval.Row[colID] = types.NewIntDatum(h)
}
} else {
data := row[colID]
ft := distsql.FieldTypeFromPBColumn(col)
// TODO: Should use session's TimeZone instead of UTC.
datum, err := tablecodec.DecodeColumnValue(data, ft, time.UTC)
if err != nil {
return errors.Trace(err)
}
ctx.eval.Row[colID] = datum
}
}
return nil
}
func (rs *localRegion) evalWhereForRow(ctx *selectContext, h int64, row map[int64][]byte) (bool, error) {
if ctx.sel.Where == nil {
return true, nil
}
err := rs.setColumnValueToCtx(ctx, h, row, ctx.whereColumns)
if err != nil {
return false, errors.Trace(err)
}
result, err := ctx.eval.Eval(ctx.sel.Where)
if err != nil {
return false, errors.Trace(err)
}
if result.IsNull() {
return false, nil
}
boolResult, err := result.ToBool(ctx.sc)
if err != nil {
return false, errors.Trace(err)
}
return boolResult == 1, nil
}
func toPBError(err error) *tipb.Error {
if err == nil {
return nil
}
perr := new(tipb.Error)
code := int32(1)
perr.Code = code
errStr := err.Error()
perr.Msg = errStr
return perr
}
func (rs *localRegion) getRowsFromIndexReq(ctx *selectContext) error {
kvRanges := rs.extractKVRanges(ctx)
limit := int64(-1)
if ctx.sel.Limit != nil {
limit = ctx.sel.GetLimit()
}
for _, ran := range kvRanges {
if limit == 0 {
break
}
count, err := rs.getIndexRowFromRange(ctx, ran, ctx.descScan, limit)
if err != nil {
return errors.Trace(err)
}
limit -= int64(count)
}
if ctx.aggregate {
return rs.getRowsFromAgg(ctx)
}
return nil
}
func reverseKVRanges(kvRanges []kv.KeyRange) {
for i := 0; i < len(kvRanges)/2; i++ {
j := len(kvRanges) - i - 1
kvRanges[i], kvRanges[j] = kvRanges[j], kvRanges[i]
}
}
func (rs *localRegion) getIndexRowFromRange(ctx *selectContext, ran kv.KeyRange, desc bool, limit int64) (count int64, err error) {
idxInfo := ctx.sel.IndexInfo
txn := ctx.txn
var seekKey kv.Key
if desc {
seekKey = ran.EndKey
} else {
seekKey = ran.StartKey
}
ids := make([]int64, len(idxInfo.Columns))
for i, col := range idxInfo.Columns {
ids[i] = col.GetColumnId()
}
for {
if limit == 0 {
break
}
var it kv.Iterator
if desc {
it, err = txn.SeekReverse(seekKey)
if err != nil {
return 0, errors.Trace(err)
}
seekKey = it.Key()
} else {
it, err = txn.Seek(seekKey)
if err != nil {
return 0, errors.Trace(err)
}
seekKey = it.Key().PrefixNext()
}
if !it.Valid() {
break
}
if desc {
if it.Key().Cmp(ran.StartKey) < 0 {
break
}
} else {
if it.Key().Cmp(ran.EndKey) >= 0 {
break
}
}
values, b, err1 := tablecodec.CutIndexKey(it.Key(), ids)
if err1 != nil {
return 0, errors.Trace(err1)
}
var handle int64
if len(b) > 0 {
var handleDatum types.Datum
_, handleDatum, err = codec.DecodeOne(b)
if err != nil {
return 0, errors.Trace(err)
}
handle = handleDatum.GetInt64()
} else {
handle, err = decodeHandle(it.Value())
if err != nil {
return 0, errors.Trace(err)
}
}
gotRow, err := rs.valuesToRow(ctx, handle, values)
if err != nil {
return 0, errors.Trace(err)
}
if gotRow {
limit--
count++
}
}
return count, nil
}
func decodeHandle(data []byte) (int64, error) {
var h int64
buf := bytes.NewBuffer(data)
err := binary.Read(buf, binary.BigEndian, &h)
return h, errors.Trace(err)
}
func buildLocalRegionServers(store *dbStore) []*localRegion {
return []*localRegion{
{
id: 1,
store: store,
startKey: []byte(""),
endKey: []byte("t"),
},
{
id: 2,
store: store,
startKey: []byte("t"),
endKey: []byte("u"),
},
{
id: 3,
store: store,
startKey: []byte("u"),
endKey: []byte("z"),
},
}
}
func isDefaultNull(err error, col *tipb.ColumnInfo) bool {
return terror.ErrorEqual(err, kv.ErrNotExist) && !mysql.HasNotNullFlag(uint(col.GetFlag()))
}
| store/localstore/local_region.go | 1 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.9983590245246887,
0.012632719241082668,
0.00015785143477842212,
0.00018262758385390043,
0.10820837318897247
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\tdata := row[colID]\n",
"\t\t\tft := distsql.FieldTypeFromPBColumn(col)\n",
"\t\t\t// TODO: Should use session's TimeZone instead of UTC.\n",
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, time.UTC)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t}\n",
"\t\t\tctx.eval.Row[colID] = datum\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, ctx.eval.TimeZone)\n"
],
"file_path": "store/localstore/local_region.go",
"type": "replace",
"edit_start_line_idx": 652
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"github.com/juju/errors"
"github.com/pingcap/tidb"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/types"
)
// TiDBDriver implements IDriver.
type TiDBDriver struct {
store kv.Storage
}
// NewTiDBDriver creates a new TiDBDriver.
func NewTiDBDriver(store kv.Storage) *TiDBDriver {
driver := &TiDBDriver{
store: store,
}
return driver
}
// TiDBContext implements QueryCtx.
type TiDBContext struct {
session tidb.Session
currentDB string
stmts map[int]*TiDBStatement
}
// TiDBStatement implements PreparedStatement.
type TiDBStatement struct {
id uint32
numParams int
boundParams [][]byte
paramsType []byte
ctx *TiDBContext
}
// ID implements PreparedStatement ID method.
func (ts *TiDBStatement) ID() int {
return int(ts.id)
}
// Execute implements PreparedStatement Execute method.
func (ts *TiDBStatement) Execute(args ...interface{}) (rs ResultSet, err error) {
tidbRecordset, err := ts.ctx.session.ExecutePreparedStmt(ts.id, args...)
if err != nil {
return nil, errors.Trace(err)
}
if tidbRecordset == nil {
return
}
rs = &tidbResultSet{
recordSet: tidbRecordset,
}
return
}
// AppendParam implements PreparedStatement AppendParam method.
func (ts *TiDBStatement) AppendParam(paramID int, data []byte) error {
if paramID >= len(ts.boundParams) {
return mysql.NewErr(mysql.ErrWrongArguments, "stmt_send_longdata")
}
ts.boundParams[paramID] = append(ts.boundParams[paramID], data...)
return nil
}
// NumParams implements PreparedStatement NumParams method.
func (ts *TiDBStatement) NumParams() int {
return ts.numParams
}
// BoundParams implements PreparedStatement BoundParams method.
func (ts *TiDBStatement) BoundParams() [][]byte {
return ts.boundParams
}
// SetParamsType implements PreparedStatement SetParamsType method.
func (ts *TiDBStatement) SetParamsType(paramsType []byte) {
ts.paramsType = paramsType
}
// GetParamsType implements PreparedStatement GetParamsType method.
func (ts *TiDBStatement) GetParamsType() []byte {
return ts.paramsType
}
// Reset implements PreparedStatement Reset method.
func (ts *TiDBStatement) Reset() {
for i := range ts.boundParams {
ts.boundParams[i] = nil
}
}
// Close implements PreparedStatement Close method.
func (ts *TiDBStatement) Close() error {
//TODO close at tidb level
err := ts.ctx.session.DropPreparedStmt(ts.id)
if err != nil {
return errors.Trace(err)
}
delete(ts.ctx.stmts, int(ts.id))
return nil
}
// OpenCtx implements IDriver.
func (qd *TiDBDriver) OpenCtx(connID uint64, capability uint32, collation uint8, dbname string) (QueryCtx, error) {
session, err := tidb.CreateSession(qd.store)
if err != nil {
return nil, errors.Trace(err)
}
session.SetClientCapability(capability)
session.SetConnectionID(connID)
if dbname != "" {
_, err = session.Execute("use " + dbname)
if err != nil {
return nil, errors.Trace(err)
}
}
tc := &TiDBContext{
session: session,
currentDB: dbname,
stmts: make(map[int]*TiDBStatement),
}
return tc, nil
}
// Status implements QueryCtx Status method.
func (tc *TiDBContext) Status() uint16 {
return tc.session.Status()
}
// LastInsertID implements QueryCtx LastInsertID method.
func (tc *TiDBContext) LastInsertID() uint64 {
return tc.session.LastInsertID()
}
// Value implements QueryCtx Value method.
func (tc *TiDBContext) Value(key fmt.Stringer) interface{} {
return tc.session.Value(key)
}
// SetValue implements QueryCtx SetValue method.
func (tc *TiDBContext) SetValue(key fmt.Stringer, value interface{}) {
tc.session.SetValue(key, value)
}
// CommitTxn implements QueryCtx CommitTxn method.
func (tc *TiDBContext) CommitTxn() error {
return tc.session.CommitTxn()
}
// RollbackTxn implements QueryCtx RollbackTxn method.
func (tc *TiDBContext) RollbackTxn() error {
return tc.session.RollbackTxn()
}
// AffectedRows implements QueryCtx AffectedRows method.
func (tc *TiDBContext) AffectedRows() uint64 {
return tc.session.AffectedRows()
}
// CurrentDB implements QueryCtx CurrentDB method.
func (tc *TiDBContext) CurrentDB() string {
return tc.currentDB
}
// WarningCount implements QueryCtx WarningCount method.
func (tc *TiDBContext) WarningCount() uint16 {
return tc.session.GetSessionVars().StmtCtx.WarningCount()
}
// Execute implements QueryCtx Execute method.
func (tc *TiDBContext) Execute(sql string) (rs []ResultSet, err error) {
rsList, err := tc.session.Execute(sql)
if err != nil {
return
}
if len(rsList) == 0 { // result ok
return
}
rs = make([]ResultSet, len(rsList))
for i := 0; i < len(rsList); i++ {
rs[i] = &tidbResultSet{
recordSet: rsList[i],
}
}
return
}
// SetSessionManager implements the QueryCtx interface.
func (tc *TiDBContext) SetSessionManager(sm util.SessionManager) {
tc.session.SetSessionManager(sm)
}
// SetClientCapability implements QueryCtx SetClientCapability method.
func (tc *TiDBContext) SetClientCapability(flags uint32) {
tc.session.SetClientCapability(flags)
}
// Close implements QueryCtx Close method.
func (tc *TiDBContext) Close() (err error) {
return tc.session.Close()
}
// Auth implements QueryCtx Auth method.
func (tc *TiDBContext) Auth(user string, auth []byte, salt []byte) bool {
return tc.session.Auth(user, auth, salt)
}
// FieldList implements QueryCtx FieldList method.
func (tc *TiDBContext) FieldList(table string) (colums []*ColumnInfo, err error) {
rs, err := tc.Execute("SELECT * FROM `" + table + "` LIMIT 0")
if err != nil {
return nil, errors.Trace(err)
}
colums, err = rs[0].Columns()
if err != nil {
return nil, errors.Trace(err)
}
return
}
// GetStatement implements QueryCtx GetStatement method.
func (tc *TiDBContext) GetStatement(stmtID int) PreparedStatement {
tcStmt := tc.stmts[stmtID]
if tcStmt != nil {
return tcStmt
}
return nil
}
// Prepare implements QueryCtx Prepare method.
func (tc *TiDBContext) Prepare(sql string) (statement PreparedStatement, columns, params []*ColumnInfo, err error) {
stmtID, paramCount, fields, err := tc.session.PrepareStmt(sql)
if err != nil {
return
}
stmt := &TiDBStatement{
id: stmtID,
numParams: paramCount,
boundParams: make([][]byte, paramCount),
ctx: tc,
}
statement = stmt
columns = make([]*ColumnInfo, len(fields))
for i := range fields {
columns[i] = convertColumnInfo(fields[i])
}
params = make([]*ColumnInfo, paramCount)
for i := range params {
params[i] = &ColumnInfo{
Type: mysql.TypeBlob,
}
}
tc.stmts[int(stmtID)] = stmt
return
}
// ShowProcess implements QueryCtx ShowProcess method.
func (tc *TiDBContext) ShowProcess() util.ProcessInfo {
return tc.session.ShowProcess()
}
// Cancel implements QueryCtx Cancel method.
func (tc *TiDBContext) Cancel() {
tc.session.Cancel()
}
type tidbResultSet struct {
recordSet ast.RecordSet
}
func (trs *tidbResultSet) Next() ([]types.Datum, error) {
row, err := trs.recordSet.Next()
if err != nil {
return nil, errors.Trace(err)
}
if row != nil {
return row.Data, nil
}
return nil, nil
}
func (trs *tidbResultSet) Close() error {
return trs.recordSet.Close()
}
func (trs *tidbResultSet) Columns() ([]*ColumnInfo, error) {
fields, err := trs.recordSet.Fields()
if err != nil {
return nil, errors.Trace(err)
}
var columns []*ColumnInfo
for _, v := range fields {
columns = append(columns, convertColumnInfo(v))
}
return columns, nil
}
func convertColumnInfo(fld *ast.ResultField) (ci *ColumnInfo) {
ci = new(ColumnInfo)
ci.Name = fld.ColumnAsName.O
ci.OrgName = fld.Column.Name.O
ci.Table = fld.TableAsName.O
if fld.Table != nil {
ci.OrgTable = fld.Table.Name.O
}
ci.Schema = fld.DBName.O
ci.Flag = uint16(fld.Column.Flag)
ci.Charset = uint16(mysql.CharsetIDs[fld.Column.Charset])
if fld.Column.Flen == types.UnspecifiedLength {
ci.ColumnLength = 0
} else {
ci.ColumnLength = uint32(fld.Column.Flen)
}
if fld.Column.Decimal == types.UnspecifiedLength {
ci.Decimal = 0
} else {
ci.Decimal = uint8(fld.Column.Decimal)
}
ci.Type = uint8(fld.Column.Tp)
// Keep things compatible for old clients.
// Refer to mysql-server/sql/protocol.cc send_result_set_metadata()
if ci.Type == mysql.TypeVarchar {
ci.Type = mysql.TypeVarString
}
return
}
| server/driver_tidb.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.002478579059243202,
0.0004334005352575332,
0.00016474325093440711,
0.0001788327790563926,
0.0005318112089298666
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\tdata := row[colID]\n",
"\t\t\tft := distsql.FieldTypeFromPBColumn(col)\n",
"\t\t\t// TODO: Should use session's TimeZone instead of UTC.\n",
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, time.UTC)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t}\n",
"\t\t\tctx.eval.Row[colID] = datum\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, ctx.eval.TimeZone)\n"
],
"file_path": "store/localstore/local_region.go",
"type": "replace",
"edit_start_line_idx": 652
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package localstore
import (
"time"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/localstore/engine"
"github.com/pingcap/tidb/util/testleak"
)
var _ = Suite(&testLocalstoreCompactorSuite{})
type testLocalstoreCompactorSuite struct {
}
func count(db engine.DB) int {
var k kv.Key
totalCnt := 0
for {
var err error
k, _, err = db.Seek(k)
if err != nil {
break
}
k = k.Next()
totalCnt++
}
return totalCnt
}
func (s *testLocalstoreCompactorSuite) TestCompactor(c *C) {
defer testleak.AfterTest(c)()
store := createMemStore(time.Now().Nanosecond())
db := store.(*dbStore).db
store.(*dbStore).compactor.Stop()
policy := compactPolicy{
SafePoint: 500,
BatchDeleteCnt: 1,
TriggerInterval: 100 * time.Millisecond,
}
compactor := newLocalCompactor(policy, db)
store.(*dbStore).compactor = compactor
compactor.Start()
txn, _ := store.Begin()
txn.Set([]byte("a"), []byte("1"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("a"), []byte("2"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("a"), []byte("3"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("a"), []byte("3"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("a"), []byte("4"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("a"), []byte("5"))
txn.Commit()
t := count(db)
c.Assert(t, Equals, 6)
// Simulating timeout
time.Sleep(1 * time.Second)
// Touch a, tigger GC
txn, _ = store.Begin()
txn.Set([]byte("a"), []byte("b"))
txn.Commit()
time.Sleep(1 * time.Second)
// Do background GC
t = count(db)
c.Assert(t, Equals, 2)
err := store.Close()
c.Assert(err, IsNil)
}
func (s *testLocalstoreCompactorSuite) TestGetAllVersions(c *C) {
defer testleak.AfterTest(c)()
store := createMemStore(time.Now().Nanosecond())
compactor := store.(*dbStore).compactor
txn, _ := store.Begin()
txn.Set([]byte("a"), []byte("1"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("a"), []byte("2"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("b"), []byte("1"))
txn.Commit()
txn, _ = store.Begin()
txn.Set([]byte("b"), []byte("2"))
txn.Commit()
keys, err := compactor.getAllVersions([]byte("a"))
c.Assert(err, IsNil)
c.Assert(keys, HasLen, 2)
err = store.Close()
c.Assert(err, IsNil)
}
// TestStartStop is to test `Panic: sync: WaitGroup is reused before previous Wait has returned`
// in Stop function.
func (s *testLocalstoreCompactorSuite) TestStartStop(c *C) {
defer testleak.AfterTest(c)()
store := createMemStore(time.Now().Nanosecond())
db := store.(*dbStore).db
for i := 0; i < 10000; i++ {
policy := compactPolicy{
SafePoint: 500,
BatchDeleteCnt: 1,
TriggerInterval: 100 * time.Millisecond,
}
compactor := newLocalCompactor(policy, db)
compactor.Start()
compactor.Stop()
c.Logf("Test compactor stop and start %d times", i)
}
err := store.Close()
c.Assert(err, IsNil)
}
| store/localstore/compactor_test.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.0001788327790563926,
0.0001728453644318506,
0.00016207392036449164,
0.00017349763948004693,
0.000004703015292761847
] |
{
"id": 3,
"code_window": [
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\tdata := row[colID]\n",
"\t\t\tft := distsql.FieldTypeFromPBColumn(col)\n",
"\t\t\t// TODO: Should use session's TimeZone instead of UTC.\n",
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, time.UTC)\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn errors.Trace(err)\n",
"\t\t\t}\n",
"\t\t\tctx.eval.Row[colID] = datum\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tdatum, err := tablecodec.DecodeColumnValue(data, ft, ctx.eval.TimeZone)\n"
],
"file_path": "store/localstore/local_region.go",
"type": "replace",
"edit_start_line_idx": 652
} | // mksysnum_netbsd.pl
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
// +build arm,netbsd
package unix
const (
SYS_EXIT = 1 // { void|sys||exit(int rval); }
SYS_FORK = 2 // { int|sys||fork(void); }
SYS_READ = 3 // { ssize_t|sys||read(int fd, void *buf, size_t nbyte); }
SYS_WRITE = 4 // { ssize_t|sys||write(int fd, const void *buf, size_t nbyte); }
SYS_OPEN = 5 // { int|sys||open(const char *path, int flags, ... mode_t mode); }
SYS_CLOSE = 6 // { int|sys||close(int fd); }
SYS_LINK = 9 // { int|sys||link(const char *path, const char *link); }
SYS_UNLINK = 10 // { int|sys||unlink(const char *path); }
SYS_CHDIR = 12 // { int|sys||chdir(const char *path); }
SYS_FCHDIR = 13 // { int|sys||fchdir(int fd); }
SYS_CHMOD = 15 // { int|sys||chmod(const char *path, mode_t mode); }
SYS_CHOWN = 16 // { int|sys||chown(const char *path, uid_t uid, gid_t gid); }
SYS_BREAK = 17 // { int|sys||obreak(char *nsize); }
SYS_GETPID = 20 // { pid_t|sys||getpid_with_ppid(void); }
SYS_UNMOUNT = 22 // { int|sys||unmount(const char *path, int flags); }
SYS_SETUID = 23 // { int|sys||setuid(uid_t uid); }
SYS_GETUID = 24 // { uid_t|sys||getuid_with_euid(void); }
SYS_GETEUID = 25 // { uid_t|sys||geteuid(void); }
SYS_PTRACE = 26 // { int|sys||ptrace(int req, pid_t pid, void *addr, int data); }
SYS_RECVMSG = 27 // { ssize_t|sys||recvmsg(int s, struct msghdr *msg, int flags); }
SYS_SENDMSG = 28 // { ssize_t|sys||sendmsg(int s, const struct msghdr *msg, int flags); }
SYS_RECVFROM = 29 // { ssize_t|sys||recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); }
SYS_ACCEPT = 30 // { int|sys||accept(int s, struct sockaddr *name, socklen_t *anamelen); }
SYS_GETPEERNAME = 31 // { int|sys||getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); }
SYS_GETSOCKNAME = 32 // { int|sys||getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); }
SYS_ACCESS = 33 // { int|sys||access(const char *path, int flags); }
SYS_CHFLAGS = 34 // { int|sys||chflags(const char *path, u_long flags); }
SYS_FCHFLAGS = 35 // { int|sys||fchflags(int fd, u_long flags); }
SYS_SYNC = 36 // { void|sys||sync(void); }
SYS_KILL = 37 // { int|sys||kill(pid_t pid, int signum); }
SYS_GETPPID = 39 // { pid_t|sys||getppid(void); }
SYS_DUP = 41 // { int|sys||dup(int fd); }
SYS_PIPE = 42 // { int|sys||pipe(void); }
SYS_GETEGID = 43 // { gid_t|sys||getegid(void); }
SYS_PROFIL = 44 // { int|sys||profil(char *samples, size_t size, u_long offset, u_int scale); }
SYS_KTRACE = 45 // { int|sys||ktrace(const char *fname, int ops, int facs, pid_t pid); }
SYS_GETGID = 47 // { gid_t|sys||getgid_with_egid(void); }
SYS___GETLOGIN = 49 // { int|sys||__getlogin(char *namebuf, size_t namelen); }
SYS___SETLOGIN = 50 // { int|sys||__setlogin(const char *namebuf); }
SYS_ACCT = 51 // { int|sys||acct(const char *path); }
SYS_IOCTL = 54 // { int|sys||ioctl(int fd, u_long com, ... void *data); }
SYS_REVOKE = 56 // { int|sys||revoke(const char *path); }
SYS_SYMLINK = 57 // { int|sys||symlink(const char *path, const char *link); }
SYS_READLINK = 58 // { ssize_t|sys||readlink(const char *path, char *buf, size_t count); }
SYS_EXECVE = 59 // { int|sys||execve(const char *path, char * const *argp, char * const *envp); }
SYS_UMASK = 60 // { mode_t|sys||umask(mode_t newmask); }
SYS_CHROOT = 61 // { int|sys||chroot(const char *path); }
SYS_VFORK = 66 // { int|sys||vfork(void); }
SYS_SBRK = 69 // { int|sys||sbrk(intptr_t incr); }
SYS_SSTK = 70 // { int|sys||sstk(int incr); }
SYS_VADVISE = 72 // { int|sys||ovadvise(int anom); }
SYS_MUNMAP = 73 // { int|sys||munmap(void *addr, size_t len); }
SYS_MPROTECT = 74 // { int|sys||mprotect(void *addr, size_t len, int prot); }
SYS_MADVISE = 75 // { int|sys||madvise(void *addr, size_t len, int behav); }
SYS_MINCORE = 78 // { int|sys||mincore(void *addr, size_t len, char *vec); }
SYS_GETGROUPS = 79 // { int|sys||getgroups(int gidsetsize, gid_t *gidset); }
SYS_SETGROUPS = 80 // { int|sys||setgroups(int gidsetsize, const gid_t *gidset); }
SYS_GETPGRP = 81 // { int|sys||getpgrp(void); }
SYS_SETPGID = 82 // { int|sys||setpgid(pid_t pid, pid_t pgid); }
SYS_DUP2 = 90 // { int|sys||dup2(int from, int to); }
SYS_FCNTL = 92 // { int|sys||fcntl(int fd, int cmd, ... void *arg); }
SYS_FSYNC = 95 // { int|sys||fsync(int fd); }
SYS_SETPRIORITY = 96 // { int|sys||setpriority(int which, id_t who, int prio); }
SYS_CONNECT = 98 // { int|sys||connect(int s, const struct sockaddr *name, socklen_t namelen); }
SYS_GETPRIORITY = 100 // { int|sys||getpriority(int which, id_t who); }
SYS_BIND = 104 // { int|sys||bind(int s, const struct sockaddr *name, socklen_t namelen); }
SYS_SETSOCKOPT = 105 // { int|sys||setsockopt(int s, int level, int name, const void *val, socklen_t valsize); }
SYS_LISTEN = 106 // { int|sys||listen(int s, int backlog); }
SYS_GETSOCKOPT = 118 // { int|sys||getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); }
SYS_READV = 120 // { ssize_t|sys||readv(int fd, const struct iovec *iovp, int iovcnt); }
SYS_WRITEV = 121 // { ssize_t|sys||writev(int fd, const struct iovec *iovp, int iovcnt); }
SYS_FCHOWN = 123 // { int|sys||fchown(int fd, uid_t uid, gid_t gid); }
SYS_FCHMOD = 124 // { int|sys||fchmod(int fd, mode_t mode); }
SYS_SETREUID = 126 // { int|sys||setreuid(uid_t ruid, uid_t euid); }
SYS_SETREGID = 127 // { int|sys||setregid(gid_t rgid, gid_t egid); }
SYS_RENAME = 128 // { int|sys||rename(const char *from, const char *to); }
SYS_FLOCK = 131 // { int|sys||flock(int fd, int how); }
SYS_MKFIFO = 132 // { int|sys||mkfifo(const char *path, mode_t mode); }
SYS_SENDTO = 133 // { ssize_t|sys||sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); }
SYS_SHUTDOWN = 134 // { int|sys||shutdown(int s, int how); }
SYS_SOCKETPAIR = 135 // { int|sys||socketpair(int domain, int type, int protocol, int *rsv); }
SYS_MKDIR = 136 // { int|sys||mkdir(const char *path, mode_t mode); }
SYS_RMDIR = 137 // { int|sys||rmdir(const char *path); }
SYS_SETSID = 147 // { int|sys||setsid(void); }
SYS_SYSARCH = 165 // { int|sys||sysarch(int op, void *parms); }
SYS_PREAD = 173 // { ssize_t|sys||pread(int fd, void *buf, size_t nbyte, int PAD, off_t offset); }
SYS_PWRITE = 174 // { ssize_t|sys||pwrite(int fd, const void *buf, size_t nbyte, int PAD, off_t offset); }
SYS_NTP_ADJTIME = 176 // { int|sys||ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int|sys||setgid(gid_t gid); }
SYS_SETEGID = 182 // { int|sys||setegid(gid_t egid); }
SYS_SETEUID = 183 // { int|sys||seteuid(uid_t euid); }
SYS_PATHCONF = 191 // { long|sys||pathconf(const char *path, int name); }
SYS_FPATHCONF = 192 // { long|sys||fpathconf(int fd, int name); }
SYS_GETRLIMIT = 194 // { int|sys||getrlimit(int which, struct rlimit *rlp); }
SYS_SETRLIMIT = 195 // { int|sys||setrlimit(int which, const struct rlimit *rlp); }
SYS_MMAP = 197 // { void *|sys||mmap(void *addr, size_t len, int prot, int flags, int fd, long PAD, off_t pos); }
SYS_LSEEK = 199 // { off_t|sys||lseek(int fd, int PAD, off_t offset, int whence); }
SYS_TRUNCATE = 200 // { int|sys||truncate(const char *path, int PAD, off_t length); }
SYS_FTRUNCATE = 201 // { int|sys||ftruncate(int fd, int PAD, off_t length); }
SYS___SYSCTL = 202 // { int|sys||__sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, const void *new, size_t newlen); }
SYS_MLOCK = 203 // { int|sys||mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int|sys||munlock(const void *addr, size_t len); }
SYS_UNDELETE = 205 // { int|sys||undelete(const char *path); }
SYS_GETPGID = 207 // { pid_t|sys||getpgid(pid_t pid); }
SYS_REBOOT = 208 // { int|sys||reboot(int opt, char *bootstr); }
SYS_POLL = 209 // { int|sys||poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_SEMGET = 221 // { int|sys||semget(key_t key, int nsems, int semflg); }
SYS_SEMOP = 222 // { int|sys||semop(int semid, struct sembuf *sops, size_t nsops); }
SYS_SEMCONFIG = 223 // { int|sys||semconfig(int flag); }
SYS_MSGGET = 225 // { int|sys||msgget(key_t key, int msgflg); }
SYS_MSGSND = 226 // { int|sys||msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
SYS_MSGRCV = 227 // { ssize_t|sys||msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
SYS_SHMAT = 228 // { void *|sys||shmat(int shmid, const void *shmaddr, int shmflg); }
SYS_SHMDT = 230 // { int|sys||shmdt(const void *shmaddr); }
SYS_SHMGET = 231 // { int|sys||shmget(key_t key, size_t size, int shmflg); }
SYS_TIMER_CREATE = 235 // { int|sys||timer_create(clockid_t clock_id, struct sigevent *evp, timer_t *timerid); }
SYS_TIMER_DELETE = 236 // { int|sys||timer_delete(timer_t timerid); }
SYS_TIMER_GETOVERRUN = 239 // { int|sys||timer_getoverrun(timer_t timerid); }
SYS_FDATASYNC = 241 // { int|sys||fdatasync(int fd); }
SYS_MLOCKALL = 242 // { int|sys||mlockall(int flags); }
SYS_MUNLOCKALL = 243 // { int|sys||munlockall(void); }
SYS_SIGQUEUEINFO = 245 // { int|sys||sigqueueinfo(pid_t pid, const siginfo_t *info); }
SYS_MODCTL = 246 // { int|sys||modctl(int cmd, void *arg); }
SYS___POSIX_RENAME = 270 // { int|sys||__posix_rename(const char *from, const char *to); }
SYS_SWAPCTL = 271 // { int|sys||swapctl(int cmd, void *arg, int misc); }
SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); }
SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); }
SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); }
SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); }
SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); }
SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); }
SYS_GETSID = 286 // { pid_t|sys||getsid(pid_t pid); }
SYS___CLONE = 287 // { pid_t|sys||__clone(int flags, void *stack); }
SYS_FKTRACE = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); }
SYS_PREADV = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
SYS_PWRITEV = 290 // { ssize_t|sys||pwritev(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
SYS___GETCWD = 296 // { int|sys||__getcwd(char *bufp, size_t length); }
SYS_FCHROOT = 297 // { int|sys||fchroot(int fd); }
SYS_LCHFLAGS = 304 // { int|sys||lchflags(const char *path, u_long flags); }
SYS_ISSETUGID = 305 // { int|sys||issetugid(void); }
SYS_UTRACE = 306 // { int|sys||utrace(const char *label, void *addr, size_t len); }
SYS_GETCONTEXT = 307 // { int|sys||getcontext(struct __ucontext *ucp); }
SYS_SETCONTEXT = 308 // { int|sys||setcontext(const struct __ucontext *ucp); }
SYS__LWP_CREATE = 309 // { int|sys||_lwp_create(const struct __ucontext *ucp, u_long flags, lwpid_t *new_lwp); }
SYS__LWP_EXIT = 310 // { int|sys||_lwp_exit(void); }
SYS__LWP_SELF = 311 // { lwpid_t|sys||_lwp_self(void); }
SYS__LWP_WAIT = 312 // { int|sys||_lwp_wait(lwpid_t wait_for, lwpid_t *departed); }
SYS__LWP_SUSPEND = 313 // { int|sys||_lwp_suspend(lwpid_t target); }
SYS__LWP_CONTINUE = 314 // { int|sys||_lwp_continue(lwpid_t target); }
SYS__LWP_WAKEUP = 315 // { int|sys||_lwp_wakeup(lwpid_t target); }
SYS__LWP_GETPRIVATE = 316 // { void *|sys||_lwp_getprivate(void); }
SYS__LWP_SETPRIVATE = 317 // { void|sys||_lwp_setprivate(void *ptr); }
SYS__LWP_KILL = 318 // { int|sys||_lwp_kill(lwpid_t target, int signo); }
SYS__LWP_DETACH = 319 // { int|sys||_lwp_detach(lwpid_t target); }
SYS__LWP_UNPARK = 321 // { int|sys||_lwp_unpark(lwpid_t target, const void *hint); }
SYS__LWP_UNPARK_ALL = 322 // { ssize_t|sys||_lwp_unpark_all(const lwpid_t *targets, size_t ntargets, const void *hint); }
SYS__LWP_SETNAME = 323 // { int|sys||_lwp_setname(lwpid_t target, const char *name); }
SYS__LWP_GETNAME = 324 // { int|sys||_lwp_getname(lwpid_t target, char *name, size_t len); }
SYS__LWP_CTL = 325 // { int|sys||_lwp_ctl(int features, struct lwpctl **address); }
SYS___SIGACTION_SIGTRAMP = 340 // { int|sys||__sigaction_sigtramp(int signum, const struct sigaction *nsa, struct sigaction *osa, const void *tramp, int vers); }
SYS_PMC_GET_INFO = 341 // { int|sys||pmc_get_info(int ctr, int op, void *args); }
SYS_PMC_CONTROL = 342 // { int|sys||pmc_control(int ctr, int op, void *args); }
SYS_RASCTL = 343 // { int|sys||rasctl(void *addr, size_t len, int op); }
SYS_KQUEUE = 344 // { int|sys||kqueue(void); }
SYS__SCHED_SETPARAM = 346 // { int|sys||_sched_setparam(pid_t pid, lwpid_t lid, int policy, const struct sched_param *params); }
SYS__SCHED_GETPARAM = 347 // { int|sys||_sched_getparam(pid_t pid, lwpid_t lid, int *policy, struct sched_param *params); }
SYS__SCHED_SETAFFINITY = 348 // { int|sys||_sched_setaffinity(pid_t pid, lwpid_t lid, size_t size, const cpuset_t *cpuset); }
SYS__SCHED_GETAFFINITY = 349 // { int|sys||_sched_getaffinity(pid_t pid, lwpid_t lid, size_t size, cpuset_t *cpuset); }
SYS_SCHED_YIELD = 350 // { int|sys||sched_yield(void); }
SYS_FSYNC_RANGE = 354 // { int|sys||fsync_range(int fd, int flags, off_t start, off_t length); }
SYS_UUIDGEN = 355 // { int|sys||uuidgen(struct uuid *store, int count); }
SYS_GETVFSSTAT = 356 // { int|sys||getvfsstat(struct statvfs *buf, size_t bufsize, int flags); }
SYS_STATVFS1 = 357 // { int|sys||statvfs1(const char *path, struct statvfs *buf, int flags); }
SYS_FSTATVFS1 = 358 // { int|sys||fstatvfs1(int fd, struct statvfs *buf, int flags); }
SYS_EXTATTRCTL = 360 // { int|sys||extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
SYS_EXTATTR_SET_FILE = 361 // { int|sys||extattr_set_file(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
SYS_EXTATTR_GET_FILE = 362 // { ssize_t|sys||extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 363 // { int|sys||extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
SYS_EXTATTR_SET_FD = 364 // { int|sys||extattr_set_fd(int fd, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
SYS_EXTATTR_GET_FD = 365 // { ssize_t|sys||extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FD = 366 // { int|sys||extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
SYS_EXTATTR_SET_LINK = 367 // { int|sys||extattr_set_link(const char *path, int attrnamespace, const char *attrname, const void *data, size_t nbytes); }
SYS_EXTATTR_GET_LINK = 368 // { ssize_t|sys||extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_LINK = 369 // { int|sys||extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
SYS_EXTATTR_LIST_FD = 370 // { ssize_t|sys||extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_FILE = 371 // { ssize_t|sys||extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_LINK = 372 // { ssize_t|sys||extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_SETXATTR = 375 // { int|sys||setxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
SYS_LSETXATTR = 376 // { int|sys||lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags); }
SYS_FSETXATTR = 377 // { int|sys||fsetxattr(int fd, const char *name, const void *value, size_t size, int flags); }
SYS_GETXATTR = 378 // { int|sys||getxattr(const char *path, const char *name, void *value, size_t size); }
SYS_LGETXATTR = 379 // { int|sys||lgetxattr(const char *path, const char *name, void *value, size_t size); }
SYS_FGETXATTR = 380 // { int|sys||fgetxattr(int fd, const char *name, void *value, size_t size); }
SYS_LISTXATTR = 381 // { int|sys||listxattr(const char *path, char *list, size_t size); }
SYS_LLISTXATTR = 382 // { int|sys||llistxattr(const char *path, char *list, size_t size); }
SYS_FLISTXATTR = 383 // { int|sys||flistxattr(int fd, char *list, size_t size); }
SYS_REMOVEXATTR = 384 // { int|sys||removexattr(const char *path, const char *name); }
SYS_LREMOVEXATTR = 385 // { int|sys||lremovexattr(const char *path, const char *name); }
SYS_FREMOVEXATTR = 386 // { int|sys||fremovexattr(int fd, const char *name); }
SYS_GETDENTS = 390 // { int|sys|30|getdents(int fd, char *buf, size_t count); }
SYS_SOCKET = 394 // { int|sys|30|socket(int domain, int type, int protocol); }
SYS_GETFH = 395 // { int|sys|30|getfh(const char *fname, void *fhp, size_t *fh_size); }
SYS_MOUNT = 410 // { int|sys|50|mount(const char *type, const char *path, int flags, void *data, size_t data_len); }
SYS_MREMAP = 411 // { void *|sys||mremap(void *old_address, size_t old_size, void *new_address, size_t new_size, int flags); }
SYS_PSET_CREATE = 412 // { int|sys||pset_create(psetid_t *psid); }
SYS_PSET_DESTROY = 413 // { int|sys||pset_destroy(psetid_t psid); }
SYS_PSET_ASSIGN = 414 // { int|sys||pset_assign(psetid_t psid, cpuid_t cpuid, psetid_t *opsid); }
SYS__PSET_BIND = 415 // { int|sys||_pset_bind(idtype_t idtype, id_t first_id, id_t second_id, psetid_t psid, psetid_t *opsid); }
SYS_POSIX_FADVISE = 416 // { int|sys|50|posix_fadvise(int fd, int PAD, off_t offset, off_t len, int advice); }
SYS_SELECT = 417 // { int|sys|50|select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
SYS_GETTIMEOFDAY = 418 // { int|sys|50|gettimeofday(struct timeval *tp, void *tzp); }
SYS_SETTIMEOFDAY = 419 // { int|sys|50|settimeofday(const struct timeval *tv, const void *tzp); }
SYS_UTIMES = 420 // { int|sys|50|utimes(const char *path, const struct timeval *tptr); }
SYS_ADJTIME = 421 // { int|sys|50|adjtime(const struct timeval *delta, struct timeval *olddelta); }
SYS_FUTIMES = 423 // { int|sys|50|futimes(int fd, const struct timeval *tptr); }
SYS_LUTIMES = 424 // { int|sys|50|lutimes(const char *path, const struct timeval *tptr); }
SYS_SETITIMER = 425 // { int|sys|50|setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); }
SYS_GETITIMER = 426 // { int|sys|50|getitimer(int which, struct itimerval *itv); }
SYS_CLOCK_GETTIME = 427 // { int|sys|50|clock_gettime(clockid_t clock_id, struct timespec *tp); }
SYS_CLOCK_SETTIME = 428 // { int|sys|50|clock_settime(clockid_t clock_id, const struct timespec *tp); }
SYS_CLOCK_GETRES = 429 // { int|sys|50|clock_getres(clockid_t clock_id, struct timespec *tp); }
SYS_NANOSLEEP = 430 // { int|sys|50|nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
SYS___SIGTIMEDWAIT = 431 // { int|sys|50|__sigtimedwait(const sigset_t *set, siginfo_t *info, struct timespec *timeout); }
SYS__LWP_PARK = 434 // { int|sys|50|_lwp_park(const struct timespec *ts, lwpid_t unpark, const void *hint, const void *unparkhint); }
SYS_KEVENT = 435 // { int|sys|50|kevent(int fd, const struct kevent *changelist, size_t nchanges, struct kevent *eventlist, size_t nevents, const struct timespec *timeout); }
SYS_PSELECT = 436 // { int|sys|50|pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); }
SYS_POLLTS = 437 // { int|sys|50|pollts(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); }
SYS_STAT = 439 // { int|sys|50|stat(const char *path, struct stat *ub); }
SYS_FSTAT = 440 // { int|sys|50|fstat(int fd, struct stat *sb); }
SYS_LSTAT = 441 // { int|sys|50|lstat(const char *path, struct stat *ub); }
SYS___SEMCTL = 442 // { int|sys|50|__semctl(int semid, int semnum, int cmd, ... union __semun *arg); }
SYS_SHMCTL = 443 // { int|sys|50|shmctl(int shmid, int cmd, struct shmid_ds *buf); }
SYS_MSGCTL = 444 // { int|sys|50|msgctl(int msqid, int cmd, struct msqid_ds *buf); }
SYS_GETRUSAGE = 445 // { int|sys|50|getrusage(int who, struct rusage *rusage); }
SYS_TIMER_SETTIME = 446 // { int|sys|50|timer_settime(timer_t timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
SYS_TIMER_GETTIME = 447 // { int|sys|50|timer_gettime(timer_t timerid, struct itimerspec *value); }
SYS_NTP_GETTIME = 448 // { int|sys|50|ntp_gettime(struct ntptimeval *ntvp); }
SYS_WAIT4 = 449 // { int|sys|50|wait4(pid_t pid, int *status, int options, struct rusage *rusage); }
SYS_MKNOD = 450 // { int|sys|50|mknod(const char *path, mode_t mode, dev_t dev); }
SYS_FHSTAT = 451 // { int|sys|50|fhstat(const void *fhp, size_t fh_size, struct stat *sb); }
SYS_PIPE2 = 453 // { int|sys||pipe2(int *fildes, int flags); }
SYS_DUP3 = 454 // { int|sys||dup3(int from, int to, int flags); }
SYS_KQUEUE1 = 455 // { int|sys||kqueue1(int flags); }
SYS_PACCEPT = 456 // { int|sys||paccept(int s, struct sockaddr *name, socklen_t *anamelen, const sigset_t *mask, int flags); }
SYS_LINKAT = 457 // { int|sys||linkat(int fd1, const char *name1, int fd2, const char *name2, int flags); }
SYS_RENAMEAT = 458 // { int|sys||renameat(int fromfd, const char *from, int tofd, const char *to); }
SYS_MKFIFOAT = 459 // { int|sys||mkfifoat(int fd, const char *path, mode_t mode); }
SYS_MKNODAT = 460 // { int|sys||mknodat(int fd, const char *path, mode_t mode, uint32_t dev); }
SYS_MKDIRAT = 461 // { int|sys||mkdirat(int fd, const char *path, mode_t mode); }
SYS_FACCESSAT = 462 // { int|sys||faccessat(int fd, const char *path, int amode, int flag); }
SYS_FCHMODAT = 463 // { int|sys||fchmodat(int fd, const char *path, mode_t mode, int flag); }
SYS_FCHOWNAT = 464 // { int|sys||fchownat(int fd, const char *path, uid_t owner, gid_t group, int flag); }
SYS_FEXECVE = 465 // { int|sys||fexecve(int fd, char * const *argp, char * const *envp); }
SYS_FSTATAT = 466 // { int|sys||fstatat(int fd, const char *path, struct stat *buf, int flag); }
SYS_UTIMENSAT = 467 // { int|sys||utimensat(int fd, const char *path, const struct timespec *tptr, int flag); }
SYS_OPENAT = 468 // { int|sys||openat(int fd, const char *path, int oflags, ... mode_t mode); }
SYS_READLINKAT = 469 // { int|sys||readlinkat(int fd, const char *path, char *buf, size_t bufsize); }
SYS_SYMLINKAT = 470 // { int|sys||symlinkat(const char *path1, int fd, const char *path2); }
SYS_UNLINKAT = 471 // { int|sys||unlinkat(int fd, const char *path, int flag); }
SYS_FUTIMENS = 472 // { int|sys||futimens(int fd, const struct timespec *tptr); }
SYS___QUOTACTL = 473 // { int|sys||__quotactl(const char *path, struct quotactl_args *args); }
SYS_POSIX_SPAWN = 474 // { int|sys||posix_spawn(pid_t *pid, const char *path, const struct posix_spawn_file_actions *file_actions, const struct posix_spawnattr *attrp, char *const *argv, char *const *envp); }
SYS_RECVMMSG = 475 // { int|sys||recvmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); }
SYS_SENDMMSG = 476 // { int|sys||sendmmsg(int s, struct mmsghdr *mmsg, unsigned int vlen, unsigned int flags); }
)
| _vendor/src/golang.org/x/sys/unix/zsysnum_netbsd_arm.go | 0 | https://github.com/pingcap/tidb/commit/436eb2430309bbeaaa400a390cb50c549f6f5537 | [
0.0005201763124205172,
0.00021578301675617695,
0.00016459834296256304,
0.0001730766671244055,
0.00008954515215009451
] |
{
"id": 0,
"code_window": [
"\ttempl := template.Must(template.ParseFiles(files...))\n",
"\tengine.SetHTMLTemplate(templ)\n",
"}\n",
"\n",
"func (engine *Engine) SetHTMLTemplate(templ *template.Template) {\n",
"\tengine.HTMLRender = render.HTMLRender{\n",
"\t\tTemplate: templ,\n",
"\t}\n",
"}\n",
"\n",
"// Adds handlers for NoRoute. It return a 404 code by default.\n",
"func (engine *Engine) NoRoute(handlers ...HandlerFunc) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif gin_mode == debugCode {\n",
"\t\tengine.HTMLRender = render.HTMLDebug\n",
"\t} else {\n",
"\t\tengine.HTMLRender = render.HTMLRender{\n",
"\t\t\tTemplate: templ,\n",
"\t\t}\n"
],
"file_path": "gin.go",
"type": "replace",
"edit_start_line_idx": 94
} | package render
import (
"encoding/json"
"encoding/xml"
"fmt"
"html/template"
"net/http"
)
type (
Render interface {
Render(http.ResponseWriter, int, ...interface{}) error
}
// JSON binding
jsonRender struct{}
// XML binding
xmlRender struct{}
// Plain text
plainRender struct{}
// Redirects
redirectRender struct{}
// form binding
HTMLRender struct {
Template *template.Template
}
)
var (
JSON = jsonRender{}
XML = xmlRender{}
Plain = plainRender{}
Redirect = redirectRender{}
)
func writeHeader(w http.ResponseWriter, code int, contentType string) {
w.Header().Set("Content-Type", contentType)
w.WriteHeader(code)
}
func (_ jsonRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/json")
encoder := json.NewEncoder(w)
return encoder.Encode(data[0])
}
func (_ redirectRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
w.Header().Set("Location", data[0].(string))
w.WriteHeader(code)
return nil
}
func (_ xmlRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/xml")
encoder := xml.NewEncoder(w)
return encoder.Encode(data[0])
}
func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/html")
file := data[0].(string)
obj := data[1]
return html.Template.ExecuteTemplate(w, file, obj)
}
func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/plain")
format := data[0].(string)
args := data[1].([]interface{})
var err error
if len(args) > 0 {
_, err = w.Write([]byte(fmt.Sprintf(format, args...)))
} else {
_, err = w.Write([]byte(format))
}
return err
}
| render/render.go | 1 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.0036735369358211756,
0.001087742275558412,
0.00016729600611142814,
0.0001911449508043006,
0.0013175995554775
] |
{
"id": 0,
"code_window": [
"\ttempl := template.Must(template.ParseFiles(files...))\n",
"\tengine.SetHTMLTemplate(templ)\n",
"}\n",
"\n",
"func (engine *Engine) SetHTMLTemplate(templ *template.Template) {\n",
"\tengine.HTMLRender = render.HTMLRender{\n",
"\t\tTemplate: templ,\n",
"\t}\n",
"}\n",
"\n",
"// Adds handlers for NoRoute. It return a 404 code by default.\n",
"func (engine *Engine) NoRoute(handlers ...HandlerFunc) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif gin_mode == debugCode {\n",
"\t\tengine.HTMLRender = render.HTMLDebug\n",
"\t} else {\n",
"\t\tengine.HTMLRender = render.HTMLRender{\n",
"\t\t\tTemplate: templ,\n",
"\t\t}\n"
],
"file_path": "gin.go",
"type": "replace",
"edit_start_line_idx": 94
} | package gin
import (
"encoding/base64"
"net/http"
"net/http/httptest"
"testing"
)
func TestBasicAuthSucceed(t *testing.T) {
req, _ := http.NewRequest("GET", "/login", nil)
w := httptest.NewRecorder()
r := New()
accounts := Accounts{"admin": "password"}
r.Use(BasicAuth(accounts))
r.GET("/login", func(c *Context) {
c.String(200, "autorized")
})
req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("admin:password")))
r.ServeHTTP(w, req)
if w.Code != 200 {
t.Errorf("Response code should be Ok, was: %s", w.Code)
}
bodyAsString := w.Body.String()
if bodyAsString != "autorized" {
t.Errorf("Response body should be `autorized`, was %s", bodyAsString)
}
}
func TestBasicAuth401(t *testing.T) {
req, _ := http.NewRequest("GET", "/login", nil)
w := httptest.NewRecorder()
r := New()
accounts := Accounts{"foo": "bar"}
r.Use(BasicAuth(accounts))
r.GET("/login", func(c *Context) {
c.String(200, "autorized")
})
req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("admin:password")))
r.ServeHTTP(w, req)
if w.Code != 401 {
t.Errorf("Response code should be Not autorized, was: %s", w.Code)
}
if w.HeaderMap.Get("WWW-Authenticate") != "Basic realm=\"Authorization Required\"" {
t.Errorf("WWW-Authenticate header is incorrect: %s", w.HeaderMap.Get("Content-Type"))
}
}
| auth_test.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.0001702500449027866,
0.00016865755605977029,
0.0001670623169047758,
0.0001686607429292053,
0.0000014593291552955634
] |
{
"id": 0,
"code_window": [
"\ttempl := template.Must(template.ParseFiles(files...))\n",
"\tengine.SetHTMLTemplate(templ)\n",
"}\n",
"\n",
"func (engine *Engine) SetHTMLTemplate(templ *template.Template) {\n",
"\tengine.HTMLRender = render.HTMLRender{\n",
"\t\tTemplate: templ,\n",
"\t}\n",
"}\n",
"\n",
"// Adds handlers for NoRoute. It return a 404 code by default.\n",
"func (engine *Engine) NoRoute(handlers ...HandlerFunc) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif gin_mode == debugCode {\n",
"\t\tengine.HTMLRender = render.HTMLDebug\n",
"\t} else {\n",
"\t\tengine.HTMLRender = render.HTMLRender{\n",
"\t\t\tTemplate: templ,\n",
"\t\t}\n"
],
"file_path": "gin.go",
"type": "replace",
"edit_start_line_idx": 94
} | # Guide to run Gin under App Engine LOCAL Development Server
1. Download, install and setup Go in your computer. (That includes setting your `$GOPATH`.)
2. Download SDK for your platform from here: `https://developers.google.com/appengine/downloads?hl=es#Google_App_Engine_SDK_for_Go`
3. Download Gin source code using: `$ go get github.com/gin-gonic/gin`
4. Navigate to examples folder: `$ cd $GOPATH/src/github.com/gin-gonic/gin/examples/`
5. Run it: `$ goapp serve app-engine/` | examples/app-engine/README.md | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.00016879320901352912,
0.00016879320901352912,
0.00016879320901352912,
0.00016879320901352912,
0
] |
{
"id": 0,
"code_window": [
"\ttempl := template.Must(template.ParseFiles(files...))\n",
"\tengine.SetHTMLTemplate(templ)\n",
"}\n",
"\n",
"func (engine *Engine) SetHTMLTemplate(templ *template.Template) {\n",
"\tengine.HTMLRender = render.HTMLRender{\n",
"\t\tTemplate: templ,\n",
"\t}\n",
"}\n",
"\n",
"// Adds handlers for NoRoute. It return a 404 code by default.\n",
"func (engine *Engine) NoRoute(handlers ...HandlerFunc) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif gin_mode == debugCode {\n",
"\t\tengine.HTMLRender = render.HTMLDebug\n",
"\t} else {\n",
"\t\tengine.HTMLRender = render.HTMLRender{\n",
"\t\t\tTemplate: templ,\n",
"\t\t}\n"
],
"file_path": "gin.go",
"type": "replace",
"edit_start_line_idx": 94
} | #Gin Web Framework
[](https://godoc.org/github.com/gin-gonic/gin)
[](https://travis-ci.org/gin-gonic/gin)
Gin is a web framework written in Golang. It features a martini-like API with much better performance, up to 40 times faster. If you need performance and good productivity, you will love Gin.

##Gin is new, will it be supported?
Yes, Gin is an internal project of [my](https://github.com/manucorporat) upcoming startup. We developed it and we are going to continue using and improve it.
##Roadmap for v1.0
- [x] Performance improments, reduce allocation and garbage collection overhead
- [x] Fix bugs
- [ ] Stable API
- [ ] Ask our designer for a cool logo
- [ ] Add tons of unit tests
- [ ] Add internal benchmarks suite
- [x] Improve logging system
- [x] Improve JSON/XML validation using bindings
- [x] Improve XML support
- [x] Flexible rendering system
- [ ] More powerful validation API
- [ ] Improve documentation
- [ ] Add more cool middlewares, for example redis caching (this also helps developers to understand the framework).
- [x] Continuous integration
## Start using it
Obviously, you need to have Git and Go! already installed to run Gin.
Run this in your terminal
```
go get github.com/gin-gonic/gin
```
Then import it in your Go! code:
```
import "github.com/gin-gonic/gin"
```
##Community
If you'd like to help out with the project, there's a mailing list and IRC channel where Gin discussions normally happen.
* IRC
* [irc.freenode.net #getgin](irc://irc.freenode.net:6667/getgin)
* [Webchat](http://webchat.freenode.net?randomnick=1&channels=%23getgin)
* Mailing List
* Subscribe: [[email protected]](mailto:[email protected])
* [Archives](http://librelist.com/browser/getgin/)
##API Examples
#### Create most basic PING/PONG HTTP endpoint
```go
package main
import "github.com/gin-gonic/gin"
func main() {
r := gin.Default()
r.GET("/ping", func(c *gin.Context) {
c.String(200, "pong")
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Using GET, POST, PUT, PATCH, DELETE and OPTIONS
```go
func main() {
// Creates a gin router + logger and recovery (crash-free) middlewares
r := gin.Default()
r.GET("/someGet", getting)
r.POST("/somePost", posting)
r.PUT("/somePut", putting)
r.DELETE("/someDelete", deleting)
r.PATCH("/somePatch", patching)
r.HEAD("/someHead", head)
r.OPTIONS("/someOptions", options)
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Parameters in path
```go
func main() {
r := gin.Default()
// This handler will match /user/john but will not match neither /user/ or /user
r.GET("/user/:name", func(c *gin.Context) {
name := c.Params.ByName("name")
message := "Hello "+name
c.String(200, message)
})
// However, this one will match /user/john and also /user/john/send
r.GET("/user/:name/*action", func(c *gin.Context) {
name := c.Params.ByName("name")
action := c.Params.ByName("action")
message := name + " is " + action
c.String(200, message)
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Grouping routes
```go
func main() {
r := gin.Default()
// Simple group: v1
v1 := r.Group("/v1")
{
v1.POST("/login", loginEndpoint)
v1.POST("/submit", submitEndpoint)
v1.POST("/read", readEndpoint)
}
// Simple group: v2
v2 := r.Group("/v2")
{
v2.POST("/login", loginEndpoint)
v2.POST("/submit", submitEndpoint)
v2.POST("/read", readEndpoint)
}
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Blank Gin without middlewares by default
Use
```go
r := gin.New()
```
instead of
```go
r := gin.Default()
```
#### Using middlewares
```go
func main() {
// Creates a router without any middleware by default
r := gin.New()
// Global middlewares
r.Use(gin.Logger())
r.Use(gin.Recovery())
// Per route middlewares, you can add as many as you desire.
r.GET("/benchmark", MyBenchLogger(), benchEndpoint)
// Authorization group
// authorized := r.Group("/", AuthRequired())
// exactly the same than:
authorized := r.Group("/")
// per group middlewares! in this case we use the custom created
// AuthRequired() middleware just in the "authorized" group.
authorized.Use(AuthRequired())
{
authorized.POST("/login", loginEndpoint)
authorized.POST("/submit", submitEndpoint)
authorized.POST("/read", readEndpoint)
// nested group
testing := authorized.Group("testing")
testing.GET("/analytics", analyticsEndpoint)
}
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Model binding and validation
To bind a request body into a type, use model binding. We currently support binding of JSON, XML and standard form values (foo=bar&boo=baz).
Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`.
When using the Bind-method, Gin tries to infer the binder depending on the Content-Type header. If you are sure what you are binding, you can use BindWith.
You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has a empty value when binding, the current request will fail with an error.
```go
// Binding from JSON
type LoginJSON struct {
User string `json:"user" binding:"required"`
Password string `json:"password" binding:"required"`
}
// Binding from form values
type LoginForm struct {
User string `form:"user" binding:"required"`
Password string `form:"password" binding:"required"`
}
func main() {
r := gin.Default()
// Example for binding JSON ({"user": "manu", "password": "123"})
r.POST("/login", func(c *gin.Context) {
var json LoginJSON
c.Bind(&json) // This will infer what binder to use depending on the content-type header.
if json.User == "manu" && json.Password == "123" {
c.JSON(200, gin.H{"status": "you are logged in"})
} else {
c.JSON(401, gin.H{"status": "unauthorized"})
}
})
// Example for binding a HTLM form (user=manu&password=123)
r.POST("/login", func(c *gin.Context) {
var form LoginForm
c.BindWith(&form, binding.Form) // You can also specify which binder to use. We support binding.Form, binding.JSON and binding.XML.
if form.User == "manu" && form.Password == "123" {
c.JSON(200, gin.H{"status": "you are logged in"})
} else {
c.JSON(401, gin.H{"status": "unauthorized"})
}
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### XML and JSON rendering
```go
func main() {
r := gin.Default()
// gin.H is a shortcup for map[string]interface{}
r.GET("/someJSON", func(c *gin.Context) {
c.JSON(200, gin.H{"message": "hey", "status": 200})
})
r.GET("/moreJSON", func(c *gin.Context) {
// You also can use a struct
var msg struct {
Name string `json:"user"`
Message string
Number int
}
msg.Name = "Lena"
msg.Message = "hey"
msg.Number = 123
// Note that msg.Name becomes "user" in the JSON
// Will output : {"user": "Lena", "Message": "hey", "Number": 123}
c.JSON(200, msg)
})
r.GET("/someXML", func(c *gin.Context) {
c.XML(200, gin.H{"message": "hey", "status": 200})
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
####HTML rendering
Using LoadHTMLTemplates()
```go
func main() {
r := gin.Default()
r.LoadHTMLTemplates("templates/*")
r.GET("/index", func(c *gin.Context) {
obj := gin.H{"title": "Main website"}
c.HTML(200, "index.tmpl", obj)
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
You can also use your own html template render
```go
import "html/template"
func main() {
r := gin.Default()
html := template.Must(template.ParseFiles("file1", "file2"))
r.HTMLTemplates = html
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Redirects
Issuing a HTTP redirect is easy:
```r.GET("/test", func(c *gin.Context) {
c.Redirect(301, "http://www.google.com/")
})
Both internal and external locations are supported.
```
#### Custom Middlewares
```go
func Logger() gin.HandlerFunc {
return func(c *gin.Context) {
t := time.Now()
// Set example variable
c.Set("example", "12345")
// before request
c.Next()
// after request
latency := time.Since(t)
log.Print(latency)
// access the status we are sending
status := c.Writer.Status()
log.Println(status)
}
}
func main() {
r := gin.New()
r.Use(Logger())
r.GET("/test", func(c *gin.Context) {
example := c.MustGet("example").(string)
// it would print: "12345"
log.Println(example)
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Using BasicAuth() middleware
```go
// similate some private data
var secrets = gin.H{
"foo": gin.H{"email": "[email protected]", "phone": "123433"},
"austin": gin.H{"email": "[email protected]", "phone": "666"},
"lena": gin.H{"email": "[email protected]", "phone": "523443"},
}
func main() {
r := gin.Default()
// Group using gin.BasicAuth() middleware
// gin.Accounts is a shortcut for map[string]string
authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{
"foo": "bar",
"austin": "1234",
"lena": "hello2",
"manu": "4321",
}))
// /admin/secrets endpoint
// hit "localhost:8080/admin/secrets
authorized.GET("/secrets", func(c *gin.Context) {
// get user, it was setted by the BasicAuth middleware
user := c.Get(gin.AuthUserKey).(string)
if secret, ok := secrets[user]; ok {
c.JSON(200, gin.H{"user": user, "secret": secret})
} else {
c.JSON(200, gin.H{"user": user, "secret": "NO SECRET :("})
}
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Goroutines inside a middleware
When starting inside a middleware or handler, you **SHOULD NOT** use the original context inside it, you have to use a read-only copy.
```go
func main() {
r := gin.Default()
r.GET("/long_async", func(c *gin.Context) {
// create copy to be used inside the goroutine
c_cp := c.Copy()
go func() {
// simulate a long task with time.Sleep(). 5 seconds
time.Sleep(5 * time.Second)
// note than you are using the copied context "c_cp", IMPORTANT
log.Println("Done! in path " + c_cp.Request.URL.Path)
}()
})
r.GET("/long_sync", func(c *gin.Context) {
// simulate a long task with time.Sleep(). 5 seconds
time.Sleep(5 * time.Second)
// since we are NOT using a goroutine, we do not have to copy the context
log.Println("Done! in path " + c.Request.URL.Path)
})
// Listen and server on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Custom HTTP configuration
Use `http.ListenAndServe()` directly, like this:
```go
func main() {
router := gin.Default()
http.ListenAndServe(":8080", router)
}
```
or
```go
func main() {
router := gin.Default()
s := &http.Server{
Addr: ":8080",
Handler: router,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
}
s.ListenAndServe()
}
```
| README.md | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.0032839346677064896,
0.0002900325634982437,
0.00016385482740588486,
0.00016926095122471452,
0.0005596374394372106
] |
{
"id": 1,
"code_window": [
"\tplainRender struct{}\n",
"\n",
"\t// Redirects\n",
"\tredirectRender struct{}\n",
"\n",
"\t// form binding\n",
"\tHTMLRender struct {\n",
"\t\tTemplate *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Redirects\n",
"\thtmlDebugRender struct{}\n",
"\n"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 27
} | package render
import (
"encoding/json"
"encoding/xml"
"fmt"
"html/template"
"net/http"
)
type (
Render interface {
Render(http.ResponseWriter, int, ...interface{}) error
}
// JSON binding
jsonRender struct{}
// XML binding
xmlRender struct{}
// Plain text
plainRender struct{}
// Redirects
redirectRender struct{}
// form binding
HTMLRender struct {
Template *template.Template
}
)
var (
JSON = jsonRender{}
XML = xmlRender{}
Plain = plainRender{}
Redirect = redirectRender{}
)
func writeHeader(w http.ResponseWriter, code int, contentType string) {
w.Header().Set("Content-Type", contentType)
w.WriteHeader(code)
}
func (_ jsonRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/json")
encoder := json.NewEncoder(w)
return encoder.Encode(data[0])
}
func (_ redirectRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
w.Header().Set("Location", data[0].(string))
w.WriteHeader(code)
return nil
}
func (_ xmlRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/xml")
encoder := xml.NewEncoder(w)
return encoder.Encode(data[0])
}
func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/html")
file := data[0].(string)
obj := data[1]
return html.Template.ExecuteTemplate(w, file, obj)
}
func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/plain")
format := data[0].(string)
args := data[1].([]interface{})
var err error
if len(args) > 0 {
_, err = w.Write([]byte(fmt.Sprintf(format, args...)))
} else {
_, err = w.Write([]byte(format))
}
return err
}
| render/render.go | 1 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.9953038692474365,
0.2150801718235016,
0.00016997366037685424,
0.002678008982911706,
0.3894558250904083
] |
{
"id": 1,
"code_window": [
"\tplainRender struct{}\n",
"\n",
"\t// Redirects\n",
"\tredirectRender struct{}\n",
"\n",
"\t// form binding\n",
"\tHTMLRender struct {\n",
"\t\tTemplate *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Redirects\n",
"\thtmlDebugRender struct{}\n",
"\n"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 27
} | ##Changelog
###Gin 0.4 (??)
- [NEW] Unit tests
- [NEW] Add Content.Redirect()
- [FIX] Deferring WriteHeader()
- [FIX] Improved documentation for model binding
###Gin 0.3 (Jul 18, 2014)
- [PERFORMANCE] Normal log and error log are printed in the same call.
- [PERFORMANCE] Improve performance of NoRouter()
- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults.
- [NEW] Flexible rendering API
- [NEW] Add Context.File()
- [NEW] Add shorcut RunTLS() for http.ListenAndServeTLS
- [FIX] Rename NotFound404() to NoRoute()
- [FIX] Errors in context are purged
- [FIX] Adds HEAD method in Static file serving
- [FIX] Refactors Static() file serving
- [FIX] Using keyed initialization to fix app-engine integration
- [FIX] Can't unmarshal JSON array, #63
- [FIX] Renaming Context.Req to Context.Request
- [FIX] Check application/x-www-form-urlencoded when parsing form
###Gin 0.2b (Jul 08, 2014)
- [PERFORMANCE] Using sync.Pool to allocatio/gc overhead
- [NEW] Travis CI integration
- [NEW] Completely new logger
- [NEW] New API for serving static files. gin.Static()
- [NEW] gin.H() can be serialized into XML
- [NEW] Typed errors. Errors can be typed. Internet/external/custom.
- [NEW] Support for Godebs
- [NEW] Travis/Godocs badges in README
- [NEW] New Bind() and BindWith() methods for parsing request body.
- [NEW] Add Content.Copy()
- [NEW] Add context.LastError()
- [NEW] Add shorcut for OPTIONS HTTP method
- [FIX] Tons of README fixes
- [FIX] Header is written before body
- [FIX] BasicAuth() and changes API a little bit
- [FIX] Recovery() middleware only prints panics
- [FIX] Context.Get() does not panic anymore. Use MustGet() instead.
- [FIX] Multiple http.WriteHeader() in NotFound handlers
- [FIX] Engine.Run() panics if http server can't be setted up
- [FIX] Crash when route path doesn't start with '/'
- [FIX] Do not update header when status code is negative
- [FIX] Setting response headers before calling WriteHeader in context.String()
- [FIX] Add MIT license
- [FIX] Changes behaviour of ErrorLogger() and Logger()
| CHANGELOG.md | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.0007827160297892988,
0.0003290065797045827,
0.00016408013470936567,
0.00023995003721211106,
0.0002172377280658111
] |
{
"id": 1,
"code_window": [
"\tplainRender struct{}\n",
"\n",
"\t// Redirects\n",
"\tredirectRender struct{}\n",
"\n",
"\t// form binding\n",
"\tHTMLRender struct {\n",
"\t\tTemplate *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Redirects\n",
"\thtmlDebugRender struct{}\n",
"\n"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 27
} | package gin
import (
"github.com/gin-gonic/gin/binding"
"net/http"
)
// DEPRECATED, use Bind() instead.
// Like ParseBody() but this method also writes a 400 error if the json is not valid.
func (c *Context) EnsureBody(item interface{}) bool {
return c.Bind(item)
}
// DEPRECATED use bindings directly
// Parses the body content as a JSON input. It decodes the json payload into the struct specified as a pointer.
func (c *Context) ParseBody(item interface{}) error {
return binding.JSON.Bind(c.Request, item)
}
// DEPRECATED use gin.Static() instead
// ServeFiles serves files from the given file system root.
// The path must end with "/*filepath", files are then served from the local
// path /defined/root/dir/*filepath.
// For example if root is "/etc" and *filepath is "passwd", the local file
// "/etc/passwd" would be served.
// Internally a http.FileServer is used, therefore http.NotFound is used instead
// of the Router's NotFound handler.
// To use the operating system's file system implementation,
// use http.Dir:
// router.ServeFiles("/src/*filepath", http.Dir("/var/www"))
func (engine *Engine) ServeFiles(path string, root http.FileSystem) {
engine.router.ServeFiles(path, root)
}
// DEPRECATED use gin.LoadHTMLGlob() or gin.LoadHTMLFiles() instead
func (engine *Engine) LoadHTMLTemplates(pattern string) {
engine.LoadHTMLGlob(pattern)
}
// DEPRECATED. Use NotFound() instead
func (engine *Engine) NotFound404(handlers ...HandlerFunc) {
engine.NoRoute(handlers...)
}
| deprecated.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.00029961951076984406,
0.00020541902631521225,
0.00016584969125688076,
0.00018738173821475357,
0.00004904337401967496
] |
{
"id": 1,
"code_window": [
"\tplainRender struct{}\n",
"\n",
"\t// Redirects\n",
"\tredirectRender struct{}\n",
"\n",
"\t// form binding\n",
"\tHTMLRender struct {\n",
"\t\tTemplate *template.Template\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Redirects\n",
"\thtmlDebugRender struct{}\n",
"\n"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 27
} | List of all the awesome people working to make Gin the best Web Framework in Go!
##gin 0.x series authors
**Lead Developer:** Manu Martinez-Almeida (@manucorporat)
**Staff:**
Javier Provecho (@javierprovecho)
People and companies, who have contributed, in alphabetical order.
**@adammck (Adam Mckaig)**
- Add MIT license
**@AlexanderChen1989 (Alexander)**
- Typos in README
**@alexandernyquist (Alexander Nyquist)**
- Using template.Must to fix multiple return issue
- ★ Added support for OPTIONS verb
- ★ Setting response headers before calling WriteHeader
- Improved documentation for model binding
- ★ Added Content.Redirect()
- ★ Added tons of Unit tests
**@austinheap (Austin Heap)**
- Added travis CI integration
**@bluele (Jun Kimura)**
- Fixes code examples in README
**@chad-russell**
- ★ Support for serializing gin.H into XML
**@dickeyxxx (Jeff Dickey)**
- Typos in README
**@fmd (Fareed Dudhia)**
- Fix typo. SetHTTPTemplate -> SetHTMLTemplate
**@jasonrhansen**
- Fix spelling and grammar errors in documentation
**@julienschmidt (Julien Schmidt)**
- gofmt the code examples
**@kyledinh (Kyle Dinh)**
- Adds RunTLS()
**@LinusU (Linus Unnebäck)**
- Small fixes in README
**@lucas-clemente (Lucas Clemente)**
- ★ work around path.Join removing trailing slashes from routes
**@mdigger (Dmitry Sedykh)**
- Fixes Form binding when content-type is x-www-form-urlencoded
- No repeat call c.Writer.Status() in gin.Logger
- Fixes Content-Type for json render
**@mopemope (Yutaka Matsubara)**
- ★ Adds Godep support (Dependencies Manager)
- Fix variadic parameter in the flexible render API
- Fix Corrupted plain render
**@msemenistyi (Mykyta Semenistyi)**
- update Readme.md. Add code to String method
**@msoedov (Sasha Myasoedov)**
- ★ Adds tons of unit tests.
**@ngerakines (Nick Gerakines)**
- ★ Improves API, c.GET() doesn't panic
- Adds MustGet() method
**@r8k (Rajiv Kilaparti)**
- Fix Port usage in README.
**@silasb (Silas Baronda)**
- Fixing quotes in README
**@SkuliOskarsson (Skuli Oskarsson)**
- Fixes some texts in README II | AUTHORS.md | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.0002686615625862032,
0.00018039686256088316,
0.00016384529590141028,
0.00017032913456205279,
0.00002858614971046336
] |
{
"id": 2,
"code_window": [
")\n",
"\n",
"var (\n",
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
")\n",
"\n",
"func writeHeader(w http.ResponseWriter, code int, contentType string) {\n",
"\tw.Header().Set(\"Content-Type\", contentType)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
"\tHTMLDebug = htmlDebugRender{}\n"
],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 34
} | package render
import (
"encoding/json"
"encoding/xml"
"fmt"
"html/template"
"net/http"
)
type (
Render interface {
Render(http.ResponseWriter, int, ...interface{}) error
}
// JSON binding
jsonRender struct{}
// XML binding
xmlRender struct{}
// Plain text
plainRender struct{}
// Redirects
redirectRender struct{}
// form binding
HTMLRender struct {
Template *template.Template
}
)
var (
JSON = jsonRender{}
XML = xmlRender{}
Plain = plainRender{}
Redirect = redirectRender{}
)
func writeHeader(w http.ResponseWriter, code int, contentType string) {
w.Header().Set("Content-Type", contentType)
w.WriteHeader(code)
}
func (_ jsonRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/json")
encoder := json.NewEncoder(w)
return encoder.Encode(data[0])
}
func (_ redirectRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
w.Header().Set("Location", data[0].(string))
w.WriteHeader(code)
return nil
}
func (_ xmlRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/xml")
encoder := xml.NewEncoder(w)
return encoder.Encode(data[0])
}
func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/html")
file := data[0].(string)
obj := data[1]
return html.Template.ExecuteTemplate(w, file, obj)
}
func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/plain")
format := data[0].(string)
args := data[1].([]interface{})
var err error
if len(args) > 0 {
_, err = w.Write([]byte(fmt.Sprintf(format, args...)))
} else {
_, err = w.Write([]byte(format))
}
return err
}
| render/render.go | 1 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.9987360835075378,
0.5539880990982056,
0.00016886483354028314,
0.9811139702796936,
0.4920014441013336
] |
{
"id": 2,
"code_window": [
")\n",
"\n",
"var (\n",
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
")\n",
"\n",
"func writeHeader(w http.ResponseWriter, code int, contentType string) {\n",
"\tw.Header().Set(\"Content-Type\", contentType)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
"\tHTMLDebug = htmlDebugRender{}\n"
],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 34
} | package gin
import (
"bytes"
"errors"
"fmt"
"github.com/gin-gonic/gin/binding"
"github.com/gin-gonic/gin/render"
"github.com/julienschmidt/httprouter"
"log"
"net/http"
)
const (
ErrorTypeInternal = 1 << iota
ErrorTypeExternal = 1 << iota
ErrorTypeAll = 0xffffffff
)
// Used internally to collect errors that occurred during an http request.
type errorMsg struct {
Err string `json:"error"`
Type uint32 `json:"-"`
Meta interface{} `json:"meta"`
}
type errorMsgs []errorMsg
func (a errorMsgs) ByType(typ uint32) errorMsgs {
if len(a) == 0 {
return a
}
result := make(errorMsgs, 0, len(a))
for _, msg := range a {
if msg.Type&typ > 0 {
result = append(result, msg)
}
}
return result
}
func (a errorMsgs) String() string {
if len(a) == 0 {
return ""
}
var buffer bytes.Buffer
for i, msg := range a {
text := fmt.Sprintf("Error #%02d: %s \n Meta: %v\n", (i + 1), msg.Err, msg.Meta)
buffer.WriteString(text)
}
return buffer.String()
}
// Context is the most important part of gin. It allows us to pass variables between middleware,
// manage the flow, validate the JSON of a request and render a JSON response for example.
type Context struct {
writermem responseWriter
Request *http.Request
Writer ResponseWriter
Keys map[string]interface{}
Errors errorMsgs
Params httprouter.Params
Engine *Engine
handlers []HandlerFunc
index int8
}
/************************************/
/********** ROUTES GROUPING *********/
/************************************/
func (engine *Engine) createContext(w http.ResponseWriter, req *http.Request, params httprouter.Params, handlers []HandlerFunc) *Context {
c := engine.cache.Get().(*Context)
c.writermem.reset(w)
c.Request = req
c.Params = params
c.handlers = handlers
c.Keys = nil
c.index = -1
c.Errors = c.Errors[0:0]
return c
}
/************************************/
/****** FLOW AND ERROR MANAGEMENT****/
/************************************/
func (c *Context) Copy() *Context {
var cp Context = *c
cp.index = AbortIndex
cp.handlers = nil
return &cp
}
// Next should be used only in the middlewares.
// It executes the pending handlers in the chain inside the calling handler.
// See example in github.
func (c *Context) Next() {
c.index++
s := int8(len(c.handlers))
for ; c.index < s; c.index++ {
c.handlers[c.index](c)
}
}
// Forces the system to do not continue calling the pending handlers.
// For example, the first handler checks if the request is authorized. If it's not, context.Abort(401) should be called.
// The rest of pending handlers would never be called for that request.
func (c *Context) Abort(code int) {
if code >= 0 {
c.Writer.WriteHeader(code)
}
c.index = AbortIndex
}
// Fail is the same as Abort plus an error message.
// Calling `context.Fail(500, err)` is equivalent to:
// ```
// context.Error("Operation aborted", err)
// context.Abort(500)
// ```
func (c *Context) Fail(code int, err error) {
c.Error(err, "Operation aborted")
c.Abort(code)
}
func (c *Context) ErrorTyped(err error, typ uint32, meta interface{}) {
c.Errors = append(c.Errors, errorMsg{
Err: err.Error(),
Type: typ,
Meta: meta,
})
}
// Attaches an error to the current context. The error is pushed to a list of errors.
// It's a good idea to call Error for each error that occurred during the resolution of a request.
// A middleware can be used to collect all the errors and push them to a database together, print a log, or append it in the HTTP response.
func (c *Context) Error(err error, meta interface{}) {
c.ErrorTyped(err, ErrorTypeExternal, meta)
}
func (c *Context) LastError() error {
s := len(c.Errors)
if s > 0 {
return errors.New(c.Errors[s-1].Err)
} else {
return nil
}
}
/************************************/
/******** METADATA MANAGEMENT********/
/************************************/
// Sets a new pair key/value just for the specified context.
// It also lazy initializes the hashmap.
func (c *Context) Set(key string, item interface{}) {
if c.Keys == nil {
c.Keys = make(map[string]interface{})
}
c.Keys[key] = item
}
// Get returns the value for the given key or an error if the key does not exist.
func (c *Context) Get(key string) (interface{}, error) {
if c.Keys != nil {
item, ok := c.Keys[key]
if ok {
return item, nil
}
}
return nil, errors.New("Key does not exist.")
}
// MustGet returns the value for the given key or panics if the value doesn't exist.
func (c *Context) MustGet(key string) interface{} {
value, err := c.Get(key)
if err != nil || value == nil {
log.Panicf("Key %s doesn't exist", key)
}
return value
}
/************************************/
/******** ENCOGING MANAGEMENT********/
/************************************/
// This function checks the Content-Type to select a binding engine automatically,
// Depending the "Content-Type" header different bindings are used:
// "application/json" --> JSON binding
// "application/xml" --> XML binding
// else --> returns an error
// if Parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. It decodes the json payload into the struct specified as a pointer.Like ParseBody() but this method also writes a 400 error if the json is not valid.
func (c *Context) Bind(obj interface{}) bool {
var b binding.Binding
ctype := filterFlags(c.Request.Header.Get("Content-Type"))
switch {
case c.Request.Method == "GET" || ctype == MIMEPOSTForm:
b = binding.Form
case ctype == MIMEJSON:
b = binding.JSON
case ctype == MIMEXML || ctype == MIMEXML2:
b = binding.XML
default:
c.Fail(400, errors.New("unknown content-type: "+ctype))
return false
}
return c.BindWith(obj, b)
}
func (c *Context) BindWith(obj interface{}, b binding.Binding) bool {
if err := b.Bind(c.Request, obj); err != nil {
c.Fail(400, err)
return false
}
return true
}
func (c *Context) Render(code int, render render.Render, obj ...interface{}) {
if err := render.Render(c.Writer, code, obj...); err != nil {
c.ErrorTyped(err, ErrorTypeInternal, obj)
c.Abort(500)
}
}
// Serializes the given struct as JSON into the response body in a fast and efficient way.
// It also sets the Content-Type as "application/json".
func (c *Context) JSON(code int, obj interface{}) {
c.Render(code, render.JSON, obj)
}
// Serializes the given struct as XML into the response body in a fast and efficient way.
// It also sets the Content-Type as "application/xml".
func (c *Context) XML(code int, obj interface{}) {
c.Render(code, render.XML, obj)
}
// Renders the HTTP template specified by its file name.
// It also updates the HTTP code and sets the Content-Type as "text/html".
// See http://golang.org/doc/articles/wiki/
func (c *Context) HTML(code int, name string, obj interface{}) {
c.Render(code, c.Engine.HTMLRender, name, obj)
}
// Writes the given string into the response body and sets the Content-Type to "text/plain".
func (c *Context) String(code int, format string, values ...interface{}) {
c.Render(code, render.Plain, format, values)
}
// Returns a HTTP redirect to the specific location.
func (c *Context) Redirect(code int, location string) {
if code >= 300 && code <= 308 {
c.Render(code, render.Redirect, location)
} else {
panic(fmt.Sprintf("Cannot send a redirect with status code %d", code))
}
}
// Writes some data into the body stream and updates the HTTP code.
func (c *Context) Data(code int, contentType string, data []byte) {
if len(contentType) > 0 {
c.Writer.Header().Set("Content-Type", contentType)
}
if code >= 0 {
c.Writer.WriteHeader(code)
}
c.Writer.Write(data)
}
// Writes the specified file into the body stream
func (c *Context) File(filepath string) {
http.ServeFile(c.Writer, c.Request, filepath)
}
| context.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.9974392652511597,
0.10001243650913239,
0.00016806120402179658,
0.0009905635379254818,
0.268801212310791
] |
{
"id": 2,
"code_window": [
")\n",
"\n",
"var (\n",
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
")\n",
"\n",
"func writeHeader(w http.ResponseWriter, code int, contentType string) {\n",
"\tw.Header().Set(\"Content-Type\", contentType)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
"\tHTMLDebug = htmlDebugRender{}\n"
],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 34
} | package gin
import (
"log"
"net/http"
)
type (
ResponseWriter interface {
http.ResponseWriter
Status() int
Written() bool
WriteHeaderNow()
}
responseWriter struct {
http.ResponseWriter
status int
written bool
}
)
func (w *responseWriter) reset(writer http.ResponseWriter) {
w.ResponseWriter = writer
w.status = 200
w.written = false
}
func (w *responseWriter) WriteHeader(code int) {
if code != 0 {
w.status = code
if w.written {
log.Println("[GIN] WARNING. Headers were already written!")
}
}
}
func (w *responseWriter) WriteHeaderNow() {
if !w.written {
w.written = true
w.ResponseWriter.WriteHeader(w.status)
}
}
func (w *responseWriter) Write(data []byte) (n int, err error) {
w.WriteHeaderNow()
return w.ResponseWriter.Write(data)
}
func (w *responseWriter) Status() int {
return w.status
}
func (w *responseWriter) Written() bool {
return w.written
}
| response_writer.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.9936421513557434,
0.4867015779018402,
0.0007537395576946437,
0.47640344500541687,
0.48101216554641724
] |
{
"id": 2,
"code_window": [
")\n",
"\n",
"var (\n",
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
")\n",
"\n",
"func writeHeader(w http.ResponseWriter, code int, contentType string) {\n",
"\tw.Header().Set(\"Content-Type\", contentType)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tJSON = jsonRender{}\n",
"\tXML = xmlRender{}\n",
"\tPlain = plainRender{}\n",
"\tRedirect = redirectRender{}\n",
"\tHTMLDebug = htmlDebugRender{}\n"
],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 34
} | package gin
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"runtime"
)
var (
dunno = []byte("???")
centerDot = []byte("·")
dot = []byte(".")
slash = []byte("/")
)
// stack returns a nicely formated stack frame, skipping skip frames
func stack(skip int) []byte {
buf := new(bytes.Buffer) // the returned data
// As we loop, we open files and read them. These variables record the currently
// loaded file.
var lines [][]byte
var lastFile string
for i := skip; ; i++ { // Skip the expected number of frames
pc, file, line, ok := runtime.Caller(i)
if !ok {
break
}
// Print this much at least. If we can't find the source, it won't show.
fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
if file != lastFile {
data, err := ioutil.ReadFile(file)
if err != nil {
continue
}
lines = bytes.Split(data, []byte{'\n'})
lastFile = file
}
fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
}
return buf.Bytes()
}
// source returns a space-trimmed slice of the n'th line.
func source(lines [][]byte, n int) []byte {
n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
if n < 0 || n >= len(lines) {
return dunno
}
return bytes.TrimSpace(lines[n])
}
// function returns, if possible, the name of the function containing the PC.
func function(pc uintptr) []byte {
fn := runtime.FuncForPC(pc)
if fn == nil {
return dunno
}
name := []byte(fn.Name())
// The name includes the path name to the package, which is unnecessary
// since the file name is already included. Plus, it has center dots.
// That is, we see
// runtime/debug.*T·ptrmethod
// and want
// *T.ptrmethod
// Also the package path might contains dot (e.g. code.google.com/...),
// so first eliminate the path prefix
if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
name = name[lastslash+1:]
}
if period := bytes.Index(name, dot); period >= 0 {
name = name[period+1:]
}
name = bytes.Replace(name, centerDot, dot, -1)
return name
}
// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
// While Martini is in development mode, Recovery will also output the panic as HTML.
func Recovery() HandlerFunc {
return func(c *Context) {
defer func() {
if err := recover(); err != nil {
stack := stack(3)
log.Printf("PANIC: %s\n%s", err, stack)
c.Writer.WriteHeader(http.StatusInternalServerError)
}
}()
c.Next()
}
}
| recovery.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.995525062084198,
0.09973541647195816,
0.00016725659952498972,
0.00017301333718933165,
0.2985965609550476
] |
{
"id": 3,
"code_window": [
"\twriteHeader(w, code, \"application/xml\")\n",
"\tencoder := xml.NewEncoder(w)\n",
"\treturn encoder.Encode(data[0])\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}\n",
"\n",
"func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/plain\")\n",
"\tformat := data[0].(string)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 63
} | package render
import (
"encoding/json"
"encoding/xml"
"fmt"
"html/template"
"net/http"
)
type (
Render interface {
Render(http.ResponseWriter, int, ...interface{}) error
}
// JSON binding
jsonRender struct{}
// XML binding
xmlRender struct{}
// Plain text
plainRender struct{}
// Redirects
redirectRender struct{}
// form binding
HTMLRender struct {
Template *template.Template
}
)
var (
JSON = jsonRender{}
XML = xmlRender{}
Plain = plainRender{}
Redirect = redirectRender{}
)
func writeHeader(w http.ResponseWriter, code int, contentType string) {
w.Header().Set("Content-Type", contentType)
w.WriteHeader(code)
}
func (_ jsonRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/json")
encoder := json.NewEncoder(w)
return encoder.Encode(data[0])
}
func (_ redirectRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
w.Header().Set("Location", data[0].(string))
w.WriteHeader(code)
return nil
}
func (_ xmlRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/xml")
encoder := xml.NewEncoder(w)
return encoder.Encode(data[0])
}
func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/html")
file := data[0].(string)
obj := data[1]
return html.Template.ExecuteTemplate(w, file, obj)
}
func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/plain")
format := data[0].(string)
args := data[1].([]interface{})
var err error
if len(args) > 0 {
_, err = w.Write([]byte(fmt.Sprintf(format, args...)))
} else {
_, err = w.Write([]byte(format))
}
return err
}
| render/render.go | 1 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.9968480467796326,
0.44247114658355713,
0.00016668661555740982,
0.0542050339281559,
0.48143473267555237
] |
{
"id": 3,
"code_window": [
"\twriteHeader(w, code, \"application/xml\")\n",
"\tencoder := xml.NewEncoder(w)\n",
"\treturn encoder.Encode(data[0])\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}\n",
"\n",
"func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/plain\")\n",
"\tformat := data[0].(string)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 63
} | package hello
import (
"net/http"
"github.com/gin-gonic/gin"
)
// This function's name is a must. App Engine uses it to drive the requests properly.
func init() {
// Starts a new Gin instance with no middle-ware
r := gin.New()
// Define your handlers
r.GET("/", func(c *gin.Context){
c.String(200, "Hello World!")
})
r.GET("/ping", func(c *gin.Context){
c.String(200, "pong")
})
// Handle all requests using net/http
http.Handle("/", r)
} | examples/app-engine/hello.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.00016713289369363338,
0.00016401194443460554,
0.0001622135314391926,
0.00016268939361907542,
0.0000022153819827508414
] |
{
"id": 3,
"code_window": [
"\twriteHeader(w, code, \"application/xml\")\n",
"\tencoder := xml.NewEncoder(w)\n",
"\treturn encoder.Encode(data[0])\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}\n",
"\n",
"func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/plain\")\n",
"\tformat := data[0].(string)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 63
} | language: go
go:
- 1.3
- tip
| .travis.yml | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.00016687746392562985,
0.00016687746392562985,
0.00016687746392562985,
0.00016687746392562985,
0
] |
{
"id": 3,
"code_window": [
"\twriteHeader(w, code, \"application/xml\")\n",
"\tencoder := xml.NewEncoder(w)\n",
"\treturn encoder.Encode(data[0])\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}\n",
"\n",
"func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/plain\")\n",
"\tformat := data[0].(string)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "render/render.go",
"type": "replace",
"edit_start_line_idx": 63
} | List of all the awesome people working to make Gin the best Web Framework in Go!
##gin 0.x series authors
**Lead Developer:** Manu Martinez-Almeida (@manucorporat)
**Staff:**
Javier Provecho (@javierprovecho)
People and companies, who have contributed, in alphabetical order.
**@adammck (Adam Mckaig)**
- Add MIT license
**@AlexanderChen1989 (Alexander)**
- Typos in README
**@alexandernyquist (Alexander Nyquist)**
- Using template.Must to fix multiple return issue
- ★ Added support for OPTIONS verb
- ★ Setting response headers before calling WriteHeader
- Improved documentation for model binding
- ★ Added Content.Redirect()
- ★ Added tons of Unit tests
**@austinheap (Austin Heap)**
- Added travis CI integration
**@bluele (Jun Kimura)**
- Fixes code examples in README
**@chad-russell**
- ★ Support for serializing gin.H into XML
**@dickeyxxx (Jeff Dickey)**
- Typos in README
**@fmd (Fareed Dudhia)**
- Fix typo. SetHTTPTemplate -> SetHTMLTemplate
**@jasonrhansen**
- Fix spelling and grammar errors in documentation
**@julienschmidt (Julien Schmidt)**
- gofmt the code examples
**@kyledinh (Kyle Dinh)**
- Adds RunTLS()
**@LinusU (Linus Unnebäck)**
- Small fixes in README
**@lucas-clemente (Lucas Clemente)**
- ★ work around path.Join removing trailing slashes from routes
**@mdigger (Dmitry Sedykh)**
- Fixes Form binding when content-type is x-www-form-urlencoded
- No repeat call c.Writer.Status() in gin.Logger
- Fixes Content-Type for json render
**@mopemope (Yutaka Matsubara)**
- ★ Adds Godep support (Dependencies Manager)
- Fix variadic parameter in the flexible render API
- Fix Corrupted plain render
**@msemenistyi (Mykyta Semenistyi)**
- update Readme.md. Add code to String method
**@msoedov (Sasha Myasoedov)**
- ★ Adds tons of unit tests.
**@ngerakines (Nick Gerakines)**
- ★ Improves API, c.GET() doesn't panic
- Adds MustGet() method
**@r8k (Rajiv Kilaparti)**
- Fix Port usage in README.
**@silasb (Silas Baronda)**
- Fixing quotes in README
**@SkuliOskarsson (Skuli Oskarsson)**
- Fixes some texts in README II | AUTHORS.md | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.00026034473557956517,
0.00017760613991413265,
0.0001652422797633335,
0.00016684118600096554,
0.00002716138624236919
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (_ htmlDebugRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn template.New(file).Execute(w, obj)\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 82
} | package render
import (
"encoding/json"
"encoding/xml"
"fmt"
"html/template"
"net/http"
)
type (
Render interface {
Render(http.ResponseWriter, int, ...interface{}) error
}
// JSON binding
jsonRender struct{}
// XML binding
xmlRender struct{}
// Plain text
plainRender struct{}
// Redirects
redirectRender struct{}
// form binding
HTMLRender struct {
Template *template.Template
}
)
var (
JSON = jsonRender{}
XML = xmlRender{}
Plain = plainRender{}
Redirect = redirectRender{}
)
func writeHeader(w http.ResponseWriter, code int, contentType string) {
w.Header().Set("Content-Type", contentType)
w.WriteHeader(code)
}
func (_ jsonRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/json")
encoder := json.NewEncoder(w)
return encoder.Encode(data[0])
}
func (_ redirectRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
w.Header().Set("Location", data[0].(string))
w.WriteHeader(code)
return nil
}
func (_ xmlRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "application/xml")
encoder := xml.NewEncoder(w)
return encoder.Encode(data[0])
}
func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/html")
file := data[0].(string)
obj := data[1]
return html.Template.ExecuteTemplate(w, file, obj)
}
func (_ plainRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {
writeHeader(w, code, "text/plain")
format := data[0].(string)
args := data[1].([]interface{})
var err error
if len(args) > 0 {
_, err = w.Write([]byte(fmt.Sprintf(format, args...)))
} else {
_, err = w.Write([]byte(format))
}
return err
}
| render/render.go | 1 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.31885871291160583,
0.038719162344932556,
0.00016966018301900476,
0.000733608496375382,
0.09932562708854675
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (_ htmlDebugRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn template.New(file).Execute(w, obj)\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 82
} | package gin
import (
"bytes"
"errors"
"fmt"
"github.com/gin-gonic/gin/binding"
"github.com/gin-gonic/gin/render"
"github.com/julienschmidt/httprouter"
"log"
"net/http"
)
const (
ErrorTypeInternal = 1 << iota
ErrorTypeExternal = 1 << iota
ErrorTypeAll = 0xffffffff
)
// Used internally to collect errors that occurred during an http request.
type errorMsg struct {
Err string `json:"error"`
Type uint32 `json:"-"`
Meta interface{} `json:"meta"`
}
type errorMsgs []errorMsg
func (a errorMsgs) ByType(typ uint32) errorMsgs {
if len(a) == 0 {
return a
}
result := make(errorMsgs, 0, len(a))
for _, msg := range a {
if msg.Type&typ > 0 {
result = append(result, msg)
}
}
return result
}
func (a errorMsgs) String() string {
if len(a) == 0 {
return ""
}
var buffer bytes.Buffer
for i, msg := range a {
text := fmt.Sprintf("Error #%02d: %s \n Meta: %v\n", (i + 1), msg.Err, msg.Meta)
buffer.WriteString(text)
}
return buffer.String()
}
// Context is the most important part of gin. It allows us to pass variables between middleware,
// manage the flow, validate the JSON of a request and render a JSON response for example.
type Context struct {
writermem responseWriter
Request *http.Request
Writer ResponseWriter
Keys map[string]interface{}
Errors errorMsgs
Params httprouter.Params
Engine *Engine
handlers []HandlerFunc
index int8
}
/************************************/
/********** ROUTES GROUPING *********/
/************************************/
func (engine *Engine) createContext(w http.ResponseWriter, req *http.Request, params httprouter.Params, handlers []HandlerFunc) *Context {
c := engine.cache.Get().(*Context)
c.writermem.reset(w)
c.Request = req
c.Params = params
c.handlers = handlers
c.Keys = nil
c.index = -1
c.Errors = c.Errors[0:0]
return c
}
/************************************/
/****** FLOW AND ERROR MANAGEMENT****/
/************************************/
func (c *Context) Copy() *Context {
var cp Context = *c
cp.index = AbortIndex
cp.handlers = nil
return &cp
}
// Next should be used only in the middlewares.
// It executes the pending handlers in the chain inside the calling handler.
// See example in github.
func (c *Context) Next() {
c.index++
s := int8(len(c.handlers))
for ; c.index < s; c.index++ {
c.handlers[c.index](c)
}
}
// Forces the system to do not continue calling the pending handlers.
// For example, the first handler checks if the request is authorized. If it's not, context.Abort(401) should be called.
// The rest of pending handlers would never be called for that request.
func (c *Context) Abort(code int) {
if code >= 0 {
c.Writer.WriteHeader(code)
}
c.index = AbortIndex
}
// Fail is the same as Abort plus an error message.
// Calling `context.Fail(500, err)` is equivalent to:
// ```
// context.Error("Operation aborted", err)
// context.Abort(500)
// ```
func (c *Context) Fail(code int, err error) {
c.Error(err, "Operation aborted")
c.Abort(code)
}
func (c *Context) ErrorTyped(err error, typ uint32, meta interface{}) {
c.Errors = append(c.Errors, errorMsg{
Err: err.Error(),
Type: typ,
Meta: meta,
})
}
// Attaches an error to the current context. The error is pushed to a list of errors.
// It's a good idea to call Error for each error that occurred during the resolution of a request.
// A middleware can be used to collect all the errors and push them to a database together, print a log, or append it in the HTTP response.
func (c *Context) Error(err error, meta interface{}) {
c.ErrorTyped(err, ErrorTypeExternal, meta)
}
func (c *Context) LastError() error {
s := len(c.Errors)
if s > 0 {
return errors.New(c.Errors[s-1].Err)
} else {
return nil
}
}
/************************************/
/******** METADATA MANAGEMENT********/
/************************************/
// Sets a new pair key/value just for the specified context.
// It also lazy initializes the hashmap.
func (c *Context) Set(key string, item interface{}) {
if c.Keys == nil {
c.Keys = make(map[string]interface{})
}
c.Keys[key] = item
}
// Get returns the value for the given key or an error if the key does not exist.
func (c *Context) Get(key string) (interface{}, error) {
if c.Keys != nil {
item, ok := c.Keys[key]
if ok {
return item, nil
}
}
return nil, errors.New("Key does not exist.")
}
// MustGet returns the value for the given key or panics if the value doesn't exist.
func (c *Context) MustGet(key string) interface{} {
value, err := c.Get(key)
if err != nil || value == nil {
log.Panicf("Key %s doesn't exist", key)
}
return value
}
/************************************/
/******** ENCOGING MANAGEMENT********/
/************************************/
// This function checks the Content-Type to select a binding engine automatically,
// Depending the "Content-Type" header different bindings are used:
// "application/json" --> JSON binding
// "application/xml" --> XML binding
// else --> returns an error
// if Parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. It decodes the json payload into the struct specified as a pointer.Like ParseBody() but this method also writes a 400 error if the json is not valid.
func (c *Context) Bind(obj interface{}) bool {
var b binding.Binding
ctype := filterFlags(c.Request.Header.Get("Content-Type"))
switch {
case c.Request.Method == "GET" || ctype == MIMEPOSTForm:
b = binding.Form
case ctype == MIMEJSON:
b = binding.JSON
case ctype == MIMEXML || ctype == MIMEXML2:
b = binding.XML
default:
c.Fail(400, errors.New("unknown content-type: "+ctype))
return false
}
return c.BindWith(obj, b)
}
func (c *Context) BindWith(obj interface{}, b binding.Binding) bool {
if err := b.Bind(c.Request, obj); err != nil {
c.Fail(400, err)
return false
}
return true
}
func (c *Context) Render(code int, render render.Render, obj ...interface{}) {
if err := render.Render(c.Writer, code, obj...); err != nil {
c.ErrorTyped(err, ErrorTypeInternal, obj)
c.Abort(500)
}
}
// Serializes the given struct as JSON into the response body in a fast and efficient way.
// It also sets the Content-Type as "application/json".
func (c *Context) JSON(code int, obj interface{}) {
c.Render(code, render.JSON, obj)
}
// Serializes the given struct as XML into the response body in a fast and efficient way.
// It also sets the Content-Type as "application/xml".
func (c *Context) XML(code int, obj interface{}) {
c.Render(code, render.XML, obj)
}
// Renders the HTTP template specified by its file name.
// It also updates the HTTP code and sets the Content-Type as "text/html".
// See http://golang.org/doc/articles/wiki/
func (c *Context) HTML(code int, name string, obj interface{}) {
c.Render(code, c.Engine.HTMLRender, name, obj)
}
// Writes the given string into the response body and sets the Content-Type to "text/plain".
func (c *Context) String(code int, format string, values ...interface{}) {
c.Render(code, render.Plain, format, values)
}
// Returns a HTTP redirect to the specific location.
func (c *Context) Redirect(code int, location string) {
if code >= 300 && code <= 308 {
c.Render(code, render.Redirect, location)
} else {
panic(fmt.Sprintf("Cannot send a redirect with status code %d", code))
}
}
// Writes some data into the body stream and updates the HTTP code.
func (c *Context) Data(code int, contentType string, data []byte) {
if len(contentType) > 0 {
c.Writer.Header().Set("Content-Type", contentType)
}
if code >= 0 {
c.Writer.WriteHeader(code)
}
c.Writer.Write(data)
}
// Writes the specified file into the body stream
func (c *Context) File(filepath string) {
http.ServeFile(c.Writer, c.Request, filepath)
}
| context.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.025932980701327324,
0.0030900926794856787,
0.0001648889301577583,
0.00028073813882656395,
0.006222571711987257
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (_ htmlDebugRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn template.New(file).Execute(w, obj)\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 82
} | package gin
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"strings"
"testing"
)
func PerformRequest(r http.Handler, method, path string) *httptest.ResponseRecorder {
req, _ := http.NewRequest(method, path, nil)
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
return w
}
// TestSingleRouteOK tests that POST route is correctly invoked.
func testRouteOK(method string, t *testing.T) {
// SETUP
passed := false
r := New()
r.Handle(method, "/test", []HandlerFunc{func(c *Context) {
passed = true
}})
// RUN
w := PerformRequest(r, method, "/test")
// TEST
if passed == false {
t.Errorf(method + " route handler was not invoked.")
}
if w.Code != http.StatusOK {
t.Errorf("Status code should be %v, was %d", http.StatusOK, w.Code)
}
}
func TestRouterGroupRouteOK(t *testing.T) {
testRouteOK("POST", t)
testRouteOK("DELETE", t)
testRouteOK("PATCH", t)
testRouteOK("PUT", t)
testRouteOK("OPTIONS", t)
testRouteOK("HEAD", t)
}
// TestSingleRouteOK tests that POST route is correctly invoked.
func testRouteNotOK(method string, t *testing.T) {
// SETUP
passed := false
r := New()
r.Handle(method, "/test_2", []HandlerFunc{func(c *Context) {
passed = true
}})
// RUN
w := PerformRequest(r, method, "/test")
// TEST
if passed == true {
t.Errorf(method + " route handler was invoked, when it should not")
}
if w.Code != http.StatusNotFound {
// If this fails, it's because httprouter needs to be updated to at least f78f58a0db
t.Errorf("Status code should be %v, was %d. Location: %s", http.StatusNotFound, w.Code, w.HeaderMap.Get("Location"))
}
}
// TestSingleRouteOK tests that POST route is correctly invoked.
func TestRouteNotOK(t *testing.T) {
testRouteNotOK("POST", t)
testRouteNotOK("DELETE", t)
testRouteNotOK("PATCH", t)
testRouteNotOK("PUT", t)
testRouteNotOK("OPTIONS", t)
testRouteNotOK("HEAD", t)
}
// TestSingleRouteOK tests that POST route is correctly invoked.
func testRouteNotOK2(method string, t *testing.T) {
// SETUP
passed := false
r := New()
var methodRoute string
if method == "POST" {
methodRoute = "GET"
} else {
methodRoute = "POST"
}
r.Handle(methodRoute, "/test", []HandlerFunc{func(c *Context) {
passed = true
}})
// RUN
w := PerformRequest(r, method, "/test")
// TEST
if passed == true {
t.Errorf(method + " route handler was invoked, when it should not")
}
if w.Code != http.StatusNotFound {
// If this fails, it's because httprouter needs to be updated to at least f78f58a0db
t.Errorf("Status code should be %v, was %d. Location: %s", http.StatusNotFound, w.Code, w.HeaderMap.Get("Location"))
}
}
// TestSingleRouteOK tests that POST route is correctly invoked.
func TestRouteNotOK2(t *testing.T) {
testRouteNotOK2("POST", t)
testRouteNotOK2("DELETE", t)
testRouteNotOK2("PATCH", t)
testRouteNotOK2("PUT", t)
testRouteNotOK2("OPTIONS", t)
testRouteNotOK2("HEAD", t)
}
// TestHandleStaticFile - ensure the static file handles properly
func TestHandleStaticFile(t *testing.T) {
// SETUP file
testRoot, _ := os.Getwd()
f, err := ioutil.TempFile(testRoot, "")
if err != nil {
t.Error(err)
}
defer os.Remove(f.Name())
filePath := path.Join("/", path.Base(f.Name()))
f.WriteString("Gin Web Framework")
f.Close()
// SETUP gin
r := New()
r.Static("./", testRoot)
// RUN
w := PerformRequest(r, "GET", filePath)
// TEST
if w.Code != 200 {
t.Errorf("Response code should be Ok, was: %s", w.Code)
}
if w.Body.String() != "Gin Web Framework" {
t.Errorf("Response should be test, was: %s", w.Body.String())
}
if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" {
t.Errorf("Content-Type should be text/plain, was %s", w.HeaderMap.Get("Content-Type"))
}
}
// TestHandleStaticDir - ensure the root/sub dir handles properly
func TestHandleStaticDir(t *testing.T) {
// SETUP
r := New()
r.Static("/", "./")
// RUN
w := PerformRequest(r, "GET", "/")
// TEST
bodyAsString := w.Body.String()
if w.Code != 200 {
t.Errorf("Response code should be Ok, was: %s", w.Code)
}
if len(bodyAsString) == 0 {
t.Errorf("Got empty body instead of file tree")
}
if !strings.Contains(bodyAsString, "gin.go") {
t.Errorf("Can't find:`gin.go` in file tree: %s", bodyAsString)
}
if w.HeaderMap.Get("Content-Type") != "text/html; charset=utf-8" {
t.Errorf("Content-Type should be text/plain, was %s", w.HeaderMap.Get("Content-Type"))
}
}
// TestHandleHeadToDir - ensure the root/sub dir handles properly
func TestHandleHeadToDir(t *testing.T) {
// SETUP
r := New()
r.Static("/", "./")
// RUN
w := PerformRequest(r, "HEAD", "/")
// TEST
bodyAsString := w.Body.String()
if w.Code != 200 {
t.Errorf("Response code should be Ok, was: %s", w.Code)
}
if len(bodyAsString) == 0 {
t.Errorf("Got empty body instead of file tree")
}
if !strings.Contains(bodyAsString, "gin.go") {
t.Errorf("Can't find:`gin.go` in file tree: %s", bodyAsString)
}
if w.HeaderMap.Get("Content-Type") != "text/html; charset=utf-8" {
t.Errorf("Content-Type should be text/plain, was %s", w.HeaderMap.Get("Content-Type"))
}
}
| gin_test.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.003315326990559697,
0.00034628662979230285,
0.0001646524906391278,
0.00016985335969366133,
0.0006824463489465415
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\treturn err\n",
"}\n"
],
"labels": [
"keep",
"keep",
"add"
],
"after_edit": [
"\n",
"func (_ htmlDebugRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn template.New(file).Execute(w, obj)\n",
"}\n",
"\n",
"func (html HTMLRender) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n",
"\twriteHeader(w, code, \"text/html\")\n",
"\tfile := data[0].(string)\n",
"\tobj := data[1]\n",
"\treturn html.Template.ExecuteTemplate(w, file, obj)\n",
"}"
],
"file_path": "render/render.go",
"type": "add",
"edit_start_line_idx": 82
} | package gin
import (
"crypto/subtle"
"encoding/base64"
"errors"
"sort"
)
const (
AuthUserKey = "user"
)
type (
BasicAuthPair struct {
Code string
User string
}
Accounts map[string]string
Pairs []BasicAuthPair
)
func (a Pairs) Len() int { return len(a) }
func (a Pairs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Pairs) Less(i, j int) bool { return a[i].Code < a[j].Code }
func processCredentials(accounts Accounts) (Pairs, error) {
if len(accounts) == 0 {
return nil, errors.New("Empty list of authorized credentials.")
}
pairs := make(Pairs, 0, len(accounts))
for user, password := range accounts {
if len(user) == 0 || len(password) == 0 {
return nil, errors.New("User or password is empty")
}
base := user + ":" + password
code := "Basic " + base64.StdEncoding.EncodeToString([]byte(base))
pairs = append(pairs, BasicAuthPair{code, user})
}
// We have to sort the credentials in order to use bsearch later.
sort.Sort(pairs)
return pairs, nil
}
func secureCompare(given, actual string) bool {
if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 {
return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1
} else {
/* Securely compare actual to itself to keep constant time, but always return false */
return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false
}
}
func searchCredential(pairs Pairs, auth string) string {
if len(auth) == 0 {
return ""
}
// Search user in the slice of allowed credentials
r := sort.Search(len(pairs), func(i int) bool { return pairs[i].Code >= auth })
if r < len(pairs) && secureCompare(pairs[r].Code, auth) {
return pairs[r].User
} else {
return ""
}
}
// Implements a basic Basic HTTP Authorization. It takes as argument a map[string]string where
// the key is the user name and the value is the password.
func BasicAuth(accounts Accounts) HandlerFunc {
pairs, err := processCredentials(accounts)
if err != nil {
panic(err)
}
return func(c *Context) {
// Search user in the slice of allowed credentials
user := searchCredential(pairs, c.Request.Header.Get("Authorization"))
if len(user) == 0 {
// Credentials doesn't match, we return 401 Unauthorized and abort request.
c.Writer.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
c.Fail(401, errors.New("Unauthorized"))
} else {
// user is allowed, set UserId to key "user" in this context, the userId can be read later using
// c.Get(gin.AuthUserKey)
c.Set(AuthUserKey, user)
}
}
}
| auth.go | 0 | https://github.com/gin-gonic/gin/commit/378610b3b215bc8d07c4c4b6dc42c59a8fe20b1a | [
0.05329414829611778,
0.006723156198859215,
0.00016717193648219109,
0.00030887883622199297,
0.01654873602092266
] |
{
"id": 2,
"code_window": [
"\tif err != nil {\n",
"\t\treturn auth.Credentials{}, err\n",
"\t}\n",
"\tcred.ParentUser = parentUser\n",
"\tcred.Groups = groups\n",
"\tcred.Status = string(madmin.AccountEnabled)\n",
"\n",
"\tu := newUserIdentity(cred)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcred.Status = string(auth.AccountOn)\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"path"
"sort"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
var cred auth.Credentials
var adminAPIErr APIErrorCode
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, cred
}
// Validate request signature.
cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
}
return objectAPI, cred
}
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
ok, _, err := globalIAMSys.IsTempUser(accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if ok {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
if err := globalIAMSys.DeleteUser(accessKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to delete user.
for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// ListUsers - GET /minio/admin/v3/list-users
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
return
}
password := cred.SecretKey
allCredentials, err := globalIAMSys.ListUsers()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(allCredentials)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// GetUserInfo - GET /minio/admin/v3/user-info
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
name := vars["accessKey"]
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := cred.AccessKey
if cred.ParentUser != "" {
accessKey = cred.ParentUser
}
implicitPerm := name == accessKey
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
userInfo, err := globalIAMSys.GetUserInfo(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(userInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, data)
}
// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
return
}
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
var updReq madmin.GroupAddRemove
err = json.Unmarshal(data, &updReq)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if updReq.IsRemove {
err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
} else {
err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to load group.
for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// GetGroup - /minio/admin/v3/group?group=mygroup1
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
group := vars["group"]
gdesc, err := globalIAMSys.GetGroupDescription(group)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(gdesc)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// ListGroups - GET /minio/admin/v3/groups
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
return
}
groups, err := globalIAMSys.ListGroups()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(groups)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
group := vars["group"]
status := vars["status"]
var err error
if status == statusEnabled {
err = globalIAMSys.SetGroupStatus(group, true)
} else if status == statusDisabled {
err = globalIAMSys.SetGroupStatus(group, false)
} else {
err = errInvalidArgument
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload user.
for _, nerr := range globalNotificationSys.LoadGroup(group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
status := vars["status"]
// This API is not allowed to lookup accessKey user status
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if err := globalIAMSys.SetUserStatus(accessKey, madmin.AccountStatus(status)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload user.
for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
accessKey := path.Clean(vars["accessKey"])
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Not allowed to add a user with same access key as root credential
if owner && accessKey == cred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
// Incoming access key matches parent user then we should
// reject password change requests.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
implicitPerm := accessKey == cred.AccessKey
if !implicitPerm {
parentUser := cred.ParentUser
if parentUser == "" {
parentUser = cred.AccessKey
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: parentUser,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", parentUser, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
DenyOnly: true, // check if changing password is explicitly denied.
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var uinfo madmin.UserInfo
if err = json.Unmarshal(configBytes, &uinfo); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other Minio peers to reload user
for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
// Disallow creating service accounts for root user.
if createReq.TargetUser == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
var (
targetUser string
targetGroups []string
)
targetUser = createReq.TargetUser
// Need permission if we are creating a service acccount
// for a user <> to the request sender
if targetUser != "" && targetUser != cred.AccessKey {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if globalLDAPConfig.Enabled && targetUser != "" {
// If LDAP enabled, service accounts need
// to be created only for LDAP users.
var err error
_, targetGroups, err = globalLDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
} else {
if targetUser == "" {
targetUser = cred.AccessKey
}
if cred.ParentUser != "" {
targetUser = cred.ParentUser
}
targetGroups = cred.Groups
}
opts := newServiceAccountOpts{sessionPolicy: createReq.Policy, accessKey: createReq.AccessKey, secretKey: createReq.SecretKey}
newCred, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other Minio peers to reload user the service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
var createResp = madmin.AddServiceAccountResp{
Credentials: auth.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
},
}
data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// UpdateServiceAccount - POST /minio/admin/v3/update-service-account
func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
// Disallow editing service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.UpdateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}
if requestUser != svcAccount.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var updateReq madmin.UpdateServiceAccountReq
if err = json.Unmarshal(reqBytes, &updateReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
opts := updateServiceAccountOpts{sessionPolicy: updateReq.NewPolicy, secretKey: updateReq.NewSecretKey, status: updateReq.NewStatus}
err = globalIAMSys.UpdateServiceAccount(ctx, accessKey, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other Minio peers to reload user the service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
writeSuccessNoContent(w)
}
// InfoServiceAccount - GET /minio/admin/v3/info-service-account
func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
svcAccount, policy, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}
if requestUser != svcAccount.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
var svcAccountPolicy iampolicy.Policy
impliedPolicy := policy == nil
// If policy is empty, check for policy of the parent user
if !impliedPolicy {
svcAccountPolicy.Merge(*policy)
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.AccessKey, false)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
svcAccountPolicy.Merge(globalIAMSys.GetCombinedPolicy(policiesNames...))
}
policyJSON, err := json.Marshal(svcAccountPolicy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var infoResp = madmin.InfoServiceAccountResp{
ParentUser: svcAccount.ParentUser,
AccountStatus: svcAccount.Status,
ImpliedPolicy: impliedPolicy,
Policy: string(policyJSON),
}
data, err := json.Marshal(infoResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
var targetAccount string
user := r.URL.Query().Get("user")
if user != "" {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
targetAccount = user
} else {
targetAccount = cred.AccessKey
if cred.ParentUser != "" {
targetAccount = cred.ParentUser
}
}
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, targetAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var serviceAccountsNames []string
for _, svc := range serviceAccounts {
serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
}
var listResp = madmin.ListServiceAccountsResp{
Accounts: serviceAccountsNames,
}
data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
serviceAccount := mux.Vars(r)["accessKey"]
if serviceAccount == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
adminPrivilege := globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.RemoveServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
})
if !adminPrivilege {
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if parentUser != svcAccount.ParentUser {
// The service account belongs to another user but return not
// found error to mitigate brute force attacks. or the
// serviceAccount doesn't exist.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminServiceAccountNotFound), r.URL)
return
}
}
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessNoContent(w)
}
// AccountInfoHandler returns usage
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
isAllowedAccess := func(bucketName string) (rd, wr bool) {
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
rd = true
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
wr = true
}
return rd, wr
}
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
// log the error, continue with the accounting response
logger.LogIf(ctx, err)
}
// If etcd, dns federation configured list buckets from etcd.
var buckets []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for _, dnsRecords := range dnsBuckets {
buckets = append(buckets, BucketInfo{
Name: dnsRecords[0].Key,
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Name < buckets[j].Name
})
} else {
buckets, err = objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
accountName := cred.AccessKey
var policies []string
switch globalIAMSys.usersSysType {
case MinIOUsersSysType:
policies, err = globalIAMSys.PolicyDBGet(accountName, false)
case LDAPUsersSysType:
parentUser := accountName
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
default:
err = errors.New("should not happen!")
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
acctInfo := madmin.AccountInfo{
AccountName: accountName,
Policy: globalIAMSys.GetCombinedPolicy(policies...),
}
for _, bucket := range buckets {
rd, wr := isAllowedAccess(bucket.Name)
if rd || wr {
var size uint64
// Fetch the data usage of the current bucket
if !dataUsageInfo.LastUpdate.IsZero() {
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
}
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketAccessInfo{
Name: bucket.Name,
Created: bucket.Created,
Size: size,
Access: madmin.AccountAccess{
Read: rd,
Write: wr,
},
})
}
}
usageInfoJSON, err := json.Marshal(acctInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, usageInfoJSON)
}
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.Write(data)
w.(http.Flusher).Flush()
}
// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = json.NewEncoder(w).Encode(policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
policyMap := make(map[string][]byte, len(policies))
for k, p := range policies {
var err error
policyMap[k], err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
}
if err = json.NewEncoder(w).Encode(policyMap); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var newPolicies = make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
newPolicies[name] = p
}
if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["name"]
if err := globalIAMSys.DeletePolicy(policyName); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to delete policy
for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["name"]
// Error out if Content-Length is missing.
if r.ContentLength <= 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
// Error out if Content-Length is beyond allowed size.
if r.ContentLength > maxBucketPolicySize {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
iamPolicy, err := iampolicy.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Version in policy must not be empty
if iamPolicy.Version == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
return
}
if err = globalIAMSys.SetPolicy(policyName, *iamPolicy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload policy
for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["policyName"]
entityName := vars["userOrGroup"]
isGroup := vars["isGroup"] == "true"
if !isGroup {
ok, _, err := globalIAMSys.IsTempUser(entityName)
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if ok {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
}
if err := globalIAMSys.PolicyDBSet(entityName, policyName, isGroup); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload policy
for _, nerr := range globalNotificationSys.LoadPolicyMapping(entityName, isGroup) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
| cmd/admin-handlers-users.go | 1 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.075638048350811,
0.001883347169496119,
0.00015906956105027348,
0.0001760363084031269,
0.007523870095610619
] |
{
"id": 2,
"code_window": [
"\tif err != nil {\n",
"\t\treturn auth.Credentials{}, err\n",
"\t}\n",
"\tcred.ParentUser = parentUser\n",
"\tcred.Groups = groups\n",
"\tcred.Status = string(madmin.AccountEnabled)\n",
"\n",
"\tu := newUserIdentity(cred)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcred.Status = string(auth.AccountOn)\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // +build windows
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sys
// GetMaxOpenFileLimit - returns maximum file descriptor number that can be opened by this process.
func GetMaxOpenFileLimit() (curLimit, maxLimit uint64, err error) {
// Nothing to do for windows.
return curLimit, maxLimit, err
}
// SetMaxOpenFileLimit - sets maximum file descriptor number that can be opened by this process.
func SetMaxOpenFileLimit(curLimit, maxLimit uint64) error {
// Nothing to do for windows.
return nil
}
| pkg/sys/rlimit-file_windows.go | 0 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.00021857983665540814,
0.00018909299978986382,
0.00017578880942892283,
0.00018100166926160455,
0.00001721711305435747
] |
{
"id": 2,
"code_window": [
"\tif err != nil {\n",
"\t\treturn auth.Credentials{}, err\n",
"\t}\n",
"\tcred.ParentUser = parentUser\n",
"\tcred.Groups = groups\n",
"\tcred.Status = string(madmin.AccountEnabled)\n",
"\n",
"\tu := newUserIdentity(cred)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcred.Status = string(auth.AccountOn)\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lifecycle
import (
"encoding/xml"
"fmt"
"testing"
)
// TestUnsupportedFilters checks if parsing Filter xml with
// unsupported elements returns appropriate errors
func TestUnsupportedFilters(t *testing.T) {
testCases := []struct {
inputXML string
expectedErr error
}{
{ // Filter with And tags
inputXML: ` <Filter>
<And>
<Prefix>key-prefix</Prefix>
</And>
</Filter>`,
expectedErr: errXMLNotWellFormed,
},
{ // Filter with Tag tags
inputXML: ` <Filter>
<Tag>
<Key>key1</Key>
<Value>value1</Value>
</Tag>
</Filter>`,
expectedErr: nil,
},
{ // Filter with Prefix tag
inputXML: ` <Filter>
<Prefix>key-prefix</Prefix>
</Filter>`,
expectedErr: nil,
},
{ // Filter without And and multiple Tag tags
inputXML: ` <Filter>
<Prefix>key-prefix</Prefix>
<Tag>
<Key>key1</Key>
<Value>value1</Value>
</Tag>
<Tag>
<Key>key2</Key>
<Value>value2</Value>
</Tag>
</Filter>`,
expectedErr: errInvalidFilter,
},
{ // Filter with And, Prefix & multiple Tag tags
inputXML: ` <Filter>
<And>
<Prefix>key-prefix</Prefix>
<Tag>
<Key>key1</Key>
<Value>value1</Value>
</Tag>
<Tag>
<Key>key2</Key>
<Value>value2</Value>
</Tag>
</And>
</Filter>`,
expectedErr: nil,
},
{ // Filter with And and multiple Tag tags
inputXML: ` <Filter>
<And>
<Prefix></Prefix>
<Tag>
<Key>key1</Key>
<Value>value1</Value>
</Tag>
<Tag>
<Key>key2</Key>
<Value>value2</Value>
</Tag>
</And>
</Filter>`,
expectedErr: nil,
},
{ // Filter without And and single Tag tag
inputXML: ` <Filter>
<Prefix>key-prefix</Prefix>
<Tag>
<Key>key1</Key>
<Value>value1</Value>
</Tag>
</Filter>`,
expectedErr: errInvalidFilter,
},
}
for i, tc := range testCases {
t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
var filter Filter
err := xml.Unmarshal([]byte(tc.inputXML), &filter)
if err != nil {
t.Fatalf("%d: Expected no error but got %v", i+1, err)
}
err = filter.Validate()
if err != tc.expectedErr {
t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err)
}
})
}
}
| pkg/bucket/lifecycle/filter_test.go | 0 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.00017867921269498765,
0.00017347096581943333,
0.00016532566223759204,
0.0001733830285957083,
0.00000335095160153287
] |
{
"id": 2,
"code_window": [
"\tif err != nil {\n",
"\t\treturn auth.Credentials{}, err\n",
"\t}\n",
"\tcred.ParentUser = parentUser\n",
"\tcred.Groups = groups\n",
"\tcred.Status = string(madmin.AccountEnabled)\n",
"\n",
"\tu := newUserIdentity(cred)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcred.Status = string(auth.AccountOn)\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { shallow, mount } from "enzyme"
import { BucketDropdown } from "../BucketDropdown"
describe("BucketDropdown", () => {
it("should render without crashing", () => {
shallow(<BucketDropdown />)
})
it("should call toggleDropdown on dropdown toggle", () => {
const spy = jest.spyOn(BucketDropdown.prototype, 'toggleDropdown')
const wrapper = shallow(
<BucketDropdown />
)
wrapper
.find("Uncontrolled(Dropdown)")
.simulate("toggle")
expect(spy).toHaveBeenCalled()
spy.mockReset()
spy.mockRestore()
})
it("should call showBucketPolicy when Edit Policy link is clicked", () => {
const showBucketPolicy = jest.fn()
const wrapper = shallow(
<BucketDropdown showBucketPolicy={showBucketPolicy} />
)
wrapper
.find("li a")
.at(0)
.simulate("click", { stopPropagation: jest.fn() })
expect(showBucketPolicy).toHaveBeenCalled()
})
it("should call deleteBucket when Delete link is clicked", () => {
const deleteBucket = jest.fn()
const wrapper = shallow(
<BucketDropdown bucket={"test"} deleteBucket={deleteBucket} />
)
wrapper
.find("li a")
.at(1)
.simulate("click", { stopPropagation: jest.fn() })
expect(deleteBucket).toHaveBeenCalledWith("test")
})
})
| browser/app/js/buckets/__tests__/BucketDropdown.test.js | 0 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.00018175243167206645,
0.0001773627009242773,
0.00017289057723246515,
0.00017835444305092096,
0.000002641610308273812
] |
{
"id": 4,
"code_window": [
"\t\t\tif err == nil {\n",
"\t\t\t\tembeddedPolicy = &iampolicy.Policy{}\n",
"\t\t\t\tembeddedPolicy.Merge(*p)\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tp, err := iampolicy.ParseConfig(bytes.NewReader(policyBytes))\n",
"\t\t\t\tif err == nil {\n",
"\t\t\t\t\tpolicy := iampolicy.Policy{}.Merge(*p)\n",
"\t\t\t\t\tembeddedPolicy = &policy\n",
"\t\t\t\t}\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1121
} | /*
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"path"
"sort"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
var cred auth.Credentials
var adminAPIErr APIErrorCode
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, cred
}
// Validate request signature.
cred, adminAPIErr = checkAdminRequestAuth(ctx, r, action, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
}
return objectAPI, cred
}
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
ok, _, err := globalIAMSys.IsTempUser(accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if ok {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
if err := globalIAMSys.DeleteUser(accessKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to delete user.
for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// ListUsers - GET /minio/admin/v3/list-users
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
return
}
password := cred.SecretKey
allCredentials, err := globalIAMSys.ListUsers()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(allCredentials)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// GetUserInfo - GET /minio/admin/v3/user-info
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
name := vars["accessKey"]
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := cred.AccessKey
if cred.ParentUser != "" {
accessKey = cred.ParentUser
}
implicitPerm := name == accessKey
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
userInfo, err := globalIAMSys.GetUserInfo(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(userInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, data)
}
// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
return
}
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
var updReq madmin.GroupAddRemove
err = json.Unmarshal(data, &updReq)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if updReq.IsRemove {
err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
} else {
err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to load group.
for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// GetGroup - /minio/admin/v3/group?group=mygroup1
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
group := vars["group"]
gdesc, err := globalIAMSys.GetGroupDescription(group)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(gdesc)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// ListGroups - GET /minio/admin/v3/groups
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
return
}
groups, err := globalIAMSys.ListGroups()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(groups)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
group := vars["group"]
status := vars["status"]
var err error
if status == statusEnabled {
err = globalIAMSys.SetGroupStatus(group, true)
} else if status == statusDisabled {
err = globalIAMSys.SetGroupStatus(group, false)
} else {
err = errInvalidArgument
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload user.
for _, nerr := range globalNotificationSys.LoadGroup(group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
status := vars["status"]
// This API is not allowed to lookup accessKey user status
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if err := globalIAMSys.SetUserStatus(accessKey, madmin.AccountStatus(status)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload user.
for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
accessKey := path.Clean(vars["accessKey"])
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Not allowed to add a user with same access key as root credential
if owner && accessKey == cred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
// Incoming access key matches parent user then we should
// reject password change requests.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
implicitPerm := accessKey == cred.AccessKey
if !implicitPerm {
parentUser := cred.ParentUser
if parentUser == "" {
parentUser = cred.AccessKey
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: parentUser,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", parentUser, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
DenyOnly: true, // check if changing password is explicitly denied.
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var uinfo madmin.UserInfo
if err = json.Unmarshal(configBytes, &uinfo); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
if err = globalIAMSys.CreateUser(accessKey, uinfo); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other Minio peers to reload user
for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
// Disallow creating service accounts for root user.
if createReq.TargetUser == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
var (
targetUser string
targetGroups []string
)
targetUser = createReq.TargetUser
// Need permission if we are creating a service acccount
// for a user <> to the request sender
if targetUser != "" && targetUser != cred.AccessKey {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if globalLDAPConfig.Enabled && targetUser != "" {
// If LDAP enabled, service accounts need
// to be created only for LDAP users.
var err error
_, targetGroups, err = globalLDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
} else {
if targetUser == "" {
targetUser = cred.AccessKey
}
if cred.ParentUser != "" {
targetUser = cred.ParentUser
}
targetGroups = cred.Groups
}
opts := newServiceAccountOpts{sessionPolicy: createReq.Policy, accessKey: createReq.AccessKey, secretKey: createReq.SecretKey}
newCred, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other Minio peers to reload user the service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
var createResp = madmin.AddServiceAccountResp{
Credentials: auth.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
},
}
data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// UpdateServiceAccount - POST /minio/admin/v3/update-service-account
func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
// Disallow editing service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.UpdateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}
if requestUser != svcAccount.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var updateReq madmin.UpdateServiceAccountReq
if err = json.Unmarshal(reqBytes, &updateReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
opts := updateServiceAccountOpts{sessionPolicy: updateReq.NewPolicy, secretKey: updateReq.NewSecretKey, status: updateReq.NewStatus}
err = globalIAMSys.UpdateServiceAccount(ctx, accessKey, opts)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other Minio peers to reload user the service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
writeSuccessNoContent(w)
}
// InfoServiceAccount - GET /minio/admin/v3/info-service-account
func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
svcAccount, policy, err := globalIAMSys.GetServiceAccount(ctx, accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}
if requestUser != svcAccount.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
var svcAccountPolicy iampolicy.Policy
impliedPolicy := policy == nil
// If policy is empty, check for policy of the parent user
if !impliedPolicy {
svcAccountPolicy.Merge(*policy)
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(svcAccount.AccessKey, false)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
svcAccountPolicy.Merge(globalIAMSys.GetCombinedPolicy(policiesNames...))
}
policyJSON, err := json.Marshal(svcAccountPolicy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var infoResp = madmin.InfoServiceAccountResp{
ParentUser: svcAccount.ParentUser,
AccountStatus: svcAccount.Status,
ImpliedPolicy: impliedPolicy,
Policy: string(policyJSON),
}
data, err := json.Marshal(infoResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
var targetAccount string
user := r.URL.Query().Get("user")
if user != "" {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
targetAccount = user
} else {
targetAccount = cred.AccessKey
if cred.ParentUser != "" {
targetAccount = cred.ParentUser
}
}
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, targetAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var serviceAccountsNames []string
for _, svc := range serviceAccounts {
serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
}
var listResp = madmin.ListServiceAccountsResp{
Accounts: serviceAccountsNames,
}
data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
serviceAccount := mux.Vars(r)["accessKey"]
if serviceAccount == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
svcAccount, _, err := globalIAMSys.GetServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
adminPrivilege := globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.RemoveServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
})
if !adminPrivilege {
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if parentUser != svcAccount.ParentUser {
// The service account belongs to another user but return not
// found error to mitigate brute force attacks. or the
// serviceAccount doesn't exist.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminServiceAccountNotFound), r.URL)
return
}
}
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessNoContent(w)
}
// AccountInfoHandler returns usage
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountInfo")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
isAllowedAccess := func(bucketName string) (rd, wr bool) {
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
rd = true
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
wr = true
}
return rd, wr
}
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
// log the error, continue with the accounting response
logger.LogIf(ctx, err)
}
// If etcd, dns federation configured list buckets from etcd.
var buckets []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for _, dnsRecords := range dnsBuckets {
buckets = append(buckets, BucketInfo{
Name: dnsRecords[0].Key,
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Name < buckets[j].Name
})
} else {
buckets, err = objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
accountName := cred.AccessKey
var policies []string
switch globalIAMSys.usersSysType {
case MinIOUsersSysType:
policies, err = globalIAMSys.PolicyDBGet(accountName, false)
case LDAPUsersSysType:
parentUser := accountName
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
default:
err = errors.New("should not happen!")
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
acctInfo := madmin.AccountInfo{
AccountName: accountName,
Policy: globalIAMSys.GetCombinedPolicy(policies...),
}
for _, bucket := range buckets {
rd, wr := isAllowedAccess(bucket.Name)
if rd || wr {
var size uint64
// Fetch the data usage of the current bucket
if !dataUsageInfo.LastUpdate.IsZero() {
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
}
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketAccessInfo{
Name: bucket.Name,
Created: bucket.Created,
Size: size,
Access: madmin.AccountAccess{
Read: rd,
Write: wr,
},
})
}
}
usageInfoJSON, err := json.Marshal(acctInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, usageInfoJSON)
}
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.Write(data)
w.(http.Flusher).Flush()
}
// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = json.NewEncoder(w).Encode(policy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
policyMap := make(map[string][]byte, len(policies))
for k, p := range policies {
var err error
policyMap[k], err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
}
if err = json.NewEncoder(w).Encode(policyMap); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var newPolicies = make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
newPolicies[name] = p
}
if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["name"]
if err := globalIAMSys.DeletePolicy(policyName); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to delete policy
for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["name"]
// Error out if Content-Length is missing.
if r.ContentLength <= 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
// Error out if Content-Length is beyond allowed size.
if r.ContentLength > maxBucketPolicySize {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
iamPolicy, err := iampolicy.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Version in policy must not be empty
if iamPolicy.Version == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
return
}
if err = globalIAMSys.SetPolicy(policyName, *iamPolicy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload policy
for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["policyName"]
entityName := vars["userOrGroup"]
isGroup := vars["isGroup"] == "true"
if !isGroup {
ok, _, err := globalIAMSys.IsTempUser(entityName)
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if ok {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
}
if err := globalIAMSys.PolicyDBSet(entityName, policyName, isGroup); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload policy
for _, nerr := range globalNotificationSys.LoadPolicyMapping(entityName, isGroup) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
| cmd/admin-handlers-users.go | 1 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.012071940116584301,
0.0005708307144232094,
0.00016103121743071824,
0.00017396158364135772,
0.0012127013178542256
] |
{
"id": 4,
"code_window": [
"\t\t\tif err == nil {\n",
"\t\t\t\tembeddedPolicy = &iampolicy.Policy{}\n",
"\t\t\t\tembeddedPolicy.Merge(*p)\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tp, err := iampolicy.ParseConfig(bytes.NewReader(policyBytes))\n",
"\t\t\t\tif err == nil {\n",
"\t\t\t\t\tpolicy := iampolicy.Policy{}.Merge(*p)\n",
"\t\t\t\t\tembeddedPolicy = &policy\n",
"\t\t\t\t}\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1121
} | /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"testing"
)
func TestHTTPRequestRangeSpec(t *testing.T) {
resourceSize := int64(10)
validRangeSpecs := []struct {
spec string
expOffset, expLength int64
}{
{"bytes=0-", 0, 10},
{"bytes=1-", 1, 9},
{"bytes=0-9", 0, 10},
{"bytes=1-10", 1, 9},
{"bytes=1-1", 1, 1},
{"bytes=2-5", 2, 4},
{"bytes=-5", 5, 5},
{"bytes=-1", 9, 1},
{"bytes=-1000", 0, 10},
}
for i, testCase := range validRangeSpecs {
rs, err := parseRequestRangeSpec(testCase.spec)
if err != nil {
t.Errorf("unexpected err: %v", err)
}
o, l, err := rs.GetOffsetLength(resourceSize)
if err != nil {
t.Errorf("unexpected err: %v", err)
}
if o != testCase.expOffset || l != testCase.expLength {
t.Errorf("Case %d: got bad offset/length: %d,%d expected: %d,%d",
i, o, l, testCase.expOffset, testCase.expLength)
}
}
unparsableRangeSpecs := []string{
"bytes=-",
"bytes==",
"bytes==1-10",
"bytes=",
"bytes=aa",
"aa",
"",
"bytes=1-10-",
"bytes=1--10",
"bytes=-1-10",
"bytes=0-+3",
"bytes=+3-+5",
"bytes=10-11,12-10", // Unsupported by S3/MinIO (valid in RFC)
}
for i, urs := range unparsableRangeSpecs {
rs, err := parseRequestRangeSpec(urs)
if err == nil {
t.Errorf("Case %d: Did not get an expected error - got %v", i, rs)
}
if err == errInvalidRange {
t.Errorf("Case %d: Got invalid range error instead of a parse error", i)
}
if rs != nil {
t.Errorf("Case %d: Got non-nil rs though err != nil: %v", i, rs)
}
}
invalidRangeSpecs := []string{
"bytes=5-3",
"bytes=10-10",
"bytes=10-",
"bytes=100-",
"bytes=-0",
}
for i, irs := range invalidRangeSpecs {
var err1, err2 error
var rs *HTTPRangeSpec
var o, l int64
rs, err1 = parseRequestRangeSpec(irs)
if err1 == nil {
o, l, err2 = rs.GetOffsetLength(resourceSize)
}
if err1 == errInvalidRange || (err1 == nil && err2 == errInvalidRange) {
continue
}
t.Errorf("Case %d: Expected errInvalidRange but: %v %v %d %d %v", i, rs, err1, o, l, err2)
}
}
| cmd/httprange_test.go | 0 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.0013885549269616604,
0.0003535323776304722,
0.00015957043797243387,
0.00017787229444365948,
0.0003593066649045795
] |
{
"id": 4,
"code_window": [
"\t\t\tif err == nil {\n",
"\t\t\t\tembeddedPolicy = &iampolicy.Policy{}\n",
"\t\t\t\tembeddedPolicy.Merge(*p)\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tp, err := iampolicy.ParseConfig(bytes.NewReader(policyBytes))\n",
"\t\t\t\tif err == nil {\n",
"\t\t\t\t\tpolicy := iampolicy.Policy{}.Merge(*p)\n",
"\t\t\t\t\tembeddedPolicy = &policy\n",
"\t\t\t\t}\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1121
} | /*
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package opa
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
xnet "github.com/minio/minio/pkg/net"
)
// Env IAM OPA URL
const (
URL = "url"
AuthToken = "auth_token"
EnvPolicyOpaURL = "MINIO_POLICY_OPA_URL"
EnvPolicyOpaAuthToken = "MINIO_POLICY_OPA_AUTH_TOKEN"
)
// DefaultKVS - default config for OPA config
var (
DefaultKVS = config.KVS{
config.KV{
Key: URL,
Value: "",
},
config.KV{
Key: AuthToken,
Value: "",
},
}
)
// Args opa general purpose policy engine configuration.
type Args struct {
URL *xnet.URL `json:"url"`
AuthToken string `json:"authToken"`
Transport http.RoundTripper `json:"-"`
CloseRespFn func(r io.ReadCloser) `json:"-"`
}
// Validate - validate opa configuration params.
func (a *Args) Validate() error {
req, err := http.NewRequest(http.MethodPost, a.URL.String(), bytes.NewReader([]byte("")))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
if a.AuthToken != "" {
req.Header.Set("Authorization", a.AuthToken)
}
client := &http.Client{Transport: a.Transport}
resp, err := client.Do(req)
if err != nil {
return err
}
defer a.CloseRespFn(resp.Body)
return nil
}
// UnmarshalJSON - decodes JSON data.
func (a *Args) UnmarshalJSON(data []byte) error {
// subtype to avoid recursive call to UnmarshalJSON()
type subArgs Args
var so subArgs
if err := json.Unmarshal(data, &so); err != nil {
return err
}
oa := Args(so)
if oa.URL == nil || oa.URL.String() == "" {
*a = oa
return nil
}
*a = oa
return nil
}
// Opa - implements opa policy agent calls.
type Opa struct {
args Args
client *http.Client
}
// Enabled returns if opa is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(URL) != ""
}
// LookupConfig lookup Opa from config, override with any ENVs.
func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (Args, error) {
args := Args{}
if err := config.CheckValidKeys(config.PolicyOPASubSys, kv, DefaultKVS); err != nil {
return args, err
}
opaURL := env.Get(EnvIamOpaURL, "")
if opaURL == "" {
opaURL = env.Get(EnvPolicyOpaURL, kv.Get(URL))
if opaURL == "" {
return args, nil
}
}
authToken := env.Get(EnvIamOpaAuthToken, "")
if authToken == "" {
authToken = env.Get(EnvPolicyOpaAuthToken, kv.Get(AuthToken))
}
u, err := xnet.ParseHTTPURL(opaURL)
if err != nil {
return args, err
}
args = Args{
URL: u,
AuthToken: authToken,
Transport: transport,
CloseRespFn: closeRespFn,
}
if err = args.Validate(); err != nil {
return args, err
}
return args, nil
}
// New - initializes opa policy engine connector.
func New(args Args) *Opa {
// No opa args.
if args.URL == nil || args.URL.Scheme == "" && args.AuthToken == "" {
return nil
}
return &Opa{
args: args,
client: &http.Client{Transport: args.Transport},
}
}
// IsAllowed - checks given policy args is allowed to continue the REST API.
func (o *Opa) IsAllowed(args iampolicy.Args) (bool, error) {
if o == nil {
return false, nil
}
// OPA input
body := make(map[string]interface{})
body["input"] = args
inputBytes, err := json.Marshal(body)
if err != nil {
return false, err
}
req, err := http.NewRequest(http.MethodPost, o.args.URL.String(), bytes.NewReader(inputBytes))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/json")
if o.args.AuthToken != "" {
req.Header.Set("Authorization", o.args.AuthToken)
}
resp, err := o.client.Do(req)
if err != nil {
return false, err
}
defer o.args.CloseRespFn(resp.Body)
// Read the body to be saved later.
opaRespBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false, err
}
// Handle large OPA responses when OPA URL is of
// form http://localhost:8181/v1/data/httpapi/authz
type opaResultAllow struct {
Result struct {
Allow bool `json:"allow"`
} `json:"result"`
}
// Handle simpler OPA responses when OPA URL is of
// form http://localhost:8181/v1/data/httpapi/authz/allow
type opaResult struct {
Result bool `json:"result"`
}
respBody := bytes.NewReader(opaRespBytes)
var result opaResult
if err = json.NewDecoder(respBody).Decode(&result); err != nil {
respBody.Seek(0, 0)
var resultAllow opaResultAllow
if err = json.NewDecoder(respBody).Decode(&resultAllow); err != nil {
return false, err
}
return resultAllow.Result.Allow, nil
}
return result.Result, nil
}
| cmd/config/policy/opa/config.go | 0 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.0032199581619352102,
0.00038251758087426424,
0.0001625749428058043,
0.00017297665181104094,
0.0006796119851060212
] |
{
"id": 4,
"code_window": [
"\t\t\tif err == nil {\n",
"\t\t\t\tembeddedPolicy = &iampolicy.Policy{}\n",
"\t\t\t\tembeddedPolicy.Merge(*p)\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tp, err := iampolicy.ParseConfig(bytes.NewReader(policyBytes))\n",
"\t\t\t\tif err == nil {\n",
"\t\t\t\t\tpolicy := iampolicy.Policy{}.Merge(*p)\n",
"\t\t\t\t\tembeddedPolicy = &policy\n",
"\t\t\t\t}\n"
],
"file_path": "cmd/iam.go",
"type": "replace",
"edit_start_line_idx": 1121
} | #!/bin/bash
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# handle command line arguments
if [ $# -ne 2 ]; then
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
exit 1
fi
output_log_file="$1"
error_log_file="$2"
# run tests
/mint/run/core/aws-sdk-go/aws-sdk-go 1>>"$output_log_file" 2>"$error_log_file"
| mint/run/core/aws-sdk-go/run.sh | 0 | https://github.com/minio/minio/commit/b6f5785a6d627ebc1597073dd8864ad2647b7d9d | [
0.00017771869897842407,
0.00017365686653647572,
0.00016911103739403188,
0.00017414087778888643,
0.000003530690037223394
] |
{
"id": 0,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupDescriptorName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 940
} | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"net/url"
"sort"
"time"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/covering"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/interval"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
const (
// BackupDescriptorName is the file name used for serialized
// BackupDescriptor protos.
BackupDescriptorName = "BACKUP"
// BackupManifestName is a future name for the serialized
// BackupDescriptor proto.
BackupManifestName = "BACKUP_MANIFEST"
// BackupPartitionDescriptorPrefix is the file name prefix for serialized
// BackupPartitionDescriptor protos.
BackupPartitionDescriptorPrefix = "BACKUP_PART"
// BackupDescriptorCheckpointName is the file name used to store the
// serialized BackupDescriptor proto while the backup is in progress.
BackupDescriptorCheckpointName = "BACKUP-CHECKPOINT"
// BackupFormatDescriptorTrackingVersion added tracking of complete DBs.
BackupFormatDescriptorTrackingVersion uint32 = 1
)
const (
backupOptRevisionHistory = "revision_history"
localityURLParam = "COCKROACH_LOCALITY"
defaultLocalityValue = "default"
)
var useTBI = settings.RegisterBoolSetting(
"kv.bulk_io_write.experimental_incremental_export_enabled",
"use experimental time-bound file filter when exporting in BACKUP",
false,
)
var backupOptionExpectValues = map[string]sql.KVStringOptValidate{
backupOptRevisionHistory: sql.KVStringOptRequireNoValue,
}
// BackupCheckpointInterval is the interval at which backup progress is saved
// to durable storage.
var BackupCheckpointInterval = time.Minute
// ReadBackupDescriptorFromURI creates an export store from the given URI, then
// reads and unmarshals a BackupDescriptor at the standard location in the
// export storage.
func ReadBackupDescriptorFromURI(
ctx context.Context, uri string, makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory,
) (BackupDescriptor, error) {
exportStore, err := makeExternalStorageFromURI(ctx, uri)
if err != nil {
return BackupDescriptor{}, err
}
defer exportStore.Close()
backupDesc, err := readBackupDescriptor(ctx, exportStore, BackupDescriptorName)
if err != nil {
backupManifest, manifestErr := readBackupDescriptor(ctx, exportStore, BackupManifestName)
if manifestErr != nil {
return BackupDescriptor{}, err
}
backupDesc = backupManifest
}
backupDesc.Dir = exportStore.Conf()
// TODO(dan): Sanity check this BackupDescriptor: non-empty EndTime,
// non-empty Paths, and non-overlapping Spans and keyranges in Files.
return backupDesc, nil
}
// readBackupDescriptor reads and unmarshals a BackupDescriptor from filename in
// the provided export store.
func readBackupDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupDescriptor{}, err
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupDescriptor{}, err
}
for _, d := range backupDesc.Descriptors {
// Calls to GetTable are generally frowned upon.
// This specific call exists to provide backwards compatibility with
// backups created prior to version 19.1. Starting in v19.1 the
// ModificationTime is always written in backups for all versions
// of table descriptors. In earlier cockroach versions only later
// table descriptor versions contain a non-empty ModificationTime.
// Later versions of CockroachDB use the MVCC timestamp to fill in
// the ModificationTime for table descriptors. When performing a restore
// we no longer have access to that MVCC timestamp but we can set it
// to a value we know will be safe.
if t := d.GetTable(); t == nil {
continue
} else if t.Version == 1 && t.ModificationTime.IsEmpty() {
t.ModificationTime = hlc.Timestamp{WallTime: 1}
}
}
return backupDesc, err
}
func readBackupPartitionDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupPartitionDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupPartitionDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupPartitionDescriptor{}, err
}
var backupDesc BackupPartitionDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupPartitionDescriptor{}, err
}
return backupDesc, err
}
// getRelevantDescChanges finds the changes between start and end time to the
// SQL descriptors matching `descs` or `expandedDBs`, ordered by time. A
// descriptor revision matches if it is an earlier revision of a descriptor in
// descs (same ID) or has parentID in `expanded`. Deleted descriptors are
// represented as nil. Fills in the `priorIDs` map in the process, which maps
// a descriptor the the ID by which it was previously known (e.g pre-TRUNCATE).
func getRelevantDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
descs []sqlbase.Descriptor,
expanded []sqlbase.ID,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
allChanges, err := getAllDescChanges(ctx, db, startTime, endTime, priorIDs)
if err != nil {
return nil, err
}
// If no descriptors changed, we can just stop now and have RESTORE use the
// normal list of descs (i.e. as of endTime).
if len(allChanges) == 0 {
return nil, nil
}
// interestingChanges will be every descriptor change relevant to the backup.
var interestingChanges []BackupDescriptor_DescriptorRevision
// interestingIDs are the descriptor for which we're interested in capturing
// changes. This is initially the descriptors matched (as of endTime) by our
// target spec, plus those that belonged to a DB that our spec expanded at any
// point in the interval.
interestingIDs := make(map[sqlbase.ID]struct{}, len(descs))
// The descriptors that currently (endTime) match the target spec (desc) are
// obviously interesting to our backup.
for _, i := range descs {
interestingIDs[i.GetID()] = struct{}{}
if t := i.Table(hlc.Timestamp{}); t != nil {
for j := t.ReplacementOf.ID; j != sqlbase.InvalidID; j = priorIDs[j] {
interestingIDs[j] = struct{}{}
}
}
}
// We're also interested in any desc that belonged to a DB we're backing up.
// We'll start by looking at all descriptors as of the beginning of the
// interval and add to the set of IDs that we are interested any descriptor that
// belongs to one of the parents we care about.
interestingParents := make(map[sqlbase.ID]struct{}, len(expanded))
for _, i := range expanded {
interestingParents[i] = struct{}{}
}
if !startTime.IsEmpty() {
starting, err := loadAllDescs(ctx, db, startTime)
if err != nil {
return nil, err
}
for _, i := range starting {
if table := i.Table(hlc.Timestamp{}); table != nil {
// We need to add to interestingIDs so that if we later see a delete for
// this ID we still know it is interesting to us, even though we will not
// have a parentID at that point (since the delete is a nil desc).
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
}
}
if _, ok := interestingIDs[i.GetID()]; ok {
desc := i
// We inject a fake "revision" that captures the starting state for
// matched descriptor, to allow restoring to times before its first rev
// actually inside the window. This likely ends up duplicating the last
// version in the previous BACKUP descriptor, but avoids adding more
// complicated special-cases in RESTORE, so it only needs to look in a
// single BACKUP to restore to a particular time.
initial := BackupDescriptor_DescriptorRevision{Time: startTime, ID: i.GetID(), Desc: &desc}
interestingChanges = append(interestingChanges, initial)
}
}
}
for _, change := range allChanges {
// A change to an ID that we are interested in is obviously interesting --
// a change is also interesting if it is to a table that has a parent that
// we are interested and thereafter it also becomes an ID in which we are
// interested in changes (since, as mentioned above, to decide if deletes
// are interesting).
if _, ok := interestingIDs[change.ID]; ok {
interestingChanges = append(interestingChanges, change)
} else if change.Desc != nil {
if table := change.Desc.Table(hlc.Timestamp{}); table != nil {
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
interestingChanges = append(interestingChanges, change)
}
}
}
}
sort.Slice(interestingChanges, func(i, j int) bool {
return interestingChanges[i].Time.Less(interestingChanges[j].Time)
})
return interestingChanges, nil
}
// getAllDescChanges gets every sql descriptor change between start and end time
// returning its ID, content and the change time (with deletions represented as
// nil content).
func getAllDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
allRevs, err := getAllRevisions(ctx, db, startKey, endKey, startTime, endTime)
if err != nil {
return nil, err
}
var res []BackupDescriptor_DescriptorRevision
for _, revs := range allRevs {
id, err := keys.DecodeDescMetadataID(revs.Key)
if err != nil {
return nil, err
}
for _, rev := range revs.Values {
r := BackupDescriptor_DescriptorRevision{ID: sqlbase.ID(id), Time: rev.Timestamp}
if len(rev.RawBytes) != 0 {
var desc sqlbase.Descriptor
if err := rev.GetProto(&desc); err != nil {
return nil, err
}
r.Desc = &desc
t := desc.Table(rev.Timestamp)
if t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {
priorIDs[t.ID] = t.ReplacementOf.ID
}
}
res = append(res, r)
}
}
return res, nil
}
func allSQLDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.Descriptor, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
rows, err := txn.Scan(ctx, startKey, endKey, 0)
if err != nil {
return nil, err
}
sqlDescs := make([]sqlbase.Descriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&sqlDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal SQL descriptor", row.Key)
}
if row.Value != nil {
sqlDescs[i].Table(row.Value.Timestamp)
}
}
return sqlDescs, nil
}
func ensureInterleavesIncluded(tables []*sqlbase.TableDescriptor) error {
inBackup := make(map[sqlbase.ID]bool, len(tables))
for _, t := range tables {
inBackup[t.ID] = true
}
for _, table := range tables {
if err := table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error {
for _, a := range index.Interleave.Ancestors {
if !inBackup[a.TableID] {
return errors.Errorf(
"cannot backup table %q without interleave parent (ID %d)", table.Name, a.TableID,
)
}
}
for _, c := range index.InterleavedBy {
if !inBackup[c.Table] {
return errors.Errorf(
"cannot backup table %q without interleave child table (ID %d)", table.Name, c.Table,
)
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
func allRangeDescriptors(ctx context.Context, txn *client.Txn) ([]roachpb.RangeDescriptor, error) {
rows, err := txn.Scan(ctx, keys.Meta2Prefix, keys.MetaMax, 0)
if err != nil {
return nil, errors.Wrapf(err,
"unable to scan range descriptors")
}
rangeDescs := make([]roachpb.RangeDescriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&rangeDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal range descriptor", row.Key)
}
}
return rangeDescs, nil
}
type tableAndIndex struct {
tableID sqlbase.ID
indexID sqlbase.IndexID
}
// spansForAllTableIndexes returns non-overlapping spans for every index and
// table passed in. They would normally overlap if any of them are interleaved.
func spansForAllTableIndexes(
tables []*sqlbase.TableDescriptor, revs []BackupDescriptor_DescriptorRevision,
) []roachpb.Span {
added := make(map[tableAndIndex]bool, len(tables))
sstIntervalTree := interval.NewTree(interval.ExclusiveOverlapper)
for _, table := range tables {
for _, index := range table.AllNonDropIndexes() {
if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(index.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[tableAndIndex{tableID: table.ID, indexID: index.ID}] = true
}
}
// If there are desc revisions, ensure that we also add any index spans
// in them that we didn't already get above e.g. indexes or tables that are
// not in latest because they were dropped during the time window in question.
for _, rev := range revs {
if tbl := rev.Desc.Table(hlc.Timestamp{}); tbl != nil {
for _, idx := range tbl.AllNonDropIndexes() {
key := tableAndIndex{tableID: tbl.ID, indexID: idx.ID}
if !added[key] {
if err := sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(idx.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[key] = true
}
}
}
}
var spans []roachpb.Span
_ = sstIntervalTree.Do(func(r interval.Interface) bool {
spans = append(spans, roachpb.Span{
Key: roachpb.Key(r.Range().Start),
EndKey: roachpb.Key(r.Range().End),
})
return false
})
return spans
}
// coveringFromSpans creates an interval.Covering with a fixed payload from a
// slice of roachpb.Spans.
func coveringFromSpans(spans []roachpb.Span, payload interface{}) covering.Covering {
var c covering.Covering
for _, span := range spans {
c = append(c, covering.Range{
Start: []byte(span.Key),
End: []byte(span.EndKey),
Payload: payload,
})
}
return c
}
// splitAndFilterSpans returns the spans that represent the set difference
// (includes - excludes) while also guaranteeing that each output span does not
// cross the endpoint of a RangeDescriptor in ranges.
func splitAndFilterSpans(
includes []roachpb.Span, excludes []roachpb.Span, ranges []roachpb.RangeDescriptor,
) []roachpb.Span {
type includeMarker struct{}
type excludeMarker struct{}
includeCovering := coveringFromSpans(includes, includeMarker{})
excludeCovering := coveringFromSpans(excludes, excludeMarker{})
var rangeCovering covering.Covering
for _, rangeDesc := range ranges {
rangeCovering = append(rangeCovering, covering.Range{
Start: []byte(rangeDesc.StartKey),
End: []byte(rangeDesc.EndKey),
})
}
splits := covering.OverlapCoveringMerge(
[]covering.Covering{includeCovering, excludeCovering, rangeCovering},
)
var out []roachpb.Span
for _, split := range splits {
include := false
exclude := false
for _, payload := range split.Payload.([]interface{}) {
switch payload.(type) {
case includeMarker:
include = true
case excludeMarker:
exclude = true
}
}
if include && !exclude {
out = append(out, roachpb.Span{
Key: roachpb.Key(split.Start),
EndKey: roachpb.Key(split.End),
})
}
}
return out
}
func optsToKVOptions(opts map[string]string) tree.KVOptions {
if len(opts) == 0 {
return nil
}
sortedOpts := make([]string, 0, len(opts))
for k := range opts {
sortedOpts = append(sortedOpts, k)
}
sort.Strings(sortedOpts)
kvopts := make(tree.KVOptions, 0, len(opts))
for _, k := range sortedOpts {
opt := tree.KVOption{Key: tree.Name(k)}
if v := opts[k]; v != "" {
opt.Value = tree.NewDString(v)
}
kvopts = append(kvopts, opt)
}
return kvopts
}
func backupJobDescription(
p sql.PlanHookState,
backup *tree.Backup,
to []string,
incrementalFrom []string,
opts map[string]string,
) (string, error) {
b := &tree.Backup{
AsOf: backup.AsOf,
Options: optsToKVOptions(opts),
Targets: backup.Targets,
}
for _, t := range to {
sanitizedTo, err := cloud.SanitizeExternalStorageURI(t)
if err != nil {
return "", err
}
b.To = append(b.To, tree.NewDString(sanitizedTo))
}
for _, from := range incrementalFrom {
sanitizedFrom, err := cloud.SanitizeExternalStorageURI(from)
if err != nil {
return "", err
}
b.IncrementalFrom = append(b.IncrementalFrom, tree.NewDString(sanitizedFrom))
}
ann := p.ExtendedEvalContext().Annotations
return tree.AsStringWithFQNames(b, ann), nil
}
// clusterNodeCount returns the approximate number of nodes in the cluster.
func clusterNodeCount(g *gossip.Gossip) int {
var nodes int
_ = g.IterateInfos(gossip.KeyNodeIDPrefix, func(_ string, _ gossip.Info) error {
nodes++
return nil
})
return nodes
}
// BackupFileDescriptors is an alias on which to implement sort's interface.
type BackupFileDescriptors []BackupDescriptor_File
func (r BackupFileDescriptors) Len() int { return len(r) }
func (r BackupFileDescriptors) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r BackupFileDescriptors) Less(i, j int) bool {
if cmp := bytes.Compare(r[i].Span.Key, r[j].Span.Key); cmp != 0 {
return cmp < 0
}
return bytes.Compare(r[i].Span.EndKey, r[j].Span.EndKey) < 0
}
func writeBackupDescriptor(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupDescriptor,
) error {
sort.Sort(BackupFileDescriptors(desc.Files))
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
// writeBackupPartitionDescriptor writes metadata (containing a locality KV and
// partial file listing) for a partitioned BACKUP to one of the stores in the
// backup.
func writeBackupPartitionDescriptor(
ctx context.Context,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupPartitionDescriptor,
) error {
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
func loadAllDescs(
ctx context.Context, db *client.DB, asOf hlc.Timestamp,
) ([]sqlbase.Descriptor, error) {
var allDescs []sqlbase.Descriptor
if err := db.Txn(
ctx,
func(ctx context.Context, txn *client.Txn) error {
var err error
txn.SetFixedTimestamp(ctx, asOf)
allDescs, err = allSQLDescriptors(ctx, txn)
return err
}); err != nil {
return nil, err
}
return allDescs, nil
}
// ResolveTargetsToDescriptors performs name resolution on a set of targets and
// returns the resulting descriptors.
func ResolveTargetsToDescriptors(
ctx context.Context, p sql.PlanHookState, endTime hlc.Timestamp, targets tree.TargetList,
) ([]sqlbase.Descriptor, []sqlbase.ID, error) {
allDescs, err := loadAllDescs(ctx, p.ExecCfg().DB, endTime)
if err != nil {
return nil, nil, err
}
var matched descriptorsMatched
if matched, err = descriptorsMatchingTargets(ctx,
p.CurrentDatabase(), p.CurrentSearchPath(), allDescs, targets); err != nil {
return nil, nil, err
}
// Ensure interleaved tables appear after their parent. Since parents must be
// created before their children, simply sorting by ID accomplishes this.
sort.Slice(matched.descs, func(i, j int) bool { return matched.descs[i].GetID() < matched.descs[j].GetID() })
return matched.descs, matched.expandedDB, nil
}
type spanAndTime struct {
span roachpb.Span
start, end hlc.Timestamp
}
// backup exports a snapshot of every kv entry into ranged sstables.
//
// The output is an sstable per range with files in the following locations:
// - <dir>/<unique_int>.sst
// - <dir> is given by the user and may be cloud storage
// - Each file contains data for a key range that doesn't overlap with any other
// file.
func backup(
ctx context.Context,
db *client.DB,
gossip *gossip.Gossip,
settings *cluster.Settings,
defaultStore cloud.ExternalStorage,
storageByLocalityKV map[string]*roachpb.ExternalStorage,
job *jobs.Job,
backupDesc *BackupDescriptor,
checkpointDesc *BackupDescriptor,
resultsCh chan<- tree.Datums,
makeExternalStorage cloud.ExternalStorageFactory,
) (roachpb.BulkOpSummary, error) {
// TODO(dan): Figure out how permissions should work. #6713 is tracking this
// for grpc.
mu := struct {
syncutil.Mutex
files []BackupDescriptor_File
exported roachpb.BulkOpSummary
lastCheckpoint time.Time
}{}
var checkpointMu syncutil.Mutex
var ranges []roachpb.RangeDescriptor
if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error {
var err error
// TODO(benesch): limit the range descriptors we fetch to the ranges that
// are actually relevant in the backup to speed up small backups on large
// clusters.
ranges, err = allRangeDescriptors(ctx, txn)
return err
}); err != nil {
return mu.exported, err
}
var completedSpans, completedIntroducedSpans []roachpb.Span
if checkpointDesc != nil {
// TODO(benesch): verify these files, rather than accepting them as truth
// blindly.
// No concurrency yet, so these assignments are safe.
mu.files = checkpointDesc.Files
mu.exported = checkpointDesc.EntryCounts
for _, file := range checkpointDesc.Files {
if file.StartTime.IsEmpty() && !file.EndTime.IsEmpty() {
completedIntroducedSpans = append(completedIntroducedSpans, file.Span)
} else {
completedSpans = append(completedSpans, file.Span)
}
}
}
// Subtract out any completed spans and split the remaining spans into
// range-sized pieces so that we can use the number of completed requests as a
// rough measure of progress.
spans := splitAndFilterSpans(backupDesc.Spans, completedSpans, ranges)
introducedSpans := splitAndFilterSpans(backupDesc.IntroducedSpans, completedIntroducedSpans, ranges)
allSpans := make([]spanAndTime, 0, len(spans)+len(introducedSpans))
for _, s := range introducedSpans {
allSpans = append(allSpans, spanAndTime{span: s, start: hlc.Timestamp{}, end: backupDesc.StartTime})
}
for _, s := range spans {
allSpans = append(allSpans, spanAndTime{span: s, start: backupDesc.StartTime, end: backupDesc.EndTime})
}
// Sequential ranges may have clustered leaseholders, for example a
// geo-partitioned table likely has all the leaseholders for some contiguous
// span of the table (i.e. a partition) pinned to just the nodes in a region.
// In such cases, sending spans sequentially may under-utilize the rest of the
// cluster given that we have a limit on the number of spans we send out at
// a given time. Randomizing the order of spans should help ensure a more even
// distribution of work across the cluster regardless of how leaseholders may
// or may not be clustered.
rand.Shuffle(len(allSpans), func(i, j int) {
allSpans[i], allSpans[j] = allSpans[j], allSpans[i]
})
progressLogger := jobs.NewChunkProgressLogger(job, len(spans), job.FractionCompleted(), jobs.ProgressUpdateOnly)
// We're already limiting these on the server-side, but sending all the
// Export requests at once would fill up distsender/grpc/something and cause
// all sorts of badness (node liveness timeouts leading to mass leaseholder
// transfers, poor performance on SQL workloads, etc) as well as log spam
// about slow distsender requests. Rate limit them here, too.
//
// Each node limits the number of running Export & Import requests it serves
// to avoid overloading the network, so multiply that by the number of nodes
// in the cluster and use that as the number of outstanding Export requests
// for the rate limiting. This attempts to strike a balance between
// simplicity, not getting slow distsender log spam, and keeping the server
// side limiter full.
//
// TODO(dan): Make this limiting per node.
//
// TODO(dan): See if there's some better solution than rate-limiting #14798.
maxConcurrentExports := clusterNodeCount(gossip) * int(storage.ExportRequestsLimit.Get(&settings.SV)) * 10
exportsSem := make(chan struct{}, maxConcurrentExports)
g := ctxgroup.WithContext(ctx)
requestFinishedCh := make(chan struct{}, len(spans)) // enough buffer to never block
// Only start the progress logger if there are spans, otherwise this will
// block forever. This is needed for TestBackupRestoreResume which doesn't
// have any spans. Users should never hit this.
if len(spans) > 0 {
g.GoCtx(func(ctx context.Context) error {
return progressLogger.Loop(ctx, requestFinishedCh)
})
}
g.GoCtx(func(ctx context.Context) error {
for i := range allSpans {
{
select {
case exportsSem <- struct{}{}:
case <-ctx.Done():
// Break the for loop to avoid creating more work - the backup
// has failed because either the context has been canceled or an
// error has been returned. Either way, Wait() is guaranteed to
// return an error now.
return ctx.Err()
}
}
span := allSpans[i]
g.GoCtx(func(ctx context.Context) error {
defer func() { <-exportsSem }()
header := roachpb.Header{Timestamp: span.end}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeaderFromSpan(span.span),
Storage: defaultStore.Conf(),
StorageByLocalityKV: storageByLocalityKV,
StartTime: span.start,
EnableTimeBoundIteratorOptimization: useTBI.Get(&settings.SV),
MVCCFilter: roachpb.MVCCFilter(backupDesc.MVCCFilter),
}
rawRes, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return pErr.GoError()
}
res := rawRes.(*roachpb.ExportResponse)
mu.Lock()
if backupDesc.RevisionStartTime.Less(res.StartTime) {
backupDesc.RevisionStartTime = res.StartTime
}
for _, file := range res.Files {
f := BackupDescriptor_File{
Span: file.Span,
Path: file.Path,
Sha512: file.Sha512,
EntryCounts: file.Exported,
LocalityKV: file.LocalityKV,
}
if span.start != backupDesc.StartTime {
f.StartTime = span.start
f.EndTime = span.end
}
mu.files = append(mu.files, f)
mu.exported.Add(file.Exported)
}
var checkpointFiles BackupFileDescriptors
if timeutil.Since(mu.lastCheckpoint) > BackupCheckpointInterval {
// We optimistically assume the checkpoint will succeed to prevent
// multiple threads from attempting to checkpoint.
mu.lastCheckpoint = timeutil.Now()
checkpointFiles = append(checkpointFiles, mu.files...)
}
mu.Unlock()
requestFinishedCh <- struct{}{}
if checkpointFiles != nil {
checkpointMu.Lock()
backupDesc.Files = checkpointFiles
err := writeBackupDescriptor(
ctx, settings, defaultStore, BackupDescriptorCheckpointName, backupDesc,
)
checkpointMu.Unlock()
if err != nil {
log.Errorf(ctx, "unable to checkpoint backup descriptor: %+v", err)
}
}
return nil
})
}
return nil
})
if err := g.Wait(); err != nil {
return mu.exported, errors.Wrapf(err, "exporting %d ranges", errors.Safe(len(spans)))
}
// No more concurrency, so no need to acquire locks below.
backupDesc.Files = mu.files
backupDesc.EntryCounts = mu.exported
backupID := uuid.MakeV4()
backupDesc.ID = backupID
// Write additional partial descriptors to each node for partitioned backups.
if len(storageByLocalityKV) > 0 {
filesByLocalityKV := make(map[string][]BackupDescriptor_File)
for i := range mu.files {
file := &mu.files[i]
filesByLocalityKV[file.LocalityKV] = append(filesByLocalityKV[file.LocalityKV], *file)
}
nextPartitionedDescFilenameID := 1
for kv, conf := range storageByLocalityKV {
backupDesc.LocalityKVs = append(backupDesc.LocalityKVs, kv)
// Set a unique filename for each partition backup descriptor. The ID
// ensures uniqueness, and the kv string appended to the end is for
// readability.
filename := fmt.Sprintf("%s_%d_%s",
BackupPartitionDescriptorPrefix, nextPartitionedDescFilenameID, sanitizeLocalityKV(kv))
nextPartitionedDescFilenameID++
backupDesc.PartitionDescriptorFilenames = append(backupDesc.PartitionDescriptorFilenames, filename)
desc := BackupPartitionDescriptor{
LocalityKV: kv,
Files: filesByLocalityKV[kv],
BackupID: backupID,
}
if err := func() error {
store, err := makeExternalStorage(ctx, *conf)
if err != nil {
return err
}
defer store.Close()
return writeBackupPartitionDescriptor(ctx, store, filename, &desc)
}(); err != nil {
return mu.exported, err
}
}
}
if err := writeBackupDescriptor(ctx, settings, defaultStore, BackupDescriptorName, backupDesc); err != nil {
return mu.exported, err
}
return mu.exported, nil
}
// sanitizeLocalityKV returns a sanitized version of the input string where all
// characters that are not alphanumeric or -, =, or _ are replaced with _.
func sanitizeLocalityKV(kv string) string {
sanitizedKV := make([]byte, len(kv))
for i := 0; i < len(kv); i++ {
if (kv[i] >= 'a' && kv[i] <= 'z') ||
(kv[i] >= 'A' && kv[i] <= 'Z') ||
(kv[i] >= '0' && kv[i] <= '9') || kv[i] == '-' || kv[i] == '=' {
sanitizedKV[i] = kv[i]
} else {
sanitizedKV[i] = '_'
}
}
return string(sanitizedKV)
}
// VerifyUsableExportTarget ensures that the target location does not already
// contain a BACKUP or checkpoint and writes an empty checkpoint, both verifying
// that the location is writable and locking out accidental concurrent
// operations on that location if subsequently try this check. Callers must
// clean up the written checkpoint file (BackupDescriptorCheckpointName) only
// after writing to the backup file location (BackupDescriptorName).
func VerifyUsableExportTarget(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
readable string,
) error {
if r, err := exportStore.ReadFile(ctx, BackupDescriptorName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupDescriptorName)
}
if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupManifestName)
}
if r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file (is another operation already in progress?)",
readable, BackupDescriptorCheckpointName)
}
if err := writeBackupDescriptor(
ctx, settings, exportStore, BackupDescriptorCheckpointName, &BackupDescriptor{},
); err != nil {
return errors.Wrapf(err, "cannot write to %s", readable)
}
return nil
}
// backupPlanHook implements PlanHookFn.
func backupPlanHook(
_ context.Context, stmt tree.Statement, p sql.PlanHookState,
) (sql.PlanHookRowFn, sqlbase.ResultColumns, []sql.PlanNode, bool, error) {
backupStmt, ok := stmt.(*tree.Backup)
if !ok {
return nil, nil, nil, false, nil
}
toFn, err := p.TypeAsStringArray(tree.Exprs(backupStmt.To), "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
incrementalFromFn, err := p.TypeAsStringArray(backupStmt.IncrementalFrom, "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
optsFn, err := p.TypeAsStringOpts(backupStmt.Options, backupOptionExpectValues)
if err != nil {
return nil, nil, nil, false, err
}
header := sqlbase.ResultColumns{
{Name: "job_id", Typ: types.Int},
{Name: "status", Typ: types.String},
{Name: "fraction_completed", Typ: types.Float},
{Name: "rows", Typ: types.Int},
{Name: "index_entries", Typ: types.Int},
{Name: "system_records", Typ: types.Int},
{Name: "bytes", Typ: types.Int},
}
fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error {
// TODO(dan): Move this span into sql.
ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag())
defer tracing.FinishSpan(span)
if err := utilccl.CheckEnterpriseEnabled(
p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "BACKUP",
); err != nil {
return err
}
if err := p.RequireAdminRole(ctx, "BACKUP"); err != nil {
return err
}
if !p.ExtendedEvalContext().TxnImplicit {
return errors.Errorf("BACKUP cannot be used inside a transaction")
}
to, err := toFn()
if err != nil {
return err
}
if len(to) > 1 &&
!cluster.Version.IsActive(ctx, p.ExecCfg().Settings, cluster.VersionPartitionedBackup) {
return errors.Errorf("partitioned backups can only be made on a cluster that has been fully upgraded to version 19.2")
}
incrementalFrom, err := incrementalFromFn()
if err != nil {
return err
}
endTime := p.ExecCfg().Clock.Now()
if backupStmt.AsOf.Expr != nil {
var err error
if endTime, err = p.EvalAsOfTimestamp(backupStmt.AsOf); err != nil {
return err
}
}
defaultURI, urisByLocalityKV, err := getURIsByLocalityKV(to)
if err != nil {
return nil
}
defaultStore, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, defaultURI)
if err != nil {
return err
}
defer defaultStore.Close()
opts, err := optsFn()
if err != nil {
return err
}
mvccFilter := MVCCFilter_Latest
if _, ok := opts[backupOptRevisionHistory]; ok {
mvccFilter = MVCCFilter_All
}
targetDescs, completeDBs, err := ResolveTargetsToDescriptors(ctx, p, endTime, backupStmt.Targets)
if err != nil {
return err
}
statsCache := p.ExecCfg().TableStatsCache
tableStatistics := make([]*stats.TableStatisticProto, 0)
var tables []*sqlbase.TableDescriptor
for _, desc := range targetDescs {
if dbDesc := desc.GetDatabase(); dbDesc != nil {
if err := p.CheckPrivilege(ctx, dbDesc, privilege.SELECT); err != nil {
return err
}
}
if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil {
return err
}
tables = append(tables, tableDesc)
// Collect all the table stats for this table.
tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc.GetID())
if err != nil {
return err
}
for i := range tableStatisticsAcc {
tableStatistics = append(tableStatistics, &tableStatisticsAcc[i].TableStatisticProto)
}
}
}
if err := ensureInterleavesIncluded(tables); err != nil {
return err
}
var prevBackups []BackupDescriptor
if len(incrementalFrom) > 0 {
clusterID := p.ExecCfg().ClusterID()
prevBackups = make([]BackupDescriptor, len(incrementalFrom))
for i, uri := range incrementalFrom {
// TODO(lucy): We may want to upgrade the table descs to the newer
// foreign key representation here, in case there are backups from an
// older cluster. Keeping the descriptors as they are works for now
// since all we need to do is get the past backups' table/index spans,
// but it will be safer for future code to avoid having older-style
// descriptors around.
desc, err := ReadBackupDescriptorFromURI(ctx, uri, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI)
if err != nil {
return errors.Wrapf(err, "failed to read backup from %q", uri)
}
// IDs are how we identify tables, and those are only meaningful in the
// context of their own cluster, so we need to ensure we only allow
// incremental previous backups that we created.
if !desc.ClusterID.Equal(clusterID) {
return errors.Newf("previous BACKUP %q belongs to cluster %s", uri, desc.ClusterID.String())
}
prevBackups[i] = desc
}
}
var startTime hlc.Timestamp
var newSpans roachpb.Spans
if len(prevBackups) > 0 {
startTime = prevBackups[len(prevBackups)-1].EndTime
}
var priorIDs map[sqlbase.ID]sqlbase.ID
var revs []BackupDescriptor_DescriptorRevision
if mvccFilter == MVCCFilter_All {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
revs, err = getRelevantDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, targetDescs, completeDBs, priorIDs)
if err != nil {
return err
}
}
spans := spansForAllTableIndexes(tables, revs)
if len(prevBackups) > 0 {
tablesInPrev := make(map[sqlbase.ID]struct{})
dbsInPrev := make(map[sqlbase.ID]struct{})
for _, d := range prevBackups[len(prevBackups)-1].Descriptors {
if t := d.Table(hlc.Timestamp{}); t != nil {
tablesInPrev[t.ID] = struct{}{}
}
}
for _, d := range prevBackups[len(prevBackups)-1].CompleteDbs {
dbsInPrev[d] = struct{}{}
}
for _, d := range targetDescs {
if t := d.Table(hlc.Timestamp{}); t != nil {
// If we're trying to use a previous backup for this table, ideally it
// actually contains this table.
if _, ok := tablesInPrev[t.ID]; ok {
continue
}
// This table isn't in the previous backup... maybe was added to a
// DB that the previous backup captured?
if _, ok := dbsInPrev[t.ParentID]; ok {
continue
}
// Maybe this table is missing from the previous backup because it was
// truncated?
if t.ReplacementOf.ID != sqlbase.InvalidID {
// Check if we need to lazy-load the priorIDs (i.e. if this is the first
// truncate we've encountered in non-MVCC backup).
if priorIDs == nil {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
_, err := getAllDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, priorIDs)
if err != nil {
return err
}
}
found := false
for was := t.ReplacementOf.ID; was != sqlbase.InvalidID && !found; was = priorIDs[was] {
_, found = tablesInPrev[was]
}
if found {
continue
}
}
return errors.Errorf("previous backup does not contain table %q", t.Name)
}
}
var err error
_, coveredTime, err := makeImportSpans(
spans,
prevBackups,
nil, /*backupLocalityInfo*/
keys.MinKey,
func(span covering.Range, start, end hlc.Timestamp) error {
if (start == hlc.Timestamp{}) {
newSpans = append(newSpans, roachpb.Span{Key: span.Start, EndKey: span.End})
return nil
}
return errOnMissingRange(span, start, end)
},
)
if err != nil {
return errors.Wrapf(err, "invalid previous backups (a new full backup may be required if a table has been created, dropped or truncated)")
}
if coveredTime != startTime {
return errors.Wrapf(err, "expected previous backups to cover until time %v, got %v", startTime, coveredTime)
}
}
// if CompleteDbs is lost by a 1.x node, FormatDescriptorTrackingVersion
// means that a 2.0 node will disallow `RESTORE DATABASE foo`, but `RESTORE
// foo.table1, foo.table2...` will still work. MVCCFilter would be
// mis-handled, but is disallowed above. IntroducedSpans may also be lost by
// a 1.x node, meaning that if 1.1 nodes may resume a backup, the limitation
// of requiring full backups after schema changes remains.
backupDesc := BackupDescriptor{
StartTime: startTime,
EndTime: endTime,
MVCCFilter: mvccFilter,
Descriptors: targetDescs,
DescriptorChanges: revs,
CompleteDbs: completeDBs,
Spans: spans,
IntroducedSpans: newSpans,
FormatVersion: BackupFormatDescriptorTrackingVersion,
BuildInfo: build.GetInfo(),
NodeID: p.ExecCfg().NodeID.Get(),
ClusterID: p.ExecCfg().ClusterID(),
Statistics: tableStatistics,
}
// Sanity check: re-run the validation that RESTORE will do, but this time
// including this backup, to ensure that the this backup plus any previous
// backups does cover the interval expected.
if _, coveredEnd, err := makeImportSpans(
spans,
append(prevBackups, backupDesc),
nil, /*backupLocalityInfo*/
keys.MinKey,
errOnMissingRange,
); err != nil {
return err
} else if coveredEnd != endTime {
return errors.Errorf("expected backup (along with any previous backups) to cover to %v, not %v", endTime, coveredEnd)
}
descBytes, err := protoutil.Marshal(&backupDesc)
if err != nil {
return err
}
description, err := backupJobDescription(p, backupStmt, to, incrementalFrom, opts)
if err != nil {
return err
}
// TODO (lucy): For partitioned backups, also add verification for other
// stores we are writing to in addition to the default.
if err := VerifyUsableExportTarget(ctx, p.ExecCfg().Settings, defaultStore, defaultURI); err != nil {
return err
}
_, errCh, err := p.ExecCfg().JobRegistry.CreateAndStartJob(ctx, resultsCh, jobs.Record{
Description: description,
Username: p.User(),
DescriptorIDs: func() (sqlDescIDs []sqlbase.ID) {
for _, sqlDesc := range backupDesc.Descriptors {
sqlDescIDs = append(sqlDescIDs, sqlDesc.GetID())
}
return sqlDescIDs
}(),
Details: jobspb.BackupDetails{
StartTime: startTime,
EndTime: endTime,
URI: defaultURI,
URIsByLocalityKV: urisByLocalityKV,
BackupDescriptor: descBytes,
},
Progress: jobspb.BackupProgress{},
})
if err != nil {
return err
}
return <-errCh
}
return fn, header, nil, false, nil
}
type backupResumer struct {
job *jobs.Job
settings *cluster.Settings
res roachpb.BulkOpSummary
makeExternalStorage cloud.ExternalStorageFactory
}
// Resume is part of the jobs.Resumer interface.
func (b *backupResumer) Resume(
ctx context.Context, phs interface{}, resultsCh chan<- tree.Datums,
) error {
details := b.job.Details().(jobspb.BackupDetails)
p := phs.(sql.PlanHookState)
b.makeExternalStorage = p.ExecCfg().DistSQLSrv.ExternalStorage
if len(details.BackupDescriptor) == 0 {
return errors.Newf("missing backup descriptor; cannot resume a backup from an older version")
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(details.BackupDescriptor, &backupDesc); err != nil {
return pgerror.Wrapf(err, pgcode.DataCorrupted,
"unmarshal backup descriptor")
}
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
defaultConf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return errors.Wrapf(err, "export configuration")
}
defaultStore, err := b.makeExternalStorage(ctx, defaultConf)
if err != nil {
return errors.Wrapf(err, "make storage")
}
storageByLocalityKV := make(map[string]*roachpb.ExternalStorage)
for kv, uri := range details.URIsByLocalityKV {
conf, err := cloud.ExternalStorageConfFromURI(uri)
if err != nil {
return err
}
storageByLocalityKV[kv] = &conf
}
var checkpointDesc *BackupDescriptor
// We don't read the table descriptors from the backup descriptor, but
// they could be using either the new or the old foreign key
// representations. We should just preserve whatever representation the
// table descriptors were using and leave them alone.
if desc, err := readBackupDescriptor(ctx, defaultStore, BackupDescriptorCheckpointName); err == nil {
// If the checkpoint is from a different cluster, it's meaningless to us.
// More likely though are dummy/lock-out checkpoints with no ClusterID.
if desc.ClusterID.Equal(p.ExecCfg().ClusterID()) {
checkpointDesc = &desc
}
} else {
// TODO(benesch): distinguish between a missing checkpoint, which simply
// indicates the prior backup attempt made no progress, and a corrupted
// checkpoint, which is more troubling. Sadly, storageccl doesn't provide a
// "not found" error that's consistent across all ExternalStorage
// implementations.
log.Warningf(ctx, "unable to load backup checkpoint while resuming job %d: %v", *b.job.ID(), err)
}
res, err := backup(
ctx,
p.ExecCfg().DB,
p.ExecCfg().Gossip,
p.ExecCfg().Settings,
defaultStore,
storageByLocalityKV,
b.job,
&backupDesc,
checkpointDesc,
resultsCh,
b.makeExternalStorage,
)
b.res = res
return err
}
// OnFailOrCancel is part of the jobs.Resumer interface.
func (b *backupResumer) OnFailOrCancel(context.Context, *client.Txn) error {
return nil
}
// OnSuccess is part of the jobs.Resumer interface.
func (b *backupResumer) OnSuccess(context.Context, *client.Txn) error { return nil }
// OnTerminal is part of the jobs.Resumer interface.
func (b *backupResumer) OnTerminal(
ctx context.Context, status jobs.Status, resultsCh chan<- tree.Datums,
) {
// Attempt to delete BACKUP-CHECKPOINT.
if err := func() error {
details := b.job.Details().(jobspb.BackupDetails)
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
conf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return err
}
exportStore, err := b.makeExternalStorage(ctx, conf)
if err != nil {
return err
}
return exportStore.Delete(ctx, BackupDescriptorCheckpointName)
}(); err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor: %+v", err)
}
if status == jobs.StatusSucceeded {
// TODO(benesch): emit periodic progress updates.
// TODO(mjibson): if a restore was resumed, then these counts will only have
// the current coordinator's counts.
resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(*b.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(b.res.Rows)),
tree.NewDInt(tree.DInt(b.res.IndexEntries)),
tree.NewDInt(tree.DInt(b.res.SystemRecords)),
tree.NewDInt(tree.DInt(b.res.DataSize)),
}
}
}
type versionedValues struct {
Key roachpb.Key
Values []roachpb.Value
}
// getAllRevisions scans all keys between startKey and endKey getting all
// revisions between startTime and endTime.
// TODO(dt): if/when client gets a ScanRevisionsRequest or similar, use that.
func getAllRevisions(
ctx context.Context,
db *client.DB,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
) ([]versionedValues, error) {
// TODO(dt): version check.
header := roachpb.Header{Timestamp: endTime}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeader{Key: startKey, EndKey: endKey},
StartTime: startTime,
MVCCFilter: roachpb.MVCCFilter_All,
ReturnSST: true,
OmitChecksum: true,
}
resp, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return nil, pErr.GoError()
}
var res []versionedValues
for _, file := range resp.(*roachpb.ExportResponse).Files {
sst := engine.MakeRocksDBSstFileReader()
defer sst.Close()
if err := sst.IngestExternalFile(file.SST); err != nil {
return nil, err
}
if err := sst.Iterate(startKey, endKey, func(kv engine.MVCCKeyValue) (bool, error) {
if len(res) == 0 || !res[len(res)-1].Key.Equal(kv.Key.Key) {
res = append(res, versionedValues{Key: kv.Key.Key})
}
res[len(res)-1].Values = append(res[len(res)-1].Values, roachpb.Value{Timestamp: kv.Key.Timestamp, RawBytes: kv.Value})
return false, nil
}); err != nil {
return nil, err
}
}
return res, nil
}
var _ jobs.Resumer = &backupResumer{}
func init() {
sql.AddPlanHook(backupPlanHook)
jobs.RegisterConstructor(
jobspb.TypeBackup,
func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
return &backupResumer{
job: job,
settings: settings,
}
},
)
}
// getURIsByLocalityKV takes a slice of URIs for a single (possibly partitioned)
// backup, and returns the default backup destination URI and a map of all other
// URIs by locality KV. The URIs in the result do not include the
// COCKROACH_LOCALITY parameter.
func getURIsByLocalityKV(to []string) (string, map[string]string, error) {
localityAndBaseURI := func(uri string) (string, string, error) {
parsedURI, err := url.Parse(uri)
if err != nil {
return "", "", err
}
q := parsedURI.Query()
localityKV := q.Get(localityURLParam)
// Remove the backup locality parameter.
q.Del(localityURLParam)
parsedURI.RawQuery = q.Encode()
baseURI := parsedURI.String()
return localityKV, baseURI, nil
}
urisByLocalityKV := make(map[string]string)
if len(to) == 1 {
localityKV, baseURI, err := localityAndBaseURI(to[0])
if err != nil {
return "", nil, err
}
if localityKV != "" && localityKV != defaultLocalityValue {
return "", nil, errors.Errorf("%s %s is invalid for a single BACKUP location",
localityURLParam, localityKV)
}
return baseURI, urisByLocalityKV, nil
}
var defaultURI string
for _, uri := range to {
localityKV, baseURI, err := localityAndBaseURI(uri)
if err != nil {
return "", nil, err
}
if localityKV == "" {
return "", nil, errors.Errorf(
"multiple URLs are provided for partitioned BACKUP, but %s is not specified",
localityURLParam,
)
}
if localityKV == defaultLocalityValue {
if defaultURI != "" {
return "", nil, errors.Errorf("multiple default URLs provided for partition backup")
}
defaultURI = baseURI
} else {
kv := roachpb.Tier{}
if err := kv.FromString(localityKV); err != nil {
return "", nil, errors.Wrap(err, "failed to parse backup locality")
}
if _, ok := urisByLocalityKV[localityKV]; ok {
return "", nil, errors.Errorf("duplicate URIs for locality %s", localityKV)
}
urisByLocalityKV[localityKV] = baseURI
}
}
if defaultURI == "" {
return "", nil, errors.Errorf("no default URL provided for partitioned backup")
}
return defaultURI, urisByLocalityKV, nil
}
// maybeUpgradeTableDescsInBackupDescriptors updates the backup descriptors'
// table descriptors to use the newer 19.2-style foreign key representation,
// if they are not already upgraded. This requires resolving cross-table FK
// references, which is done by looking up all table descriptors across all
// backup descriptors provided. if skipFKsWithNoMatchingTable is set, FKs whose
// "other" table is missing from the set provided are omitted during the
// upgrade, instead of causing an error to be returned.
func maybeUpgradeTableDescsInBackupDescriptors(
ctx context.Context, backupDescs []BackupDescriptor, skipFKsWithNoMatchingTable bool,
) error {
protoGetter := sqlbase.MapProtoGetter{
Protos: make(map[interface{}]protoutil.Message),
}
// Populate the protoGetter with all table descriptors in all backup
// descriptors so that they can be looked up.
for _, backupDesc := range backupDescs {
for _, desc := range backupDesc.Descriptors {
if table := desc.Table(hlc.Timestamp{}); table != nil {
protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(table.ID))] =
sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor))
}
}
}
for i := range backupDescs {
backupDesc := &backupDescs[i]
for j := range backupDesc.Descriptors {
if table := backupDesc.Descriptors[j].Table(hlc.Timestamp{}); table != nil {
if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, skipFKsWithNoMatchingTable); err != nil {
return err
}
// TODO(lucy): Is this necessary?
backupDesc.Descriptors[j] = *sqlbase.WrapDescriptor(table)
}
}
}
return nil
}
| pkg/ccl/backupccl/backup.go | 1 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.9983065128326416,
0.027742469683289528,
0.00016204023268073797,
0.00018107297364622355,
0.15526698529720306
] |
{
"id": 0,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupDescriptorName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 940
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coltypes"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// getDataAndFullSelection is a test helper that generates tuples representing
// a one-column coltypes.Int64 batch where each element is its ordinal and an
// accompanying selection vector that selects every index in tuples.
func getDataAndFullSelection() (tuples, []uint16) {
data := make(tuples, coldata.BatchSize())
fullSelection := make([]uint16, coldata.BatchSize())
for i := range data {
data[i] = tuple{i}
fullSelection[i] = uint16(i)
}
return data, fullSelection
}
func TestRouterOutputAddBatch(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
data, fullSelection := getDataAndFullSelection()
// Since the actual data doesn't matter, we will just be reusing data for each
// test case.
testCases := []struct {
inputBatchSize uint16
outputBatchSize int
blockedThreshold int
// selection determines which indices to add to the router output as well
// as how many elements from data are compared to the output.
selection []uint16
name string
}{
{
inputBatchSize: coldata.BatchSize(),
outputBatchSize: int(coldata.BatchSize()),
blockedThreshold: defaultRouterOutputBlockedThreshold,
selection: fullSelection,
name: "OneBatch",
},
{
inputBatchSize: coldata.BatchSize(),
outputBatchSize: 4,
blockedThreshold: defaultRouterOutputBlockedThreshold,
selection: fullSelection,
name: "OneBatchGTOutputSize",
},
{
inputBatchSize: 4,
outputBatchSize: int(coldata.BatchSize()),
blockedThreshold: defaultRouterOutputBlockedThreshold,
selection: fullSelection,
name: "MultipleInputBatchesLTOutputSize",
},
{
inputBatchSize: coldata.BatchSize(),
outputBatchSize: int(coldata.BatchSize()),
blockedThreshold: defaultRouterOutputBlockedThreshold,
selection: fullSelection[:len(fullSelection)/4],
name: "QuarterSelection",
},
}
// unblockEventsChan is purposefully unbuffered; the router output should never write to it
// in this test.
unblockEventsChan := make(chan struct{})
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
o := newRouterOutputOpWithBlockedThresholdAndBatchSize(
testAllocator, []coltypes.T{coltypes.Int64}, unblockEventsChan, tc.blockedThreshold, tc.outputBatchSize,
)
in := newOpTestInput(tc.inputBatchSize, data, nil /* typs */)
out := newOpTestOutput(o, data[:len(tc.selection)])
in.Init()
for {
b := in.Next(ctx)
o.addBatch(b, tc.selection)
if b.Length() == 0 {
break
}
}
if err := out.Verify(); err != nil {
t.Fatal(err)
}
// The output should never block. This assumes test cases never send more
// than defaultRouterOutputBlockedThreshold values.
select {
case b := <-unblockEventsChan:
t.Fatalf("unexpected output state change blocked: %t", b)
default:
}
})
}
}
func TestRouterOutputNext(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
data, fullSelection := getDataAndFullSelection()
testCases := []struct {
unblockEvent func(in Operator, o *routerOutputOp)
expected tuples
name string
}{
{
// ReaderWaitsForData verifies that a reader blocks in Next(ctx) until there
// is data available.
unblockEvent: func(in Operator, o *routerOutputOp) {
for {
b := in.Next(ctx)
o.addBatch(b, fullSelection)
if b.Length() == 0 {
break
}
}
},
expected: data,
name: "ReaderWaitsForData",
},
{
// ReaderWaitsForZeroBatch verifies that a reader blocking on Next will
// also get unblocked with no data other than the zero batch.
unblockEvent: func(_ Operator, o *routerOutputOp) {
o.addBatch(coldata.ZeroBatch, nil /* selection */)
},
expected: tuples{},
name: "ReaderWaitsForZeroBatch",
},
{
// CancelUnblocksReader verifies that calling cancel on an output unblocks
// a reader.
unblockEvent: func(_ Operator, o *routerOutputOp) {
o.cancel()
},
expected: tuples{},
name: "CancelUnblocksReader",
},
}
// unblockedEventsChan is purposefully unbuffered; the router output should
// never write to it in this test.
unblockedEventsChan := make(chan struct{})
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var wg sync.WaitGroup
batchChan := make(chan coldata.Batch)
o := newRouterOutputOp(testAllocator, []coltypes.T{coltypes.Int64}, unblockedEventsChan)
in := newOpTestInput(coldata.BatchSize(), data, nil /* typs */)
in.Init()
wg.Add(1)
go func() {
for {
b := o.Next(ctx)
batchChan <- b
if b.Length() == 0 {
break
}
}
wg.Done()
}()
// Sleep a long enough amount of time to make sure that if Next didn't block
// above, we have a good chance of reading a batch.
time.Sleep(time.Millisecond)
select {
case <-batchChan:
t.Fatal("expected reader goroutine to block when no data ready")
default:
}
tc.unblockEvent(in, o)
// Should have data available, pushed by our reader goroutine.
batches := NewBatchBuffer()
out := newOpTestOutput(batches, tc.expected)
for {
b := <-batchChan
batches.Add(b)
if b.Length() == 0 {
break
}
}
if err := out.Verify(); err != nil {
t.Fatal(err)
}
wg.Wait()
select {
case <-unblockedEventsChan:
t.Fatal("unexpected output state change")
default:
}
})
}
t.Run("NextAfterZeroBatchDoesntBlock", func(t *testing.T) {
o := newRouterOutputOp(testAllocator, []coltypes.T{coltypes.Int64}, unblockedEventsChan)
o.addBatch(coldata.ZeroBatch, fullSelection)
o.Next(ctx)
o.Next(ctx)
select {
case <-unblockedEventsChan:
t.Fatal("unexpected output state change")
default:
}
})
t.Run("AddBatchDoesntBlockWhenOutputIsBlocked", func(t *testing.T) {
const (
smallBatchSize = 8
blockThreshold = smallBatchSize / 2
)
// Use a smaller selection than the batch size; it increases test coverage.
selection := fullSelection[:blockThreshold]
expected := make(tuples, len(data)/(smallBatchSize/blockThreshold))
for i, j := 0, 0; i < len(data) && j < len(expected); i, j = i+smallBatchSize, j+blockThreshold {
for k := 0; k < blockThreshold; k++ {
expected[j+k] = data[i+k]
}
}
ch := make(chan struct{}, 2)
o := newRouterOutputOpWithBlockedThresholdAndBatchSize(
testAllocator, []coltypes.T{coltypes.Int64}, ch, blockThreshold, int(coldata.BatchSize()),
)
in := newOpTestInput(smallBatchSize, data, nil /* typs */)
out := newOpTestOutput(o, expected)
in.Init()
b := in.Next(ctx)
// Make sure the output doesn't consider itself blocked. We're right at the
// limit but not over.
if o.addBatch(b, selection) {
t.Fatal("unexpectedly blocked")
}
b = in.Next(ctx)
// This addBatch call should now block the output.
if !o.addBatch(b, selection) {
t.Fatal("unexpectedly still unblocked")
}
// Add the rest of the data.
for {
b = in.Next(ctx)
if o.addBatch(b, selection) {
t.Fatal("should only return true when switching from unblocked to blocked")
}
if b.Length() == 0 {
break
}
}
// Unblock the output.
if err := out.Verify(); err != nil {
t.Fatal(err)
}
// Verify that an unblock event is sent on the channel. This test will fail
// with a timeout on a channel read if not.
<-ch
})
}
func TestRouterOutputRandom(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
rng, _ := randutil.NewPseudoRand()
var (
maxValues = int(coldata.BatchSize()) * 4
blockedThreshold = 1 + rng.Intn(maxValues-1)
outputSize = 1 + rng.Intn(maxValues-1)
)
typs := []coltypes.T{coltypes.Int64, coltypes.Int64}
dataLen := 1 + rng.Intn(maxValues-1)
data := make(tuples, dataLen)
for i := range data {
data[i] = make(tuple, len(typs))
for j := range typs {
data[i][j] = rng.Int63()
}
}
testName := fmt.Sprintf(
"blockedThreshold=%d/outputSize=%d/totalInputSize=%d", blockedThreshold, outputSize, len(data),
)
t.Run(testName, func(t *testing.T) {
runTestsWithFn(t, []tuples{data}, nil /* typs */, func(t *testing.T, inputs []Operator) {
var wg sync.WaitGroup
unblockedEventsChans := make(chan struct{}, 2)
o := newRouterOutputOpWithBlockedThresholdAndBatchSize(
testAllocator, typs, unblockedEventsChans, blockedThreshold, outputSize,
)
inputs[0].Init()
expected := make(tuples, 0, len(data))
// Producer.
errCh := make(chan error)
go func() {
lastBlockedState := false
for {
b := inputs[0].Next(ctx)
selection := b.Selection()
if selection == nil {
selection = randomSel(rng, b.Length(), rng.Float64())
}
selection = selection[:b.Length()]
for _, i := range selection {
expected = append(expected, make(tuple, len(typs)))
for j := range typs {
expected[len(expected)-1][j] = b.ColVec(j).Int64()[i]
}
}
if o.addBatch(b, selection) {
if lastBlockedState {
// We might have missed an unblock event during the last loop.
select {
case <-unblockedEventsChans:
default:
errCh <- errors.New("output returned state change to blocked when already blocked")
}
}
lastBlockedState = true
}
// Read any state changes.
for moreToRead := true; moreToRead; {
select {
case <-unblockedEventsChans:
if !lastBlockedState {
errCh <- errors.New("received unblocked state change when output is already unblocked")
}
lastBlockedState = false
default:
moreToRead = false
}
}
if b.Length() == 0 {
errCh <- nil
return
}
}
}()
actual := NewBatchBuffer()
// Consumer.
wg.Add(1)
go func() {
for {
b := o.Next(ctx)
actual.Add(b)
if b.Length() == 0 {
wg.Done()
return
}
}
}()
if err := <-errCh; err != nil {
t.Fatal(err)
}
wg.Wait()
if err := newOpTestOutput(actual, expected).Verify(); err != nil {
t.Fatal(err)
}
})
})
}
type callbackRouterOutput struct {
ZeroInputNode
addBatchCb func(coldata.Batch, []uint16) bool
cancelCb func()
}
var _ routerOutput = callbackRouterOutput{}
func (o callbackRouterOutput) addBatch(batch coldata.Batch, selection []uint16) bool {
if o.addBatchCb != nil {
return o.addBatchCb(batch, selection)
}
return false
}
func (o callbackRouterOutput) cancel() {
if o.cancelCb != nil {
o.cancelCb()
}
}
func TestHashRouterComputesDestination(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
data := make(tuples, coldata.BatchSize())
valsYetToSee := make(map[int64]struct{})
for i := range data {
data[i] = tuple{i}
valsYetToSee[int64(i)] = struct{}{}
}
in := newOpTestInput(coldata.BatchSize(), data, nil /* typs */)
in.Init()
var (
// expectedNumVals is the number of expected values the output at the
// corresponding index in outputs receives. This should not change between
// runs of tests unless the underlying hash algorithm changes. If it does,
// distributed hash routing will not produce correct results.
expectedNumVals = []int{273, 252, 287, 212}
valsPushed = make([]int, len(expectedNumVals))
)
outputs := make([]routerOutput, len(expectedNumVals))
for i := range outputs {
// Capture the index.
outputIdx := i
outputs[i] = callbackRouterOutput{
addBatchCb: func(batch coldata.Batch, sel []uint16) bool {
for _, j := range sel {
key := batch.ColVec(0).Int64()[j]
if _, ok := valsYetToSee[key]; !ok {
t.Fatalf("pushed alread seen value to router output: %d", key)
}
delete(valsYetToSee, key)
valsPushed[outputIdx]++
}
return false
},
cancelCb: func() {
t.Fatalf(
"output %d canceled, outputs should not be canceled during normal operation", outputIdx,
)
},
}
}
r := newHashRouterWithOutputs(in, []coltypes.T{coltypes.Int64}, []int{0}, nil /* ch */, outputs)
for r.processNextBatch(ctx) {
}
if len(valsYetToSee) != 0 {
t.Fatalf("hash router failed to push values: %v", valsYetToSee)
}
for i, expected := range expectedNumVals {
if valsPushed[i] != expected {
t.Fatalf("num val slices differ at output %d, expected: %v actual: %v", i, expectedNumVals, valsPushed)
}
}
}
func TestHashRouterCancellation(t *testing.T) {
defer leaktest.AfterTest(t)()
outputs := make([]routerOutput, 4)
numCancels := int64(0)
numAddBatches := int64(0)
for i := range outputs {
// We'll just be checking canceled.
outputs[i] = callbackRouterOutput{
addBatchCb: func(_ coldata.Batch, _ []uint16) bool {
atomic.AddInt64(&numAddBatches, 1)
return false
},
cancelCb: func() { atomic.AddInt64(&numCancels, 1) },
}
}
// Never-ending input of 0s.
batch := testAllocator.NewMemBatch([]coltypes.T{coltypes.Int64})
batch.SetLength(coldata.BatchSize())
in := NewRepeatableBatchSource(batch)
unbufferedCh := make(chan struct{})
r := newHashRouterWithOutputs(in, []coltypes.T{coltypes.Int64}, []int{0}, unbufferedCh, outputs)
t.Run("BeforeRun", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
r.Run(ctx)
if numCancels != int64(len(outputs)) {
t.Fatalf("expected %d canceled outputs, actual %d", len(outputs), numCancels)
}
if numAddBatches != 0 {
t.Fatalf("detected %d addBatch calls but expected 0", numAddBatches)
}
meta := r.DrainMeta(ctx)
require.Equal(t, 1, len(meta))
require.True(t, testutils.IsError(meta[0].Err, "context canceled"), meta[0].Err)
})
testCases := []struct {
blocked bool
name string
}{
{
blocked: false,
name: "DuringRun",
},
{
blocked: true,
name: "WhileWaitingForUnblock",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
numCancels = 0
numAddBatches = 0
ctx, cancel := context.WithCancel(context.Background())
if tc.blocked {
r.numBlockedOutputs = len(outputs)
defer func() {
r.numBlockedOutputs = 0
}()
}
routerMeta := make(chan []execinfrapb.ProducerMetadata)
go func() {
r.Run(ctx)
routerMeta <- r.DrainMeta(ctx)
close(routerMeta)
}()
time.Sleep(time.Millisecond)
if tc.blocked {
// Make sure no addBatches happened.
if n := atomic.LoadInt64(&numAddBatches); n != 0 {
t.Fatalf("expected router to be blocked, but detected %d addBatch calls", n)
}
}
select {
case <-routerMeta:
t.Fatal("hash router goroutine unexpectedly done")
default:
}
cancel()
meta := <-routerMeta
require.Equal(t, 1, len(meta))
require.True(t, testutils.IsError(meta[0].Err, "canceled"), meta[0].Err)
if numCancels != int64(len(outputs)) {
t.Fatalf("expected %d canceled outputs, actual %d", len(outputs), numCancels)
}
})
}
}
func TestHashRouterOneOutput(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
rng, _ := randutil.NewPseudoRand()
sel := randomSel(rng, coldata.BatchSize(), rng.Float64())
data, _ := getDataAndFullSelection()
typs := []coltypes.T{coltypes.Int64}
r, routerOutputs := NewHashRouter(
testAllocator, newOpFixedSelTestInput(sel, uint16(len(sel)), data), typs, []int{0}, 1, /* numOutputs */
)
if len(routerOutputs) != 1 {
t.Fatalf("expected 1 router output but got %d", len(routerOutputs))
}
expected := make(tuples, 0, len(data))
for _, i := range sel {
expected = append(expected, data[i])
}
o := newOpTestOutput(routerOutputs[0], expected)
var wg sync.WaitGroup
wg.Add(1)
go func() {
r.Run(ctx)
wg.Done()
}()
if err := o.Verify(); err != nil {
t.Fatal(err)
}
wg.Wait()
}
func TestHashRouterRandom(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
rng, _ := randutil.NewPseudoRand()
var (
maxValues = int(coldata.BatchSize()) * 4
maxOutputs = int(coldata.BatchSize())
blockedThreshold = 1 + rng.Intn(maxValues-1)
outputSize = 1 + rng.Intn(maxValues-1)
numOutputs = 1 + rng.Intn(maxOutputs-1)
)
typs := []coltypes.T{coltypes.Int64, coltypes.Int64}
dataLen := 1 + rng.Intn(maxValues-1)
data := make(tuples, dataLen)
for i := range data {
data[i] = make(tuple, len(typs))
for j := range typs {
data[i][j] = rng.Int63()
}
}
hashCols := make([]int, 0, len(typs))
hashCols = append(hashCols, 0)
for i := 1; i < cap(hashCols); i++ {
if rng.Float64() < 0.5 {
hashCols = append(hashCols, i)
}
}
// cancel determines whether we test cancellation.
cancel := false
if rng.Float64() < 0.25 {
cancel = true
}
testName := fmt.Sprintf(
"numOutputs=%d/blockedThreshold=%d/outputSize=%d/totalInputSize=%d/hashCols=%v/cancel=%t",
numOutputs,
blockedThreshold,
outputSize,
len(data),
hashCols,
cancel,
)
// expectedDistribution is set after the first run and used to verify that the
// distribution of results does not change between runs, as we are sending the
// same data to the same number of outputs.
var expectedDistribution []int
t.Run(testName, func(t *testing.T) {
runTestsWithFn(t, []tuples{data}, nil /* typs */, func(t *testing.T, inputs []Operator) {
unblockEventsChan := make(chan struct{}, 2*numOutputs)
outputs := make([]routerOutput, numOutputs)
outputsAsOps := make([]Operator, numOutputs)
for i := range outputs {
op := newRouterOutputOpWithBlockedThresholdAndBatchSize(
testAllocator, typs, unblockEventsChan, blockedThreshold, outputSize,
)
outputs[i] = op
outputsAsOps[i] = op
}
r := newHashRouterWithOutputs(
inputs[0], typs, hashCols, unblockEventsChan, outputs,
)
var (
results uint64
wg sync.WaitGroup
)
resultsByOp := make([]int, len(outputsAsOps))
wg.Add(len(outputsAsOps))
for i := range outputsAsOps {
go func(i int) {
for {
b := outputsAsOps[i].Next(ctx)
if b.Length() == 0 {
break
}
atomic.AddUint64(&results, uint64(b.Length()))
resultsByOp[i] += int(b.Length())
}
wg.Done()
}(i)
}
ctx, cancelFunc := context.WithCancel(context.Background())
wg.Add(1)
go func() {
r.Run(ctx)
wg.Done()
}()
if cancel {
// Sleep between 0 and ~5 milliseconds.
time.Sleep(time.Microsecond * time.Duration(rng.Intn(5000)))
cancelFunc()
} else {
// Satisfy linter context leak error.
defer cancelFunc()
}
// Ensure all goroutines end. If a test fails with a hang here it is most
// likely due to a cancellation bug.
wg.Wait()
if !cancel {
// Only do output verification if no cancellation happened.
if actualTotal := atomic.LoadUint64(&results); actualTotal != uint64(len(data)) {
t.Fatalf("unexpected number of results %d, expected %d", actualTotal, len(data))
}
if expectedDistribution == nil {
expectedDistribution = resultsByOp
return
}
for i, numVals := range expectedDistribution {
if numVals != resultsByOp[i] {
t.Fatalf(
"distribution of results changed compared to first run at output %d. expected: %v, actual: %v",
i,
expectedDistribution,
resultsByOp,
)
}
}
}
})
})
}
func BenchmarkHashRouter(b *testing.B) {
defer leaktest.AfterTest(b)()
ctx := context.Background()
types := []coltypes.T{coltypes.Int64}
// Use only one type. Note: the more types you use, the more you inflate the
// numbers.
batch := testAllocator.NewMemBatch(types)
batch.SetLength(coldata.BatchSize())
input := NewRepeatableBatchSource(batch)
var wg sync.WaitGroup
for _, numOutputs := range []int{2, 4, 8, 16} {
for _, numInputBatches := range []int{2, 4, 8, 16} {
b.Run(fmt.Sprintf("numOutputs=%d/numInputBatches=%d", numOutputs, numInputBatches), func(b *testing.B) {
r, outputs := NewHashRouter(testAllocator, input, types, []int{0}, numOutputs)
b.SetBytes(8 * int64(coldata.BatchSize()) * int64(numInputBatches))
// We expect distribution to not change. This is a sanity check that
// we're resetting properly.
var expectedDistribution []int
actualDistribution := make([]int, len(outputs))
// zeroDistribution just allows us to reset actualDistribution with a
// copy.
zeroDistribution := make([]int, len(outputs))
b.ResetTimer()
for i := 0; i < b.N; i++ {
input.ResetBatchesToReturn(numInputBatches)
r.reset()
wg.Add(len(outputs))
for j := range outputs {
go func(j int) {
for {
oBatch := outputs[j].Next(ctx)
actualDistribution[j] += int(oBatch.Length())
if oBatch.Length() == 0 {
break
}
}
wg.Done()
}(j)
}
r.Run(ctx)
wg.Wait()
// sum sanity checks that we are actually pushing as many values as we
// expect.
sum := 0
for i := range actualDistribution {
sum += actualDistribution[i]
}
if sum != numInputBatches*int(coldata.BatchSize()) {
b.Fatalf("unexpected sum %d, expected %d", sum, numInputBatches*int(coldata.BatchSize()))
}
if expectedDistribution == nil {
expectedDistribution = make([]int, len(actualDistribution))
copy(expectedDistribution, actualDistribution)
} else {
for j := range expectedDistribution {
if expectedDistribution[j] != actualDistribution[j] {
b.Fatalf(
"not resetting properly expected distribution: %v, actual distribution: %v",
expectedDistribution,
actualDistribution,
)
}
}
}
copy(actualDistribution, zeroDistribution)
}
})
}
}
}
| pkg/sql/colexec/routers_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.0034672273322939873,
0.0003242050588596612,
0.00016267884348053485,
0.0001731081574689597,
0.0005442690453492105
] |
{
"id": 0,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupDescriptorName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 940
} | ["\u002c"] | pkg/util/json/testdata/raw/string_one-byte-utf-8.json | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00017389861750416458,
0.00017389861750416458,
0.00017389861750416458,
0.00017389861750416458,
0
] |
{
"id": 0,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupDescriptorName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 940
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"context"
"fmt"
"sort"
"strings"
)
// alterZoneConfigAndClusterSettings changes the zone configurations so that GC
// occurs more quickly and jobs are retained for less time. This is useful for
// most ORM tests because they create/drop/alter tables frequently, which can
// cause thousands of table descriptors and schema change jobs to accumulate
// rapidly, thereby decreasing performance.
func alterZoneConfigAndClusterSettings(
ctx context.Context, version string, c *cluster, nodeIdx int,
) error {
db, err := c.ConnE(ctx, nodeIdx)
if err != nil {
return err
}
defer db.Close()
if _, err := db.ExecContext(
ctx, `ALTER RANGE default CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
); err != nil {
return err
}
if _, err := db.ExecContext(
ctx, `ALTER DATABASE system CONFIGURE ZONE USING num_replicas = 1, gc.ttlseconds = 120;`,
); err != nil {
return err
}
// TODO(rafi): remove this check once we stop testing against 2.0 and 2.1
if strings.HasPrefix(version, "v2.0") || strings.HasPrefix(version, "v2.1") {
return nil
}
if _, err := db.ExecContext(
ctx, `SET CLUSTER SETTING jobs.retention_time = '180s';`,
); err != nil {
return err
}
return nil
}
// ormTestsResults is a helper struct to be used in all roachtests for ORMs and
// drivers' compatibility.
type ormTestsResults struct {
currentFailures, allTests []string
failUnexpectedCount, failExpectedCount int
ignoredCount, skipCount, unexpectedSkipCount int
passUnexpectedCount, passExpectedCount int
// Put all the results in a giant map of [testname]result.
results map[string]string
// Put all issue hints in a map of [testname]issue.
allIssueHints map[string]string
runTests map[string]struct{}
}
func newORMTestsResults() *ormTestsResults {
return &ormTestsResults{
results: make(map[string]string),
allIssueHints: make(map[string]string),
runTests: make(map[string]struct{}),
}
}
// summarizeAll summarizes the result of running an ORM or a driver test suite
// against a cockroach node. If an unexpected result is observed (for example,
// a test unexpectedly failed or passed), a new blacklist is populated.
func (r *ormTestsResults) summarizeAll(
t *test, ormName, blacklistName string, expectedFailures blacklist, version, latestTag string,
) {
// Collect all the tests that were not run.
notRunCount := 0
for test, issue := range expectedFailures {
if _, ok := r.runTests[test]; ok {
continue
}
r.allTests = append(r.allTests, test)
r.results[test] = fmt.Sprintf("--- FAIL: %s - %s (not run)", test, maybeAddGithubLink(issue))
notRunCount++
}
// Log all the test results. We re-order the tests alphabetically here.
sort.Strings(r.allTests)
for _, test := range r.allTests {
result, ok := r.results[test]
if !ok {
t.Fatalf("can't find %s in test result list", test)
}
t.l.Printf("%s\n", result)
}
t.l.Printf("------------------------\n")
r.summarizeFailed(
t, ormName, blacklistName, expectedFailures, version, latestTag, notRunCount,
)
}
// summarizeFailed prints out the results of running an ORM or a driver test
// suite against a cockroach node. It is similar to summarizeAll except that it
// doesn't pay attention to all the tests - only to the failed ones.
// If a test suite outputs only the failures, then this method should be used.
func (r *ormTestsResults) summarizeFailed(
t *test,
ormName, blacklistName string,
expectedFailures blacklist,
version, latestTag string,
notRunCount int,
) {
var bResults strings.Builder
fmt.Fprintf(&bResults, "Tests run on Cockroach %s\n", version)
fmt.Fprintf(&bResults, "Tests run against %s %s\n", ormName, latestTag)
fmt.Fprintf(&bResults, "%d Total Tests Run\n",
r.passExpectedCount+r.passUnexpectedCount+r.failExpectedCount+r.failUnexpectedCount,
)
p := func(msg string, count int) {
testString := "tests"
if count == 1 {
testString = "test"
}
fmt.Fprintf(&bResults, "%d %s %s\n", count, testString, msg)
}
p("passed", r.passUnexpectedCount+r.passExpectedCount)
p("failed", r.failUnexpectedCount+r.failExpectedCount)
p("skipped", r.skipCount)
p("ignored", r.ignoredCount)
p("passed unexpectedly", r.passUnexpectedCount)
p("failed unexpectedly", r.failUnexpectedCount)
p("expected failed but skipped", r.unexpectedSkipCount)
p("expected failed but not run", notRunCount)
fmt.Fprintf(&bResults, "---\n")
for _, result := range r.results {
if strings.Contains(result, "unexpected") {
fmt.Fprintf(&bResults, "%s\n", result)
}
}
fmt.Fprintf(&bResults, "For a full summary look at the %s artifacts \n", ormName)
t.l.Printf("%s\n", bResults.String())
t.l.Printf("------------------------\n")
if r.failUnexpectedCount > 0 || r.passUnexpectedCount > 0 ||
notRunCount > 0 || r.unexpectedSkipCount > 0 {
// Create a new blacklist so we can easily update this test.
sort.Strings(r.currentFailures)
var b strings.Builder
fmt.Fprintf(&b, "Here is new %s blacklist that can be used to update the test:\n\n", ormName)
fmt.Fprintf(&b, "var %s = blacklist{\n", blacklistName)
for _, test := range r.currentFailures {
issue := expectedFailures[test]
if len(issue) == 0 || issue == "unknown" {
issue = r.allIssueHints[test]
}
if len(issue) == 0 {
issue = "unknown"
}
fmt.Fprintf(&b, " \"%s\": \"%s\",\n", test, issue)
}
fmt.Fprintf(&b, "}\n\n")
t.l.Printf("\n\n%s\n\n", b.String())
t.l.Printf("------------------------\n")
t.Fatalf("\n%s\nAn updated blacklist (%s) is available in the artifacts' %s log\n",
bResults.String(),
blacklistName,
ormName,
)
}
}
| pkg/cmd/roachtest/orm_helpers.go | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00047120804083533585,
0.00019113301823381335,
0.0001636307715671137,
0.00016867124941200018,
0.00006995080912020057
] |
{
"id": 1,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 948
} | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"net/url"
"sort"
"time"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/covering"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/interval"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
const (
// BackupDescriptorName is the file name used for serialized
// BackupDescriptor protos.
BackupDescriptorName = "BACKUP"
// BackupManifestName is a future name for the serialized
// BackupDescriptor proto.
BackupManifestName = "BACKUP_MANIFEST"
// BackupPartitionDescriptorPrefix is the file name prefix for serialized
// BackupPartitionDescriptor protos.
BackupPartitionDescriptorPrefix = "BACKUP_PART"
// BackupDescriptorCheckpointName is the file name used to store the
// serialized BackupDescriptor proto while the backup is in progress.
BackupDescriptorCheckpointName = "BACKUP-CHECKPOINT"
// BackupFormatDescriptorTrackingVersion added tracking of complete DBs.
BackupFormatDescriptorTrackingVersion uint32 = 1
)
const (
backupOptRevisionHistory = "revision_history"
localityURLParam = "COCKROACH_LOCALITY"
defaultLocalityValue = "default"
)
var useTBI = settings.RegisterBoolSetting(
"kv.bulk_io_write.experimental_incremental_export_enabled",
"use experimental time-bound file filter when exporting in BACKUP",
false,
)
var backupOptionExpectValues = map[string]sql.KVStringOptValidate{
backupOptRevisionHistory: sql.KVStringOptRequireNoValue,
}
// BackupCheckpointInterval is the interval at which backup progress is saved
// to durable storage.
var BackupCheckpointInterval = time.Minute
// ReadBackupDescriptorFromURI creates an export store from the given URI, then
// reads and unmarshals a BackupDescriptor at the standard location in the
// export storage.
func ReadBackupDescriptorFromURI(
ctx context.Context, uri string, makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory,
) (BackupDescriptor, error) {
exportStore, err := makeExternalStorageFromURI(ctx, uri)
if err != nil {
return BackupDescriptor{}, err
}
defer exportStore.Close()
backupDesc, err := readBackupDescriptor(ctx, exportStore, BackupDescriptorName)
if err != nil {
backupManifest, manifestErr := readBackupDescriptor(ctx, exportStore, BackupManifestName)
if manifestErr != nil {
return BackupDescriptor{}, err
}
backupDesc = backupManifest
}
backupDesc.Dir = exportStore.Conf()
// TODO(dan): Sanity check this BackupDescriptor: non-empty EndTime,
// non-empty Paths, and non-overlapping Spans and keyranges in Files.
return backupDesc, nil
}
// readBackupDescriptor reads and unmarshals a BackupDescriptor from filename in
// the provided export store.
func readBackupDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupDescriptor{}, err
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupDescriptor{}, err
}
for _, d := range backupDesc.Descriptors {
// Calls to GetTable are generally frowned upon.
// This specific call exists to provide backwards compatibility with
// backups created prior to version 19.1. Starting in v19.1 the
// ModificationTime is always written in backups for all versions
// of table descriptors. In earlier cockroach versions only later
// table descriptor versions contain a non-empty ModificationTime.
// Later versions of CockroachDB use the MVCC timestamp to fill in
// the ModificationTime for table descriptors. When performing a restore
// we no longer have access to that MVCC timestamp but we can set it
// to a value we know will be safe.
if t := d.GetTable(); t == nil {
continue
} else if t.Version == 1 && t.ModificationTime.IsEmpty() {
t.ModificationTime = hlc.Timestamp{WallTime: 1}
}
}
return backupDesc, err
}
func readBackupPartitionDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupPartitionDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupPartitionDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupPartitionDescriptor{}, err
}
var backupDesc BackupPartitionDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupPartitionDescriptor{}, err
}
return backupDesc, err
}
// getRelevantDescChanges finds the changes between start and end time to the
// SQL descriptors matching `descs` or `expandedDBs`, ordered by time. A
// descriptor revision matches if it is an earlier revision of a descriptor in
// descs (same ID) or has parentID in `expanded`. Deleted descriptors are
// represented as nil. Fills in the `priorIDs` map in the process, which maps
// a descriptor the the ID by which it was previously known (e.g pre-TRUNCATE).
func getRelevantDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
descs []sqlbase.Descriptor,
expanded []sqlbase.ID,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
allChanges, err := getAllDescChanges(ctx, db, startTime, endTime, priorIDs)
if err != nil {
return nil, err
}
// If no descriptors changed, we can just stop now and have RESTORE use the
// normal list of descs (i.e. as of endTime).
if len(allChanges) == 0 {
return nil, nil
}
// interestingChanges will be every descriptor change relevant to the backup.
var interestingChanges []BackupDescriptor_DescriptorRevision
// interestingIDs are the descriptor for which we're interested in capturing
// changes. This is initially the descriptors matched (as of endTime) by our
// target spec, plus those that belonged to a DB that our spec expanded at any
// point in the interval.
interestingIDs := make(map[sqlbase.ID]struct{}, len(descs))
// The descriptors that currently (endTime) match the target spec (desc) are
// obviously interesting to our backup.
for _, i := range descs {
interestingIDs[i.GetID()] = struct{}{}
if t := i.Table(hlc.Timestamp{}); t != nil {
for j := t.ReplacementOf.ID; j != sqlbase.InvalidID; j = priorIDs[j] {
interestingIDs[j] = struct{}{}
}
}
}
// We're also interested in any desc that belonged to a DB we're backing up.
// We'll start by looking at all descriptors as of the beginning of the
// interval and add to the set of IDs that we are interested any descriptor that
// belongs to one of the parents we care about.
interestingParents := make(map[sqlbase.ID]struct{}, len(expanded))
for _, i := range expanded {
interestingParents[i] = struct{}{}
}
if !startTime.IsEmpty() {
starting, err := loadAllDescs(ctx, db, startTime)
if err != nil {
return nil, err
}
for _, i := range starting {
if table := i.Table(hlc.Timestamp{}); table != nil {
// We need to add to interestingIDs so that if we later see a delete for
// this ID we still know it is interesting to us, even though we will not
// have a parentID at that point (since the delete is a nil desc).
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
}
}
if _, ok := interestingIDs[i.GetID()]; ok {
desc := i
// We inject a fake "revision" that captures the starting state for
// matched descriptor, to allow restoring to times before its first rev
// actually inside the window. This likely ends up duplicating the last
// version in the previous BACKUP descriptor, but avoids adding more
// complicated special-cases in RESTORE, so it only needs to look in a
// single BACKUP to restore to a particular time.
initial := BackupDescriptor_DescriptorRevision{Time: startTime, ID: i.GetID(), Desc: &desc}
interestingChanges = append(interestingChanges, initial)
}
}
}
for _, change := range allChanges {
// A change to an ID that we are interested in is obviously interesting --
// a change is also interesting if it is to a table that has a parent that
// we are interested and thereafter it also becomes an ID in which we are
// interested in changes (since, as mentioned above, to decide if deletes
// are interesting).
if _, ok := interestingIDs[change.ID]; ok {
interestingChanges = append(interestingChanges, change)
} else if change.Desc != nil {
if table := change.Desc.Table(hlc.Timestamp{}); table != nil {
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
interestingChanges = append(interestingChanges, change)
}
}
}
}
sort.Slice(interestingChanges, func(i, j int) bool {
return interestingChanges[i].Time.Less(interestingChanges[j].Time)
})
return interestingChanges, nil
}
// getAllDescChanges gets every sql descriptor change between start and end time
// returning its ID, content and the change time (with deletions represented as
// nil content).
func getAllDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
allRevs, err := getAllRevisions(ctx, db, startKey, endKey, startTime, endTime)
if err != nil {
return nil, err
}
var res []BackupDescriptor_DescriptorRevision
for _, revs := range allRevs {
id, err := keys.DecodeDescMetadataID(revs.Key)
if err != nil {
return nil, err
}
for _, rev := range revs.Values {
r := BackupDescriptor_DescriptorRevision{ID: sqlbase.ID(id), Time: rev.Timestamp}
if len(rev.RawBytes) != 0 {
var desc sqlbase.Descriptor
if err := rev.GetProto(&desc); err != nil {
return nil, err
}
r.Desc = &desc
t := desc.Table(rev.Timestamp)
if t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {
priorIDs[t.ID] = t.ReplacementOf.ID
}
}
res = append(res, r)
}
}
return res, nil
}
func allSQLDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.Descriptor, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
rows, err := txn.Scan(ctx, startKey, endKey, 0)
if err != nil {
return nil, err
}
sqlDescs := make([]sqlbase.Descriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&sqlDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal SQL descriptor", row.Key)
}
if row.Value != nil {
sqlDescs[i].Table(row.Value.Timestamp)
}
}
return sqlDescs, nil
}
func ensureInterleavesIncluded(tables []*sqlbase.TableDescriptor) error {
inBackup := make(map[sqlbase.ID]bool, len(tables))
for _, t := range tables {
inBackup[t.ID] = true
}
for _, table := range tables {
if err := table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error {
for _, a := range index.Interleave.Ancestors {
if !inBackup[a.TableID] {
return errors.Errorf(
"cannot backup table %q without interleave parent (ID %d)", table.Name, a.TableID,
)
}
}
for _, c := range index.InterleavedBy {
if !inBackup[c.Table] {
return errors.Errorf(
"cannot backup table %q without interleave child table (ID %d)", table.Name, c.Table,
)
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
func allRangeDescriptors(ctx context.Context, txn *client.Txn) ([]roachpb.RangeDescriptor, error) {
rows, err := txn.Scan(ctx, keys.Meta2Prefix, keys.MetaMax, 0)
if err != nil {
return nil, errors.Wrapf(err,
"unable to scan range descriptors")
}
rangeDescs := make([]roachpb.RangeDescriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&rangeDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal range descriptor", row.Key)
}
}
return rangeDescs, nil
}
type tableAndIndex struct {
tableID sqlbase.ID
indexID sqlbase.IndexID
}
// spansForAllTableIndexes returns non-overlapping spans for every index and
// table passed in. They would normally overlap if any of them are interleaved.
func spansForAllTableIndexes(
tables []*sqlbase.TableDescriptor, revs []BackupDescriptor_DescriptorRevision,
) []roachpb.Span {
added := make(map[tableAndIndex]bool, len(tables))
sstIntervalTree := interval.NewTree(interval.ExclusiveOverlapper)
for _, table := range tables {
for _, index := range table.AllNonDropIndexes() {
if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(index.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[tableAndIndex{tableID: table.ID, indexID: index.ID}] = true
}
}
// If there are desc revisions, ensure that we also add any index spans
// in them that we didn't already get above e.g. indexes or tables that are
// not in latest because they were dropped during the time window in question.
for _, rev := range revs {
if tbl := rev.Desc.Table(hlc.Timestamp{}); tbl != nil {
for _, idx := range tbl.AllNonDropIndexes() {
key := tableAndIndex{tableID: tbl.ID, indexID: idx.ID}
if !added[key] {
if err := sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(idx.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[key] = true
}
}
}
}
var spans []roachpb.Span
_ = sstIntervalTree.Do(func(r interval.Interface) bool {
spans = append(spans, roachpb.Span{
Key: roachpb.Key(r.Range().Start),
EndKey: roachpb.Key(r.Range().End),
})
return false
})
return spans
}
// coveringFromSpans creates an interval.Covering with a fixed payload from a
// slice of roachpb.Spans.
func coveringFromSpans(spans []roachpb.Span, payload interface{}) covering.Covering {
var c covering.Covering
for _, span := range spans {
c = append(c, covering.Range{
Start: []byte(span.Key),
End: []byte(span.EndKey),
Payload: payload,
})
}
return c
}
// splitAndFilterSpans returns the spans that represent the set difference
// (includes - excludes) while also guaranteeing that each output span does not
// cross the endpoint of a RangeDescriptor in ranges.
func splitAndFilterSpans(
includes []roachpb.Span, excludes []roachpb.Span, ranges []roachpb.RangeDescriptor,
) []roachpb.Span {
type includeMarker struct{}
type excludeMarker struct{}
includeCovering := coveringFromSpans(includes, includeMarker{})
excludeCovering := coveringFromSpans(excludes, excludeMarker{})
var rangeCovering covering.Covering
for _, rangeDesc := range ranges {
rangeCovering = append(rangeCovering, covering.Range{
Start: []byte(rangeDesc.StartKey),
End: []byte(rangeDesc.EndKey),
})
}
splits := covering.OverlapCoveringMerge(
[]covering.Covering{includeCovering, excludeCovering, rangeCovering},
)
var out []roachpb.Span
for _, split := range splits {
include := false
exclude := false
for _, payload := range split.Payload.([]interface{}) {
switch payload.(type) {
case includeMarker:
include = true
case excludeMarker:
exclude = true
}
}
if include && !exclude {
out = append(out, roachpb.Span{
Key: roachpb.Key(split.Start),
EndKey: roachpb.Key(split.End),
})
}
}
return out
}
func optsToKVOptions(opts map[string]string) tree.KVOptions {
if len(opts) == 0 {
return nil
}
sortedOpts := make([]string, 0, len(opts))
for k := range opts {
sortedOpts = append(sortedOpts, k)
}
sort.Strings(sortedOpts)
kvopts := make(tree.KVOptions, 0, len(opts))
for _, k := range sortedOpts {
opt := tree.KVOption{Key: tree.Name(k)}
if v := opts[k]; v != "" {
opt.Value = tree.NewDString(v)
}
kvopts = append(kvopts, opt)
}
return kvopts
}
func backupJobDescription(
p sql.PlanHookState,
backup *tree.Backup,
to []string,
incrementalFrom []string,
opts map[string]string,
) (string, error) {
b := &tree.Backup{
AsOf: backup.AsOf,
Options: optsToKVOptions(opts),
Targets: backup.Targets,
}
for _, t := range to {
sanitizedTo, err := cloud.SanitizeExternalStorageURI(t)
if err != nil {
return "", err
}
b.To = append(b.To, tree.NewDString(sanitizedTo))
}
for _, from := range incrementalFrom {
sanitizedFrom, err := cloud.SanitizeExternalStorageURI(from)
if err != nil {
return "", err
}
b.IncrementalFrom = append(b.IncrementalFrom, tree.NewDString(sanitizedFrom))
}
ann := p.ExtendedEvalContext().Annotations
return tree.AsStringWithFQNames(b, ann), nil
}
// clusterNodeCount returns the approximate number of nodes in the cluster.
func clusterNodeCount(g *gossip.Gossip) int {
var nodes int
_ = g.IterateInfos(gossip.KeyNodeIDPrefix, func(_ string, _ gossip.Info) error {
nodes++
return nil
})
return nodes
}
// BackupFileDescriptors is an alias on which to implement sort's interface.
type BackupFileDescriptors []BackupDescriptor_File
func (r BackupFileDescriptors) Len() int { return len(r) }
func (r BackupFileDescriptors) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r BackupFileDescriptors) Less(i, j int) bool {
if cmp := bytes.Compare(r[i].Span.Key, r[j].Span.Key); cmp != 0 {
return cmp < 0
}
return bytes.Compare(r[i].Span.EndKey, r[j].Span.EndKey) < 0
}
func writeBackupDescriptor(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupDescriptor,
) error {
sort.Sort(BackupFileDescriptors(desc.Files))
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
// writeBackupPartitionDescriptor writes metadata (containing a locality KV and
// partial file listing) for a partitioned BACKUP to one of the stores in the
// backup.
func writeBackupPartitionDescriptor(
ctx context.Context,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupPartitionDescriptor,
) error {
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
func loadAllDescs(
ctx context.Context, db *client.DB, asOf hlc.Timestamp,
) ([]sqlbase.Descriptor, error) {
var allDescs []sqlbase.Descriptor
if err := db.Txn(
ctx,
func(ctx context.Context, txn *client.Txn) error {
var err error
txn.SetFixedTimestamp(ctx, asOf)
allDescs, err = allSQLDescriptors(ctx, txn)
return err
}); err != nil {
return nil, err
}
return allDescs, nil
}
// ResolveTargetsToDescriptors performs name resolution on a set of targets and
// returns the resulting descriptors.
func ResolveTargetsToDescriptors(
ctx context.Context, p sql.PlanHookState, endTime hlc.Timestamp, targets tree.TargetList,
) ([]sqlbase.Descriptor, []sqlbase.ID, error) {
allDescs, err := loadAllDescs(ctx, p.ExecCfg().DB, endTime)
if err != nil {
return nil, nil, err
}
var matched descriptorsMatched
if matched, err = descriptorsMatchingTargets(ctx,
p.CurrentDatabase(), p.CurrentSearchPath(), allDescs, targets); err != nil {
return nil, nil, err
}
// Ensure interleaved tables appear after their parent. Since parents must be
// created before their children, simply sorting by ID accomplishes this.
sort.Slice(matched.descs, func(i, j int) bool { return matched.descs[i].GetID() < matched.descs[j].GetID() })
return matched.descs, matched.expandedDB, nil
}
type spanAndTime struct {
span roachpb.Span
start, end hlc.Timestamp
}
// backup exports a snapshot of every kv entry into ranged sstables.
//
// The output is an sstable per range with files in the following locations:
// - <dir>/<unique_int>.sst
// - <dir> is given by the user and may be cloud storage
// - Each file contains data for a key range that doesn't overlap with any other
// file.
func backup(
ctx context.Context,
db *client.DB,
gossip *gossip.Gossip,
settings *cluster.Settings,
defaultStore cloud.ExternalStorage,
storageByLocalityKV map[string]*roachpb.ExternalStorage,
job *jobs.Job,
backupDesc *BackupDescriptor,
checkpointDesc *BackupDescriptor,
resultsCh chan<- tree.Datums,
makeExternalStorage cloud.ExternalStorageFactory,
) (roachpb.BulkOpSummary, error) {
// TODO(dan): Figure out how permissions should work. #6713 is tracking this
// for grpc.
mu := struct {
syncutil.Mutex
files []BackupDescriptor_File
exported roachpb.BulkOpSummary
lastCheckpoint time.Time
}{}
var checkpointMu syncutil.Mutex
var ranges []roachpb.RangeDescriptor
if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error {
var err error
// TODO(benesch): limit the range descriptors we fetch to the ranges that
// are actually relevant in the backup to speed up small backups on large
// clusters.
ranges, err = allRangeDescriptors(ctx, txn)
return err
}); err != nil {
return mu.exported, err
}
var completedSpans, completedIntroducedSpans []roachpb.Span
if checkpointDesc != nil {
// TODO(benesch): verify these files, rather than accepting them as truth
// blindly.
// No concurrency yet, so these assignments are safe.
mu.files = checkpointDesc.Files
mu.exported = checkpointDesc.EntryCounts
for _, file := range checkpointDesc.Files {
if file.StartTime.IsEmpty() && !file.EndTime.IsEmpty() {
completedIntroducedSpans = append(completedIntroducedSpans, file.Span)
} else {
completedSpans = append(completedSpans, file.Span)
}
}
}
// Subtract out any completed spans and split the remaining spans into
// range-sized pieces so that we can use the number of completed requests as a
// rough measure of progress.
spans := splitAndFilterSpans(backupDesc.Spans, completedSpans, ranges)
introducedSpans := splitAndFilterSpans(backupDesc.IntroducedSpans, completedIntroducedSpans, ranges)
allSpans := make([]spanAndTime, 0, len(spans)+len(introducedSpans))
for _, s := range introducedSpans {
allSpans = append(allSpans, spanAndTime{span: s, start: hlc.Timestamp{}, end: backupDesc.StartTime})
}
for _, s := range spans {
allSpans = append(allSpans, spanAndTime{span: s, start: backupDesc.StartTime, end: backupDesc.EndTime})
}
// Sequential ranges may have clustered leaseholders, for example a
// geo-partitioned table likely has all the leaseholders for some contiguous
// span of the table (i.e. a partition) pinned to just the nodes in a region.
// In such cases, sending spans sequentially may under-utilize the rest of the
// cluster given that we have a limit on the number of spans we send out at
// a given time. Randomizing the order of spans should help ensure a more even
// distribution of work across the cluster regardless of how leaseholders may
// or may not be clustered.
rand.Shuffle(len(allSpans), func(i, j int) {
allSpans[i], allSpans[j] = allSpans[j], allSpans[i]
})
progressLogger := jobs.NewChunkProgressLogger(job, len(spans), job.FractionCompleted(), jobs.ProgressUpdateOnly)
// We're already limiting these on the server-side, but sending all the
// Export requests at once would fill up distsender/grpc/something and cause
// all sorts of badness (node liveness timeouts leading to mass leaseholder
// transfers, poor performance on SQL workloads, etc) as well as log spam
// about slow distsender requests. Rate limit them here, too.
//
// Each node limits the number of running Export & Import requests it serves
// to avoid overloading the network, so multiply that by the number of nodes
// in the cluster and use that as the number of outstanding Export requests
// for the rate limiting. This attempts to strike a balance between
// simplicity, not getting slow distsender log spam, and keeping the server
// side limiter full.
//
// TODO(dan): Make this limiting per node.
//
// TODO(dan): See if there's some better solution than rate-limiting #14798.
maxConcurrentExports := clusterNodeCount(gossip) * int(storage.ExportRequestsLimit.Get(&settings.SV)) * 10
exportsSem := make(chan struct{}, maxConcurrentExports)
g := ctxgroup.WithContext(ctx)
requestFinishedCh := make(chan struct{}, len(spans)) // enough buffer to never block
// Only start the progress logger if there are spans, otherwise this will
// block forever. This is needed for TestBackupRestoreResume which doesn't
// have any spans. Users should never hit this.
if len(spans) > 0 {
g.GoCtx(func(ctx context.Context) error {
return progressLogger.Loop(ctx, requestFinishedCh)
})
}
g.GoCtx(func(ctx context.Context) error {
for i := range allSpans {
{
select {
case exportsSem <- struct{}{}:
case <-ctx.Done():
// Break the for loop to avoid creating more work - the backup
// has failed because either the context has been canceled or an
// error has been returned. Either way, Wait() is guaranteed to
// return an error now.
return ctx.Err()
}
}
span := allSpans[i]
g.GoCtx(func(ctx context.Context) error {
defer func() { <-exportsSem }()
header := roachpb.Header{Timestamp: span.end}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeaderFromSpan(span.span),
Storage: defaultStore.Conf(),
StorageByLocalityKV: storageByLocalityKV,
StartTime: span.start,
EnableTimeBoundIteratorOptimization: useTBI.Get(&settings.SV),
MVCCFilter: roachpb.MVCCFilter(backupDesc.MVCCFilter),
}
rawRes, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return pErr.GoError()
}
res := rawRes.(*roachpb.ExportResponse)
mu.Lock()
if backupDesc.RevisionStartTime.Less(res.StartTime) {
backupDesc.RevisionStartTime = res.StartTime
}
for _, file := range res.Files {
f := BackupDescriptor_File{
Span: file.Span,
Path: file.Path,
Sha512: file.Sha512,
EntryCounts: file.Exported,
LocalityKV: file.LocalityKV,
}
if span.start != backupDesc.StartTime {
f.StartTime = span.start
f.EndTime = span.end
}
mu.files = append(mu.files, f)
mu.exported.Add(file.Exported)
}
var checkpointFiles BackupFileDescriptors
if timeutil.Since(mu.lastCheckpoint) > BackupCheckpointInterval {
// We optimistically assume the checkpoint will succeed to prevent
// multiple threads from attempting to checkpoint.
mu.lastCheckpoint = timeutil.Now()
checkpointFiles = append(checkpointFiles, mu.files...)
}
mu.Unlock()
requestFinishedCh <- struct{}{}
if checkpointFiles != nil {
checkpointMu.Lock()
backupDesc.Files = checkpointFiles
err := writeBackupDescriptor(
ctx, settings, defaultStore, BackupDescriptorCheckpointName, backupDesc,
)
checkpointMu.Unlock()
if err != nil {
log.Errorf(ctx, "unable to checkpoint backup descriptor: %+v", err)
}
}
return nil
})
}
return nil
})
if err := g.Wait(); err != nil {
return mu.exported, errors.Wrapf(err, "exporting %d ranges", errors.Safe(len(spans)))
}
// No more concurrency, so no need to acquire locks below.
backupDesc.Files = mu.files
backupDesc.EntryCounts = mu.exported
backupID := uuid.MakeV4()
backupDesc.ID = backupID
// Write additional partial descriptors to each node for partitioned backups.
if len(storageByLocalityKV) > 0 {
filesByLocalityKV := make(map[string][]BackupDescriptor_File)
for i := range mu.files {
file := &mu.files[i]
filesByLocalityKV[file.LocalityKV] = append(filesByLocalityKV[file.LocalityKV], *file)
}
nextPartitionedDescFilenameID := 1
for kv, conf := range storageByLocalityKV {
backupDesc.LocalityKVs = append(backupDesc.LocalityKVs, kv)
// Set a unique filename for each partition backup descriptor. The ID
// ensures uniqueness, and the kv string appended to the end is for
// readability.
filename := fmt.Sprintf("%s_%d_%s",
BackupPartitionDescriptorPrefix, nextPartitionedDescFilenameID, sanitizeLocalityKV(kv))
nextPartitionedDescFilenameID++
backupDesc.PartitionDescriptorFilenames = append(backupDesc.PartitionDescriptorFilenames, filename)
desc := BackupPartitionDescriptor{
LocalityKV: kv,
Files: filesByLocalityKV[kv],
BackupID: backupID,
}
if err := func() error {
store, err := makeExternalStorage(ctx, *conf)
if err != nil {
return err
}
defer store.Close()
return writeBackupPartitionDescriptor(ctx, store, filename, &desc)
}(); err != nil {
return mu.exported, err
}
}
}
if err := writeBackupDescriptor(ctx, settings, defaultStore, BackupDescriptorName, backupDesc); err != nil {
return mu.exported, err
}
return mu.exported, nil
}
// sanitizeLocalityKV returns a sanitized version of the input string where all
// characters that are not alphanumeric or -, =, or _ are replaced with _.
func sanitizeLocalityKV(kv string) string {
sanitizedKV := make([]byte, len(kv))
for i := 0; i < len(kv); i++ {
if (kv[i] >= 'a' && kv[i] <= 'z') ||
(kv[i] >= 'A' && kv[i] <= 'Z') ||
(kv[i] >= '0' && kv[i] <= '9') || kv[i] == '-' || kv[i] == '=' {
sanitizedKV[i] = kv[i]
} else {
sanitizedKV[i] = '_'
}
}
return string(sanitizedKV)
}
// VerifyUsableExportTarget ensures that the target location does not already
// contain a BACKUP or checkpoint and writes an empty checkpoint, both verifying
// that the location is writable and locking out accidental concurrent
// operations on that location if subsequently try this check. Callers must
// clean up the written checkpoint file (BackupDescriptorCheckpointName) only
// after writing to the backup file location (BackupDescriptorName).
func VerifyUsableExportTarget(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
readable string,
) error {
if r, err := exportStore.ReadFile(ctx, BackupDescriptorName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupDescriptorName)
}
if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupManifestName)
}
if r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file (is another operation already in progress?)",
readable, BackupDescriptorCheckpointName)
}
if err := writeBackupDescriptor(
ctx, settings, exportStore, BackupDescriptorCheckpointName, &BackupDescriptor{},
); err != nil {
return errors.Wrapf(err, "cannot write to %s", readable)
}
return nil
}
// backupPlanHook implements PlanHookFn.
func backupPlanHook(
_ context.Context, stmt tree.Statement, p sql.PlanHookState,
) (sql.PlanHookRowFn, sqlbase.ResultColumns, []sql.PlanNode, bool, error) {
backupStmt, ok := stmt.(*tree.Backup)
if !ok {
return nil, nil, nil, false, nil
}
toFn, err := p.TypeAsStringArray(tree.Exprs(backupStmt.To), "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
incrementalFromFn, err := p.TypeAsStringArray(backupStmt.IncrementalFrom, "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
optsFn, err := p.TypeAsStringOpts(backupStmt.Options, backupOptionExpectValues)
if err != nil {
return nil, nil, nil, false, err
}
header := sqlbase.ResultColumns{
{Name: "job_id", Typ: types.Int},
{Name: "status", Typ: types.String},
{Name: "fraction_completed", Typ: types.Float},
{Name: "rows", Typ: types.Int},
{Name: "index_entries", Typ: types.Int},
{Name: "system_records", Typ: types.Int},
{Name: "bytes", Typ: types.Int},
}
fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error {
// TODO(dan): Move this span into sql.
ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag())
defer tracing.FinishSpan(span)
if err := utilccl.CheckEnterpriseEnabled(
p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "BACKUP",
); err != nil {
return err
}
if err := p.RequireAdminRole(ctx, "BACKUP"); err != nil {
return err
}
if !p.ExtendedEvalContext().TxnImplicit {
return errors.Errorf("BACKUP cannot be used inside a transaction")
}
to, err := toFn()
if err != nil {
return err
}
if len(to) > 1 &&
!cluster.Version.IsActive(ctx, p.ExecCfg().Settings, cluster.VersionPartitionedBackup) {
return errors.Errorf("partitioned backups can only be made on a cluster that has been fully upgraded to version 19.2")
}
incrementalFrom, err := incrementalFromFn()
if err != nil {
return err
}
endTime := p.ExecCfg().Clock.Now()
if backupStmt.AsOf.Expr != nil {
var err error
if endTime, err = p.EvalAsOfTimestamp(backupStmt.AsOf); err != nil {
return err
}
}
defaultURI, urisByLocalityKV, err := getURIsByLocalityKV(to)
if err != nil {
return nil
}
defaultStore, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, defaultURI)
if err != nil {
return err
}
defer defaultStore.Close()
opts, err := optsFn()
if err != nil {
return err
}
mvccFilter := MVCCFilter_Latest
if _, ok := opts[backupOptRevisionHistory]; ok {
mvccFilter = MVCCFilter_All
}
targetDescs, completeDBs, err := ResolveTargetsToDescriptors(ctx, p, endTime, backupStmt.Targets)
if err != nil {
return err
}
statsCache := p.ExecCfg().TableStatsCache
tableStatistics := make([]*stats.TableStatisticProto, 0)
var tables []*sqlbase.TableDescriptor
for _, desc := range targetDescs {
if dbDesc := desc.GetDatabase(); dbDesc != nil {
if err := p.CheckPrivilege(ctx, dbDesc, privilege.SELECT); err != nil {
return err
}
}
if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil {
return err
}
tables = append(tables, tableDesc)
// Collect all the table stats for this table.
tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc.GetID())
if err != nil {
return err
}
for i := range tableStatisticsAcc {
tableStatistics = append(tableStatistics, &tableStatisticsAcc[i].TableStatisticProto)
}
}
}
if err := ensureInterleavesIncluded(tables); err != nil {
return err
}
var prevBackups []BackupDescriptor
if len(incrementalFrom) > 0 {
clusterID := p.ExecCfg().ClusterID()
prevBackups = make([]BackupDescriptor, len(incrementalFrom))
for i, uri := range incrementalFrom {
// TODO(lucy): We may want to upgrade the table descs to the newer
// foreign key representation here, in case there are backups from an
// older cluster. Keeping the descriptors as they are works for now
// since all we need to do is get the past backups' table/index spans,
// but it will be safer for future code to avoid having older-style
// descriptors around.
desc, err := ReadBackupDescriptorFromURI(ctx, uri, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI)
if err != nil {
return errors.Wrapf(err, "failed to read backup from %q", uri)
}
// IDs are how we identify tables, and those are only meaningful in the
// context of their own cluster, so we need to ensure we only allow
// incremental previous backups that we created.
if !desc.ClusterID.Equal(clusterID) {
return errors.Newf("previous BACKUP %q belongs to cluster %s", uri, desc.ClusterID.String())
}
prevBackups[i] = desc
}
}
var startTime hlc.Timestamp
var newSpans roachpb.Spans
if len(prevBackups) > 0 {
startTime = prevBackups[len(prevBackups)-1].EndTime
}
var priorIDs map[sqlbase.ID]sqlbase.ID
var revs []BackupDescriptor_DescriptorRevision
if mvccFilter == MVCCFilter_All {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
revs, err = getRelevantDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, targetDescs, completeDBs, priorIDs)
if err != nil {
return err
}
}
spans := spansForAllTableIndexes(tables, revs)
if len(prevBackups) > 0 {
tablesInPrev := make(map[sqlbase.ID]struct{})
dbsInPrev := make(map[sqlbase.ID]struct{})
for _, d := range prevBackups[len(prevBackups)-1].Descriptors {
if t := d.Table(hlc.Timestamp{}); t != nil {
tablesInPrev[t.ID] = struct{}{}
}
}
for _, d := range prevBackups[len(prevBackups)-1].CompleteDbs {
dbsInPrev[d] = struct{}{}
}
for _, d := range targetDescs {
if t := d.Table(hlc.Timestamp{}); t != nil {
// If we're trying to use a previous backup for this table, ideally it
// actually contains this table.
if _, ok := tablesInPrev[t.ID]; ok {
continue
}
// This table isn't in the previous backup... maybe was added to a
// DB that the previous backup captured?
if _, ok := dbsInPrev[t.ParentID]; ok {
continue
}
// Maybe this table is missing from the previous backup because it was
// truncated?
if t.ReplacementOf.ID != sqlbase.InvalidID {
// Check if we need to lazy-load the priorIDs (i.e. if this is the first
// truncate we've encountered in non-MVCC backup).
if priorIDs == nil {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
_, err := getAllDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, priorIDs)
if err != nil {
return err
}
}
found := false
for was := t.ReplacementOf.ID; was != sqlbase.InvalidID && !found; was = priorIDs[was] {
_, found = tablesInPrev[was]
}
if found {
continue
}
}
return errors.Errorf("previous backup does not contain table %q", t.Name)
}
}
var err error
_, coveredTime, err := makeImportSpans(
spans,
prevBackups,
nil, /*backupLocalityInfo*/
keys.MinKey,
func(span covering.Range, start, end hlc.Timestamp) error {
if (start == hlc.Timestamp{}) {
newSpans = append(newSpans, roachpb.Span{Key: span.Start, EndKey: span.End})
return nil
}
return errOnMissingRange(span, start, end)
},
)
if err != nil {
return errors.Wrapf(err, "invalid previous backups (a new full backup may be required if a table has been created, dropped or truncated)")
}
if coveredTime != startTime {
return errors.Wrapf(err, "expected previous backups to cover until time %v, got %v", startTime, coveredTime)
}
}
// if CompleteDbs is lost by a 1.x node, FormatDescriptorTrackingVersion
// means that a 2.0 node will disallow `RESTORE DATABASE foo`, but `RESTORE
// foo.table1, foo.table2...` will still work. MVCCFilter would be
// mis-handled, but is disallowed above. IntroducedSpans may also be lost by
// a 1.x node, meaning that if 1.1 nodes may resume a backup, the limitation
// of requiring full backups after schema changes remains.
backupDesc := BackupDescriptor{
StartTime: startTime,
EndTime: endTime,
MVCCFilter: mvccFilter,
Descriptors: targetDescs,
DescriptorChanges: revs,
CompleteDbs: completeDBs,
Spans: spans,
IntroducedSpans: newSpans,
FormatVersion: BackupFormatDescriptorTrackingVersion,
BuildInfo: build.GetInfo(),
NodeID: p.ExecCfg().NodeID.Get(),
ClusterID: p.ExecCfg().ClusterID(),
Statistics: tableStatistics,
}
// Sanity check: re-run the validation that RESTORE will do, but this time
// including this backup, to ensure that the this backup plus any previous
// backups does cover the interval expected.
if _, coveredEnd, err := makeImportSpans(
spans,
append(prevBackups, backupDesc),
nil, /*backupLocalityInfo*/
keys.MinKey,
errOnMissingRange,
); err != nil {
return err
} else if coveredEnd != endTime {
return errors.Errorf("expected backup (along with any previous backups) to cover to %v, not %v", endTime, coveredEnd)
}
descBytes, err := protoutil.Marshal(&backupDesc)
if err != nil {
return err
}
description, err := backupJobDescription(p, backupStmt, to, incrementalFrom, opts)
if err != nil {
return err
}
// TODO (lucy): For partitioned backups, also add verification for other
// stores we are writing to in addition to the default.
if err := VerifyUsableExportTarget(ctx, p.ExecCfg().Settings, defaultStore, defaultURI); err != nil {
return err
}
_, errCh, err := p.ExecCfg().JobRegistry.CreateAndStartJob(ctx, resultsCh, jobs.Record{
Description: description,
Username: p.User(),
DescriptorIDs: func() (sqlDescIDs []sqlbase.ID) {
for _, sqlDesc := range backupDesc.Descriptors {
sqlDescIDs = append(sqlDescIDs, sqlDesc.GetID())
}
return sqlDescIDs
}(),
Details: jobspb.BackupDetails{
StartTime: startTime,
EndTime: endTime,
URI: defaultURI,
URIsByLocalityKV: urisByLocalityKV,
BackupDescriptor: descBytes,
},
Progress: jobspb.BackupProgress{},
})
if err != nil {
return err
}
return <-errCh
}
return fn, header, nil, false, nil
}
type backupResumer struct {
job *jobs.Job
settings *cluster.Settings
res roachpb.BulkOpSummary
makeExternalStorage cloud.ExternalStorageFactory
}
// Resume is part of the jobs.Resumer interface.
func (b *backupResumer) Resume(
ctx context.Context, phs interface{}, resultsCh chan<- tree.Datums,
) error {
details := b.job.Details().(jobspb.BackupDetails)
p := phs.(sql.PlanHookState)
b.makeExternalStorage = p.ExecCfg().DistSQLSrv.ExternalStorage
if len(details.BackupDescriptor) == 0 {
return errors.Newf("missing backup descriptor; cannot resume a backup from an older version")
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(details.BackupDescriptor, &backupDesc); err != nil {
return pgerror.Wrapf(err, pgcode.DataCorrupted,
"unmarshal backup descriptor")
}
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
defaultConf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return errors.Wrapf(err, "export configuration")
}
defaultStore, err := b.makeExternalStorage(ctx, defaultConf)
if err != nil {
return errors.Wrapf(err, "make storage")
}
storageByLocalityKV := make(map[string]*roachpb.ExternalStorage)
for kv, uri := range details.URIsByLocalityKV {
conf, err := cloud.ExternalStorageConfFromURI(uri)
if err != nil {
return err
}
storageByLocalityKV[kv] = &conf
}
var checkpointDesc *BackupDescriptor
// We don't read the table descriptors from the backup descriptor, but
// they could be using either the new or the old foreign key
// representations. We should just preserve whatever representation the
// table descriptors were using and leave them alone.
if desc, err := readBackupDescriptor(ctx, defaultStore, BackupDescriptorCheckpointName); err == nil {
// If the checkpoint is from a different cluster, it's meaningless to us.
// More likely though are dummy/lock-out checkpoints with no ClusterID.
if desc.ClusterID.Equal(p.ExecCfg().ClusterID()) {
checkpointDesc = &desc
}
} else {
// TODO(benesch): distinguish between a missing checkpoint, which simply
// indicates the prior backup attempt made no progress, and a corrupted
// checkpoint, which is more troubling. Sadly, storageccl doesn't provide a
// "not found" error that's consistent across all ExternalStorage
// implementations.
log.Warningf(ctx, "unable to load backup checkpoint while resuming job %d: %v", *b.job.ID(), err)
}
res, err := backup(
ctx,
p.ExecCfg().DB,
p.ExecCfg().Gossip,
p.ExecCfg().Settings,
defaultStore,
storageByLocalityKV,
b.job,
&backupDesc,
checkpointDesc,
resultsCh,
b.makeExternalStorage,
)
b.res = res
return err
}
// OnFailOrCancel is part of the jobs.Resumer interface.
func (b *backupResumer) OnFailOrCancel(context.Context, *client.Txn) error {
return nil
}
// OnSuccess is part of the jobs.Resumer interface.
func (b *backupResumer) OnSuccess(context.Context, *client.Txn) error { return nil }
// OnTerminal is part of the jobs.Resumer interface.
func (b *backupResumer) OnTerminal(
ctx context.Context, status jobs.Status, resultsCh chan<- tree.Datums,
) {
// Attempt to delete BACKUP-CHECKPOINT.
if err := func() error {
details := b.job.Details().(jobspb.BackupDetails)
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
conf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return err
}
exportStore, err := b.makeExternalStorage(ctx, conf)
if err != nil {
return err
}
return exportStore.Delete(ctx, BackupDescriptorCheckpointName)
}(); err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor: %+v", err)
}
if status == jobs.StatusSucceeded {
// TODO(benesch): emit periodic progress updates.
// TODO(mjibson): if a restore was resumed, then these counts will only have
// the current coordinator's counts.
resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(*b.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(b.res.Rows)),
tree.NewDInt(tree.DInt(b.res.IndexEntries)),
tree.NewDInt(tree.DInt(b.res.SystemRecords)),
tree.NewDInt(tree.DInt(b.res.DataSize)),
}
}
}
type versionedValues struct {
Key roachpb.Key
Values []roachpb.Value
}
// getAllRevisions scans all keys between startKey and endKey getting all
// revisions between startTime and endTime.
// TODO(dt): if/when client gets a ScanRevisionsRequest or similar, use that.
func getAllRevisions(
ctx context.Context,
db *client.DB,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
) ([]versionedValues, error) {
// TODO(dt): version check.
header := roachpb.Header{Timestamp: endTime}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeader{Key: startKey, EndKey: endKey},
StartTime: startTime,
MVCCFilter: roachpb.MVCCFilter_All,
ReturnSST: true,
OmitChecksum: true,
}
resp, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return nil, pErr.GoError()
}
var res []versionedValues
for _, file := range resp.(*roachpb.ExportResponse).Files {
sst := engine.MakeRocksDBSstFileReader()
defer sst.Close()
if err := sst.IngestExternalFile(file.SST); err != nil {
return nil, err
}
if err := sst.Iterate(startKey, endKey, func(kv engine.MVCCKeyValue) (bool, error) {
if len(res) == 0 || !res[len(res)-1].Key.Equal(kv.Key.Key) {
res = append(res, versionedValues{Key: kv.Key.Key})
}
res[len(res)-1].Values = append(res[len(res)-1].Values, roachpb.Value{Timestamp: kv.Key.Timestamp, RawBytes: kv.Value})
return false, nil
}); err != nil {
return nil, err
}
}
return res, nil
}
var _ jobs.Resumer = &backupResumer{}
func init() {
sql.AddPlanHook(backupPlanHook)
jobs.RegisterConstructor(
jobspb.TypeBackup,
func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
return &backupResumer{
job: job,
settings: settings,
}
},
)
}
// getURIsByLocalityKV takes a slice of URIs for a single (possibly partitioned)
// backup, and returns the default backup destination URI and a map of all other
// URIs by locality KV. The URIs in the result do not include the
// COCKROACH_LOCALITY parameter.
func getURIsByLocalityKV(to []string) (string, map[string]string, error) {
localityAndBaseURI := func(uri string) (string, string, error) {
parsedURI, err := url.Parse(uri)
if err != nil {
return "", "", err
}
q := parsedURI.Query()
localityKV := q.Get(localityURLParam)
// Remove the backup locality parameter.
q.Del(localityURLParam)
parsedURI.RawQuery = q.Encode()
baseURI := parsedURI.String()
return localityKV, baseURI, nil
}
urisByLocalityKV := make(map[string]string)
if len(to) == 1 {
localityKV, baseURI, err := localityAndBaseURI(to[0])
if err != nil {
return "", nil, err
}
if localityKV != "" && localityKV != defaultLocalityValue {
return "", nil, errors.Errorf("%s %s is invalid for a single BACKUP location",
localityURLParam, localityKV)
}
return baseURI, urisByLocalityKV, nil
}
var defaultURI string
for _, uri := range to {
localityKV, baseURI, err := localityAndBaseURI(uri)
if err != nil {
return "", nil, err
}
if localityKV == "" {
return "", nil, errors.Errorf(
"multiple URLs are provided for partitioned BACKUP, but %s is not specified",
localityURLParam,
)
}
if localityKV == defaultLocalityValue {
if defaultURI != "" {
return "", nil, errors.Errorf("multiple default URLs provided for partition backup")
}
defaultURI = baseURI
} else {
kv := roachpb.Tier{}
if err := kv.FromString(localityKV); err != nil {
return "", nil, errors.Wrap(err, "failed to parse backup locality")
}
if _, ok := urisByLocalityKV[localityKV]; ok {
return "", nil, errors.Errorf("duplicate URIs for locality %s", localityKV)
}
urisByLocalityKV[localityKV] = baseURI
}
}
if defaultURI == "" {
return "", nil, errors.Errorf("no default URL provided for partitioned backup")
}
return defaultURI, urisByLocalityKV, nil
}
// maybeUpgradeTableDescsInBackupDescriptors updates the backup descriptors'
// table descriptors to use the newer 19.2-style foreign key representation,
// if they are not already upgraded. This requires resolving cross-table FK
// references, which is done by looking up all table descriptors across all
// backup descriptors provided. if skipFKsWithNoMatchingTable is set, FKs whose
// "other" table is missing from the set provided are omitted during the
// upgrade, instead of causing an error to be returned.
func maybeUpgradeTableDescsInBackupDescriptors(
ctx context.Context, backupDescs []BackupDescriptor, skipFKsWithNoMatchingTable bool,
) error {
protoGetter := sqlbase.MapProtoGetter{
Protos: make(map[interface{}]protoutil.Message),
}
// Populate the protoGetter with all table descriptors in all backup
// descriptors so that they can be looked up.
for _, backupDesc := range backupDescs {
for _, desc := range backupDesc.Descriptors {
if table := desc.Table(hlc.Timestamp{}); table != nil {
protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(table.ID))] =
sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor))
}
}
}
for i := range backupDescs {
backupDesc := &backupDescs[i]
for j := range backupDesc.Descriptors {
if table := backupDesc.Descriptors[j].Table(hlc.Timestamp{}); table != nil {
if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, skipFKsWithNoMatchingTable); err != nil {
return err
}
// TODO(lucy): Is this necessary?
backupDesc.Descriptors[j] = *sqlbase.WrapDescriptor(table)
}
}
}
return nil
}
| pkg/ccl/backupccl/backup.go | 1 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.9978846907615662,
0.027490880340337753,
0.00016126017726492137,
0.0001713274687062949,
0.1537494957447052
] |
{
"id": 1,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 948
} | sql
CREATE DATABASE d;
CREATE TABLE d.t (
a JSON,
b JSON,
INVERTED INDEX idx (a)
);
CREATE INVERTED INDEX idx2 ON d.t (b);
INSERT INTO d.t VALUES ('{"a": "b"}', '{"c": "d"}');
----
INSERT 1
dump d t
----
----
CREATE TABLE t (
a JSONB NULL,
b JSONB NULL,
INVERTED INDEX idx (a),
INVERTED INDEX idx2 (b),
FAMILY "primary" (a, b, rowid)
);
INSERT INTO t (a, b) VALUES
('{"a": "b"}', '{"c": "d"}');
----
----
| pkg/cli/testdata/dump/inverted_index | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.000168791098985821,
0.0001633016363484785,
0.00015949588851071894,
0.00016161789244506508,
0.000003977139385824557
] |
{
"id": 1,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 948
} | #!/usr/bin/env bash
# Trigger this script by running `make generate PKG=./pkg/sql/parser` from the
# repository root to ensure your PATH includes vendored binaries.
set -euo pipefail
# We need to set these environment variables to ensure the output of
# `sort` is the same everywhere.
export LC_ALL=C
export LANG=C
cat <<EOF
// Code generated by help_gen_test.sh. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
package parser
var expectedHelpStrings = []string{
EOF
grep 'helpWith(' | sed -e 's/^.*sqllex,//g;s/).*/,/g' | grep -v '""' | sort -u
cat <<EOF
}
EOF
| pkg/sql/parser/help_gen_test.sh | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.0001753893302520737,
0.00016791507368907332,
0.00016294415399897844,
0.00016541173681616783,
0.000005380249149311567
] |
{
"id": 1,
"code_window": [
"\t\t// TODO(dt): If we audit exactly what not-exists error each ExternalStorage\n",
"\t\t// returns (and then wrap/tag them), we could narrow this check.\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 948
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tree
// AlterSequence represents an ALTER SEQUENCE statement, except in the case of
// ALTER SEQUENCE <seqName> RENAME TO <newSeqName>, which is represented by a
// RenameTable node.
type AlterSequence struct {
IfExists bool
Name *UnresolvedObjectName
Options SequenceOptions
}
// Format implements the NodeFormatter interface.
func (node *AlterSequence) Format(ctx *FmtCtx) {
ctx.WriteString("ALTER SEQUENCE ")
if node.IfExists {
ctx.WriteString("IF EXISTS ")
}
ctx.FormatNode(node.Name)
ctx.FormatNode(&node.Options)
}
| pkg/sql/sem/tree/alter_sequence.go | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00017870425654109567,
0.00017068671877495944,
0.00016079616034403443,
0.00017162322183139622,
0.000006452527486544568
] |
{
"id": 2,
"code_window": [
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file (is another operation already in progress?)\",\n",
"\t\t\treadable, BackupDescriptorCheckpointName)\n",
"\t}\n",
"\tif err := writeBackupDescriptor(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 954
} | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"net/url"
"sort"
"time"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/covering"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/interval"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
const (
// BackupDescriptorName is the file name used for serialized
// BackupDescriptor protos.
BackupDescriptorName = "BACKUP"
// BackupManifestName is a future name for the serialized
// BackupDescriptor proto.
BackupManifestName = "BACKUP_MANIFEST"
// BackupPartitionDescriptorPrefix is the file name prefix for serialized
// BackupPartitionDescriptor protos.
BackupPartitionDescriptorPrefix = "BACKUP_PART"
// BackupDescriptorCheckpointName is the file name used to store the
// serialized BackupDescriptor proto while the backup is in progress.
BackupDescriptorCheckpointName = "BACKUP-CHECKPOINT"
// BackupFormatDescriptorTrackingVersion added tracking of complete DBs.
BackupFormatDescriptorTrackingVersion uint32 = 1
)
const (
backupOptRevisionHistory = "revision_history"
localityURLParam = "COCKROACH_LOCALITY"
defaultLocalityValue = "default"
)
var useTBI = settings.RegisterBoolSetting(
"kv.bulk_io_write.experimental_incremental_export_enabled",
"use experimental time-bound file filter when exporting in BACKUP",
false,
)
var backupOptionExpectValues = map[string]sql.KVStringOptValidate{
backupOptRevisionHistory: sql.KVStringOptRequireNoValue,
}
// BackupCheckpointInterval is the interval at which backup progress is saved
// to durable storage.
var BackupCheckpointInterval = time.Minute
// ReadBackupDescriptorFromURI creates an export store from the given URI, then
// reads and unmarshals a BackupDescriptor at the standard location in the
// export storage.
func ReadBackupDescriptorFromURI(
ctx context.Context, uri string, makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory,
) (BackupDescriptor, error) {
exportStore, err := makeExternalStorageFromURI(ctx, uri)
if err != nil {
return BackupDescriptor{}, err
}
defer exportStore.Close()
backupDesc, err := readBackupDescriptor(ctx, exportStore, BackupDescriptorName)
if err != nil {
backupManifest, manifestErr := readBackupDescriptor(ctx, exportStore, BackupManifestName)
if manifestErr != nil {
return BackupDescriptor{}, err
}
backupDesc = backupManifest
}
backupDesc.Dir = exportStore.Conf()
// TODO(dan): Sanity check this BackupDescriptor: non-empty EndTime,
// non-empty Paths, and non-overlapping Spans and keyranges in Files.
return backupDesc, nil
}
// readBackupDescriptor reads and unmarshals a BackupDescriptor from filename in
// the provided export store.
func readBackupDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupDescriptor{}, err
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupDescriptor{}, err
}
for _, d := range backupDesc.Descriptors {
// Calls to GetTable are generally frowned upon.
// This specific call exists to provide backwards compatibility with
// backups created prior to version 19.1. Starting in v19.1 the
// ModificationTime is always written in backups for all versions
// of table descriptors. In earlier cockroach versions only later
// table descriptor versions contain a non-empty ModificationTime.
// Later versions of CockroachDB use the MVCC timestamp to fill in
// the ModificationTime for table descriptors. When performing a restore
// we no longer have access to that MVCC timestamp but we can set it
// to a value we know will be safe.
if t := d.GetTable(); t == nil {
continue
} else if t.Version == 1 && t.ModificationTime.IsEmpty() {
t.ModificationTime = hlc.Timestamp{WallTime: 1}
}
}
return backupDesc, err
}
func readBackupPartitionDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupPartitionDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupPartitionDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupPartitionDescriptor{}, err
}
var backupDesc BackupPartitionDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupPartitionDescriptor{}, err
}
return backupDesc, err
}
// getRelevantDescChanges finds the changes between start and end time to the
// SQL descriptors matching `descs` or `expandedDBs`, ordered by time. A
// descriptor revision matches if it is an earlier revision of a descriptor in
// descs (same ID) or has parentID in `expanded`. Deleted descriptors are
// represented as nil. Fills in the `priorIDs` map in the process, which maps
// a descriptor the the ID by which it was previously known (e.g pre-TRUNCATE).
func getRelevantDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
descs []sqlbase.Descriptor,
expanded []sqlbase.ID,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
allChanges, err := getAllDescChanges(ctx, db, startTime, endTime, priorIDs)
if err != nil {
return nil, err
}
// If no descriptors changed, we can just stop now and have RESTORE use the
// normal list of descs (i.e. as of endTime).
if len(allChanges) == 0 {
return nil, nil
}
// interestingChanges will be every descriptor change relevant to the backup.
var interestingChanges []BackupDescriptor_DescriptorRevision
// interestingIDs are the descriptor for which we're interested in capturing
// changes. This is initially the descriptors matched (as of endTime) by our
// target spec, plus those that belonged to a DB that our spec expanded at any
// point in the interval.
interestingIDs := make(map[sqlbase.ID]struct{}, len(descs))
// The descriptors that currently (endTime) match the target spec (desc) are
// obviously interesting to our backup.
for _, i := range descs {
interestingIDs[i.GetID()] = struct{}{}
if t := i.Table(hlc.Timestamp{}); t != nil {
for j := t.ReplacementOf.ID; j != sqlbase.InvalidID; j = priorIDs[j] {
interestingIDs[j] = struct{}{}
}
}
}
// We're also interested in any desc that belonged to a DB we're backing up.
// We'll start by looking at all descriptors as of the beginning of the
// interval and add to the set of IDs that we are interested any descriptor that
// belongs to one of the parents we care about.
interestingParents := make(map[sqlbase.ID]struct{}, len(expanded))
for _, i := range expanded {
interestingParents[i] = struct{}{}
}
if !startTime.IsEmpty() {
starting, err := loadAllDescs(ctx, db, startTime)
if err != nil {
return nil, err
}
for _, i := range starting {
if table := i.Table(hlc.Timestamp{}); table != nil {
// We need to add to interestingIDs so that if we later see a delete for
// this ID we still know it is interesting to us, even though we will not
// have a parentID at that point (since the delete is a nil desc).
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
}
}
if _, ok := interestingIDs[i.GetID()]; ok {
desc := i
// We inject a fake "revision" that captures the starting state for
// matched descriptor, to allow restoring to times before its first rev
// actually inside the window. This likely ends up duplicating the last
// version in the previous BACKUP descriptor, but avoids adding more
// complicated special-cases in RESTORE, so it only needs to look in a
// single BACKUP to restore to a particular time.
initial := BackupDescriptor_DescriptorRevision{Time: startTime, ID: i.GetID(), Desc: &desc}
interestingChanges = append(interestingChanges, initial)
}
}
}
for _, change := range allChanges {
// A change to an ID that we are interested in is obviously interesting --
// a change is also interesting if it is to a table that has a parent that
// we are interested and thereafter it also becomes an ID in which we are
// interested in changes (since, as mentioned above, to decide if deletes
// are interesting).
if _, ok := interestingIDs[change.ID]; ok {
interestingChanges = append(interestingChanges, change)
} else if change.Desc != nil {
if table := change.Desc.Table(hlc.Timestamp{}); table != nil {
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
interestingChanges = append(interestingChanges, change)
}
}
}
}
sort.Slice(interestingChanges, func(i, j int) bool {
return interestingChanges[i].Time.Less(interestingChanges[j].Time)
})
return interestingChanges, nil
}
// getAllDescChanges gets every sql descriptor change between start and end time
// returning its ID, content and the change time (with deletions represented as
// nil content).
func getAllDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
allRevs, err := getAllRevisions(ctx, db, startKey, endKey, startTime, endTime)
if err != nil {
return nil, err
}
var res []BackupDescriptor_DescriptorRevision
for _, revs := range allRevs {
id, err := keys.DecodeDescMetadataID(revs.Key)
if err != nil {
return nil, err
}
for _, rev := range revs.Values {
r := BackupDescriptor_DescriptorRevision{ID: sqlbase.ID(id), Time: rev.Timestamp}
if len(rev.RawBytes) != 0 {
var desc sqlbase.Descriptor
if err := rev.GetProto(&desc); err != nil {
return nil, err
}
r.Desc = &desc
t := desc.Table(rev.Timestamp)
if t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {
priorIDs[t.ID] = t.ReplacementOf.ID
}
}
res = append(res, r)
}
}
return res, nil
}
func allSQLDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.Descriptor, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
rows, err := txn.Scan(ctx, startKey, endKey, 0)
if err != nil {
return nil, err
}
sqlDescs := make([]sqlbase.Descriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&sqlDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal SQL descriptor", row.Key)
}
if row.Value != nil {
sqlDescs[i].Table(row.Value.Timestamp)
}
}
return sqlDescs, nil
}
func ensureInterleavesIncluded(tables []*sqlbase.TableDescriptor) error {
inBackup := make(map[sqlbase.ID]bool, len(tables))
for _, t := range tables {
inBackup[t.ID] = true
}
for _, table := range tables {
if err := table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error {
for _, a := range index.Interleave.Ancestors {
if !inBackup[a.TableID] {
return errors.Errorf(
"cannot backup table %q without interleave parent (ID %d)", table.Name, a.TableID,
)
}
}
for _, c := range index.InterleavedBy {
if !inBackup[c.Table] {
return errors.Errorf(
"cannot backup table %q without interleave child table (ID %d)", table.Name, c.Table,
)
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
func allRangeDescriptors(ctx context.Context, txn *client.Txn) ([]roachpb.RangeDescriptor, error) {
rows, err := txn.Scan(ctx, keys.Meta2Prefix, keys.MetaMax, 0)
if err != nil {
return nil, errors.Wrapf(err,
"unable to scan range descriptors")
}
rangeDescs := make([]roachpb.RangeDescriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&rangeDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal range descriptor", row.Key)
}
}
return rangeDescs, nil
}
type tableAndIndex struct {
tableID sqlbase.ID
indexID sqlbase.IndexID
}
// spansForAllTableIndexes returns non-overlapping spans for every index and
// table passed in. They would normally overlap if any of them are interleaved.
func spansForAllTableIndexes(
tables []*sqlbase.TableDescriptor, revs []BackupDescriptor_DescriptorRevision,
) []roachpb.Span {
added := make(map[tableAndIndex]bool, len(tables))
sstIntervalTree := interval.NewTree(interval.ExclusiveOverlapper)
for _, table := range tables {
for _, index := range table.AllNonDropIndexes() {
if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(index.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[tableAndIndex{tableID: table.ID, indexID: index.ID}] = true
}
}
// If there are desc revisions, ensure that we also add any index spans
// in them that we didn't already get above e.g. indexes or tables that are
// not in latest because they were dropped during the time window in question.
for _, rev := range revs {
if tbl := rev.Desc.Table(hlc.Timestamp{}); tbl != nil {
for _, idx := range tbl.AllNonDropIndexes() {
key := tableAndIndex{tableID: tbl.ID, indexID: idx.ID}
if !added[key] {
if err := sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(idx.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[key] = true
}
}
}
}
var spans []roachpb.Span
_ = sstIntervalTree.Do(func(r interval.Interface) bool {
spans = append(spans, roachpb.Span{
Key: roachpb.Key(r.Range().Start),
EndKey: roachpb.Key(r.Range().End),
})
return false
})
return spans
}
// coveringFromSpans creates an interval.Covering with a fixed payload from a
// slice of roachpb.Spans.
func coveringFromSpans(spans []roachpb.Span, payload interface{}) covering.Covering {
var c covering.Covering
for _, span := range spans {
c = append(c, covering.Range{
Start: []byte(span.Key),
End: []byte(span.EndKey),
Payload: payload,
})
}
return c
}
// splitAndFilterSpans returns the spans that represent the set difference
// (includes - excludes) while also guaranteeing that each output span does not
// cross the endpoint of a RangeDescriptor in ranges.
func splitAndFilterSpans(
includes []roachpb.Span, excludes []roachpb.Span, ranges []roachpb.RangeDescriptor,
) []roachpb.Span {
type includeMarker struct{}
type excludeMarker struct{}
includeCovering := coveringFromSpans(includes, includeMarker{})
excludeCovering := coveringFromSpans(excludes, excludeMarker{})
var rangeCovering covering.Covering
for _, rangeDesc := range ranges {
rangeCovering = append(rangeCovering, covering.Range{
Start: []byte(rangeDesc.StartKey),
End: []byte(rangeDesc.EndKey),
})
}
splits := covering.OverlapCoveringMerge(
[]covering.Covering{includeCovering, excludeCovering, rangeCovering},
)
var out []roachpb.Span
for _, split := range splits {
include := false
exclude := false
for _, payload := range split.Payload.([]interface{}) {
switch payload.(type) {
case includeMarker:
include = true
case excludeMarker:
exclude = true
}
}
if include && !exclude {
out = append(out, roachpb.Span{
Key: roachpb.Key(split.Start),
EndKey: roachpb.Key(split.End),
})
}
}
return out
}
func optsToKVOptions(opts map[string]string) tree.KVOptions {
if len(opts) == 0 {
return nil
}
sortedOpts := make([]string, 0, len(opts))
for k := range opts {
sortedOpts = append(sortedOpts, k)
}
sort.Strings(sortedOpts)
kvopts := make(tree.KVOptions, 0, len(opts))
for _, k := range sortedOpts {
opt := tree.KVOption{Key: tree.Name(k)}
if v := opts[k]; v != "" {
opt.Value = tree.NewDString(v)
}
kvopts = append(kvopts, opt)
}
return kvopts
}
func backupJobDescription(
p sql.PlanHookState,
backup *tree.Backup,
to []string,
incrementalFrom []string,
opts map[string]string,
) (string, error) {
b := &tree.Backup{
AsOf: backup.AsOf,
Options: optsToKVOptions(opts),
Targets: backup.Targets,
}
for _, t := range to {
sanitizedTo, err := cloud.SanitizeExternalStorageURI(t)
if err != nil {
return "", err
}
b.To = append(b.To, tree.NewDString(sanitizedTo))
}
for _, from := range incrementalFrom {
sanitizedFrom, err := cloud.SanitizeExternalStorageURI(from)
if err != nil {
return "", err
}
b.IncrementalFrom = append(b.IncrementalFrom, tree.NewDString(sanitizedFrom))
}
ann := p.ExtendedEvalContext().Annotations
return tree.AsStringWithFQNames(b, ann), nil
}
// clusterNodeCount returns the approximate number of nodes in the cluster.
func clusterNodeCount(g *gossip.Gossip) int {
var nodes int
_ = g.IterateInfos(gossip.KeyNodeIDPrefix, func(_ string, _ gossip.Info) error {
nodes++
return nil
})
return nodes
}
// BackupFileDescriptors is an alias on which to implement sort's interface.
type BackupFileDescriptors []BackupDescriptor_File
func (r BackupFileDescriptors) Len() int { return len(r) }
func (r BackupFileDescriptors) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r BackupFileDescriptors) Less(i, j int) bool {
if cmp := bytes.Compare(r[i].Span.Key, r[j].Span.Key); cmp != 0 {
return cmp < 0
}
return bytes.Compare(r[i].Span.EndKey, r[j].Span.EndKey) < 0
}
func writeBackupDescriptor(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupDescriptor,
) error {
sort.Sort(BackupFileDescriptors(desc.Files))
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
// writeBackupPartitionDescriptor writes metadata (containing a locality KV and
// partial file listing) for a partitioned BACKUP to one of the stores in the
// backup.
func writeBackupPartitionDescriptor(
ctx context.Context,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupPartitionDescriptor,
) error {
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
func loadAllDescs(
ctx context.Context, db *client.DB, asOf hlc.Timestamp,
) ([]sqlbase.Descriptor, error) {
var allDescs []sqlbase.Descriptor
if err := db.Txn(
ctx,
func(ctx context.Context, txn *client.Txn) error {
var err error
txn.SetFixedTimestamp(ctx, asOf)
allDescs, err = allSQLDescriptors(ctx, txn)
return err
}); err != nil {
return nil, err
}
return allDescs, nil
}
// ResolveTargetsToDescriptors performs name resolution on a set of targets and
// returns the resulting descriptors.
func ResolveTargetsToDescriptors(
ctx context.Context, p sql.PlanHookState, endTime hlc.Timestamp, targets tree.TargetList,
) ([]sqlbase.Descriptor, []sqlbase.ID, error) {
allDescs, err := loadAllDescs(ctx, p.ExecCfg().DB, endTime)
if err != nil {
return nil, nil, err
}
var matched descriptorsMatched
if matched, err = descriptorsMatchingTargets(ctx,
p.CurrentDatabase(), p.CurrentSearchPath(), allDescs, targets); err != nil {
return nil, nil, err
}
// Ensure interleaved tables appear after their parent. Since parents must be
// created before their children, simply sorting by ID accomplishes this.
sort.Slice(matched.descs, func(i, j int) bool { return matched.descs[i].GetID() < matched.descs[j].GetID() })
return matched.descs, matched.expandedDB, nil
}
type spanAndTime struct {
span roachpb.Span
start, end hlc.Timestamp
}
// backup exports a snapshot of every kv entry into ranged sstables.
//
// The output is an sstable per range with files in the following locations:
// - <dir>/<unique_int>.sst
// - <dir> is given by the user and may be cloud storage
// - Each file contains data for a key range that doesn't overlap with any other
// file.
func backup(
ctx context.Context,
db *client.DB,
gossip *gossip.Gossip,
settings *cluster.Settings,
defaultStore cloud.ExternalStorage,
storageByLocalityKV map[string]*roachpb.ExternalStorage,
job *jobs.Job,
backupDesc *BackupDescriptor,
checkpointDesc *BackupDescriptor,
resultsCh chan<- tree.Datums,
makeExternalStorage cloud.ExternalStorageFactory,
) (roachpb.BulkOpSummary, error) {
// TODO(dan): Figure out how permissions should work. #6713 is tracking this
// for grpc.
mu := struct {
syncutil.Mutex
files []BackupDescriptor_File
exported roachpb.BulkOpSummary
lastCheckpoint time.Time
}{}
var checkpointMu syncutil.Mutex
var ranges []roachpb.RangeDescriptor
if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error {
var err error
// TODO(benesch): limit the range descriptors we fetch to the ranges that
// are actually relevant in the backup to speed up small backups on large
// clusters.
ranges, err = allRangeDescriptors(ctx, txn)
return err
}); err != nil {
return mu.exported, err
}
var completedSpans, completedIntroducedSpans []roachpb.Span
if checkpointDesc != nil {
// TODO(benesch): verify these files, rather than accepting them as truth
// blindly.
// No concurrency yet, so these assignments are safe.
mu.files = checkpointDesc.Files
mu.exported = checkpointDesc.EntryCounts
for _, file := range checkpointDesc.Files {
if file.StartTime.IsEmpty() && !file.EndTime.IsEmpty() {
completedIntroducedSpans = append(completedIntroducedSpans, file.Span)
} else {
completedSpans = append(completedSpans, file.Span)
}
}
}
// Subtract out any completed spans and split the remaining spans into
// range-sized pieces so that we can use the number of completed requests as a
// rough measure of progress.
spans := splitAndFilterSpans(backupDesc.Spans, completedSpans, ranges)
introducedSpans := splitAndFilterSpans(backupDesc.IntroducedSpans, completedIntroducedSpans, ranges)
allSpans := make([]spanAndTime, 0, len(spans)+len(introducedSpans))
for _, s := range introducedSpans {
allSpans = append(allSpans, spanAndTime{span: s, start: hlc.Timestamp{}, end: backupDesc.StartTime})
}
for _, s := range spans {
allSpans = append(allSpans, spanAndTime{span: s, start: backupDesc.StartTime, end: backupDesc.EndTime})
}
// Sequential ranges may have clustered leaseholders, for example a
// geo-partitioned table likely has all the leaseholders for some contiguous
// span of the table (i.e. a partition) pinned to just the nodes in a region.
// In such cases, sending spans sequentially may under-utilize the rest of the
// cluster given that we have a limit on the number of spans we send out at
// a given time. Randomizing the order of spans should help ensure a more even
// distribution of work across the cluster regardless of how leaseholders may
// or may not be clustered.
rand.Shuffle(len(allSpans), func(i, j int) {
allSpans[i], allSpans[j] = allSpans[j], allSpans[i]
})
progressLogger := jobs.NewChunkProgressLogger(job, len(spans), job.FractionCompleted(), jobs.ProgressUpdateOnly)
// We're already limiting these on the server-side, but sending all the
// Export requests at once would fill up distsender/grpc/something and cause
// all sorts of badness (node liveness timeouts leading to mass leaseholder
// transfers, poor performance on SQL workloads, etc) as well as log spam
// about slow distsender requests. Rate limit them here, too.
//
// Each node limits the number of running Export & Import requests it serves
// to avoid overloading the network, so multiply that by the number of nodes
// in the cluster and use that as the number of outstanding Export requests
// for the rate limiting. This attempts to strike a balance between
// simplicity, not getting slow distsender log spam, and keeping the server
// side limiter full.
//
// TODO(dan): Make this limiting per node.
//
// TODO(dan): See if there's some better solution than rate-limiting #14798.
maxConcurrentExports := clusterNodeCount(gossip) * int(storage.ExportRequestsLimit.Get(&settings.SV)) * 10
exportsSem := make(chan struct{}, maxConcurrentExports)
g := ctxgroup.WithContext(ctx)
requestFinishedCh := make(chan struct{}, len(spans)) // enough buffer to never block
// Only start the progress logger if there are spans, otherwise this will
// block forever. This is needed for TestBackupRestoreResume which doesn't
// have any spans. Users should never hit this.
if len(spans) > 0 {
g.GoCtx(func(ctx context.Context) error {
return progressLogger.Loop(ctx, requestFinishedCh)
})
}
g.GoCtx(func(ctx context.Context) error {
for i := range allSpans {
{
select {
case exportsSem <- struct{}{}:
case <-ctx.Done():
// Break the for loop to avoid creating more work - the backup
// has failed because either the context has been canceled or an
// error has been returned. Either way, Wait() is guaranteed to
// return an error now.
return ctx.Err()
}
}
span := allSpans[i]
g.GoCtx(func(ctx context.Context) error {
defer func() { <-exportsSem }()
header := roachpb.Header{Timestamp: span.end}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeaderFromSpan(span.span),
Storage: defaultStore.Conf(),
StorageByLocalityKV: storageByLocalityKV,
StartTime: span.start,
EnableTimeBoundIteratorOptimization: useTBI.Get(&settings.SV),
MVCCFilter: roachpb.MVCCFilter(backupDesc.MVCCFilter),
}
rawRes, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return pErr.GoError()
}
res := rawRes.(*roachpb.ExportResponse)
mu.Lock()
if backupDesc.RevisionStartTime.Less(res.StartTime) {
backupDesc.RevisionStartTime = res.StartTime
}
for _, file := range res.Files {
f := BackupDescriptor_File{
Span: file.Span,
Path: file.Path,
Sha512: file.Sha512,
EntryCounts: file.Exported,
LocalityKV: file.LocalityKV,
}
if span.start != backupDesc.StartTime {
f.StartTime = span.start
f.EndTime = span.end
}
mu.files = append(mu.files, f)
mu.exported.Add(file.Exported)
}
var checkpointFiles BackupFileDescriptors
if timeutil.Since(mu.lastCheckpoint) > BackupCheckpointInterval {
// We optimistically assume the checkpoint will succeed to prevent
// multiple threads from attempting to checkpoint.
mu.lastCheckpoint = timeutil.Now()
checkpointFiles = append(checkpointFiles, mu.files...)
}
mu.Unlock()
requestFinishedCh <- struct{}{}
if checkpointFiles != nil {
checkpointMu.Lock()
backupDesc.Files = checkpointFiles
err := writeBackupDescriptor(
ctx, settings, defaultStore, BackupDescriptorCheckpointName, backupDesc,
)
checkpointMu.Unlock()
if err != nil {
log.Errorf(ctx, "unable to checkpoint backup descriptor: %+v", err)
}
}
return nil
})
}
return nil
})
if err := g.Wait(); err != nil {
return mu.exported, errors.Wrapf(err, "exporting %d ranges", errors.Safe(len(spans)))
}
// No more concurrency, so no need to acquire locks below.
backupDesc.Files = mu.files
backupDesc.EntryCounts = mu.exported
backupID := uuid.MakeV4()
backupDesc.ID = backupID
// Write additional partial descriptors to each node for partitioned backups.
if len(storageByLocalityKV) > 0 {
filesByLocalityKV := make(map[string][]BackupDescriptor_File)
for i := range mu.files {
file := &mu.files[i]
filesByLocalityKV[file.LocalityKV] = append(filesByLocalityKV[file.LocalityKV], *file)
}
nextPartitionedDescFilenameID := 1
for kv, conf := range storageByLocalityKV {
backupDesc.LocalityKVs = append(backupDesc.LocalityKVs, kv)
// Set a unique filename for each partition backup descriptor. The ID
// ensures uniqueness, and the kv string appended to the end is for
// readability.
filename := fmt.Sprintf("%s_%d_%s",
BackupPartitionDescriptorPrefix, nextPartitionedDescFilenameID, sanitizeLocalityKV(kv))
nextPartitionedDescFilenameID++
backupDesc.PartitionDescriptorFilenames = append(backupDesc.PartitionDescriptorFilenames, filename)
desc := BackupPartitionDescriptor{
LocalityKV: kv,
Files: filesByLocalityKV[kv],
BackupID: backupID,
}
if err := func() error {
store, err := makeExternalStorage(ctx, *conf)
if err != nil {
return err
}
defer store.Close()
return writeBackupPartitionDescriptor(ctx, store, filename, &desc)
}(); err != nil {
return mu.exported, err
}
}
}
if err := writeBackupDescriptor(ctx, settings, defaultStore, BackupDescriptorName, backupDesc); err != nil {
return mu.exported, err
}
return mu.exported, nil
}
// sanitizeLocalityKV returns a sanitized version of the input string where all
// characters that are not alphanumeric or -, =, or _ are replaced with _.
func sanitizeLocalityKV(kv string) string {
sanitizedKV := make([]byte, len(kv))
for i := 0; i < len(kv); i++ {
if (kv[i] >= 'a' && kv[i] <= 'z') ||
(kv[i] >= 'A' && kv[i] <= 'Z') ||
(kv[i] >= '0' && kv[i] <= '9') || kv[i] == '-' || kv[i] == '=' {
sanitizedKV[i] = kv[i]
} else {
sanitizedKV[i] = '_'
}
}
return string(sanitizedKV)
}
// VerifyUsableExportTarget ensures that the target location does not already
// contain a BACKUP or checkpoint and writes an empty checkpoint, both verifying
// that the location is writable and locking out accidental concurrent
// operations on that location if subsequently try this check. Callers must
// clean up the written checkpoint file (BackupDescriptorCheckpointName) only
// after writing to the backup file location (BackupDescriptorName).
func VerifyUsableExportTarget(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
readable string,
) error {
if r, err := exportStore.ReadFile(ctx, BackupDescriptorName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupDescriptorName)
}
if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupManifestName)
}
if r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file (is another operation already in progress?)",
readable, BackupDescriptorCheckpointName)
}
if err := writeBackupDescriptor(
ctx, settings, exportStore, BackupDescriptorCheckpointName, &BackupDescriptor{},
); err != nil {
return errors.Wrapf(err, "cannot write to %s", readable)
}
return nil
}
// backupPlanHook implements PlanHookFn.
func backupPlanHook(
_ context.Context, stmt tree.Statement, p sql.PlanHookState,
) (sql.PlanHookRowFn, sqlbase.ResultColumns, []sql.PlanNode, bool, error) {
backupStmt, ok := stmt.(*tree.Backup)
if !ok {
return nil, nil, nil, false, nil
}
toFn, err := p.TypeAsStringArray(tree.Exprs(backupStmt.To), "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
incrementalFromFn, err := p.TypeAsStringArray(backupStmt.IncrementalFrom, "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
optsFn, err := p.TypeAsStringOpts(backupStmt.Options, backupOptionExpectValues)
if err != nil {
return nil, nil, nil, false, err
}
header := sqlbase.ResultColumns{
{Name: "job_id", Typ: types.Int},
{Name: "status", Typ: types.String},
{Name: "fraction_completed", Typ: types.Float},
{Name: "rows", Typ: types.Int},
{Name: "index_entries", Typ: types.Int},
{Name: "system_records", Typ: types.Int},
{Name: "bytes", Typ: types.Int},
}
fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error {
// TODO(dan): Move this span into sql.
ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag())
defer tracing.FinishSpan(span)
if err := utilccl.CheckEnterpriseEnabled(
p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "BACKUP",
); err != nil {
return err
}
if err := p.RequireAdminRole(ctx, "BACKUP"); err != nil {
return err
}
if !p.ExtendedEvalContext().TxnImplicit {
return errors.Errorf("BACKUP cannot be used inside a transaction")
}
to, err := toFn()
if err != nil {
return err
}
if len(to) > 1 &&
!cluster.Version.IsActive(ctx, p.ExecCfg().Settings, cluster.VersionPartitionedBackup) {
return errors.Errorf("partitioned backups can only be made on a cluster that has been fully upgraded to version 19.2")
}
incrementalFrom, err := incrementalFromFn()
if err != nil {
return err
}
endTime := p.ExecCfg().Clock.Now()
if backupStmt.AsOf.Expr != nil {
var err error
if endTime, err = p.EvalAsOfTimestamp(backupStmt.AsOf); err != nil {
return err
}
}
defaultURI, urisByLocalityKV, err := getURIsByLocalityKV(to)
if err != nil {
return nil
}
defaultStore, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, defaultURI)
if err != nil {
return err
}
defer defaultStore.Close()
opts, err := optsFn()
if err != nil {
return err
}
mvccFilter := MVCCFilter_Latest
if _, ok := opts[backupOptRevisionHistory]; ok {
mvccFilter = MVCCFilter_All
}
targetDescs, completeDBs, err := ResolveTargetsToDescriptors(ctx, p, endTime, backupStmt.Targets)
if err != nil {
return err
}
statsCache := p.ExecCfg().TableStatsCache
tableStatistics := make([]*stats.TableStatisticProto, 0)
var tables []*sqlbase.TableDescriptor
for _, desc := range targetDescs {
if dbDesc := desc.GetDatabase(); dbDesc != nil {
if err := p.CheckPrivilege(ctx, dbDesc, privilege.SELECT); err != nil {
return err
}
}
if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil {
return err
}
tables = append(tables, tableDesc)
// Collect all the table stats for this table.
tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc.GetID())
if err != nil {
return err
}
for i := range tableStatisticsAcc {
tableStatistics = append(tableStatistics, &tableStatisticsAcc[i].TableStatisticProto)
}
}
}
if err := ensureInterleavesIncluded(tables); err != nil {
return err
}
var prevBackups []BackupDescriptor
if len(incrementalFrom) > 0 {
clusterID := p.ExecCfg().ClusterID()
prevBackups = make([]BackupDescriptor, len(incrementalFrom))
for i, uri := range incrementalFrom {
// TODO(lucy): We may want to upgrade the table descs to the newer
// foreign key representation here, in case there are backups from an
// older cluster. Keeping the descriptors as they are works for now
// since all we need to do is get the past backups' table/index spans,
// but it will be safer for future code to avoid having older-style
// descriptors around.
desc, err := ReadBackupDescriptorFromURI(ctx, uri, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI)
if err != nil {
return errors.Wrapf(err, "failed to read backup from %q", uri)
}
// IDs are how we identify tables, and those are only meaningful in the
// context of their own cluster, so we need to ensure we only allow
// incremental previous backups that we created.
if !desc.ClusterID.Equal(clusterID) {
return errors.Newf("previous BACKUP %q belongs to cluster %s", uri, desc.ClusterID.String())
}
prevBackups[i] = desc
}
}
var startTime hlc.Timestamp
var newSpans roachpb.Spans
if len(prevBackups) > 0 {
startTime = prevBackups[len(prevBackups)-1].EndTime
}
var priorIDs map[sqlbase.ID]sqlbase.ID
var revs []BackupDescriptor_DescriptorRevision
if mvccFilter == MVCCFilter_All {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
revs, err = getRelevantDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, targetDescs, completeDBs, priorIDs)
if err != nil {
return err
}
}
spans := spansForAllTableIndexes(tables, revs)
if len(prevBackups) > 0 {
tablesInPrev := make(map[sqlbase.ID]struct{})
dbsInPrev := make(map[sqlbase.ID]struct{})
for _, d := range prevBackups[len(prevBackups)-1].Descriptors {
if t := d.Table(hlc.Timestamp{}); t != nil {
tablesInPrev[t.ID] = struct{}{}
}
}
for _, d := range prevBackups[len(prevBackups)-1].CompleteDbs {
dbsInPrev[d] = struct{}{}
}
for _, d := range targetDescs {
if t := d.Table(hlc.Timestamp{}); t != nil {
// If we're trying to use a previous backup for this table, ideally it
// actually contains this table.
if _, ok := tablesInPrev[t.ID]; ok {
continue
}
// This table isn't in the previous backup... maybe was added to a
// DB that the previous backup captured?
if _, ok := dbsInPrev[t.ParentID]; ok {
continue
}
// Maybe this table is missing from the previous backup because it was
// truncated?
if t.ReplacementOf.ID != sqlbase.InvalidID {
// Check if we need to lazy-load the priorIDs (i.e. if this is the first
// truncate we've encountered in non-MVCC backup).
if priorIDs == nil {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
_, err := getAllDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, priorIDs)
if err != nil {
return err
}
}
found := false
for was := t.ReplacementOf.ID; was != sqlbase.InvalidID && !found; was = priorIDs[was] {
_, found = tablesInPrev[was]
}
if found {
continue
}
}
return errors.Errorf("previous backup does not contain table %q", t.Name)
}
}
var err error
_, coveredTime, err := makeImportSpans(
spans,
prevBackups,
nil, /*backupLocalityInfo*/
keys.MinKey,
func(span covering.Range, start, end hlc.Timestamp) error {
if (start == hlc.Timestamp{}) {
newSpans = append(newSpans, roachpb.Span{Key: span.Start, EndKey: span.End})
return nil
}
return errOnMissingRange(span, start, end)
},
)
if err != nil {
return errors.Wrapf(err, "invalid previous backups (a new full backup may be required if a table has been created, dropped or truncated)")
}
if coveredTime != startTime {
return errors.Wrapf(err, "expected previous backups to cover until time %v, got %v", startTime, coveredTime)
}
}
// if CompleteDbs is lost by a 1.x node, FormatDescriptorTrackingVersion
// means that a 2.0 node will disallow `RESTORE DATABASE foo`, but `RESTORE
// foo.table1, foo.table2...` will still work. MVCCFilter would be
// mis-handled, but is disallowed above. IntroducedSpans may also be lost by
// a 1.x node, meaning that if 1.1 nodes may resume a backup, the limitation
// of requiring full backups after schema changes remains.
backupDesc := BackupDescriptor{
StartTime: startTime,
EndTime: endTime,
MVCCFilter: mvccFilter,
Descriptors: targetDescs,
DescriptorChanges: revs,
CompleteDbs: completeDBs,
Spans: spans,
IntroducedSpans: newSpans,
FormatVersion: BackupFormatDescriptorTrackingVersion,
BuildInfo: build.GetInfo(),
NodeID: p.ExecCfg().NodeID.Get(),
ClusterID: p.ExecCfg().ClusterID(),
Statistics: tableStatistics,
}
// Sanity check: re-run the validation that RESTORE will do, but this time
// including this backup, to ensure that the this backup plus any previous
// backups does cover the interval expected.
if _, coveredEnd, err := makeImportSpans(
spans,
append(prevBackups, backupDesc),
nil, /*backupLocalityInfo*/
keys.MinKey,
errOnMissingRange,
); err != nil {
return err
} else if coveredEnd != endTime {
return errors.Errorf("expected backup (along with any previous backups) to cover to %v, not %v", endTime, coveredEnd)
}
descBytes, err := protoutil.Marshal(&backupDesc)
if err != nil {
return err
}
description, err := backupJobDescription(p, backupStmt, to, incrementalFrom, opts)
if err != nil {
return err
}
// TODO (lucy): For partitioned backups, also add verification for other
// stores we are writing to in addition to the default.
if err := VerifyUsableExportTarget(ctx, p.ExecCfg().Settings, defaultStore, defaultURI); err != nil {
return err
}
_, errCh, err := p.ExecCfg().JobRegistry.CreateAndStartJob(ctx, resultsCh, jobs.Record{
Description: description,
Username: p.User(),
DescriptorIDs: func() (sqlDescIDs []sqlbase.ID) {
for _, sqlDesc := range backupDesc.Descriptors {
sqlDescIDs = append(sqlDescIDs, sqlDesc.GetID())
}
return sqlDescIDs
}(),
Details: jobspb.BackupDetails{
StartTime: startTime,
EndTime: endTime,
URI: defaultURI,
URIsByLocalityKV: urisByLocalityKV,
BackupDescriptor: descBytes,
},
Progress: jobspb.BackupProgress{},
})
if err != nil {
return err
}
return <-errCh
}
return fn, header, nil, false, nil
}
type backupResumer struct {
job *jobs.Job
settings *cluster.Settings
res roachpb.BulkOpSummary
makeExternalStorage cloud.ExternalStorageFactory
}
// Resume is part of the jobs.Resumer interface.
func (b *backupResumer) Resume(
ctx context.Context, phs interface{}, resultsCh chan<- tree.Datums,
) error {
details := b.job.Details().(jobspb.BackupDetails)
p := phs.(sql.PlanHookState)
b.makeExternalStorage = p.ExecCfg().DistSQLSrv.ExternalStorage
if len(details.BackupDescriptor) == 0 {
return errors.Newf("missing backup descriptor; cannot resume a backup from an older version")
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(details.BackupDescriptor, &backupDesc); err != nil {
return pgerror.Wrapf(err, pgcode.DataCorrupted,
"unmarshal backup descriptor")
}
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
defaultConf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return errors.Wrapf(err, "export configuration")
}
defaultStore, err := b.makeExternalStorage(ctx, defaultConf)
if err != nil {
return errors.Wrapf(err, "make storage")
}
storageByLocalityKV := make(map[string]*roachpb.ExternalStorage)
for kv, uri := range details.URIsByLocalityKV {
conf, err := cloud.ExternalStorageConfFromURI(uri)
if err != nil {
return err
}
storageByLocalityKV[kv] = &conf
}
var checkpointDesc *BackupDescriptor
// We don't read the table descriptors from the backup descriptor, but
// they could be using either the new or the old foreign key
// representations. We should just preserve whatever representation the
// table descriptors were using and leave them alone.
if desc, err := readBackupDescriptor(ctx, defaultStore, BackupDescriptorCheckpointName); err == nil {
// If the checkpoint is from a different cluster, it's meaningless to us.
// More likely though are dummy/lock-out checkpoints with no ClusterID.
if desc.ClusterID.Equal(p.ExecCfg().ClusterID()) {
checkpointDesc = &desc
}
} else {
// TODO(benesch): distinguish between a missing checkpoint, which simply
// indicates the prior backup attempt made no progress, and a corrupted
// checkpoint, which is more troubling. Sadly, storageccl doesn't provide a
// "not found" error that's consistent across all ExternalStorage
// implementations.
log.Warningf(ctx, "unable to load backup checkpoint while resuming job %d: %v", *b.job.ID(), err)
}
res, err := backup(
ctx,
p.ExecCfg().DB,
p.ExecCfg().Gossip,
p.ExecCfg().Settings,
defaultStore,
storageByLocalityKV,
b.job,
&backupDesc,
checkpointDesc,
resultsCh,
b.makeExternalStorage,
)
b.res = res
return err
}
// OnFailOrCancel is part of the jobs.Resumer interface.
func (b *backupResumer) OnFailOrCancel(context.Context, *client.Txn) error {
return nil
}
// OnSuccess is part of the jobs.Resumer interface.
func (b *backupResumer) OnSuccess(context.Context, *client.Txn) error { return nil }
// OnTerminal is part of the jobs.Resumer interface.
func (b *backupResumer) OnTerminal(
ctx context.Context, status jobs.Status, resultsCh chan<- tree.Datums,
) {
// Attempt to delete BACKUP-CHECKPOINT.
if err := func() error {
details := b.job.Details().(jobspb.BackupDetails)
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
conf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return err
}
exportStore, err := b.makeExternalStorage(ctx, conf)
if err != nil {
return err
}
return exportStore.Delete(ctx, BackupDescriptorCheckpointName)
}(); err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor: %+v", err)
}
if status == jobs.StatusSucceeded {
// TODO(benesch): emit periodic progress updates.
// TODO(mjibson): if a restore was resumed, then these counts will only have
// the current coordinator's counts.
resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(*b.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(b.res.Rows)),
tree.NewDInt(tree.DInt(b.res.IndexEntries)),
tree.NewDInt(tree.DInt(b.res.SystemRecords)),
tree.NewDInt(tree.DInt(b.res.DataSize)),
}
}
}
type versionedValues struct {
Key roachpb.Key
Values []roachpb.Value
}
// getAllRevisions scans all keys between startKey and endKey getting all
// revisions between startTime and endTime.
// TODO(dt): if/when client gets a ScanRevisionsRequest or similar, use that.
func getAllRevisions(
ctx context.Context,
db *client.DB,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
) ([]versionedValues, error) {
// TODO(dt): version check.
header := roachpb.Header{Timestamp: endTime}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeader{Key: startKey, EndKey: endKey},
StartTime: startTime,
MVCCFilter: roachpb.MVCCFilter_All,
ReturnSST: true,
OmitChecksum: true,
}
resp, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return nil, pErr.GoError()
}
var res []versionedValues
for _, file := range resp.(*roachpb.ExportResponse).Files {
sst := engine.MakeRocksDBSstFileReader()
defer sst.Close()
if err := sst.IngestExternalFile(file.SST); err != nil {
return nil, err
}
if err := sst.Iterate(startKey, endKey, func(kv engine.MVCCKeyValue) (bool, error) {
if len(res) == 0 || !res[len(res)-1].Key.Equal(kv.Key.Key) {
res = append(res, versionedValues{Key: kv.Key.Key})
}
res[len(res)-1].Values = append(res[len(res)-1].Values, roachpb.Value{Timestamp: kv.Key.Timestamp, RawBytes: kv.Value})
return false, nil
}); err != nil {
return nil, err
}
}
return res, nil
}
var _ jobs.Resumer = &backupResumer{}
func init() {
sql.AddPlanHook(backupPlanHook)
jobs.RegisterConstructor(
jobspb.TypeBackup,
func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
return &backupResumer{
job: job,
settings: settings,
}
},
)
}
// getURIsByLocalityKV takes a slice of URIs for a single (possibly partitioned)
// backup, and returns the default backup destination URI and a map of all other
// URIs by locality KV. The URIs in the result do not include the
// COCKROACH_LOCALITY parameter.
func getURIsByLocalityKV(to []string) (string, map[string]string, error) {
localityAndBaseURI := func(uri string) (string, string, error) {
parsedURI, err := url.Parse(uri)
if err != nil {
return "", "", err
}
q := parsedURI.Query()
localityKV := q.Get(localityURLParam)
// Remove the backup locality parameter.
q.Del(localityURLParam)
parsedURI.RawQuery = q.Encode()
baseURI := parsedURI.String()
return localityKV, baseURI, nil
}
urisByLocalityKV := make(map[string]string)
if len(to) == 1 {
localityKV, baseURI, err := localityAndBaseURI(to[0])
if err != nil {
return "", nil, err
}
if localityKV != "" && localityKV != defaultLocalityValue {
return "", nil, errors.Errorf("%s %s is invalid for a single BACKUP location",
localityURLParam, localityKV)
}
return baseURI, urisByLocalityKV, nil
}
var defaultURI string
for _, uri := range to {
localityKV, baseURI, err := localityAndBaseURI(uri)
if err != nil {
return "", nil, err
}
if localityKV == "" {
return "", nil, errors.Errorf(
"multiple URLs are provided for partitioned BACKUP, but %s is not specified",
localityURLParam,
)
}
if localityKV == defaultLocalityValue {
if defaultURI != "" {
return "", nil, errors.Errorf("multiple default URLs provided for partition backup")
}
defaultURI = baseURI
} else {
kv := roachpb.Tier{}
if err := kv.FromString(localityKV); err != nil {
return "", nil, errors.Wrap(err, "failed to parse backup locality")
}
if _, ok := urisByLocalityKV[localityKV]; ok {
return "", nil, errors.Errorf("duplicate URIs for locality %s", localityKV)
}
urisByLocalityKV[localityKV] = baseURI
}
}
if defaultURI == "" {
return "", nil, errors.Errorf("no default URL provided for partitioned backup")
}
return defaultURI, urisByLocalityKV, nil
}
// maybeUpgradeTableDescsInBackupDescriptors updates the backup descriptors'
// table descriptors to use the newer 19.2-style foreign key representation,
// if they are not already upgraded. This requires resolving cross-table FK
// references, which is done by looking up all table descriptors across all
// backup descriptors provided. if skipFKsWithNoMatchingTable is set, FKs whose
// "other" table is missing from the set provided are omitted during the
// upgrade, instead of causing an error to be returned.
func maybeUpgradeTableDescsInBackupDescriptors(
ctx context.Context, backupDescs []BackupDescriptor, skipFKsWithNoMatchingTable bool,
) error {
protoGetter := sqlbase.MapProtoGetter{
Protos: make(map[interface{}]protoutil.Message),
}
// Populate the protoGetter with all table descriptors in all backup
// descriptors so that they can be looked up.
for _, backupDesc := range backupDescs {
for _, desc := range backupDesc.Descriptors {
if table := desc.Table(hlc.Timestamp{}); table != nil {
protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(table.ID))] =
sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor))
}
}
}
for i := range backupDescs {
backupDesc := &backupDescs[i]
for j := range backupDesc.Descriptors {
if table := backupDesc.Descriptors[j].Table(hlc.Timestamp{}); table != nil {
if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, skipFKsWithNoMatchingTable); err != nil {
return err
}
// TODO(lucy): Is this necessary?
backupDesc.Descriptors[j] = *sqlbase.WrapDescriptor(table)
}
}
}
return nil
}
| pkg/ccl/backupccl/backup.go | 1 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.9982434511184692,
0.013657661154866219,
0.00015993266424629837,
0.00019541796064004302,
0.11106357723474503
] |
{
"id": 2,
"code_window": [
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file (is another operation already in progress?)\",\n",
"\t\t\treadable, BackupDescriptorCheckpointName)\n",
"\t}\n",
"\tif err := writeBackupDescriptor(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 954
} | revoke_stmt ::=
'REVOKE' ( role_name ) ( ( ',' role_name ) )* 'FROM' ( user_name ) ( ( ',' user_name ) )*
| 'REVOKE' 'ADMIN' 'OPTION' 'FOR' ( role_name ) ( ( ',' role_name ) )* 'FROM' ( user_name ) ( ( ',' user_name ) )*
| docs/generated/sql/bnf/revoke_roles.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00017161980213131756,
0.00017161980213131756,
0.00017161980213131756,
0.00017161980213131756,
0
] |
{
"id": 2,
"code_window": [
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file (is another operation already in progress?)\",\n",
"\t\t\treadable, BackupDescriptorCheckpointName)\n",
"\t}\n",
"\tif err := writeBackupDescriptor(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 954
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgdate
import (
"reflect"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
func TestExtractRelative(t *testing.T) {
tests := []struct {
s string
rel int
}{
{
s: keywordYesterday,
rel: -1,
},
{
s: keywordToday,
rel: 0,
},
{
s: keywordTomorrow,
rel: 1,
},
}
now := time.Date(2018, 10, 17, 0, 0, 0, 0, time.UTC)
for _, tc := range tests {
t.Run(tc.s, func(t *testing.T) {
d, err := ParseDate(now, ParseModeYMD, tc.s)
if err != nil {
t.Fatal(err)
}
ts, err := d.ToTime()
if err != nil {
t.Fatal(err)
}
exp := now.AddDate(0, 0, tc.rel)
if ts != exp {
t.Fatalf("expected %v, got %v", exp, ts)
}
})
}
}
func TestExtractSentinels(t *testing.T) {
now := timeutil.Unix(42, 56)
tests := []struct {
s string
expected time.Time
err bool
}{
{
s: keywordEpoch,
expected: TimeEpoch,
},
{
s: keywordInfinity,
expected: TimeInfinity,
},
{
s: "-" + keywordInfinity,
expected: TimeNegativeInfinity,
},
{
s: keywordNow,
expected: now,
},
{
s: keywordNow + " tomorrow",
err: true,
},
}
for _, tc := range tests {
t.Run(tc.s, func(t *testing.T) {
fe := fieldExtract{now: now}
err := fe.Extract(tc.s)
if tc.err {
if err == nil {
t.Fatal("expected error")
}
return
}
if err != nil {
t.Fatal(err)
}
if fe.MakeTimestamp() != tc.expected {
t.Fatal("did not get expected sentinel value")
}
})
}
}
func TestFieldExtractSet(t *testing.T) {
p := fieldExtract{wanted: dateFields}
if err := p.Set(fieldYear, 2018); err != nil {
t.Fatal(err)
}
if err := p.Set(fieldMonth, 1); err != nil {
t.Fatal(err)
}
if p.Wants(fieldSecond) {
t.Fatal("should not want RelativeDate")
}
t.Log(p.String())
}
func TestChunking(t *testing.T) {
// Using an over-long UTF-8 sequence from:
// https://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt
badString := string([]byte{0xe0, 0x80, 0xaf})
tests := []struct {
s string
count int
expected []stringChunk
tail string
}{
{
// Empty input.
s: "",
expected: []stringChunk{},
},
{
s: "@@ foo!bar baz %%",
count: 3,
expected: []stringChunk{
{"@@ ", "foo"},
{"!", "bar"},
{" ", "baz"},
},
tail: " %%",
},
{
s: "Εργαστήρια κατσαρίδων", /* Cockroach Labs */
count: 2,
expected: []stringChunk{{"", "Εργαστήρια"}, {" ", "κατσαρίδων"}},
},
{
s: "!@#$%^",
expected: []stringChunk{},
tail: "!@#$%^",
},
// Check cases where we see bad UTF-8 inputs. We should
// try to keep scanning until a reasonable value reappears.
{
s: "foo bar baz" + badString + "boom",
count: 4,
expected: []stringChunk{
{"", "foo"},
{" ", "bar"},
{" ", "baz"},
{badString, "boom"},
},
},
{
s: badString + "boom",
count: 1,
expected: []stringChunk{
{string([]byte{0xe0, 0x80, 0xaf}), "boom"},
},
},
{
s: "boom" + badString,
count: 1,
expected: []stringChunk{
{"", "boom"},
},
tail: badString,
},
{
s: badString,
expected: []stringChunk{},
tail: badString,
},
{
// This should be too long to fit in the slice.
s: "1 2 3 4 5 6 7 8 9 10",
count: -1,
},
}
for _, tc := range tests {
t.Run(tc.s, func(t *testing.T) {
textChunks := make([]stringChunk, 8)
count, tail := chunk(tc.s, textChunks)
if count != tc.count {
t.Errorf("expected %d, got %d", len(tc.expected), count)
}
if count < 0 {
return
}
if !reflect.DeepEqual(tc.expected, textChunks[:count]) {
t.Errorf("expected %v, got %v", tc.expected, textChunks[:count])
}
if tail != tc.tail {
t.Errorf("expected tail %s, got %s", tail, tc.tail)
}
})
}
}
func BenchmarkChunking(b *testing.B) {
for i := 0; i < b.N; i++ {
buf := make([]stringChunk, 8)
chunk("foo bar baz", buf)
}
}
| pkg/util/timeutil/pgdate/field_extract_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00018477275443729013,
0.0001731833181111142,
0.00016396174032706767,
0.00017301508341915905,
0.000004028621788165765
] |
{
"id": 2,
"code_window": [
"\t\t\t\"%s already contains a %s file\",\n",
"\t\t\treadable, BackupManifestName)\n",
"\t}\n",
"\tif r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {\n",
"\t\tr.Close()\n",
"\t\treturn pgerror.Newf(pgcode.DuplicateFile,\n",
"\t\t\t\"%s already contains a %s file (is another operation already in progress?)\",\n",
"\t\t\treadable, BackupDescriptorCheckpointName)\n",
"\t}\n",
"\tif err := writeBackupDescriptor(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn pgerror.Newf(pgcode.FileAlreadyExists,\n"
],
"file_path": "pkg/ccl/backupccl/backup.go",
"type": "replace",
"edit_start_line_idx": 954
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage
import (
"context"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/rditer"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"go.etcd.io/etcd/raft/raftpb"
"golang.org/x/time/rate"
)
func TestSnapshotRaftLogLimit(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
stopper := stop.NewStopper()
defer stopper.Stop(ctx)
store, _ := createTestStore(t,
testStoreOpts{
// This test was written before test stores could start with more than one
// range and was not adapted.
createSystemRanges: false,
},
stopper)
store.SetRaftLogQueueActive(false)
repl, err := store.GetReplica(1)
if err != nil {
t.Fatal(err)
}
var bytesWritten int64
blob := []byte(strings.Repeat("a", 1024*1024))
for i := 0; bytesWritten < 5*store.cfg.RaftLogTruncationThreshold; i++ {
pArgs := putArgs(roachpb.Key("a"), blob)
_, pErr := client.SendWrappedWith(ctx, store, roachpb.Header{RangeID: 1}, &pArgs)
if pErr != nil {
t.Fatal(pErr)
}
bytesWritten += int64(len(blob))
}
for _, snapType := range []SnapshotRequest_Type{SnapshotRequest_PREEMPTIVE, SnapshotRequest_RAFT} {
t.Run(snapType.String(), func(t *testing.T) {
lastIndex, err := repl.GetLastIndex()
if err != nil {
t.Fatal(err)
}
eng := store.Engine()
snap := eng.NewSnapshot()
defer snap.Close()
ss := kvBatchSnapshotStrategy{
raftCfg: &store.cfg.RaftConfig,
limiter: rate.NewLimiter(1<<10, 1),
newBatch: eng.NewBatch,
}
iter := rditer.NewReplicaDataIterator(repl.Desc(), snap, true /* replicatedOnly */)
defer iter.Close()
outSnap := &OutgoingSnapshot{
Iter: iter,
EngineSnap: snap,
snapType: snapType,
RaftSnap: raftpb.Snapshot{
Metadata: raftpb.SnapshotMetadata{
Index: lastIndex,
},
},
}
var stream fakeSnapshotStream
header := SnapshotRequest_Header{
State: repl.State().ReplicaState,
}
err = ss.Send(ctx, stream, header, outSnap)
if snapType == SnapshotRequest_PREEMPTIVE {
if !testutils.IsError(err, "aborting snapshot because raft log is too large") {
t.Fatalf("unexpected error: %+v", err)
}
} else {
if err != nil {
t.Fatal(err)
}
}
})
}
}
// TestSnapshotPreemptiveOnUninitializedReplica is a targeted regression test
// against a bug that once accepted these snapshots without forcing them to
// check for overlapping ranges.
func TestSnapshotPreemptiveOnUninitializedReplica(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
stopper := stop.NewStopper()
defer stopper.Stop(ctx)
store, _ := createTestStore(t, testStoreOpts{}, stopper)
// Create an uninitialized replica.
repl, created, err := store.getOrCreateReplica(ctx, 77, 1, nil, true)
if err != nil {
t.Fatal(err)
}
if !created {
t.Fatal("no replica created")
}
// Make a descriptor that overlaps r1 (any descriptor does because r1 covers
// all of the keyspace).
desc := *repl.Desc()
desc.StartKey = roachpb.RKey("a")
desc.EndKey = roachpb.RKey("b")
header := &SnapshotRequest_Header{}
header.State.Desc = &desc
if !header.IsPreemptive() {
t.Fatal("mock snapshot isn't preemptive")
}
if _, err := store.canApplyPreemptiveSnapshot(
ctx, header, true, /* authoritative */
); !testutils.IsError(err, "intersects existing range") {
t.Fatal(err)
}
}
| pkg/storage/store_snapshot_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.0004738577117677778,
0.00018879087292589247,
0.00015882888692431152,
0.00016860305913724005,
0.00007634290523128584
] |
{
"id": 3,
"code_window": [
"\tInvalidFunctionDefinition = \"42P13\"\n",
"\tInvalidPreparedStatementDefinition = \"42P14\"\n",
"\tInvalidSchemaDefinition = \"42P15\"\n",
"\tInvalidTableDefinition = \"42P16\"\n",
"\tInvalidObjectDefinition = \"42P17\"\n",
"\t// Class 44 - WITH CHECK OPTION Violation\n",
"\tWithCheckOptionViolation = \"44000\"\n",
"\t// Class 53 - Insufficient Resources\n",
"\tInsufficientResources = \"53000\"\n",
"\tDiskFull = \"53100\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tFileAlreadyExists = \"42C01\"\n"
],
"file_path": "pkg/sql/pgwire/pgcode/codes.go",
"type": "add",
"edit_start_line_idx": 224
} | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"net/url"
"sort"
"time"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/covering"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/interval"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
const (
// BackupDescriptorName is the file name used for serialized
// BackupDescriptor protos.
BackupDescriptorName = "BACKUP"
// BackupManifestName is a future name for the serialized
// BackupDescriptor proto.
BackupManifestName = "BACKUP_MANIFEST"
// BackupPartitionDescriptorPrefix is the file name prefix for serialized
// BackupPartitionDescriptor protos.
BackupPartitionDescriptorPrefix = "BACKUP_PART"
// BackupDescriptorCheckpointName is the file name used to store the
// serialized BackupDescriptor proto while the backup is in progress.
BackupDescriptorCheckpointName = "BACKUP-CHECKPOINT"
// BackupFormatDescriptorTrackingVersion added tracking of complete DBs.
BackupFormatDescriptorTrackingVersion uint32 = 1
)
const (
backupOptRevisionHistory = "revision_history"
localityURLParam = "COCKROACH_LOCALITY"
defaultLocalityValue = "default"
)
var useTBI = settings.RegisterBoolSetting(
"kv.bulk_io_write.experimental_incremental_export_enabled",
"use experimental time-bound file filter when exporting in BACKUP",
false,
)
var backupOptionExpectValues = map[string]sql.KVStringOptValidate{
backupOptRevisionHistory: sql.KVStringOptRequireNoValue,
}
// BackupCheckpointInterval is the interval at which backup progress is saved
// to durable storage.
var BackupCheckpointInterval = time.Minute
// ReadBackupDescriptorFromURI creates an export store from the given URI, then
// reads and unmarshals a BackupDescriptor at the standard location in the
// export storage.
func ReadBackupDescriptorFromURI(
ctx context.Context, uri string, makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory,
) (BackupDescriptor, error) {
exportStore, err := makeExternalStorageFromURI(ctx, uri)
if err != nil {
return BackupDescriptor{}, err
}
defer exportStore.Close()
backupDesc, err := readBackupDescriptor(ctx, exportStore, BackupDescriptorName)
if err != nil {
backupManifest, manifestErr := readBackupDescriptor(ctx, exportStore, BackupManifestName)
if manifestErr != nil {
return BackupDescriptor{}, err
}
backupDesc = backupManifest
}
backupDesc.Dir = exportStore.Conf()
// TODO(dan): Sanity check this BackupDescriptor: non-empty EndTime,
// non-empty Paths, and non-overlapping Spans and keyranges in Files.
return backupDesc, nil
}
// readBackupDescriptor reads and unmarshals a BackupDescriptor from filename in
// the provided export store.
func readBackupDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupDescriptor{}, err
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupDescriptor{}, err
}
for _, d := range backupDesc.Descriptors {
// Calls to GetTable are generally frowned upon.
// This specific call exists to provide backwards compatibility with
// backups created prior to version 19.1. Starting in v19.1 the
// ModificationTime is always written in backups for all versions
// of table descriptors. In earlier cockroach versions only later
// table descriptor versions contain a non-empty ModificationTime.
// Later versions of CockroachDB use the MVCC timestamp to fill in
// the ModificationTime for table descriptors. When performing a restore
// we no longer have access to that MVCC timestamp but we can set it
// to a value we know will be safe.
if t := d.GetTable(); t == nil {
continue
} else if t.Version == 1 && t.ModificationTime.IsEmpty() {
t.ModificationTime = hlc.Timestamp{WallTime: 1}
}
}
return backupDesc, err
}
func readBackupPartitionDescriptor(
ctx context.Context, exportStore cloud.ExternalStorage, filename string,
) (BackupPartitionDescriptor, error) {
r, err := exportStore.ReadFile(ctx, filename)
if err != nil {
return BackupPartitionDescriptor{}, err
}
defer r.Close()
descBytes, err := ioutil.ReadAll(r)
if err != nil {
return BackupPartitionDescriptor{}, err
}
var backupDesc BackupPartitionDescriptor
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupPartitionDescriptor{}, err
}
return backupDesc, err
}
// getRelevantDescChanges finds the changes between start and end time to the
// SQL descriptors matching `descs` or `expandedDBs`, ordered by time. A
// descriptor revision matches if it is an earlier revision of a descriptor in
// descs (same ID) or has parentID in `expanded`. Deleted descriptors are
// represented as nil. Fills in the `priorIDs` map in the process, which maps
// a descriptor the the ID by which it was previously known (e.g pre-TRUNCATE).
func getRelevantDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
descs []sqlbase.Descriptor,
expanded []sqlbase.ID,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
allChanges, err := getAllDescChanges(ctx, db, startTime, endTime, priorIDs)
if err != nil {
return nil, err
}
// If no descriptors changed, we can just stop now and have RESTORE use the
// normal list of descs (i.e. as of endTime).
if len(allChanges) == 0 {
return nil, nil
}
// interestingChanges will be every descriptor change relevant to the backup.
var interestingChanges []BackupDescriptor_DescriptorRevision
// interestingIDs are the descriptor for which we're interested in capturing
// changes. This is initially the descriptors matched (as of endTime) by our
// target spec, plus those that belonged to a DB that our spec expanded at any
// point in the interval.
interestingIDs := make(map[sqlbase.ID]struct{}, len(descs))
// The descriptors that currently (endTime) match the target spec (desc) are
// obviously interesting to our backup.
for _, i := range descs {
interestingIDs[i.GetID()] = struct{}{}
if t := i.Table(hlc.Timestamp{}); t != nil {
for j := t.ReplacementOf.ID; j != sqlbase.InvalidID; j = priorIDs[j] {
interestingIDs[j] = struct{}{}
}
}
}
// We're also interested in any desc that belonged to a DB we're backing up.
// We'll start by looking at all descriptors as of the beginning of the
// interval and add to the set of IDs that we are interested any descriptor that
// belongs to one of the parents we care about.
interestingParents := make(map[sqlbase.ID]struct{}, len(expanded))
for _, i := range expanded {
interestingParents[i] = struct{}{}
}
if !startTime.IsEmpty() {
starting, err := loadAllDescs(ctx, db, startTime)
if err != nil {
return nil, err
}
for _, i := range starting {
if table := i.Table(hlc.Timestamp{}); table != nil {
// We need to add to interestingIDs so that if we later see a delete for
// this ID we still know it is interesting to us, even though we will not
// have a parentID at that point (since the delete is a nil desc).
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
}
}
if _, ok := interestingIDs[i.GetID()]; ok {
desc := i
// We inject a fake "revision" that captures the starting state for
// matched descriptor, to allow restoring to times before its first rev
// actually inside the window. This likely ends up duplicating the last
// version in the previous BACKUP descriptor, but avoids adding more
// complicated special-cases in RESTORE, so it only needs to look in a
// single BACKUP to restore to a particular time.
initial := BackupDescriptor_DescriptorRevision{Time: startTime, ID: i.GetID(), Desc: &desc}
interestingChanges = append(interestingChanges, initial)
}
}
}
for _, change := range allChanges {
// A change to an ID that we are interested in is obviously interesting --
// a change is also interesting if it is to a table that has a parent that
// we are interested and thereafter it also becomes an ID in which we are
// interested in changes (since, as mentioned above, to decide if deletes
// are interesting).
if _, ok := interestingIDs[change.ID]; ok {
interestingChanges = append(interestingChanges, change)
} else if change.Desc != nil {
if table := change.Desc.Table(hlc.Timestamp{}); table != nil {
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
interestingChanges = append(interestingChanges, change)
}
}
}
}
sort.Slice(interestingChanges, func(i, j int) bool {
return interestingChanges[i].Time.Less(interestingChanges[j].Time)
})
return interestingChanges, nil
}
// getAllDescChanges gets every sql descriptor change between start and end time
// returning its ID, content and the change time (with deletions represented as
// nil content).
func getAllDescChanges(
ctx context.Context,
db *client.DB,
startTime, endTime hlc.Timestamp,
priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupDescriptor_DescriptorRevision, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
allRevs, err := getAllRevisions(ctx, db, startKey, endKey, startTime, endTime)
if err != nil {
return nil, err
}
var res []BackupDescriptor_DescriptorRevision
for _, revs := range allRevs {
id, err := keys.DecodeDescMetadataID(revs.Key)
if err != nil {
return nil, err
}
for _, rev := range revs.Values {
r := BackupDescriptor_DescriptorRevision{ID: sqlbase.ID(id), Time: rev.Timestamp}
if len(rev.RawBytes) != 0 {
var desc sqlbase.Descriptor
if err := rev.GetProto(&desc); err != nil {
return nil, err
}
r.Desc = &desc
t := desc.Table(rev.Timestamp)
if t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {
priorIDs[t.ID] = t.ReplacementOf.ID
}
}
res = append(res, r)
}
}
return res, nil
}
func allSQLDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.Descriptor, error) {
startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))
endKey := startKey.PrefixEnd()
rows, err := txn.Scan(ctx, startKey, endKey, 0)
if err != nil {
return nil, err
}
sqlDescs := make([]sqlbase.Descriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&sqlDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal SQL descriptor", row.Key)
}
if row.Value != nil {
sqlDescs[i].Table(row.Value.Timestamp)
}
}
return sqlDescs, nil
}
func ensureInterleavesIncluded(tables []*sqlbase.TableDescriptor) error {
inBackup := make(map[sqlbase.ID]bool, len(tables))
for _, t := range tables {
inBackup[t.ID] = true
}
for _, table := range tables {
if err := table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error {
for _, a := range index.Interleave.Ancestors {
if !inBackup[a.TableID] {
return errors.Errorf(
"cannot backup table %q without interleave parent (ID %d)", table.Name, a.TableID,
)
}
}
for _, c := range index.InterleavedBy {
if !inBackup[c.Table] {
return errors.Errorf(
"cannot backup table %q without interleave child table (ID %d)", table.Name, c.Table,
)
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
func allRangeDescriptors(ctx context.Context, txn *client.Txn) ([]roachpb.RangeDescriptor, error) {
rows, err := txn.Scan(ctx, keys.Meta2Prefix, keys.MetaMax, 0)
if err != nil {
return nil, errors.Wrapf(err,
"unable to scan range descriptors")
}
rangeDescs := make([]roachpb.RangeDescriptor, len(rows))
for i, row := range rows {
if err := row.ValueProto(&rangeDescs[i]); err != nil {
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal range descriptor", row.Key)
}
}
return rangeDescs, nil
}
type tableAndIndex struct {
tableID sqlbase.ID
indexID sqlbase.IndexID
}
// spansForAllTableIndexes returns non-overlapping spans for every index and
// table passed in. They would normally overlap if any of them are interleaved.
func spansForAllTableIndexes(
tables []*sqlbase.TableDescriptor, revs []BackupDescriptor_DescriptorRevision,
) []roachpb.Span {
added := make(map[tableAndIndex]bool, len(tables))
sstIntervalTree := interval.NewTree(interval.ExclusiveOverlapper)
for _, table := range tables {
for _, index := range table.AllNonDropIndexes() {
if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(index.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[tableAndIndex{tableID: table.ID, indexID: index.ID}] = true
}
}
// If there are desc revisions, ensure that we also add any index spans
// in them that we didn't already get above e.g. indexes or tables that are
// not in latest because they were dropped during the time window in question.
for _, rev := range revs {
if tbl := rev.Desc.Table(hlc.Timestamp{}); tbl != nil {
for _, idx := range tbl.AllNonDropIndexes() {
key := tableAndIndex{tableID: tbl.ID, indexID: idx.ID}
if !added[key] {
if err := sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(idx.ID)), false); err != nil {
panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
}
added[key] = true
}
}
}
}
var spans []roachpb.Span
_ = sstIntervalTree.Do(func(r interval.Interface) bool {
spans = append(spans, roachpb.Span{
Key: roachpb.Key(r.Range().Start),
EndKey: roachpb.Key(r.Range().End),
})
return false
})
return spans
}
// coveringFromSpans creates an interval.Covering with a fixed payload from a
// slice of roachpb.Spans.
func coveringFromSpans(spans []roachpb.Span, payload interface{}) covering.Covering {
var c covering.Covering
for _, span := range spans {
c = append(c, covering.Range{
Start: []byte(span.Key),
End: []byte(span.EndKey),
Payload: payload,
})
}
return c
}
// splitAndFilterSpans returns the spans that represent the set difference
// (includes - excludes) while also guaranteeing that each output span does not
// cross the endpoint of a RangeDescriptor in ranges.
func splitAndFilterSpans(
includes []roachpb.Span, excludes []roachpb.Span, ranges []roachpb.RangeDescriptor,
) []roachpb.Span {
type includeMarker struct{}
type excludeMarker struct{}
includeCovering := coveringFromSpans(includes, includeMarker{})
excludeCovering := coveringFromSpans(excludes, excludeMarker{})
var rangeCovering covering.Covering
for _, rangeDesc := range ranges {
rangeCovering = append(rangeCovering, covering.Range{
Start: []byte(rangeDesc.StartKey),
End: []byte(rangeDesc.EndKey),
})
}
splits := covering.OverlapCoveringMerge(
[]covering.Covering{includeCovering, excludeCovering, rangeCovering},
)
var out []roachpb.Span
for _, split := range splits {
include := false
exclude := false
for _, payload := range split.Payload.([]interface{}) {
switch payload.(type) {
case includeMarker:
include = true
case excludeMarker:
exclude = true
}
}
if include && !exclude {
out = append(out, roachpb.Span{
Key: roachpb.Key(split.Start),
EndKey: roachpb.Key(split.End),
})
}
}
return out
}
func optsToKVOptions(opts map[string]string) tree.KVOptions {
if len(opts) == 0 {
return nil
}
sortedOpts := make([]string, 0, len(opts))
for k := range opts {
sortedOpts = append(sortedOpts, k)
}
sort.Strings(sortedOpts)
kvopts := make(tree.KVOptions, 0, len(opts))
for _, k := range sortedOpts {
opt := tree.KVOption{Key: tree.Name(k)}
if v := opts[k]; v != "" {
opt.Value = tree.NewDString(v)
}
kvopts = append(kvopts, opt)
}
return kvopts
}
func backupJobDescription(
p sql.PlanHookState,
backup *tree.Backup,
to []string,
incrementalFrom []string,
opts map[string]string,
) (string, error) {
b := &tree.Backup{
AsOf: backup.AsOf,
Options: optsToKVOptions(opts),
Targets: backup.Targets,
}
for _, t := range to {
sanitizedTo, err := cloud.SanitizeExternalStorageURI(t)
if err != nil {
return "", err
}
b.To = append(b.To, tree.NewDString(sanitizedTo))
}
for _, from := range incrementalFrom {
sanitizedFrom, err := cloud.SanitizeExternalStorageURI(from)
if err != nil {
return "", err
}
b.IncrementalFrom = append(b.IncrementalFrom, tree.NewDString(sanitizedFrom))
}
ann := p.ExtendedEvalContext().Annotations
return tree.AsStringWithFQNames(b, ann), nil
}
// clusterNodeCount returns the approximate number of nodes in the cluster.
func clusterNodeCount(g *gossip.Gossip) int {
var nodes int
_ = g.IterateInfos(gossip.KeyNodeIDPrefix, func(_ string, _ gossip.Info) error {
nodes++
return nil
})
return nodes
}
// BackupFileDescriptors is an alias on which to implement sort's interface.
type BackupFileDescriptors []BackupDescriptor_File
func (r BackupFileDescriptors) Len() int { return len(r) }
func (r BackupFileDescriptors) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r BackupFileDescriptors) Less(i, j int) bool {
if cmp := bytes.Compare(r[i].Span.Key, r[j].Span.Key); cmp != 0 {
return cmp < 0
}
return bytes.Compare(r[i].Span.EndKey, r[j].Span.EndKey) < 0
}
func writeBackupDescriptor(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupDescriptor,
) error {
sort.Sort(BackupFileDescriptors(desc.Files))
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
// writeBackupPartitionDescriptor writes metadata (containing a locality KV and
// partial file listing) for a partitioned BACKUP to one of the stores in the
// backup.
func writeBackupPartitionDescriptor(
ctx context.Context,
exportStore cloud.ExternalStorage,
filename string,
desc *BackupPartitionDescriptor,
) error {
descBuf, err := protoutil.Marshal(desc)
if err != nil {
return err
}
return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
}
func loadAllDescs(
ctx context.Context, db *client.DB, asOf hlc.Timestamp,
) ([]sqlbase.Descriptor, error) {
var allDescs []sqlbase.Descriptor
if err := db.Txn(
ctx,
func(ctx context.Context, txn *client.Txn) error {
var err error
txn.SetFixedTimestamp(ctx, asOf)
allDescs, err = allSQLDescriptors(ctx, txn)
return err
}); err != nil {
return nil, err
}
return allDescs, nil
}
// ResolveTargetsToDescriptors performs name resolution on a set of targets and
// returns the resulting descriptors.
func ResolveTargetsToDescriptors(
ctx context.Context, p sql.PlanHookState, endTime hlc.Timestamp, targets tree.TargetList,
) ([]sqlbase.Descriptor, []sqlbase.ID, error) {
allDescs, err := loadAllDescs(ctx, p.ExecCfg().DB, endTime)
if err != nil {
return nil, nil, err
}
var matched descriptorsMatched
if matched, err = descriptorsMatchingTargets(ctx,
p.CurrentDatabase(), p.CurrentSearchPath(), allDescs, targets); err != nil {
return nil, nil, err
}
// Ensure interleaved tables appear after their parent. Since parents must be
// created before their children, simply sorting by ID accomplishes this.
sort.Slice(matched.descs, func(i, j int) bool { return matched.descs[i].GetID() < matched.descs[j].GetID() })
return matched.descs, matched.expandedDB, nil
}
type spanAndTime struct {
span roachpb.Span
start, end hlc.Timestamp
}
// backup exports a snapshot of every kv entry into ranged sstables.
//
// The output is an sstable per range with files in the following locations:
// - <dir>/<unique_int>.sst
// - <dir> is given by the user and may be cloud storage
// - Each file contains data for a key range that doesn't overlap with any other
// file.
func backup(
ctx context.Context,
db *client.DB,
gossip *gossip.Gossip,
settings *cluster.Settings,
defaultStore cloud.ExternalStorage,
storageByLocalityKV map[string]*roachpb.ExternalStorage,
job *jobs.Job,
backupDesc *BackupDescriptor,
checkpointDesc *BackupDescriptor,
resultsCh chan<- tree.Datums,
makeExternalStorage cloud.ExternalStorageFactory,
) (roachpb.BulkOpSummary, error) {
// TODO(dan): Figure out how permissions should work. #6713 is tracking this
// for grpc.
mu := struct {
syncutil.Mutex
files []BackupDescriptor_File
exported roachpb.BulkOpSummary
lastCheckpoint time.Time
}{}
var checkpointMu syncutil.Mutex
var ranges []roachpb.RangeDescriptor
if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error {
var err error
// TODO(benesch): limit the range descriptors we fetch to the ranges that
// are actually relevant in the backup to speed up small backups on large
// clusters.
ranges, err = allRangeDescriptors(ctx, txn)
return err
}); err != nil {
return mu.exported, err
}
var completedSpans, completedIntroducedSpans []roachpb.Span
if checkpointDesc != nil {
// TODO(benesch): verify these files, rather than accepting them as truth
// blindly.
// No concurrency yet, so these assignments are safe.
mu.files = checkpointDesc.Files
mu.exported = checkpointDesc.EntryCounts
for _, file := range checkpointDesc.Files {
if file.StartTime.IsEmpty() && !file.EndTime.IsEmpty() {
completedIntroducedSpans = append(completedIntroducedSpans, file.Span)
} else {
completedSpans = append(completedSpans, file.Span)
}
}
}
// Subtract out any completed spans and split the remaining spans into
// range-sized pieces so that we can use the number of completed requests as a
// rough measure of progress.
spans := splitAndFilterSpans(backupDesc.Spans, completedSpans, ranges)
introducedSpans := splitAndFilterSpans(backupDesc.IntroducedSpans, completedIntroducedSpans, ranges)
allSpans := make([]spanAndTime, 0, len(spans)+len(introducedSpans))
for _, s := range introducedSpans {
allSpans = append(allSpans, spanAndTime{span: s, start: hlc.Timestamp{}, end: backupDesc.StartTime})
}
for _, s := range spans {
allSpans = append(allSpans, spanAndTime{span: s, start: backupDesc.StartTime, end: backupDesc.EndTime})
}
// Sequential ranges may have clustered leaseholders, for example a
// geo-partitioned table likely has all the leaseholders for some contiguous
// span of the table (i.e. a partition) pinned to just the nodes in a region.
// In such cases, sending spans sequentially may under-utilize the rest of the
// cluster given that we have a limit on the number of spans we send out at
// a given time. Randomizing the order of spans should help ensure a more even
// distribution of work across the cluster regardless of how leaseholders may
// or may not be clustered.
rand.Shuffle(len(allSpans), func(i, j int) {
allSpans[i], allSpans[j] = allSpans[j], allSpans[i]
})
progressLogger := jobs.NewChunkProgressLogger(job, len(spans), job.FractionCompleted(), jobs.ProgressUpdateOnly)
// We're already limiting these on the server-side, but sending all the
// Export requests at once would fill up distsender/grpc/something and cause
// all sorts of badness (node liveness timeouts leading to mass leaseholder
// transfers, poor performance on SQL workloads, etc) as well as log spam
// about slow distsender requests. Rate limit them here, too.
//
// Each node limits the number of running Export & Import requests it serves
// to avoid overloading the network, so multiply that by the number of nodes
// in the cluster and use that as the number of outstanding Export requests
// for the rate limiting. This attempts to strike a balance between
// simplicity, not getting slow distsender log spam, and keeping the server
// side limiter full.
//
// TODO(dan): Make this limiting per node.
//
// TODO(dan): See if there's some better solution than rate-limiting #14798.
maxConcurrentExports := clusterNodeCount(gossip) * int(storage.ExportRequestsLimit.Get(&settings.SV)) * 10
exportsSem := make(chan struct{}, maxConcurrentExports)
g := ctxgroup.WithContext(ctx)
requestFinishedCh := make(chan struct{}, len(spans)) // enough buffer to never block
// Only start the progress logger if there are spans, otherwise this will
// block forever. This is needed for TestBackupRestoreResume which doesn't
// have any spans. Users should never hit this.
if len(spans) > 0 {
g.GoCtx(func(ctx context.Context) error {
return progressLogger.Loop(ctx, requestFinishedCh)
})
}
g.GoCtx(func(ctx context.Context) error {
for i := range allSpans {
{
select {
case exportsSem <- struct{}{}:
case <-ctx.Done():
// Break the for loop to avoid creating more work - the backup
// has failed because either the context has been canceled or an
// error has been returned. Either way, Wait() is guaranteed to
// return an error now.
return ctx.Err()
}
}
span := allSpans[i]
g.GoCtx(func(ctx context.Context) error {
defer func() { <-exportsSem }()
header := roachpb.Header{Timestamp: span.end}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeaderFromSpan(span.span),
Storage: defaultStore.Conf(),
StorageByLocalityKV: storageByLocalityKV,
StartTime: span.start,
EnableTimeBoundIteratorOptimization: useTBI.Get(&settings.SV),
MVCCFilter: roachpb.MVCCFilter(backupDesc.MVCCFilter),
}
rawRes, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return pErr.GoError()
}
res := rawRes.(*roachpb.ExportResponse)
mu.Lock()
if backupDesc.RevisionStartTime.Less(res.StartTime) {
backupDesc.RevisionStartTime = res.StartTime
}
for _, file := range res.Files {
f := BackupDescriptor_File{
Span: file.Span,
Path: file.Path,
Sha512: file.Sha512,
EntryCounts: file.Exported,
LocalityKV: file.LocalityKV,
}
if span.start != backupDesc.StartTime {
f.StartTime = span.start
f.EndTime = span.end
}
mu.files = append(mu.files, f)
mu.exported.Add(file.Exported)
}
var checkpointFiles BackupFileDescriptors
if timeutil.Since(mu.lastCheckpoint) > BackupCheckpointInterval {
// We optimistically assume the checkpoint will succeed to prevent
// multiple threads from attempting to checkpoint.
mu.lastCheckpoint = timeutil.Now()
checkpointFiles = append(checkpointFiles, mu.files...)
}
mu.Unlock()
requestFinishedCh <- struct{}{}
if checkpointFiles != nil {
checkpointMu.Lock()
backupDesc.Files = checkpointFiles
err := writeBackupDescriptor(
ctx, settings, defaultStore, BackupDescriptorCheckpointName, backupDesc,
)
checkpointMu.Unlock()
if err != nil {
log.Errorf(ctx, "unable to checkpoint backup descriptor: %+v", err)
}
}
return nil
})
}
return nil
})
if err := g.Wait(); err != nil {
return mu.exported, errors.Wrapf(err, "exporting %d ranges", errors.Safe(len(spans)))
}
// No more concurrency, so no need to acquire locks below.
backupDesc.Files = mu.files
backupDesc.EntryCounts = mu.exported
backupID := uuid.MakeV4()
backupDesc.ID = backupID
// Write additional partial descriptors to each node for partitioned backups.
if len(storageByLocalityKV) > 0 {
filesByLocalityKV := make(map[string][]BackupDescriptor_File)
for i := range mu.files {
file := &mu.files[i]
filesByLocalityKV[file.LocalityKV] = append(filesByLocalityKV[file.LocalityKV], *file)
}
nextPartitionedDescFilenameID := 1
for kv, conf := range storageByLocalityKV {
backupDesc.LocalityKVs = append(backupDesc.LocalityKVs, kv)
// Set a unique filename for each partition backup descriptor. The ID
// ensures uniqueness, and the kv string appended to the end is for
// readability.
filename := fmt.Sprintf("%s_%d_%s",
BackupPartitionDescriptorPrefix, nextPartitionedDescFilenameID, sanitizeLocalityKV(kv))
nextPartitionedDescFilenameID++
backupDesc.PartitionDescriptorFilenames = append(backupDesc.PartitionDescriptorFilenames, filename)
desc := BackupPartitionDescriptor{
LocalityKV: kv,
Files: filesByLocalityKV[kv],
BackupID: backupID,
}
if err := func() error {
store, err := makeExternalStorage(ctx, *conf)
if err != nil {
return err
}
defer store.Close()
return writeBackupPartitionDescriptor(ctx, store, filename, &desc)
}(); err != nil {
return mu.exported, err
}
}
}
if err := writeBackupDescriptor(ctx, settings, defaultStore, BackupDescriptorName, backupDesc); err != nil {
return mu.exported, err
}
return mu.exported, nil
}
// sanitizeLocalityKV returns a sanitized version of the input string where all
// characters that are not alphanumeric or -, =, or _ are replaced with _.
func sanitizeLocalityKV(kv string) string {
sanitizedKV := make([]byte, len(kv))
for i := 0; i < len(kv); i++ {
if (kv[i] >= 'a' && kv[i] <= 'z') ||
(kv[i] >= 'A' && kv[i] <= 'Z') ||
(kv[i] >= '0' && kv[i] <= '9') || kv[i] == '-' || kv[i] == '=' {
sanitizedKV[i] = kv[i]
} else {
sanitizedKV[i] = '_'
}
}
return string(sanitizedKV)
}
// VerifyUsableExportTarget ensures that the target location does not already
// contain a BACKUP or checkpoint and writes an empty checkpoint, both verifying
// that the location is writable and locking out accidental concurrent
// operations on that location if subsequently try this check. Callers must
// clean up the written checkpoint file (BackupDescriptorCheckpointName) only
// after writing to the backup file location (BackupDescriptorName).
func VerifyUsableExportTarget(
ctx context.Context,
settings *cluster.Settings,
exportStore cloud.ExternalStorage,
readable string,
) error {
if r, err := exportStore.ReadFile(ctx, BackupDescriptorName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupDescriptorName)
}
if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file",
readable, BackupManifestName)
}
if r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
"%s already contains a %s file (is another operation already in progress?)",
readable, BackupDescriptorCheckpointName)
}
if err := writeBackupDescriptor(
ctx, settings, exportStore, BackupDescriptorCheckpointName, &BackupDescriptor{},
); err != nil {
return errors.Wrapf(err, "cannot write to %s", readable)
}
return nil
}
// backupPlanHook implements PlanHookFn.
func backupPlanHook(
_ context.Context, stmt tree.Statement, p sql.PlanHookState,
) (sql.PlanHookRowFn, sqlbase.ResultColumns, []sql.PlanNode, bool, error) {
backupStmt, ok := stmt.(*tree.Backup)
if !ok {
return nil, nil, nil, false, nil
}
toFn, err := p.TypeAsStringArray(tree.Exprs(backupStmt.To), "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
incrementalFromFn, err := p.TypeAsStringArray(backupStmt.IncrementalFrom, "BACKUP")
if err != nil {
return nil, nil, nil, false, err
}
optsFn, err := p.TypeAsStringOpts(backupStmt.Options, backupOptionExpectValues)
if err != nil {
return nil, nil, nil, false, err
}
header := sqlbase.ResultColumns{
{Name: "job_id", Typ: types.Int},
{Name: "status", Typ: types.String},
{Name: "fraction_completed", Typ: types.Float},
{Name: "rows", Typ: types.Int},
{Name: "index_entries", Typ: types.Int},
{Name: "system_records", Typ: types.Int},
{Name: "bytes", Typ: types.Int},
}
fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error {
// TODO(dan): Move this span into sql.
ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag())
defer tracing.FinishSpan(span)
if err := utilccl.CheckEnterpriseEnabled(
p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "BACKUP",
); err != nil {
return err
}
if err := p.RequireAdminRole(ctx, "BACKUP"); err != nil {
return err
}
if !p.ExtendedEvalContext().TxnImplicit {
return errors.Errorf("BACKUP cannot be used inside a transaction")
}
to, err := toFn()
if err != nil {
return err
}
if len(to) > 1 &&
!cluster.Version.IsActive(ctx, p.ExecCfg().Settings, cluster.VersionPartitionedBackup) {
return errors.Errorf("partitioned backups can only be made on a cluster that has been fully upgraded to version 19.2")
}
incrementalFrom, err := incrementalFromFn()
if err != nil {
return err
}
endTime := p.ExecCfg().Clock.Now()
if backupStmt.AsOf.Expr != nil {
var err error
if endTime, err = p.EvalAsOfTimestamp(backupStmt.AsOf); err != nil {
return err
}
}
defaultURI, urisByLocalityKV, err := getURIsByLocalityKV(to)
if err != nil {
return nil
}
defaultStore, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, defaultURI)
if err != nil {
return err
}
defer defaultStore.Close()
opts, err := optsFn()
if err != nil {
return err
}
mvccFilter := MVCCFilter_Latest
if _, ok := opts[backupOptRevisionHistory]; ok {
mvccFilter = MVCCFilter_All
}
targetDescs, completeDBs, err := ResolveTargetsToDescriptors(ctx, p, endTime, backupStmt.Targets)
if err != nil {
return err
}
statsCache := p.ExecCfg().TableStatsCache
tableStatistics := make([]*stats.TableStatisticProto, 0)
var tables []*sqlbase.TableDescriptor
for _, desc := range targetDescs {
if dbDesc := desc.GetDatabase(); dbDesc != nil {
if err := p.CheckPrivilege(ctx, dbDesc, privilege.SELECT); err != nil {
return err
}
}
if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil {
return err
}
tables = append(tables, tableDesc)
// Collect all the table stats for this table.
tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc.GetID())
if err != nil {
return err
}
for i := range tableStatisticsAcc {
tableStatistics = append(tableStatistics, &tableStatisticsAcc[i].TableStatisticProto)
}
}
}
if err := ensureInterleavesIncluded(tables); err != nil {
return err
}
var prevBackups []BackupDescriptor
if len(incrementalFrom) > 0 {
clusterID := p.ExecCfg().ClusterID()
prevBackups = make([]BackupDescriptor, len(incrementalFrom))
for i, uri := range incrementalFrom {
// TODO(lucy): We may want to upgrade the table descs to the newer
// foreign key representation here, in case there are backups from an
// older cluster. Keeping the descriptors as they are works for now
// since all we need to do is get the past backups' table/index spans,
// but it will be safer for future code to avoid having older-style
// descriptors around.
desc, err := ReadBackupDescriptorFromURI(ctx, uri, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI)
if err != nil {
return errors.Wrapf(err, "failed to read backup from %q", uri)
}
// IDs are how we identify tables, and those are only meaningful in the
// context of their own cluster, so we need to ensure we only allow
// incremental previous backups that we created.
if !desc.ClusterID.Equal(clusterID) {
return errors.Newf("previous BACKUP %q belongs to cluster %s", uri, desc.ClusterID.String())
}
prevBackups[i] = desc
}
}
var startTime hlc.Timestamp
var newSpans roachpb.Spans
if len(prevBackups) > 0 {
startTime = prevBackups[len(prevBackups)-1].EndTime
}
var priorIDs map[sqlbase.ID]sqlbase.ID
var revs []BackupDescriptor_DescriptorRevision
if mvccFilter == MVCCFilter_All {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
revs, err = getRelevantDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, targetDescs, completeDBs, priorIDs)
if err != nil {
return err
}
}
spans := spansForAllTableIndexes(tables, revs)
if len(prevBackups) > 0 {
tablesInPrev := make(map[sqlbase.ID]struct{})
dbsInPrev := make(map[sqlbase.ID]struct{})
for _, d := range prevBackups[len(prevBackups)-1].Descriptors {
if t := d.Table(hlc.Timestamp{}); t != nil {
tablesInPrev[t.ID] = struct{}{}
}
}
for _, d := range prevBackups[len(prevBackups)-1].CompleteDbs {
dbsInPrev[d] = struct{}{}
}
for _, d := range targetDescs {
if t := d.Table(hlc.Timestamp{}); t != nil {
// If we're trying to use a previous backup for this table, ideally it
// actually contains this table.
if _, ok := tablesInPrev[t.ID]; ok {
continue
}
// This table isn't in the previous backup... maybe was added to a
// DB that the previous backup captured?
if _, ok := dbsInPrev[t.ParentID]; ok {
continue
}
// Maybe this table is missing from the previous backup because it was
// truncated?
if t.ReplacementOf.ID != sqlbase.InvalidID {
// Check if we need to lazy-load the priorIDs (i.e. if this is the first
// truncate we've encountered in non-MVCC backup).
if priorIDs == nil {
priorIDs = make(map[sqlbase.ID]sqlbase.ID)
_, err := getAllDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, priorIDs)
if err != nil {
return err
}
}
found := false
for was := t.ReplacementOf.ID; was != sqlbase.InvalidID && !found; was = priorIDs[was] {
_, found = tablesInPrev[was]
}
if found {
continue
}
}
return errors.Errorf("previous backup does not contain table %q", t.Name)
}
}
var err error
_, coveredTime, err := makeImportSpans(
spans,
prevBackups,
nil, /*backupLocalityInfo*/
keys.MinKey,
func(span covering.Range, start, end hlc.Timestamp) error {
if (start == hlc.Timestamp{}) {
newSpans = append(newSpans, roachpb.Span{Key: span.Start, EndKey: span.End})
return nil
}
return errOnMissingRange(span, start, end)
},
)
if err != nil {
return errors.Wrapf(err, "invalid previous backups (a new full backup may be required if a table has been created, dropped or truncated)")
}
if coveredTime != startTime {
return errors.Wrapf(err, "expected previous backups to cover until time %v, got %v", startTime, coveredTime)
}
}
// if CompleteDbs is lost by a 1.x node, FormatDescriptorTrackingVersion
// means that a 2.0 node will disallow `RESTORE DATABASE foo`, but `RESTORE
// foo.table1, foo.table2...` will still work. MVCCFilter would be
// mis-handled, but is disallowed above. IntroducedSpans may also be lost by
// a 1.x node, meaning that if 1.1 nodes may resume a backup, the limitation
// of requiring full backups after schema changes remains.
backupDesc := BackupDescriptor{
StartTime: startTime,
EndTime: endTime,
MVCCFilter: mvccFilter,
Descriptors: targetDescs,
DescriptorChanges: revs,
CompleteDbs: completeDBs,
Spans: spans,
IntroducedSpans: newSpans,
FormatVersion: BackupFormatDescriptorTrackingVersion,
BuildInfo: build.GetInfo(),
NodeID: p.ExecCfg().NodeID.Get(),
ClusterID: p.ExecCfg().ClusterID(),
Statistics: tableStatistics,
}
// Sanity check: re-run the validation that RESTORE will do, but this time
// including this backup, to ensure that the this backup plus any previous
// backups does cover the interval expected.
if _, coveredEnd, err := makeImportSpans(
spans,
append(prevBackups, backupDesc),
nil, /*backupLocalityInfo*/
keys.MinKey,
errOnMissingRange,
); err != nil {
return err
} else if coveredEnd != endTime {
return errors.Errorf("expected backup (along with any previous backups) to cover to %v, not %v", endTime, coveredEnd)
}
descBytes, err := protoutil.Marshal(&backupDesc)
if err != nil {
return err
}
description, err := backupJobDescription(p, backupStmt, to, incrementalFrom, opts)
if err != nil {
return err
}
// TODO (lucy): For partitioned backups, also add verification for other
// stores we are writing to in addition to the default.
if err := VerifyUsableExportTarget(ctx, p.ExecCfg().Settings, defaultStore, defaultURI); err != nil {
return err
}
_, errCh, err := p.ExecCfg().JobRegistry.CreateAndStartJob(ctx, resultsCh, jobs.Record{
Description: description,
Username: p.User(),
DescriptorIDs: func() (sqlDescIDs []sqlbase.ID) {
for _, sqlDesc := range backupDesc.Descriptors {
sqlDescIDs = append(sqlDescIDs, sqlDesc.GetID())
}
return sqlDescIDs
}(),
Details: jobspb.BackupDetails{
StartTime: startTime,
EndTime: endTime,
URI: defaultURI,
URIsByLocalityKV: urisByLocalityKV,
BackupDescriptor: descBytes,
},
Progress: jobspb.BackupProgress{},
})
if err != nil {
return err
}
return <-errCh
}
return fn, header, nil, false, nil
}
type backupResumer struct {
job *jobs.Job
settings *cluster.Settings
res roachpb.BulkOpSummary
makeExternalStorage cloud.ExternalStorageFactory
}
// Resume is part of the jobs.Resumer interface.
func (b *backupResumer) Resume(
ctx context.Context, phs interface{}, resultsCh chan<- tree.Datums,
) error {
details := b.job.Details().(jobspb.BackupDetails)
p := phs.(sql.PlanHookState)
b.makeExternalStorage = p.ExecCfg().DistSQLSrv.ExternalStorage
if len(details.BackupDescriptor) == 0 {
return errors.Newf("missing backup descriptor; cannot resume a backup from an older version")
}
var backupDesc BackupDescriptor
if err := protoutil.Unmarshal(details.BackupDescriptor, &backupDesc); err != nil {
return pgerror.Wrapf(err, pgcode.DataCorrupted,
"unmarshal backup descriptor")
}
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
defaultConf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return errors.Wrapf(err, "export configuration")
}
defaultStore, err := b.makeExternalStorage(ctx, defaultConf)
if err != nil {
return errors.Wrapf(err, "make storage")
}
storageByLocalityKV := make(map[string]*roachpb.ExternalStorage)
for kv, uri := range details.URIsByLocalityKV {
conf, err := cloud.ExternalStorageConfFromURI(uri)
if err != nil {
return err
}
storageByLocalityKV[kv] = &conf
}
var checkpointDesc *BackupDescriptor
// We don't read the table descriptors from the backup descriptor, but
// they could be using either the new or the old foreign key
// representations. We should just preserve whatever representation the
// table descriptors were using and leave them alone.
if desc, err := readBackupDescriptor(ctx, defaultStore, BackupDescriptorCheckpointName); err == nil {
// If the checkpoint is from a different cluster, it's meaningless to us.
// More likely though are dummy/lock-out checkpoints with no ClusterID.
if desc.ClusterID.Equal(p.ExecCfg().ClusterID()) {
checkpointDesc = &desc
}
} else {
// TODO(benesch): distinguish between a missing checkpoint, which simply
// indicates the prior backup attempt made no progress, and a corrupted
// checkpoint, which is more troubling. Sadly, storageccl doesn't provide a
// "not found" error that's consistent across all ExternalStorage
// implementations.
log.Warningf(ctx, "unable to load backup checkpoint while resuming job %d: %v", *b.job.ID(), err)
}
res, err := backup(
ctx,
p.ExecCfg().DB,
p.ExecCfg().Gossip,
p.ExecCfg().Settings,
defaultStore,
storageByLocalityKV,
b.job,
&backupDesc,
checkpointDesc,
resultsCh,
b.makeExternalStorage,
)
b.res = res
return err
}
// OnFailOrCancel is part of the jobs.Resumer interface.
func (b *backupResumer) OnFailOrCancel(context.Context, *client.Txn) error {
return nil
}
// OnSuccess is part of the jobs.Resumer interface.
func (b *backupResumer) OnSuccess(context.Context, *client.Txn) error { return nil }
// OnTerminal is part of the jobs.Resumer interface.
func (b *backupResumer) OnTerminal(
ctx context.Context, status jobs.Status, resultsCh chan<- tree.Datums,
) {
// Attempt to delete BACKUP-CHECKPOINT.
if err := func() error {
details := b.job.Details().(jobspb.BackupDetails)
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
conf, err := cloud.ExternalStorageConfFromURI(details.URI)
if err != nil {
return err
}
exportStore, err := b.makeExternalStorage(ctx, conf)
if err != nil {
return err
}
return exportStore.Delete(ctx, BackupDescriptorCheckpointName)
}(); err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor: %+v", err)
}
if status == jobs.StatusSucceeded {
// TODO(benesch): emit periodic progress updates.
// TODO(mjibson): if a restore was resumed, then these counts will only have
// the current coordinator's counts.
resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(*b.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(b.res.Rows)),
tree.NewDInt(tree.DInt(b.res.IndexEntries)),
tree.NewDInt(tree.DInt(b.res.SystemRecords)),
tree.NewDInt(tree.DInt(b.res.DataSize)),
}
}
}
type versionedValues struct {
Key roachpb.Key
Values []roachpb.Value
}
// getAllRevisions scans all keys between startKey and endKey getting all
// revisions between startTime and endTime.
// TODO(dt): if/when client gets a ScanRevisionsRequest or similar, use that.
func getAllRevisions(
ctx context.Context,
db *client.DB,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
) ([]versionedValues, error) {
// TODO(dt): version check.
header := roachpb.Header{Timestamp: endTime}
req := &roachpb.ExportRequest{
RequestHeader: roachpb.RequestHeader{Key: startKey, EndKey: endKey},
StartTime: startTime,
MVCCFilter: roachpb.MVCCFilter_All,
ReturnSST: true,
OmitChecksum: true,
}
resp, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req)
if pErr != nil {
return nil, pErr.GoError()
}
var res []versionedValues
for _, file := range resp.(*roachpb.ExportResponse).Files {
sst := engine.MakeRocksDBSstFileReader()
defer sst.Close()
if err := sst.IngestExternalFile(file.SST); err != nil {
return nil, err
}
if err := sst.Iterate(startKey, endKey, func(kv engine.MVCCKeyValue) (bool, error) {
if len(res) == 0 || !res[len(res)-1].Key.Equal(kv.Key.Key) {
res = append(res, versionedValues{Key: kv.Key.Key})
}
res[len(res)-1].Values = append(res[len(res)-1].Values, roachpb.Value{Timestamp: kv.Key.Timestamp, RawBytes: kv.Value})
return false, nil
}); err != nil {
return nil, err
}
}
return res, nil
}
var _ jobs.Resumer = &backupResumer{}
func init() {
sql.AddPlanHook(backupPlanHook)
jobs.RegisterConstructor(
jobspb.TypeBackup,
func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
return &backupResumer{
job: job,
settings: settings,
}
},
)
}
// getURIsByLocalityKV takes a slice of URIs for a single (possibly partitioned)
// backup, and returns the default backup destination URI and a map of all other
// URIs by locality KV. The URIs in the result do not include the
// COCKROACH_LOCALITY parameter.
func getURIsByLocalityKV(to []string) (string, map[string]string, error) {
localityAndBaseURI := func(uri string) (string, string, error) {
parsedURI, err := url.Parse(uri)
if err != nil {
return "", "", err
}
q := parsedURI.Query()
localityKV := q.Get(localityURLParam)
// Remove the backup locality parameter.
q.Del(localityURLParam)
parsedURI.RawQuery = q.Encode()
baseURI := parsedURI.String()
return localityKV, baseURI, nil
}
urisByLocalityKV := make(map[string]string)
if len(to) == 1 {
localityKV, baseURI, err := localityAndBaseURI(to[0])
if err != nil {
return "", nil, err
}
if localityKV != "" && localityKV != defaultLocalityValue {
return "", nil, errors.Errorf("%s %s is invalid for a single BACKUP location",
localityURLParam, localityKV)
}
return baseURI, urisByLocalityKV, nil
}
var defaultURI string
for _, uri := range to {
localityKV, baseURI, err := localityAndBaseURI(uri)
if err != nil {
return "", nil, err
}
if localityKV == "" {
return "", nil, errors.Errorf(
"multiple URLs are provided for partitioned BACKUP, but %s is not specified",
localityURLParam,
)
}
if localityKV == defaultLocalityValue {
if defaultURI != "" {
return "", nil, errors.Errorf("multiple default URLs provided for partition backup")
}
defaultURI = baseURI
} else {
kv := roachpb.Tier{}
if err := kv.FromString(localityKV); err != nil {
return "", nil, errors.Wrap(err, "failed to parse backup locality")
}
if _, ok := urisByLocalityKV[localityKV]; ok {
return "", nil, errors.Errorf("duplicate URIs for locality %s", localityKV)
}
urisByLocalityKV[localityKV] = baseURI
}
}
if defaultURI == "" {
return "", nil, errors.Errorf("no default URL provided for partitioned backup")
}
return defaultURI, urisByLocalityKV, nil
}
// maybeUpgradeTableDescsInBackupDescriptors updates the backup descriptors'
// table descriptors to use the newer 19.2-style foreign key representation,
// if they are not already upgraded. This requires resolving cross-table FK
// references, which is done by looking up all table descriptors across all
// backup descriptors provided. if skipFKsWithNoMatchingTable is set, FKs whose
// "other" table is missing from the set provided are omitted during the
// upgrade, instead of causing an error to be returned.
func maybeUpgradeTableDescsInBackupDescriptors(
ctx context.Context, backupDescs []BackupDescriptor, skipFKsWithNoMatchingTable bool,
) error {
protoGetter := sqlbase.MapProtoGetter{
Protos: make(map[interface{}]protoutil.Message),
}
// Populate the protoGetter with all table descriptors in all backup
// descriptors so that they can be looked up.
for _, backupDesc := range backupDescs {
for _, desc := range backupDesc.Descriptors {
if table := desc.Table(hlc.Timestamp{}); table != nil {
protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(table.ID))] =
sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor))
}
}
}
for i := range backupDescs {
backupDesc := &backupDescs[i]
for j := range backupDesc.Descriptors {
if table := backupDesc.Descriptors[j].Table(hlc.Timestamp{}); table != nil {
if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, skipFKsWithNoMatchingTable); err != nil {
return err
}
// TODO(lucy): Is this necessary?
backupDesc.Descriptors[j] = *sqlbase.WrapDescriptor(table)
}
}
}
return nil
}
| pkg/ccl/backupccl/backup.go | 1 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.0026305504143238068,
0.0002039174287347123,
0.00016061287897173315,
0.00016835406131576747,
0.00022668942983727902
] |
{
"id": 3,
"code_window": [
"\tInvalidFunctionDefinition = \"42P13\"\n",
"\tInvalidPreparedStatementDefinition = \"42P14\"\n",
"\tInvalidSchemaDefinition = \"42P15\"\n",
"\tInvalidTableDefinition = \"42P16\"\n",
"\tInvalidObjectDefinition = \"42P17\"\n",
"\t// Class 44 - WITH CHECK OPTION Violation\n",
"\tWithCheckOptionViolation = \"44000\"\n",
"\t// Class 53 - Insufficient Resources\n",
"\tInsufficientResources = \"53000\"\n",
"\tDiskFull = \"53100\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tFileAlreadyExists = \"42C01\"\n"
],
"file_path": "pkg/sql/pgwire/pgcode/codes.go",
"type": "add",
"edit_start_line_idx": 224
} | # LogicTest: 5node-dist 5node-dist-metadata
# Test that the distSQL MergeJoiner follows SQL NULL semantics for ON predicate
# equivilance. The use of sorts here force the planning of merge join.
statement ok
CREATE TABLE distsql_mj_test (k INT, v INT)
statement ok
INSERT INTO distsql_mj_test VALUES (0, NULL), (0, 1), (2, 4), (NULL, 4)
# If SQL NULL semantics are not followed, NULL = NULL is truthy. This makes the rows with NULL also appear in the inner join.
query IIII rowsort
SELECT l.k, l.v, r.k, r.v FROM (SELECT * FROM distsql_mj_test ORDER BY k, v) l INNER JOIN (SELECT * FROM distsql_mj_test ORDER BY k, v) r ON l.k = r.k AND l.v = r.v
----
0 1 0 1
2 4 2 4
statement ok
DELETE FROM distsql_mj_test WHERE TRUE;
statement ok
INSERT INTO distsql_mj_test VALUES (0, NULL), (1, NULL), (2, NULL)
# We should not have any results for values with NULLs
query IIII rowsort
SELECT l.k, l.v, r.k, r.v FROM (SELECT * FROM distsql_mj_test ORDER BY k, v) l INNER JOIN (SELECT * FROM distsql_mj_test ORDER BY k, v) r ON l.k = r.k AND l.v = r.v
----
statement ok
DELETE FROM distsql_mj_test WHERE TRUE;
statement ok
INSERT INTO distsql_mj_test VALUES (NULL)
# We shouldn't expect a row of (NULL, NULL), otherwise NULL = NULL was joined.
query II rowsort
SELECT l.k, r.k FROM (SELECT * FROM distsql_mj_test ORDER BY k) l INNER JOIN (SELECT * FROM distsql_mj_test ORDER BY k) r ON l.k = r.k
----
# Regression test for #23001.
statement ok
CREATE TABLE tab0(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER);
statement ok
INSERT INTO tab0 VALUES(0,1,2);
statement ok
CREATE INDEX on tab0 (a);
query III
SELECT pk, a, b FROM tab0 WHERE a < 10 AND b = 2 ORDER BY a DESC, pk;
----
0 1 2
query T
SELECT feature_name FROM crdb_internal.feature_usage WHERE feature_name='sql.exec.query.is-distributed' AND usage_count > 0
----
sql.exec.query.is-distributed
| pkg/sql/logictest/testdata/logic_test/distsql_join | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00019925340893678367,
0.00017119520634878427,
0.0001636785309528932,
0.00016744538152124733,
0.000011605166946537793
] |
{
"id": 3,
"code_window": [
"\tInvalidFunctionDefinition = \"42P13\"\n",
"\tInvalidPreparedStatementDefinition = \"42P14\"\n",
"\tInvalidSchemaDefinition = \"42P15\"\n",
"\tInvalidTableDefinition = \"42P16\"\n",
"\tInvalidObjectDefinition = \"42P17\"\n",
"\t// Class 44 - WITH CHECK OPTION Violation\n",
"\tWithCheckOptionViolation = \"44000\"\n",
"\t// Class 53 - Insufficient Resources\n",
"\tInsufficientResources = \"53000\"\n",
"\tDiskFull = \"53100\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tFileAlreadyExists = \"42C01\"\n"
],
"file_path": "pkg/sql/pgwire/pgcode/codes.go",
"type": "add",
"edit_start_line_idx": 224
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package base_test
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)
func TestNodeIDContainer(t *testing.T) {
defer leaktest.AfterTest(t)()
n := &base.NodeIDContainer{}
if val := n.Get(); val != 0 {
t.Errorf("initial value should be 0, not %d", val)
}
if str := n.String(); str != "?" {
t.Errorf("initial string should be ?, not %s", str)
}
for i := 0; i < 2; i++ {
n.Set(context.TODO(), 5)
if val := n.Get(); val != 5 {
t.Errorf("value should be 5, not %d", val)
}
if str := n.String(); str != "5" {
t.Errorf("string should be 5, not %s", str)
}
}
n.Reset(6)
if val := n.Get(); val != 6 {
t.Errorf("value should be 6, not %d", val)
}
if str := n.String(); str != "6" {
t.Errorf("string should be 6, not %s", str)
}
}
| pkg/base/node_id_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00017814419697970152,
0.0001719400897854939,
0.00016383390175178647,
0.00017442405805923045,
0.000005032490662415512
] |
{
"id": 3,
"code_window": [
"\tInvalidFunctionDefinition = \"42P13\"\n",
"\tInvalidPreparedStatementDefinition = \"42P14\"\n",
"\tInvalidSchemaDefinition = \"42P15\"\n",
"\tInvalidTableDefinition = \"42P16\"\n",
"\tInvalidObjectDefinition = \"42P17\"\n",
"\t// Class 44 - WITH CHECK OPTION Violation\n",
"\tWithCheckOptionViolation = \"44000\"\n",
"\t// Class 53 - Insufficient Resources\n",
"\tInsufficientResources = \"53000\"\n",
"\tDiskFull = \"53100\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tFileAlreadyExists = \"42C01\"\n"
],
"file_path": "pkg/sql/pgwire/pgcode/codes.go",
"type": "add",
"edit_start_line_idx": 224
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// +build slow
package main
func init() {
slow = true
}
| pkg/cmd/publish-artifacts/slow_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/431d18dfb53cdd7d53befeb15dc30dcc5d01a531 | [
0.00017807763651944697,
0.00017023092368617654,
0.00016238422540482134,
0.00017023092368617654,
0.000007846705557312816
] |
{
"id": 0,
"code_window": [
"\n",
"\treturn c.OSCommand.RunCommand(\"git apply %s %s\", flagStr, c.OSCommand.Quote(filepath))\n",
"}\n",
"\n",
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string) error {\n",
"\treturn c.OSCommand.RunCommand(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"}\n",
"\n",
"func (c *GitCommand) RunSkipEditorCommand(command string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string, promptUserForCredential func(string) string) error {\n",
"\tcommand := fmt.Sprintf(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"\treturn c.OSCommand.DetectUnamePass(command, promptUserForCredential)\n"
],
"file_path": "pkg/commands/git.go",
"type": "replace",
"edit_start_line_idx": 793
} | package gui
import (
"fmt"
"strings"
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands"
"github.com/jesseduffield/lazygit/pkg/gui/presentation"
"github.com/jesseduffield/lazygit/pkg/utils"
)
// list panel functions
func (gui *Gui) getSelectedBranch() *commands.Branch {
selectedLine := gui.State.Panels.Branches.SelectedLine
if selectedLine == -1 {
return nil
}
return gui.State.Branches[selectedLine]
}
// may want to standardise how these select methods work
func (gui *Gui) handleBranchSelect(g *gocui.Gui, v *gocui.View) error {
if gui.popupPanelFocused() {
return nil
}
gui.State.SplitMainPanel = false
if _, err := gui.g.SetCurrentView(v.Name()); err != nil {
return err
}
gui.getMainView().Title = "Log"
// This really shouldn't happen: there should always be a master branch
if len(gui.State.Branches) == 0 {
return gui.newStringTask("main", gui.Tr.SLocalize("NoBranchesThisRepo"))
}
branch := gui.getSelectedBranch()
v.FocusPoint(0, gui.State.Panels.Branches.SelectedLine)
if gui.inDiffMode() {
return gui.renderDiff()
}
cmd := gui.OSCommand.ExecutableFromString(
gui.GitCommand.GetBranchGraphCmdStr(branch.Name),
)
if err := gui.newCmdTask("main", cmd); err != nil {
gui.Log.Error(err)
}
return nil
}
// gui.refreshStatus is called at the end of this because that's when we can
// be sure there is a state.Branches array to pick the current branch from
func (gui *Gui) refreshBranches() {
reflogCommits := gui.State.FilteredReflogCommits
if gui.inFilterMode() {
// in filter mode we filter our reflog commits to just those containing the path
// however we need all the reflog entries to populate the recencies of our branches
// which allows us to order them correctly. So if we're filtering we'll just
// manually load all the reflog commits here
var err error
reflogCommits, _, err = gui.GitCommand.GetReflogCommits(nil, "")
if err != nil {
gui.Log.Error(err)
}
}
builder, err := commands.NewBranchListBuilder(gui.Log, gui.GitCommand, reflogCommits)
if err != nil {
_ = gui.surfaceError(err)
}
gui.State.Branches = builder.Build()
// TODO: if we're in the remotes view and we've just deleted a remote we need to refresh accordingly
if gui.getBranchesView().Context == "local-branches" {
_ = gui.renderLocalBranchesWithSelection()
}
gui.refreshStatus()
}
func (gui *Gui) renderLocalBranchesWithSelection() error {
branchesView := gui.getBranchesView()
gui.refreshSelectedLine(&gui.State.Panels.Branches.SelectedLine, len(gui.State.Branches))
displayStrings := presentation.GetBranchListDisplayStrings(gui.State.Branches, gui.State.ScreenMode != SCREEN_NORMAL, gui.State.Diff.Ref)
gui.renderDisplayStrings(branchesView, displayStrings)
if gui.g.CurrentView() == branchesView {
if err := gui.handleBranchSelect(gui.g, branchesView); err != nil {
return gui.surfaceError(err)
}
}
return nil
}
// specific functions
func (gui *Gui) handleBranchPress(g *gocui.Gui, v *gocui.View) error {
if gui.State.Panels.Branches.SelectedLine == -1 {
return nil
}
if gui.State.Panels.Branches.SelectedLine == 0 {
return gui.createErrorPanel(gui.Tr.SLocalize("AlreadyCheckedOutBranch"))
}
branch := gui.getSelectedBranch()
return gui.handleCheckoutRef(branch.Name, handleCheckoutRefOptions{})
}
func (gui *Gui) handleCreatePullRequestPress(g *gocui.Gui, v *gocui.View) error {
pullRequest := commands.NewPullRequest(gui.GitCommand)
branch := gui.getSelectedBranch()
if err := pullRequest.Create(branch); err != nil {
return gui.surfaceError(err)
}
return nil
}
func (gui *Gui) handleGitFetch(g *gocui.Gui, v *gocui.View) error {
if err := gui.createLoaderPanel(gui.g, v, gui.Tr.SLocalize("FetchWait")); err != nil {
return err
}
go func() {
err := gui.fetch(true)
gui.handleCredentialsPopup(err)
_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}()
return nil
}
func (gui *Gui) handleForceCheckout(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
message := gui.Tr.SLocalize("SureForceCheckout")
title := gui.Tr.SLocalize("ForceCheckoutBranch")
return gui.createConfirmationPanel(g, v, true, title, message, func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.Checkout(branch.Name, commands.CheckoutOptions{Force: true}); err != nil {
_ = gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}, nil)
}
type handleCheckoutRefOptions struct {
WaitingStatus string
EnvVars []string
onRefNotFound func(ref string) error
}
func (gui *Gui) handleCheckoutRef(ref string, options handleCheckoutRefOptions) error {
waitingStatus := options.WaitingStatus
if waitingStatus == "" {
waitingStatus = gui.Tr.SLocalize("CheckingOutStatus")
}
cmdOptions := commands.CheckoutOptions{Force: false, EnvVars: options.EnvVars}
onSuccess := func() {
gui.State.Panels.Branches.SelectedLine = 0
gui.State.Panels.Commits.SelectedLine = 0
// loading a heap of commits is slow so we limit them whenever doing a reset
gui.State.Panels.Commits.LimitCommits = true
}
return gui.WithWaitingStatus(waitingStatus, func() error {
if err := gui.GitCommand.Checkout(ref, cmdOptions); err != nil {
// note, this will only work for english-language git commands. If we force git to use english, and the error isn't this one, then the user will receive an english command they may not understand. I'm not sure what the best solution to this is. Running the command once in english and a second time in the native language is one option
if options.onRefNotFound != nil && strings.Contains(err.Error(), "did not match any file(s) known to git") {
return options.onRefNotFound(ref)
}
if strings.Contains(err.Error(), "Please commit your changes or stash them before you switch branch") {
// offer to autostash changes
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("AutoStashTitle"), gui.Tr.SLocalize("AutoStashPrompt"), func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.StashSave(gui.Tr.SLocalize("StashPrefix") + ref); err != nil {
return gui.surfaceError(err)
}
if err := gui.GitCommand.Checkout(ref, cmdOptions); err != nil {
return gui.surfaceError(err)
}
onSuccess()
if err := gui.GitCommand.StashDo(0, "pop"); err != nil {
if err := gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI}); err != nil {
return err
}
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI})
}, nil)
}
if err := gui.surfaceError(err); err != nil {
return err
}
}
onSuccess()
return gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI})
})
}
func (gui *Gui) handleCheckoutByName(g *gocui.Gui, v *gocui.View) error {
return gui.createPromptPanel(g, v, gui.Tr.SLocalize("BranchName")+":", "", func(g *gocui.Gui, v *gocui.View) error {
return gui.handleCheckoutRef(gui.trimmedContent(v), handleCheckoutRefOptions{
onRefNotFound: func(ref string) error {
return gui.createConfirmationPanel(gui.g, v, true, gui.Tr.SLocalize("BranchNotFoundTitle"), fmt.Sprintf("%s %s%s", gui.Tr.SLocalize("BranchNotFoundPrompt"), ref, "?"), func(_g *gocui.Gui, _v *gocui.View) error {
return gui.createNewBranchWithName(ref)
}, nil)
},
})
})
}
func (gui *Gui) getCheckedOutBranch() *commands.Branch {
if len(gui.State.Branches) == 0 {
return nil
}
return gui.State.Branches[0]
}
func (gui *Gui) handleNewBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
message := gui.Tr.TemplateLocalize(
"NewBranchNameBranchOff",
Teml{
"branchName": branch.Name,
},
)
return gui.createPromptPanel(g, v, message, "", func(g *gocui.Gui, v *gocui.View) error {
return gui.createNewBranchWithName(gui.trimmedContent(v))
})
}
func (gui *Gui) createNewBranchWithName(newBranchName string) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
if err := gui.GitCommand.NewBranch(newBranchName, branch.Name); err != nil {
return gui.surfaceError(err)
}
gui.State.Panels.Branches.SelectedLine = 0
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}
func (gui *Gui) handleDeleteBranch(g *gocui.Gui, v *gocui.View) error {
return gui.deleteBranch(g, v, false)
}
func (gui *Gui) deleteBranch(g *gocui.Gui, v *gocui.View, force bool) error {
selectedBranch := gui.getSelectedBranch()
if selectedBranch == nil {
return nil
}
checkedOutBranch := gui.getCheckedOutBranch()
if checkedOutBranch.Name == selectedBranch.Name {
return gui.createErrorPanel(gui.Tr.SLocalize("CantDeleteCheckOutBranch"))
}
return gui.deleteNamedBranch(g, v, selectedBranch, force)
}
func (gui *Gui) deleteNamedBranch(g *gocui.Gui, v *gocui.View, selectedBranch *commands.Branch, force bool) error {
title := gui.Tr.SLocalize("DeleteBranch")
var messageID string
if force {
messageID = "ForceDeleteBranchMessage"
} else {
messageID = "DeleteBranchMessage"
}
message := gui.Tr.TemplateLocalize(
messageID,
Teml{
"selectedBranchName": selectedBranch.Name,
},
)
return gui.createConfirmationPanel(g, v, true, title, message, func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.DeleteBranch(selectedBranch.Name, force); err != nil {
errMessage := err.Error()
if !force && strings.Contains(errMessage, "is not fully merged") {
return gui.deleteNamedBranch(g, v, selectedBranch, true)
}
return gui.createErrorPanel(errMessage)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})
}, nil)
}
func (gui *Gui) mergeBranchIntoCheckedOutBranch(branchName string) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
if gui.GitCommand.IsHeadDetached() {
return gui.createErrorPanel("Cannot merge branch in detached head state. You might have checked out a commit directly or a remote branch, in which case you should checkout the local branch you want to be on")
}
checkedOutBranchName := gui.getCheckedOutBranch().Name
if checkedOutBranchName == branchName {
return gui.createErrorPanel(gui.Tr.SLocalize("CantMergeBranchIntoItself"))
}
prompt := gui.Tr.TemplateLocalize(
"ConfirmMerge",
Teml{
"checkedOutBranch": checkedOutBranchName,
"selectedBranch": branchName,
},
)
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("MergingTitle"), prompt,
func(g *gocui.Gui, v *gocui.View) error {
err := gui.GitCommand.Merge(branchName, commands.MergeOpts{})
return gui.handleGenericMergeCommandResult(err)
}, nil)
}
func (gui *Gui) handleMerge(g *gocui.Gui, v *gocui.View) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
selectedBranchName := gui.getSelectedBranch().Name
return gui.mergeBranchIntoCheckedOutBranch(selectedBranchName)
}
func (gui *Gui) handleRebaseOntoLocalBranch(g *gocui.Gui, v *gocui.View) error {
selectedBranchName := gui.getSelectedBranch().Name
return gui.handleRebaseOntoBranch(selectedBranchName)
}
func (gui *Gui) handleRebaseOntoBranch(selectedBranchName string) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
checkedOutBranch := gui.getCheckedOutBranch().Name
if selectedBranchName == checkedOutBranch {
return gui.createErrorPanel(gui.Tr.SLocalize("CantRebaseOntoSelf"))
}
prompt := gui.Tr.TemplateLocalize(
"ConfirmRebase",
Teml{
"checkedOutBranch": checkedOutBranch,
"selectedBranch": selectedBranchName,
},
)
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("RebasingTitle"), prompt,
func(g *gocui.Gui, v *gocui.View) error {
err := gui.GitCommand.RebaseBranch(selectedBranchName)
return gui.handleGenericMergeCommandResult(err)
}, nil)
}
func (gui *Gui) handleFastForward(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
if branch.Pushables == "" {
return nil
}
if branch.Pushables == "?" {
return gui.createErrorPanel(gui.Tr.SLocalize("FwdNoUpstream"))
}
if branch.Pushables != "0" {
return gui.createErrorPanel(gui.Tr.SLocalize("FwdCommitsToPush"))
}
upstream, err := gui.GitCommand.GetUpstreamForBranch(branch.Name)
if err != nil {
return gui.surfaceError(err)
}
split := strings.Split(upstream, "/")
remoteName := split[0]
remoteBranchName := strings.Join(split[1:], "/")
message := gui.Tr.TemplateLocalize(
"Fetching",
Teml{
"from": fmt.Sprintf("%s/%s", remoteName, remoteBranchName),
"to": branch.Name,
},
)
go func() {
_ = gui.createLoaderPanel(gui.g, v, message)
if gui.State.Panels.Branches.SelectedLine == 0 {
_ = gui.pullWithMode("ff-only", PullFilesOptions{})
return
} else {
if err := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName); err != nil {
_ = gui.surfaceError(err)
return
}
_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})
}
_ = gui.closeConfirmationPrompt(gui.g, true)
}()
return nil
}
func (gui *Gui) onBranchesTabClick(tabIndex int) error {
contexts := []string{"local-branches", "remotes", "tags"}
branchesView := gui.getBranchesView()
branchesView.TabIndex = tabIndex
return gui.switchBranchesPanelContext(contexts[tabIndex])
}
func (gui *Gui) switchBranchesPanelContext(context string) error {
branchesView := gui.getBranchesView()
branchesView.Context = context
if err := gui.onSearchEscape(); err != nil {
return err
}
contextTabIndexMap := map[string]int{
"local-branches": 0,
"remotes": 1,
"remote-branches": 1,
"tags": 2,
}
branchesView.TabIndex = contextTabIndexMap[context]
return gui.refreshBranchesViewWithSelection()
}
func (gui *Gui) refreshBranchesViewWithSelection() error {
branchesView := gui.getBranchesView()
switch branchesView.Context {
case "local-branches":
return gui.renderLocalBranchesWithSelection()
case "remotes":
return gui.renderRemotesWithSelection()
case "remote-branches":
return gui.renderRemoteBranchesWithSelection()
case "tags":
return gui.renderTagsWithSelection()
}
return nil
}
func (gui *Gui) handleNextBranchesTab(g *gocui.Gui, v *gocui.View) error {
return gui.onBranchesTabClick(
utils.ModuloWithWrap(v.TabIndex+1, len(v.Tabs)),
)
}
func (gui *Gui) handlePrevBranchesTab(g *gocui.Gui, v *gocui.View) error {
return gui.onBranchesTabClick(
utils.ModuloWithWrap(v.TabIndex-1, len(v.Tabs)),
)
}
func (gui *Gui) handleCreateResetToBranchMenu(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
return gui.createResetMenu(branch.Name)
}
func (gui *Gui) onBranchesPanelSearchSelect(selectedLine int) error {
branchesView := gui.getBranchesView()
switch branchesView.Context {
case "local-branches":
gui.State.Panels.Branches.SelectedLine = selectedLine
return gui.handleBranchSelect(gui.g, branchesView)
case "remotes":
gui.State.Panels.Remotes.SelectedLine = selectedLine
return gui.handleRemoteSelect(gui.g, branchesView)
case "remote-branches":
gui.State.Panels.RemoteBranches.SelectedLine = selectedLine
return gui.handleRemoteBranchSelect(gui.g, branchesView)
}
return nil
}
func (gui *Gui) handleRenameBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
// TODO: find a way to not checkout the branch here if it's not the current branch (i.e. find some
// way to get it to show up in the reflog)
promptForNewName := func() error {
return gui.createPromptPanel(g, v, gui.Tr.SLocalize("NewBranchNamePrompt")+" "+branch.Name+":", "", func(g *gocui.Gui, v *gocui.View) error {
newName := gui.trimmedContent(v)
if err := gui.GitCommand.RenameBranch(branch.Name, newName); err != nil {
return gui.surfaceError(err)
}
// need to checkout so that the branch shows up in our reflog and therefore
// doesn't get lost among all the other branches when we switch to something else
if err := gui.GitCommand.Checkout(newName, commands.CheckoutOptions{Force: false}); err != nil {
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
})
}
// I could do an explicit check here for whether the branch is tracking a remote branch
// but if we've selected it we'll already know that via Pullables and Pullables.
// Bit of a hack but I'm lazy.
notTrackingRemote := branch.Pullables == "?"
if notTrackingRemote {
return promptForNewName()
}
return gui.createConfirmationPanel(gui.g, v, true, gui.Tr.SLocalize("renameBranch"), gui.Tr.SLocalize("RenameBranchWarning"), func(_g *gocui.Gui, _v *gocui.View) error {
return promptForNewName()
}, nil)
}
func (gui *Gui) currentBranch() *commands.Branch {
if len(gui.State.Branches) == 0 {
return nil
}
return gui.State.Branches[0]
}
func (gui *Gui) handleClipboardCopyBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
return gui.OSCommand.CopyToClipboard(branch.Name)
}
| pkg/gui/branches_panel.go | 1 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.998767614364624,
0.03802431374788284,
0.00016844767378643155,
0.0004903953522443771,
0.18551947176456451
] |
{
"id": 0,
"code_window": [
"\n",
"\treturn c.OSCommand.RunCommand(\"git apply %s %s\", flagStr, c.OSCommand.Quote(filepath))\n",
"}\n",
"\n",
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string) error {\n",
"\treturn c.OSCommand.RunCommand(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"}\n",
"\n",
"func (c *GitCommand) RunSkipEditorCommand(command string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string, promptUserForCredential func(string) string) error {\n",
"\tcommand := fmt.Sprintf(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"\treturn c.OSCommand.DetectUnamePass(command, promptUserForCredential)\n"
],
"file_path": "pkg/commands/git.go",
"type": "replace",
"edit_start_line_idx": 793
} | // Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin,386,!go1.12
package unix
//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64
| vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.00017472950275987387,
0.00017472950275987387,
0.00017472950275987387,
0.00017472950275987387,
0
] |
{
"id": 0,
"code_window": [
"\n",
"\treturn c.OSCommand.RunCommand(\"git apply %s %s\", flagStr, c.OSCommand.Quote(filepath))\n",
"}\n",
"\n",
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string) error {\n",
"\treturn c.OSCommand.RunCommand(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"}\n",
"\n",
"func (c *GitCommand) RunSkipEditorCommand(command string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string, promptUserForCredential func(string) string) error {\n",
"\tcommand := fmt.Sprintf(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"\treturn c.OSCommand.DetectUnamePass(command, promptUserForCredential)\n"
],
"file_path": "pkg/commands/git.go",
"type": "replace",
"edit_start_line_idx": 793
} | # This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
builds:
- env:
- CGO_ENABLED=0
goos:
- freebsd
- windows
- darwin
- linux
goarch:
- amd64
- arm
- arm64
- 386
# Default is `-s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}}`.
ldflags:
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} -X main.buildSource=binaryRelease
archives:
- replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: 32-bit
amd64: x86_64
format_overrides:
- goos: windows
format: zip
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: '{{ .Tag }}-next'
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
- '^bump'
brews:
-
# Repository to push the tap to.
tap:
owner: jesseduffield
name: homebrew-lazygit
# Your app's homepage.
# Default is empty.
homepage: 'https://github.com/jesseduffield/lazygit/'
# Your app's description.
# Default is empty.
description: 'A simple terminal UI for git commands, written in Go'
# # Packages your package depends on.
# dependencies:
# - git
# - zsh
# # Packages that conflict with your package.
# conflicts:
# - svn
# - bash
| .goreleaser.yml | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.0002243573107989505,
0.00017829796706791967,
0.00016631383914500475,
0.00017074802599381655,
0.000018915498003480025
] |
{
"id": 0,
"code_window": [
"\n",
"\treturn c.OSCommand.RunCommand(\"git apply %s %s\", flagStr, c.OSCommand.Quote(filepath))\n",
"}\n",
"\n",
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string) error {\n",
"\treturn c.OSCommand.RunCommand(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"}\n",
"\n",
"func (c *GitCommand) RunSkipEditorCommand(command string) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string, promptUserForCredential func(string) string) error {\n",
"\tcommand := fmt.Sprintf(\"git fetch %s %s:%s\", remoteName, remoteBranchName, branchName)\n",
"\treturn c.OSCommand.DetectUnamePass(command, promptUserForCredential)\n"
],
"file_path": "pkg/commands/git.go",
"type": "replace",
"edit_start_line_idx": 793
} | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
// +build arm,darwin
#include "textflag.h"
//
// System call support for ARM, Darwin
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
B syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB)
| vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.0002997243427671492,
0.00020456781203392893,
0.00016792872338555753,
0.00017530907643958926,
0.00005503532156581059
] |
{
"id": 1,
"code_window": [
"\tgo func() {\n",
"\t\t_ = gui.createLoaderPanel(gui.g, v, message)\n",
"\n",
"\t\tif gui.State.Panels.Branches.SelectedLine == 0 {\n",
"\t\t\t_ = gui.pullWithMode(\"ff-only\", PullFilesOptions{})\n",
"\t\t\treturn\n",
"\t\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 402
} | package commands
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/mgutz/str"
"github.com/go-errors/errors"
gogit "github.com/go-git/go-git/v5"
"github.com/jesseduffield/lazygit/pkg/config"
"github.com/jesseduffield/lazygit/pkg/i18n"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/sirupsen/logrus"
gitconfig "github.com/tcnksm/go-gitconfig"
)
// this takes something like:
// * (HEAD detached at 264fc6f5)
// remotes
// and returns '264fc6f5' as the second match
const CurrentBranchNameRegex = `(?m)^\*.*?([^ ]*?)\)?$`
func verifyInGitRepo(runCmd func(string, ...interface{}) error) error {
return runCmd("git status")
}
func navigateToRepoRootDirectory(stat func(string) (os.FileInfo, error), chdir func(string) error) error {
for {
_, err := stat(".git")
if err == nil {
return nil
}
if !os.IsNotExist(err) {
return WrapError(err)
}
if err = chdir(".."); err != nil {
return WrapError(err)
}
}
}
func setupRepositoryAndWorktree(openGitRepository func(string) (*gogit.Repository, error), sLocalize func(string) string) (repository *gogit.Repository, worktree *gogit.Worktree, err error) {
repository, err = openGitRepository(".")
if err != nil {
if strings.Contains(err.Error(), `unquoted '\' must be followed by new line`) {
return nil, nil, errors.New(sLocalize("GitconfigParseErr"))
}
return
}
worktree, err = repository.Worktree()
if err != nil {
return
}
return
}
// GitCommand is our main git interface
type GitCommand struct {
Log *logrus.Entry
OSCommand *OSCommand
Worktree *gogit.Worktree
Repo *gogit.Repository
Tr *i18n.Localizer
Config config.AppConfigurer
getGlobalGitConfig func(string) (string, error)
getLocalGitConfig func(string) (string, error)
removeFile func(string) error
DotGitDir string
onSuccessfulContinue func() error
PatchManager *PatchManager
// Push to current determines whether the user has configured to push to the remote branch of the same name as the current or not
PushToCurrent bool
}
// NewGitCommand it runs git commands
func NewGitCommand(log *logrus.Entry, osCommand *OSCommand, tr *i18n.Localizer, config config.AppConfigurer) (*GitCommand, error) {
var worktree *gogit.Worktree
var repo *gogit.Repository
// see what our default push behaviour is
output, err := osCommand.RunCommandWithOutput("git config --get push.default")
pushToCurrent := false
if err != nil {
log.Errorf("error reading git config: %v", err)
} else {
pushToCurrent = strings.TrimSpace(output) == "current"
}
fs := []func() error{
func() error {
return verifyInGitRepo(osCommand.RunCommand)
},
func() error {
return navigateToRepoRootDirectory(os.Stat, os.Chdir)
},
func() error {
var err error
repo, worktree, err = setupRepositoryAndWorktree(gogit.PlainOpen, tr.SLocalize)
return err
},
}
for _, f := range fs {
if err := f(); err != nil {
return nil, err
}
}
dotGitDir, err := findDotGitDir(os.Stat, ioutil.ReadFile)
if err != nil {
return nil, err
}
gitCommand := &GitCommand{
Log: log,
OSCommand: osCommand,
Tr: tr,
Worktree: worktree,
Repo: repo,
Config: config,
getGlobalGitConfig: gitconfig.Global,
getLocalGitConfig: gitconfig.Local,
removeFile: os.RemoveAll,
DotGitDir: dotGitDir,
PushToCurrent: pushToCurrent,
}
gitCommand.PatchManager = NewPatchManager(log, gitCommand.ApplyPatch)
return gitCommand, nil
}
func findDotGitDir(stat func(string) (os.FileInfo, error), readFile func(filename string) ([]byte, error)) (string, error) {
f, err := stat(".git")
if err != nil {
return "", err
}
if f.IsDir() {
return ".git", nil
}
fileBytes, err := readFile(".git")
if err != nil {
return "", err
}
fileContent := string(fileBytes)
if !strings.HasPrefix(fileContent, "gitdir: ") {
return "", errors.New(".git is a file which suggests we are in a submodule but the file's contents do not contain a gitdir pointing to the actual .git directory")
}
return strings.TrimSpace(strings.TrimPrefix(fileContent, "gitdir: ")), nil
}
func (c *GitCommand) getUnfilteredStashEntries() []*StashEntry {
unescaped := "git stash list --pretty='%gs'"
rawString, _ := c.OSCommand.RunCommandWithOutput(unescaped)
stashEntries := []*StashEntry{}
for i, line := range utils.SplitLines(rawString) {
stashEntries = append(stashEntries, stashEntryFromLine(line, i))
}
return stashEntries
}
// GetStashEntries stash entries
func (c *GitCommand) GetStashEntries(filterPath string) []*StashEntry {
if filterPath == "" {
return c.getUnfilteredStashEntries()
}
unescaped := fmt.Sprintf("git stash list --name-only")
rawString, err := c.OSCommand.RunCommandWithOutput(unescaped)
if err != nil {
return c.getUnfilteredStashEntries()
}
stashEntries := []*StashEntry{}
var currentStashEntry *StashEntry
lines := utils.SplitLines(rawString)
isAStash := func(line string) bool { return strings.HasPrefix(line, "stash@{") }
re := regexp.MustCompile(`stash@\{(\d+)\}`)
outer:
for i := 0; i < len(lines); i++ {
if !isAStash(lines[i]) {
continue
}
match := re.FindStringSubmatch(lines[i])
idx, err := strconv.Atoi(match[1])
if err != nil {
return c.getUnfilteredStashEntries()
}
currentStashEntry = stashEntryFromLine(lines[i], idx)
for i+1 < len(lines) && !isAStash(lines[i+1]) {
i++
if lines[i] == filterPath {
stashEntries = append(stashEntries, currentStashEntry)
continue outer
}
}
}
return stashEntries
}
func stashEntryFromLine(line string, index int) *StashEntry {
return &StashEntry{
Name: line,
Index: index,
}
}
// GetStashEntryDiff stash diff
func (c *GitCommand) ShowStashEntryCmdStr(index int) string {
return fmt.Sprintf("git stash show -p --color=%s stash@{%d}", c.colorArg(), index)
}
// GetStatusFiles git status files
type GetStatusFileOptions struct {
NoRenames bool
}
func (c *GitCommand) GetStatusFiles(opts GetStatusFileOptions) []*File {
statusOutput, _ := c.GitStatus(GitStatusOptions{NoRenames: opts.NoRenames})
statusStrings := utils.SplitLines(statusOutput)
files := []*File{}
for _, statusString := range statusStrings {
change := statusString[0:2]
stagedChange := change[0:1]
unstagedChange := statusString[1:2]
filename := c.OSCommand.Unquote(statusString[3:])
untracked := utils.IncludesString([]string{"??", "A ", "AM"}, change)
hasNoStagedChanges := utils.IncludesString([]string{" ", "U", "?"}, stagedChange)
hasMergeConflicts := utils.IncludesString([]string{"DD", "AA", "UU", "AU", "UA", "UD", "DU"}, change)
hasInlineMergeConflicts := utils.IncludesString([]string{"UU", "AA"}, change)
file := &File{
Name: filename,
DisplayString: statusString,
HasStagedChanges: !hasNoStagedChanges,
HasUnstagedChanges: unstagedChange != " ",
Tracked: !untracked,
Deleted: unstagedChange == "D" || stagedChange == "D",
HasMergeConflicts: hasMergeConflicts,
HasInlineMergeConflicts: hasInlineMergeConflicts,
Type: c.OSCommand.FileType(filename),
ShortStatus: change,
}
files = append(files, file)
}
return files
}
// StashDo modify stash
func (c *GitCommand) StashDo(index int, method string) error {
return c.OSCommand.RunCommand("git stash %s stash@{%d}", method, index)
}
// StashSave save stash
// TODO: before calling this, check if there is anything to save
func (c *GitCommand) StashSave(message string) error {
return c.OSCommand.RunCommand("git stash save %s", c.OSCommand.Quote(message))
}
// MergeStatusFiles merge status files
func (c *GitCommand) MergeStatusFiles(oldFiles, newFiles []*File, selectedFile *File) []*File {
if len(oldFiles) == 0 {
return newFiles
}
appendedIndexes := []int{}
// retain position of files we already could see
result := []*File{}
for _, oldFile := range oldFiles {
for newIndex, newFile := range newFiles {
if includesInt(appendedIndexes, newIndex) {
continue
}
// if we just staged B and in doing so created 'A -> B' and we are currently have oldFile: A and newFile: 'A -> B', we want to wait until we come across B so the our cursor isn't jumping anywhere
waitForMatchingFile := selectedFile != nil && newFile.IsRename() && !selectedFile.IsRename() && newFile.Matches(selectedFile) && !oldFile.Matches(selectedFile)
if oldFile.Matches(newFile) && !waitForMatchingFile {
result = append(result, newFile)
appendedIndexes = append(appendedIndexes, newIndex)
}
}
}
// append any new files to the end
for index, newFile := range newFiles {
if !includesInt(appendedIndexes, index) {
result = append(result, newFile)
}
}
return result
}
func includesInt(list []int, a int) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
// ResetAndClean removes all unstaged changes and removes all untracked files
func (c *GitCommand) ResetAndClean() error {
if err := c.ResetHard("HEAD"); err != nil {
return err
}
return c.RemoveUntrackedFiles()
}
func (c *GitCommand) GetCurrentBranchUpstreamDifferenceCount() (string, string) {
return c.GetCommitDifferences("HEAD", "HEAD@{u}")
}
func (c *GitCommand) GetBranchUpstreamDifferenceCount(branchName string) (string, string) {
return c.GetCommitDifferences(branchName, branchName+"@{u}")
}
// GetCommitDifferences checks how many pushables/pullables there are for the
// current branch
func (c *GitCommand) GetCommitDifferences(from, to string) (string, string) {
command := "git rev-list %s..%s --count"
pushableCount, err := c.OSCommand.RunCommandWithOutput(command, to, from)
if err != nil {
return "?", "?"
}
pullableCount, err := c.OSCommand.RunCommandWithOutput(command, from, to)
if err != nil {
return "?", "?"
}
return strings.TrimSpace(pushableCount), strings.TrimSpace(pullableCount)
}
// RenameCommit renames the topmost commit with the given name
func (c *GitCommand) RenameCommit(name string) error {
return c.OSCommand.RunCommand("git commit --allow-empty --amend -m %s", c.OSCommand.Quote(name))
}
// RebaseBranch interactive rebases onto a branch
func (c *GitCommand) RebaseBranch(branchName string) error {
cmd, err := c.PrepareInteractiveRebaseCommand(branchName, "", false)
if err != nil {
return err
}
return c.OSCommand.RunPreparedCommand(cmd)
}
type FetchOptions struct {
PromptUserForCredential func(string) string
RemoteName string
BranchName string
}
// Fetch fetch git repo
func (c *GitCommand) Fetch(opts FetchOptions) error {
command := "git fetch"
if opts.RemoteName != "" {
command = fmt.Sprintf("%s %s", command, opts.RemoteName)
}
if opts.BranchName != "" {
command = fmt.Sprintf("%s %s", command, opts.BranchName)
}
return c.OSCommand.DetectUnamePass(command, func(question string) string {
if opts.PromptUserForCredential != nil {
return opts.PromptUserForCredential(question)
}
return "\n"
})
}
// ResetToCommit reset to commit
func (c *GitCommand) ResetToCommit(sha string, strength string, options RunCommandOptions) error {
return c.OSCommand.RunCommandWithOptions(fmt.Sprintf("git reset --%s %s", strength, sha), options)
}
// NewBranch create new branch
func (c *GitCommand) NewBranch(name string, baseBranch string) error {
return c.OSCommand.RunCommand("git checkout -b %s %s", name, baseBranch)
}
// CurrentBranchName get the current branch name and displayname.
// the first returned string is the name and the second is the displayname
// e.g. name is 123asdf and displayname is '(HEAD detached at 123asdf)'
func (c *GitCommand) CurrentBranchName() (string, string, error) {
branchName, err := c.OSCommand.RunCommandWithOutput("git symbolic-ref --short HEAD")
if err == nil && branchName != "HEAD\n" {
trimmedBranchName := strings.TrimSpace(branchName)
return trimmedBranchName, trimmedBranchName, nil
}
output, err := c.OSCommand.RunCommandWithOutput("git branch --contains")
if err != nil {
return "", "", err
}
for _, line := range utils.SplitLines(output) {
re := regexp.MustCompile(CurrentBranchNameRegex)
match := re.FindStringSubmatch(line)
if len(match) > 0 {
branchName = match[1]
displayBranchName := match[0][2:]
return branchName, displayBranchName, nil
}
}
return "HEAD", "HEAD", nil
}
// DeleteBranch delete branch
func (c *GitCommand) DeleteBranch(branch string, force bool) error {
command := "git branch -d"
if force {
command = "git branch -D"
}
return c.OSCommand.RunCommand("%s %s", command, branch)
}
// ListStash list stash
func (c *GitCommand) ListStash() (string, error) {
return c.OSCommand.RunCommandWithOutput("git stash list")
}
type MergeOpts struct {
FastForwardOnly bool
}
// Merge merge
func (c *GitCommand) Merge(branchName string, opts MergeOpts) error {
mergeArgs := c.Config.GetUserConfig().GetString("git.merging.args")
command := fmt.Sprintf("git merge --no-edit %s %s", mergeArgs, branchName)
if opts.FastForwardOnly {
command = fmt.Sprintf("%s --ff-only", command)
}
return c.OSCommand.RunCommand(command)
}
// AbortMerge abort merge
func (c *GitCommand) AbortMerge() error {
return c.OSCommand.RunCommand("git merge --abort")
}
// usingGpg tells us whether the user has gpg enabled so that we can know
// whether we need to run a subprocess to allow them to enter their password
func (c *GitCommand) usingGpg() bool {
overrideGpg := c.Config.GetUserConfig().GetBool("git.overrideGpg")
if overrideGpg {
return false
}
gpgsign, _ := c.getLocalGitConfig("commit.gpgsign")
if gpgsign == "" {
gpgsign, _ = c.getGlobalGitConfig("commit.gpgsign")
}
value := strings.ToLower(gpgsign)
return value == "true" || value == "1" || value == "yes" || value == "on"
}
// Commit commits to git
func (c *GitCommand) Commit(message string, flags string) (*exec.Cmd, error) {
command := fmt.Sprintf("git commit %s -m %s", flags, strconv.Quote(message))
if c.usingGpg() {
return c.OSCommand.ShellCommandFromString(command), nil
}
return nil, c.OSCommand.RunCommand(command)
}
// Get the subject of the HEAD commit
func (c *GitCommand) GetHeadCommitMessage() (string, error) {
cmdStr := "git log -1 --pretty=%s"
message, err := c.OSCommand.RunCommandWithOutput(cmdStr)
return strings.TrimSpace(message), err
}
// AmendHead amends HEAD with whatever is staged in your working tree
func (c *GitCommand) AmendHead() (*exec.Cmd, error) {
command := "git commit --amend --no-edit --allow-empty"
if c.usingGpg() {
return c.OSCommand.ShellCommandFromString(command), nil
}
return nil, c.OSCommand.RunCommand(command)
}
// Push pushes to a branch
func (c *GitCommand) Push(branchName string, force bool, upstream string, args string, promptUserForCredential func(string) string) error {
forceFlag := ""
if force {
forceFlag = "--force-with-lease"
}
setUpstreamArg := ""
if upstream != "" {
setUpstreamArg = "--set-upstream " + upstream
}
cmd := fmt.Sprintf("git push --follow-tags %s %s %s", forceFlag, setUpstreamArg, args)
return c.OSCommand.DetectUnamePass(cmd, promptUserForCredential)
}
// CatFile obtains the content of a file
func (c *GitCommand) CatFile(fileName string) (string, error) {
return c.OSCommand.RunCommandWithOutput("%s %s", c.OSCommand.Platform.catCmd, c.OSCommand.Quote(fileName))
}
// StageFile stages a file
func (c *GitCommand) StageFile(fileName string) error {
// renamed files look like "file1 -> file2"
fileNames := strings.Split(fileName, " -> ")
return c.OSCommand.RunCommand("git add %s", c.OSCommand.Quote(fileNames[len(fileNames)-1]))
}
// StageAll stages all files
func (c *GitCommand) StageAll() error {
return c.OSCommand.RunCommand("git add -A")
}
// UnstageAll stages all files
func (c *GitCommand) UnstageAll() error {
return c.OSCommand.RunCommand("git reset")
}
// UnStageFile unstages a file
func (c *GitCommand) UnStageFile(fileName string, tracked bool) error {
command := "git rm --cached %s"
if tracked {
command = "git reset HEAD %s"
}
// renamed files look like "file1 -> file2"
fileNames := strings.Split(fileName, " -> ")
for _, name := range fileNames {
if err := c.OSCommand.RunCommand(command, c.OSCommand.Quote(name)); err != nil {
return err
}
}
return nil
}
// GitStatus returns the plaintext short status of the repo
type GitStatusOptions struct {
NoRenames bool
}
func (c *GitCommand) GitStatus(opts GitStatusOptions) (string, error) {
noRenamesFlag := ""
if opts.NoRenames {
noRenamesFlag = "--no-renames"
}
return c.OSCommand.RunCommandWithOutput("git status --untracked-files=all --porcelain %s", noRenamesFlag)
}
// IsInMergeState states whether we are still mid-merge
func (c *GitCommand) IsInMergeState() (bool, error) {
return c.OSCommand.FileExists(fmt.Sprintf("%s/MERGE_HEAD", c.DotGitDir))
}
// RebaseMode returns "" for non-rebase mode, "normal" for normal rebase
// and "interactive" for interactive rebase
func (c *GitCommand) RebaseMode() (string, error) {
exists, err := c.OSCommand.FileExists(fmt.Sprintf("%s/rebase-apply", c.DotGitDir))
if err != nil {
return "", err
}
if exists {
return "normal", nil
}
exists, err = c.OSCommand.FileExists(fmt.Sprintf("%s/rebase-merge", c.DotGitDir))
if exists {
return "interactive", err
} else {
return "", err
}
}
func (c *GitCommand) BeforeAndAfterFileForRename(file *File) (*File, *File, error) {
if !file.IsRename() {
return nil, nil, errors.New("Expected renamed file")
}
// we've got a file that represents a rename from one file to another. Unfortunately
// our File abstraction fails to consider this case, so here we will refetch
// all files, passing the --no-renames flag and then recursively call the function
// again for the before file and after file. At some point we should fix the abstraction itself
split := strings.Split(file.Name, " -> ")
filesWithoutRenames := c.GetStatusFiles(GetStatusFileOptions{NoRenames: true})
var beforeFile *File
var afterFile *File
for _, f := range filesWithoutRenames {
if f.Name == split[0] {
beforeFile = f
}
if f.Name == split[1] {
afterFile = f
}
}
if beforeFile == nil || afterFile == nil {
return nil, nil, errors.New("Could not find deleted file or new file for file rename")
}
if beforeFile.IsRename() || afterFile.IsRename() {
// probably won't happen but we want to ensure we don't get an infinite loop
return nil, nil, errors.New("Nested rename found")
}
return beforeFile, afterFile, nil
}
// DiscardAllFileChanges directly
func (c *GitCommand) DiscardAllFileChanges(file *File) error {
if file.IsRename() {
beforeFile, afterFile, err := c.BeforeAndAfterFileForRename(file)
if err != nil {
return err
}
if err := c.DiscardAllFileChanges(beforeFile); err != nil {
return err
}
if err := c.DiscardAllFileChanges(afterFile); err != nil {
return err
}
return nil
}
// if the file isn't tracked, we assume you want to delete it
quotedFileName := c.OSCommand.Quote(file.Name)
if file.HasStagedChanges || file.HasMergeConflicts {
if err := c.OSCommand.RunCommand("git reset -- %s", quotedFileName); err != nil {
return err
}
}
if !file.Tracked {
return c.removeFile(file.Name)
}
return c.DiscardUnstagedFileChanges(file)
}
// DiscardUnstagedFileChanges directly
func (c *GitCommand) DiscardUnstagedFileChanges(file *File) error {
quotedFileName := c.OSCommand.Quote(file.Name)
return c.OSCommand.RunCommand("git checkout -- %s", quotedFileName)
}
// Checkout checks out a branch (or commit), with --force if you set the force arg to true
type CheckoutOptions struct {
Force bool
EnvVars []string
}
func (c *GitCommand) Checkout(branch string, options CheckoutOptions) error {
forceArg := ""
if options.Force {
forceArg = "--force "
}
return c.OSCommand.RunCommandWithOptions(fmt.Sprintf("git checkout %s %s", forceArg, branch), RunCommandOptions{EnvVars: options.EnvVars})
}
// PrepareCommitSubProcess prepares a subprocess for `git commit`
func (c *GitCommand) PrepareCommitSubProcess() *exec.Cmd {
return c.OSCommand.PrepareSubProcess("git", "commit")
}
// PrepareCommitAmendSubProcess prepares a subprocess for `git commit --amend --allow-empty`
func (c *GitCommand) PrepareCommitAmendSubProcess() *exec.Cmd {
return c.OSCommand.PrepareSubProcess("git", "commit", "--amend", "--allow-empty")
}
// GetBranchGraph gets the color-formatted graph of the log for the given branch
// Currently it limits the result to 100 commits, but when we get async stuff
// working we can do lazy loading
func (c *GitCommand) GetBranchGraph(branchName string) (string, error) {
cmdStr := c.GetBranchGraphCmdStr(branchName)
return c.OSCommand.RunCommandWithOutput(cmdStr)
}
func (c *GitCommand) GetUpstreamForBranch(branchName string) (string, error) {
output, err := c.OSCommand.RunCommandWithOutput("git rev-parse --abbrev-ref --symbolic-full-name %s@{u}", branchName)
return strings.TrimSpace(output), err
}
// Ignore adds a file to the gitignore for the repo
func (c *GitCommand) Ignore(filename string) error {
return c.OSCommand.AppendLineToFile(".gitignore", filename)
}
func (c *GitCommand) ShowCmdStr(sha string, filterPath string) string {
filterPathArg := ""
if filterPath != "" {
filterPathArg = fmt.Sprintf(" -- %s", c.OSCommand.Quote(filterPath))
}
return fmt.Sprintf("git show --color=%s --no-renames --stat -p %s %s", c.colorArg(), sha, filterPathArg)
}
func (c *GitCommand) GetBranchGraphCmdStr(branchName string) string {
branchLogCmdTemplate := c.Config.GetUserConfig().GetString("git.branchLogCmd")
templateValues := map[string]string{
"branchName": branchName,
}
return utils.ResolvePlaceholderString(branchLogCmdTemplate, templateValues)
}
// GetRemoteURL returns current repo remote url
func (c *GitCommand) GetRemoteURL() string {
url, _ := c.OSCommand.RunCommandWithOutput("git config --get remote.origin.url")
return utils.TrimTrailingNewline(url)
}
// CheckRemoteBranchExists Returns remote branch
func (c *GitCommand) CheckRemoteBranchExists(branch *Branch) bool {
_, err := c.OSCommand.RunCommandWithOutput(
"git show-ref --verify -- refs/remotes/origin/%s",
branch.Name,
)
return err == nil
}
// Diff returns the diff of a file
func (c *GitCommand) Diff(file *File, plain bool, cached bool) string {
// for now we assume an error means the file was deleted
s, _ := c.OSCommand.RunCommandWithOutput(c.DiffCmdStr(file, plain, cached))
return s
}
func (c *GitCommand) DiffCmdStr(file *File, plain bool, cached bool) string {
cachedArg := ""
trackedArg := "--"
colorArg := c.colorArg()
split := strings.Split(file.Name, " -> ") // in case of a renamed file we get the new filename
fileName := c.OSCommand.Quote(split[len(split)-1])
if cached {
cachedArg = "--cached"
}
if !file.Tracked && !file.HasStagedChanges && !cached {
trackedArg = "--no-index /dev/null"
}
if plain {
colorArg = "never"
}
return fmt.Sprintf("git diff --color=%s %s %s %s", colorArg, cachedArg, trackedArg, fileName)
}
func (c *GitCommand) ApplyPatch(patch string, flags ...string) error {
c.Log.Warn(patch)
filepath := filepath.Join(c.Config.GetUserConfigDir(), utils.GetCurrentRepoName(), time.Now().Format("Jan _2 15.04.05.000000000")+".patch")
if err := c.OSCommand.CreateFileWithContent(filepath, patch); err != nil {
return err
}
flagStr := ""
for _, flag := range flags {
flagStr += " --" + flag
}
return c.OSCommand.RunCommand("git apply %s %s", flagStr, c.OSCommand.Quote(filepath))
}
func (c *GitCommand) FastForward(branchName string, remoteName string, remoteBranchName string) error {
return c.OSCommand.RunCommand("git fetch %s %s:%s", remoteName, remoteBranchName, branchName)
}
func (c *GitCommand) RunSkipEditorCommand(command string) error {
cmd := c.OSCommand.ExecutableFromString(command)
lazyGitPath := c.OSCommand.GetLazygitPath()
cmd.Env = append(
cmd.Env,
"LAZYGIT_CLIENT_COMMAND=EXIT_IMMEDIATELY",
"GIT_EDITOR="+lazyGitPath,
"EDITOR="+lazyGitPath,
"VISUAL="+lazyGitPath,
)
return c.OSCommand.RunExecutable(cmd)
}
// GenericMerge takes a commandType of "merge" or "rebase" and a command of "abort", "skip" or "continue"
// By default we skip the editor in the case where a commit will be made
func (c *GitCommand) GenericMerge(commandType string, command string) error {
err := c.RunSkipEditorCommand(
fmt.Sprintf(
"git %s --%s",
commandType,
command,
),
)
if err != nil {
if !strings.Contains(err.Error(), "no rebase in progress") {
return err
}
c.Log.Warn(err)
}
// sometimes we need to do a sequence of things in a rebase but the user needs to
// fix merge conflicts along the way. When this happens we queue up the next step
// so that after the next successful rebase continue we can continue from where we left off
if commandType == "rebase" && command == "continue" && c.onSuccessfulContinue != nil {
f := c.onSuccessfulContinue
c.onSuccessfulContinue = nil
return f()
}
if command == "abort" {
c.onSuccessfulContinue = nil
}
return nil
}
func (c *GitCommand) RewordCommit(commits []*Commit, index int) (*exec.Cmd, error) {
todo, sha, err := c.GenerateGenericRebaseTodo(commits, index, "reword")
if err != nil {
return nil, err
}
return c.PrepareInteractiveRebaseCommand(sha, todo, false)
}
func (c *GitCommand) MoveCommitDown(commits []*Commit, index int) error {
// we must ensure that we have at least two commits after the selected one
if len(commits) <= index+2 {
// assuming they aren't picking the bottom commit
return errors.New(c.Tr.SLocalize("NoRoom"))
}
todo := ""
orderedCommits := append(commits[0:index], commits[index+1], commits[index])
for _, commit := range orderedCommits {
todo = "pick " + commit.Sha + " " + commit.Name + "\n" + todo
}
cmd, err := c.PrepareInteractiveRebaseCommand(commits[index+2].Sha, todo, true)
if err != nil {
return err
}
return c.OSCommand.RunPreparedCommand(cmd)
}
func (c *GitCommand) InteractiveRebase(commits []*Commit, index int, action string) error {
todo, sha, err := c.GenerateGenericRebaseTodo(commits, index, action)
if err != nil {
return err
}
cmd, err := c.PrepareInteractiveRebaseCommand(sha, todo, true)
if err != nil {
return err
}
return c.OSCommand.RunPreparedCommand(cmd)
}
// PrepareInteractiveRebaseCommand returns the cmd for an interactive rebase
// we tell git to run lazygit to edit the todo list, and we pass the client
// lazygit a todo string to write to the todo file
func (c *GitCommand) PrepareInteractiveRebaseCommand(baseSha string, todo string, overrideEditor bool) (*exec.Cmd, error) {
ex := c.OSCommand.GetLazygitPath()
debug := "FALSE"
if c.OSCommand.Config.GetDebug() {
debug = "TRUE"
}
splitCmd := str.ToArgv(fmt.Sprintf("git rebase --interactive --autostash --keep-empty --rebase-merges %s", baseSha))
cmd := c.OSCommand.command(splitCmd[0], splitCmd[1:]...)
gitSequenceEditor := ex
if todo == "" {
gitSequenceEditor = "true"
}
cmd.Env = os.Environ()
cmd.Env = append(
cmd.Env,
"LAZYGIT_CLIENT_COMMAND=INTERACTIVE_REBASE",
"LAZYGIT_REBASE_TODO="+todo,
"DEBUG="+debug,
"LANG=en_US.UTF-8", // Force using EN as language
"LC_ALL=en_US.UTF-8", // Force using EN as language
"GIT_SEQUENCE_EDITOR="+gitSequenceEditor,
)
if overrideEditor {
cmd.Env = append(cmd.Env, "GIT_EDITOR="+ex)
}
return cmd, nil
}
func (c *GitCommand) HardReset(baseSha string) error {
return c.OSCommand.RunCommand("git reset --hard " + baseSha)
}
func (c *GitCommand) SoftReset(baseSha string) error {
return c.OSCommand.RunCommand("git reset --soft " + baseSha)
}
func (c *GitCommand) GenerateGenericRebaseTodo(commits []*Commit, actionIndex int, action string) (string, string, error) {
baseIndex := actionIndex + 1
if len(commits) <= baseIndex {
return "", "", errors.New(c.Tr.SLocalize("CannotRebaseOntoFirstCommit"))
}
if action == "squash" || action == "fixup" {
baseIndex++
if len(commits) <= baseIndex {
return "", "", errors.New(c.Tr.SLocalize("CannotSquashOntoSecondCommit"))
}
}
todo := ""
for i, commit := range commits[0:baseIndex] {
a := "pick"
if i == actionIndex {
a = action
}
todo = a + " " + commit.Sha + " " + commit.Name + "\n" + todo
}
return todo, commits[baseIndex].Sha, nil
}
// AmendTo amends the given commit with whatever files are staged
func (c *GitCommand) AmendTo(sha string) error {
if err := c.CreateFixupCommit(sha); err != nil {
return err
}
return c.SquashAllAboveFixupCommits(sha)
}
// EditRebaseTodo sets the action at a given index in the git-rebase-todo file
func (c *GitCommand) EditRebaseTodo(index int, action string) error {
fileName := fmt.Sprintf("%s/rebase-merge/git-rebase-todo", c.DotGitDir)
bytes, err := ioutil.ReadFile(fileName)
if err != nil {
return err
}
content := strings.Split(string(bytes), "\n")
commitCount := c.getTodoCommitCount(content)
// we have the most recent commit at the bottom whereas the todo file has
// it at the bottom, so we need to subtract our index from the commit count
contentIndex := commitCount - 1 - index
splitLine := strings.Split(content[contentIndex], " ")
content[contentIndex] = action + " " + strings.Join(splitLine[1:], " ")
result := strings.Join(content, "\n")
return ioutil.WriteFile(fileName, []byte(result), 0644)
}
func (c *GitCommand) getTodoCommitCount(content []string) int {
// count lines that are not blank and are not comments
commitCount := 0
for _, line := range content {
if line != "" && !strings.HasPrefix(line, "#") {
commitCount++
}
}
return commitCount
}
// MoveTodoDown moves a rebase todo item down by one position
func (c *GitCommand) MoveTodoDown(index int) error {
fileName := fmt.Sprintf("%s/rebase-merge/git-rebase-todo", c.DotGitDir)
bytes, err := ioutil.ReadFile(fileName)
if err != nil {
return err
}
content := strings.Split(string(bytes), "\n")
commitCount := c.getTodoCommitCount(content)
contentIndex := commitCount - 1 - index
rearrangedContent := append(content[0:contentIndex-1], content[contentIndex], content[contentIndex-1])
rearrangedContent = append(rearrangedContent, content[contentIndex+1:]...)
result := strings.Join(rearrangedContent, "\n")
return ioutil.WriteFile(fileName, []byte(result), 0644)
}
// Revert reverts the selected commit by sha
func (c *GitCommand) Revert(sha string) error {
return c.OSCommand.RunCommand("git revert %s", sha)
}
// CherryPickCommits begins an interactive rebase with the given shas being cherry picked onto HEAD
func (c *GitCommand) CherryPickCommits(commits []*Commit) error {
todo := ""
for _, commit := range commits {
todo = "pick " + commit.Sha + " " + commit.Name + "\n" + todo
}
cmd, err := c.PrepareInteractiveRebaseCommand("HEAD", todo, false)
if err != nil {
return err
}
return c.OSCommand.RunPreparedCommand(cmd)
}
// GetCommitFiles get the specified commit files
func (c *GitCommand) GetCommitFiles(commitSha string, patchManager *PatchManager) ([]*CommitFile, error) {
files, err := c.OSCommand.RunCommandWithOutput("git diff-tree --no-commit-id --name-only -r --no-renames %s", commitSha)
if err != nil {
return nil, err
}
commitFiles := make([]*CommitFile, 0)
for _, file := range strings.Split(strings.TrimRight(files, "\n"), "\n") {
status := UNSELECTED
if patchManager != nil && patchManager.CommitSha == commitSha {
status = patchManager.GetFileStatus(file)
}
commitFiles = append(commitFiles, &CommitFile{
Sha: commitSha,
Name: file,
DisplayString: file,
Status: status,
})
}
return commitFiles, nil
}
// ShowCommitFile get the diff of specified commit file
func (c *GitCommand) ShowCommitFile(commitSha, fileName string, plain bool) (string, error) {
cmdStr := c.ShowCommitFileCmdStr(commitSha, fileName, plain)
return c.OSCommand.RunCommandWithOutput(cmdStr)
}
func (c *GitCommand) ShowCommitFileCmdStr(commitSha, fileName string, plain bool) string {
colorArg := c.colorArg()
if plain {
colorArg = "never"
}
return fmt.Sprintf("git show --no-renames --color=%s %s -- %s", colorArg, commitSha, fileName)
}
// CheckoutFile checks out the file for the given commit
func (c *GitCommand) CheckoutFile(commitSha, fileName string) error {
return c.OSCommand.RunCommand("git checkout %s %s", commitSha, fileName)
}
// DiscardOldFileChanges discards changes to a file from an old commit
func (c *GitCommand) DiscardOldFileChanges(commits []*Commit, commitIndex int, fileName string) error {
if err := c.BeginInteractiveRebaseForCommit(commits, commitIndex); err != nil {
return err
}
// check if file exists in previous commit (this command returns an error if the file doesn't exist)
if err := c.OSCommand.RunCommand("git cat-file -e HEAD^:%s", fileName); err != nil {
if err := c.OSCommand.Remove(fileName); err != nil {
return err
}
if err := c.StageFile(fileName); err != nil {
return err
}
} else if err := c.CheckoutFile("HEAD^", fileName); err != nil {
return err
}
// amend the commit
cmd, err := c.AmendHead()
if cmd != nil {
return errors.New("received unexpected pointer to cmd")
}
if err != nil {
return err
}
// continue
return c.GenericMerge("rebase", "continue")
}
// DiscardAnyUnstagedFileChanges discards any unstages file changes via `git checkout -- .`
func (c *GitCommand) DiscardAnyUnstagedFileChanges() error {
return c.OSCommand.RunCommand("git checkout -- .")
}
// RemoveTrackedFiles will delete the given file(s) even if they are currently tracked
func (c *GitCommand) RemoveTrackedFiles(name string) error {
return c.OSCommand.RunCommand("git rm -r --cached %s", name)
}
// RemoveUntrackedFiles runs `git clean -fd`
func (c *GitCommand) RemoveUntrackedFiles() error {
return c.OSCommand.RunCommand("git clean -fd")
}
// ResetHardHead runs `git reset --hard`
func (c *GitCommand) ResetHard(ref string) error {
return c.OSCommand.RunCommand("git reset --hard " + ref)
}
// ResetSoft runs `git reset --soft HEAD`
func (c *GitCommand) ResetSoft(ref string) error {
return c.OSCommand.RunCommand("git reset --soft " + ref)
}
// CreateFixupCommit creates a commit that fixes up a previous commit
func (c *GitCommand) CreateFixupCommit(sha string) error {
return c.OSCommand.RunCommand("git commit --fixup=%s", sha)
}
// SquashAllAboveFixupCommits squashes all fixup! commits above the given one
func (c *GitCommand) SquashAllAboveFixupCommits(sha string) error {
return c.RunSkipEditorCommand(
fmt.Sprintf(
"git rebase --interactive --autostash --autosquash %s^",
sha,
),
)
}
// StashSaveStagedChanges stashes only the currently staged changes. This takes a few steps
// shoutouts to Joe on https://stackoverflow.com/questions/14759748/stashing-only-staged-changes-in-git-is-it-possible
func (c *GitCommand) StashSaveStagedChanges(message string) error {
if err := c.OSCommand.RunCommand("git stash --keep-index"); err != nil {
return err
}
if err := c.StashSave(message); err != nil {
return err
}
if err := c.OSCommand.RunCommand("git stash apply stash@{1}"); err != nil {
return err
}
if err := c.OSCommand.PipeCommands("git stash show -p", "git apply -R"); err != nil {
return err
}
if err := c.OSCommand.RunCommand("git stash drop stash@{1}"); err != nil {
return err
}
// if you had staged an untracked file, that will now appear as 'AD' in git status
// meaning it's deleted in your working tree but added in your index. Given that it's
// now safely stashed, we need to remove it.
files := c.GetStatusFiles(GetStatusFileOptions{})
for _, file := range files {
if file.ShortStatus == "AD" {
if err := c.UnStageFile(file.Name, false); err != nil {
return err
}
}
}
return nil
}
// BeginInteractiveRebaseForCommit starts an interactive rebase to edit the current
// commit and pick all others. After this you'll want to call `c.GenericMerge("rebase", "continue")`
func (c *GitCommand) BeginInteractiveRebaseForCommit(commits []*Commit, commitIndex int) error {
if len(commits)-1 < commitIndex {
return errors.New("index outside of range of commits")
}
// we can make this GPG thing possible it just means we need to do this in two parts:
// one where we handle the possibility of a credential request, and the other
// where we continue the rebase
if c.usingGpg() {
return errors.New(c.Tr.SLocalize("DisabledForGPG"))
}
todo, sha, err := c.GenerateGenericRebaseTodo(commits, commitIndex, "edit")
if err != nil {
return err
}
cmd, err := c.PrepareInteractiveRebaseCommand(sha, todo, true)
if err != nil {
return err
}
if err := c.OSCommand.RunPreparedCommand(cmd); err != nil {
return err
}
return nil
}
func (c *GitCommand) SetUpstreamBranch(upstream string) error {
return c.OSCommand.RunCommand("git branch -u %s", upstream)
}
func (c *GitCommand) AddRemote(name string, url string) error {
return c.OSCommand.RunCommand("git remote add %s %s", name, url)
}
func (c *GitCommand) RemoveRemote(name string) error {
return c.OSCommand.RunCommand("git remote remove %s", name)
}
func (c *GitCommand) IsHeadDetached() bool {
err := c.OSCommand.RunCommand("git symbolic-ref -q HEAD")
return err != nil
}
func (c *GitCommand) DeleteRemoteBranch(remoteName string, branchName string) error {
return c.OSCommand.RunCommand("git push %s --delete %s", remoteName, branchName)
}
func (c *GitCommand) SetBranchUpstream(remoteName string, remoteBranchName string, branchName string) error {
return c.OSCommand.RunCommand("git branch --set-upstream-to=%s/%s %s", remoteName, remoteBranchName, branchName)
}
func (c *GitCommand) RenameRemote(oldRemoteName string, newRemoteName string) error {
return c.OSCommand.RunCommand("git remote rename %s %s", oldRemoteName, newRemoteName)
}
func (c *GitCommand) UpdateRemoteUrl(remoteName string, updatedUrl string) error {
return c.OSCommand.RunCommand("git remote set-url %s %s", remoteName, updatedUrl)
}
func (c *GitCommand) CreateLightweightTag(tagName string, commitSha string) error {
return c.OSCommand.RunCommand("git tag %s %s", tagName, commitSha)
}
func (c *GitCommand) DeleteTag(tagName string) error {
return c.OSCommand.RunCommand("git tag -d %s", tagName)
}
func (c *GitCommand) PushTag(remoteName string, tagName string) error {
return c.OSCommand.RunCommand("git push %s %s", remoteName, tagName)
}
func (c *GitCommand) FetchRemote(remoteName string) error {
return c.OSCommand.RunCommand("git fetch %s", remoteName)
}
// GetReflogCommits only returns the new reflog commits since the given lastReflogCommit
// if none is passed (i.e. it's value is nil) then we get all the reflog commits
func (c *GitCommand) GetReflogCommits(lastReflogCommit *Commit, filterPath string) ([]*Commit, bool, error) {
commits := make([]*Commit, 0)
re := regexp.MustCompile(`(\w+).*HEAD@\{([^\}]+)\}: (.*)`)
filterPathArg := ""
if filterPath != "" {
filterPathArg = fmt.Sprintf(" --follow -- %s", c.OSCommand.Quote(filterPath))
}
cmd := c.OSCommand.ExecutableFromString(fmt.Sprintf("git reflog --abbrev=20 --date=unix %s", filterPathArg))
onlyObtainedNewReflogCommits := false
err := RunLineOutputCmd(cmd, func(line string) (bool, error) {
match := re.FindStringSubmatch(line)
if len(match) <= 1 {
return false, nil
}
unixTimestamp, _ := strconv.Atoi(match[2])
commit := &Commit{
Sha: match[1],
Name: match[3],
UnixTimestamp: int64(unixTimestamp),
Status: "reflog",
}
if lastReflogCommit != nil && commit.Sha == lastReflogCommit.Sha && commit.UnixTimestamp == lastReflogCommit.UnixTimestamp {
onlyObtainedNewReflogCommits = true
// after this point we already have these reflogs loaded so we'll simply return the new ones
return true, nil
}
commits = append(commits, commit)
return false, nil
})
if err != nil {
return nil, false, err
}
return commits, onlyObtainedNewReflogCommits, nil
}
func (c *GitCommand) ConfiguredPager() string {
if os.Getenv("GIT_PAGER") != "" {
return os.Getenv("GIT_PAGER")
}
if os.Getenv("PAGER") != "" {
return os.Getenv("PAGER")
}
output, err := c.OSCommand.RunCommandWithOutput("git config --get-all core.pager")
if err != nil {
return ""
}
trimmedOutput := strings.TrimSpace(output)
return strings.Split(trimmedOutput, "\n")[0]
}
func (c *GitCommand) GetPager(width int) string {
useConfig := c.Config.GetUserConfig().GetBool("git.paging.useConfig")
if useConfig {
pager := c.ConfiguredPager()
return strings.Split(pager, "| less")[0]
}
templateValues := map[string]string{
"columnWidth": strconv.Itoa(width/2 - 6),
}
pagerTemplate := c.Config.GetUserConfig().GetString("git.paging.pager")
return utils.ResolvePlaceholderString(pagerTemplate, templateValues)
}
func (c *GitCommand) colorArg() string {
return c.Config.GetUserConfig().GetString("git.paging.colorArg")
}
func (c *GitCommand) RenameBranch(oldName string, newName string) error {
return c.OSCommand.RunCommand("git branch --move %s %s", oldName, newName)
}
func (c *GitCommand) WorkingTreeState() string {
rebaseMode, _ := c.RebaseMode()
if rebaseMode != "" {
return "rebasing"
}
merging, _ := c.IsInMergeState()
if merging {
return "merging"
}
return "normal"
}
| pkg/commands/git.go | 1 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.08195635676383972,
0.001000697840936482,
0.00015959158190526068,
0.00017055340867955238,
0.006973220035433769
] |
{
"id": 1,
"code_window": [
"\tgo func() {\n",
"\t\t_ = gui.createLoaderPanel(gui.g, v, message)\n",
"\n",
"\t\tif gui.State.Panels.Branches.SelectedLine == 0 {\n",
"\t\t\t_ = gui.pullWithMode(\"ff-only\", PullFilesOptions{})\n",
"\t\t\treturn\n",
"\t\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 402
} | package i18n
import (
"fmt"
"text/template"
"github.com/nicksnyder/go-i18n/v2/internal/plural"
"golang.org/x/text/language"
)
// Localizer provides Localize and MustLocalize methods that return localized messages.
type Localizer struct {
// bundle contains the messages that can be returned by the Localizer.
bundle *Bundle
// tags is the list of language tags that the Localizer checks
// in order when localizing a message.
tags []language.Tag
}
// NewLocalizer returns a new Localizer that looks up messages
// in the bundle according to the language preferences in langs.
// It can parse Accept-Language headers as defined in http://www.ietf.org/rfc/rfc2616.txt.
func NewLocalizer(bundle *Bundle, langs ...string) *Localizer {
return &Localizer{
bundle: bundle,
tags: parseTags(langs),
}
}
func parseTags(langs []string) []language.Tag {
tags := []language.Tag{}
for _, lang := range langs {
t, _, err := language.ParseAcceptLanguage(lang)
if err != nil {
continue
}
tags = append(tags, t...)
}
return tags
}
// LocalizeConfig configures a call to the Localize method on Localizer.
type LocalizeConfig struct {
// MessageID is the id of the message to lookup.
// This field is ignored if DefaultMessage is set.
MessageID string
// TemplateData is the data passed when executing the message's template.
// If TemplateData is nil and PluralCount is not nil, then the message template
// will be executed with data that contains the plural count.
TemplateData interface{}
// PluralCount determines which plural form of the message is used.
PluralCount interface{}
// DefaultMessage is used if the message is not found in any message files.
DefaultMessage *Message
// Funcs is used to extend the Go template engine's built in functions
Funcs template.FuncMap
}
type invalidPluralCountErr struct {
messageID string
pluralCount interface{}
err error
}
func (e *invalidPluralCountErr) Error() string {
return fmt.Sprintf("invalid plural count %#v for message id %q: %s", e.pluralCount, e.messageID, e.err)
}
// MessageNotFoundErr is returned from Localize when a message could not be found.
type MessageNotFoundErr struct {
messageID string
}
func (e *MessageNotFoundErr) Error() string {
return fmt.Sprintf("message %q not found", e.messageID)
}
type pluralizeErr struct {
messageID string
tag language.Tag
}
func (e *pluralizeErr) Error() string {
return fmt.Sprintf("unable to pluralize %q because there no plural rule for %q", e.messageID, e.tag)
}
type messageIDMismatchErr struct {
messageID string
defaultMessageID string
}
func (e *messageIDMismatchErr) Error() string {
return fmt.Sprintf("message id %q does not match default message id %q", e.messageID, e.defaultMessageID)
}
// Localize returns a localized message.
func (l *Localizer) Localize(lc *LocalizeConfig) (string, error) {
msg, _, err := l.LocalizeWithTag(lc)
return msg, err
}
// Localize returns a localized message.
func (l *Localizer) LocalizeMessage(msg *Message) (string, error) {
return l.Localize(&LocalizeConfig{
DefaultMessage: msg,
})
}
// TODO: uncomment this (and the test) when extract has been updated to extract these call sites too.
// Localize returns a localized message.
// func (l *Localizer) LocalizeMessageID(messageID string) (string, error) {
// return l.Localize(&LocalizeConfig{
// MessageID: messageID,
// })
// }
// LocalizeWithTag returns a localized message and the language tag.
// It may return a best effort localized message even if an error happens.
func (l *Localizer) LocalizeWithTag(lc *LocalizeConfig) (string, language.Tag, error) {
messageID := lc.MessageID
if lc.DefaultMessage != nil {
if messageID != "" && messageID != lc.DefaultMessage.ID {
return "", language.Und, &messageIDMismatchErr{messageID: messageID, defaultMessageID: lc.DefaultMessage.ID}
}
messageID = lc.DefaultMessage.ID
}
var operands *plural.Operands
templateData := lc.TemplateData
if lc.PluralCount != nil {
var err error
operands, err = plural.NewOperands(lc.PluralCount)
if err != nil {
return "", language.Und, &invalidPluralCountErr{messageID: messageID, pluralCount: lc.PluralCount, err: err}
}
if templateData == nil {
templateData = map[string]interface{}{
"PluralCount": lc.PluralCount,
}
}
}
tag, template := l.getTemplate(messageID, lc.DefaultMessage)
if template == nil {
return "", language.Und, &MessageNotFoundErr{messageID: messageID}
}
pluralForm := l.pluralForm(tag, operands)
if pluralForm == plural.Invalid {
return "", language.Und, &pluralizeErr{messageID: messageID, tag: tag}
}
msg, err := template.Execute(pluralForm, templateData, lc.Funcs)
if err != nil {
// Attempt to fallback to "Other" pluralization in case translations are incomplete.
if pluralForm != plural.Other {
msg2, err2 := template.Execute(plural.Other, templateData, lc.Funcs)
if err2 == nil {
return msg2, tag, err
}
}
return "", language.Und, err
}
return msg, tag, nil
}
func (l *Localizer) getTemplate(id string, defaultMessage *Message) (language.Tag, *MessageTemplate) {
// Fast path.
// Optimistically assume this message id is defined in each language.
fastTag, template := l.matchTemplate(id, defaultMessage, l.bundle.matcher, l.bundle.tags)
if template != nil {
return fastTag, template
}
if len(l.bundle.tags) <= 1 {
return l.bundle.defaultLanguage, nil
}
// Slow path.
// We didn't find a translation for the tag suggested by the default matcher
// so we need to create a new matcher that contains only the tags in the bundle
// that have this message.
foundTags := make([]language.Tag, 0, len(l.bundle.messageTemplates)+1)
foundTags = append(foundTags, l.bundle.defaultLanguage)
for t, templates := range l.bundle.messageTemplates {
template := templates[id]
if template == nil || template.Other == "" {
continue
}
foundTags = append(foundTags, t)
}
return l.matchTemplate(id, defaultMessage, language.NewMatcher(foundTags), foundTags)
}
func (l *Localizer) matchTemplate(id string, defaultMessage *Message, matcher language.Matcher, tags []language.Tag) (language.Tag, *MessageTemplate) {
_, i, _ := matcher.Match(l.tags...)
tag := tags[i]
templates := l.bundle.messageTemplates[tag]
if templates != nil && templates[id] != nil {
return tag, templates[id]
}
if tag == l.bundle.defaultLanguage && defaultMessage != nil {
return tag, NewMessageTemplate(defaultMessage)
}
return tag, nil
}
func (l *Localizer) pluralForm(tag language.Tag, operands *plural.Operands) plural.Form {
if operands == nil {
return plural.Other
}
pluralRule := l.bundle.pluralRules.Rule(tag)
if pluralRule == nil {
return plural.Invalid
}
return pluralRule.PluralFormFunc(operands)
}
// MustLocalize is similar to Localize, except it panics if an error happens.
func (l *Localizer) MustLocalize(lc *LocalizeConfig) string {
localized, err := l.Localize(lc)
if err != nil {
panic(err)
}
return localized
}
| vendor/github.com/nicksnyder/go-i18n/v2/i18n/localizer.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.0007668600301258266,
0.00021468597697094083,
0.0001594011700944975,
0.00017070784815587103,
0.00012214132584631443
] |
{
"id": 1,
"code_window": [
"\tgo func() {\n",
"\t\t_ = gui.createLoaderPanel(gui.g, v, message)\n",
"\n",
"\t\tif gui.State.Panels.Branches.SelectedLine == 0 {\n",
"\t\t\t_ = gui.pullWithMode(\"ff-only\", PullFilesOptions{})\n",
"\t\t\treturn\n",
"\t\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 402
} | // Package str is a comprehensive set of string functions to build more
// Go awesomeness. Str complements Go's standard packages and does not duplicate
// functionality found in `strings` or `strconv`.
//
// Str is based on plain functions instead of object-based methods,
// consistent with Go standard string packages.
//
// str.Between("<a>foo</a>", "<a>", "</a>") == "foo"
//
// Str supports pipelining instead of chaining
//
// s := str.Pipe("\nabcdef\n", Clean, BetweenF("a", "f"), ChompLeftF("bc"))
//
// User-defined filters can be added to the pipeline by inserting a function
// or closure that returns a function with this signature
//
// func(string) string
//
package str
| vendor/github.com/mgutz/str/doc.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.0007383771589957178,
0.0004519656067714095,
0.00016555408365093172,
0.0004519656067714095,
0.00028641155222430825
] |
{
"id": 1,
"code_window": [
"\tgo func() {\n",
"\t\t_ = gui.createLoaderPanel(gui.g, v, message)\n",
"\n",
"\t\tif gui.State.Panels.Branches.SelectedLine == 0 {\n",
"\t\t\t_ = gui.pullWithMode(\"ff-only\", PullFilesOptions{})\n",
"\t\t\treturn\n",
"\t\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 402
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms
// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01.
package chacha20
import (
"crypto/cipher"
"encoding/binary"
"errors"
"math/bits"
"golang.org/x/crypto/internal/subtle"
)
const (
// KeySize is the size of the key used by this cipher, in bytes.
KeySize = 32
// NonceSize is the size of the nonce used with the standard variant of this
// cipher, in bytes.
//
// Note that this is too short to be safely generated at random if the same
// key is reused more than 2³² times.
NonceSize = 12
// NonceSizeX is the size of the nonce used with the XChaCha20 variant of
// this cipher, in bytes.
NonceSizeX = 24
)
// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key
// and nonce. A *Cipher implements the cipher.Stream interface.
type Cipher struct {
// The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter
// (incremented after each block), and 3 of nonce.
key [8]uint32
counter uint32
nonce [3]uint32
// The last len bytes of buf are leftover key stream bytes from the previous
// XORKeyStream invocation. The size of buf depends on how many blocks are
// computed at a time.
buf [bufSize]byte
len int
// The counter-independent results of the first round are cached after they
// are computed the first time.
precompDone bool
p1, p5, p9, p13 uint32
p2, p6, p10, p14 uint32
p3, p7, p11, p15 uint32
}
var _ cipher.Stream = (*Cipher)(nil)
// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given
// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided,
// the XChaCha20 construction will be used. It returns an error if key or nonce
// have any other length.
//
// Note that ChaCha20, like all stream ciphers, is not authenticated and allows
// attackers to silently tamper with the plaintext. For this reason, it is more
// appropriate as a building block than as a standalone encryption mechanism.
// Instead, consider using package golang.org/x/crypto/chacha20poly1305.
func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) {
// This function is split into a wrapper so that the Cipher allocation will
// be inlined, and depending on how the caller uses the return value, won't
// escape to the heap.
c := &Cipher{}
return newUnauthenticatedCipher(c, key, nonce)
}
func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) {
if len(key) != KeySize {
return nil, errors.New("chacha20: wrong key size")
}
if len(nonce) == NonceSizeX {
// XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a
// derived key, allowing it to operate on a nonce of 24 bytes. See
// draft-irtf-cfrg-xchacha-01, Section 2.3.
key, _ = HChaCha20(key, nonce[0:16])
cNonce := make([]byte, NonceSize)
copy(cNonce[4:12], nonce[16:24])
nonce = cNonce
} else if len(nonce) != NonceSize {
return nil, errors.New("chacha20: wrong nonce size")
}
c.key = [8]uint32{
binary.LittleEndian.Uint32(key[0:4]),
binary.LittleEndian.Uint32(key[4:8]),
binary.LittleEndian.Uint32(key[8:12]),
binary.LittleEndian.Uint32(key[12:16]),
binary.LittleEndian.Uint32(key[16:20]),
binary.LittleEndian.Uint32(key[20:24]),
binary.LittleEndian.Uint32(key[24:28]),
binary.LittleEndian.Uint32(key[28:32]),
}
c.nonce = [3]uint32{
binary.LittleEndian.Uint32(nonce[0:4]),
binary.LittleEndian.Uint32(nonce[4:8]),
binary.LittleEndian.Uint32(nonce[8:12]),
}
return c, nil
}
// The constant first 4 words of the ChaCha20 state.
const (
j0 uint32 = 0x61707865 // expa
j1 uint32 = 0x3320646e // nd 3
j2 uint32 = 0x79622d32 // 2-by
j3 uint32 = 0x6b206574 // te k
)
const blockSize = 64
// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words.
// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16
// words each round, in columnar or diagonal groups of 4 at a time.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
a += b
d ^= a
d = bits.RotateLeft32(d, 16)
c += d
b ^= c
b = bits.RotateLeft32(b, 12)
a += b
d ^= a
d = bits.RotateLeft32(d, 8)
c += d
b ^= c
b = bits.RotateLeft32(b, 7)
return a, b, c, d
}
// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will
// behave as if (64 * counter) bytes had been encrypted so far.
//
// To prevent accidental counter reuse, SetCounter panics if counter is
// less than the current value.
func (s *Cipher) SetCounter(counter uint32) {
// Internally, s may buffer multiple blocks, which complicates this
// implementation slightly. When checking whether the counter has rolled
// back, we must use both s.counter and s.len to determine how many blocks
// we have already output.
outputCounter := s.counter - uint32(s.len)/blockSize
if counter < outputCounter {
panic("chacha20: SetCounter attempted to rollback counter")
}
// In the general case, we set the new counter value and reset s.len to 0,
// causing the next call to XORKeyStream to refill the buffer. However, if
// we're advancing within the existing buffer, we can save work by simply
// setting s.len.
if counter < s.counter {
s.len = int(s.counter-counter) * blockSize
} else {
s.counter = counter
s.len = 0
}
}
// XORKeyStream XORs each byte in the given slice with a byte from the
// cipher's key stream. Dst and src must overlap entirely or not at all.
//
// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
// to pass a dst bigger than src, and in that case, XORKeyStream will
// only update dst[:len(src)] and will not touch the rest of dst.
//
// Multiple calls to XORKeyStream behave as if the concatenation of
// the src buffers was passed in a single run. That is, Cipher
// maintains state and does not reset at each XORKeyStream call.
func (s *Cipher) XORKeyStream(dst, src []byte) {
if len(src) == 0 {
return
}
if len(dst) < len(src) {
panic("chacha20: output smaller than input")
}
dst = dst[:len(src)]
if subtle.InexactOverlap(dst, src) {
panic("chacha20: invalid buffer overlap")
}
// First, drain any remaining key stream from a previous XORKeyStream.
if s.len != 0 {
keyStream := s.buf[bufSize-s.len:]
if len(src) < len(keyStream) {
keyStream = keyStream[:len(src)]
}
_ = src[len(keyStream)-1] // bounds check elimination hint
for i, b := range keyStream {
dst[i] = src[i] ^ b
}
s.len -= len(keyStream)
src = src[len(keyStream):]
dst = dst[len(keyStream):]
}
const blocksPerBuf = bufSize / blockSize
numBufs := (uint64(len(src)) + bufSize - 1) / bufSize
if uint64(s.counter)+numBufs*blocksPerBuf >= 1<<32 {
panic("chacha20: counter overflow")
}
// xorKeyStreamBlocks implementations expect input lengths that are a
// multiple of bufSize. Platform-specific ones process multiple blocks at a
// time, so have bufSizes that are a multiple of blockSize.
rem := len(src) % bufSize
full := len(src) - rem
if full > 0 {
s.xorKeyStreamBlocks(dst[:full], src[:full])
}
// If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and
// keep the leftover keystream for the next XORKeyStream invocation.
if rem > 0 {
s.buf = [bufSize]byte{}
copy(s.buf[:], src[full:])
s.xorKeyStreamBlocks(s.buf[:], s.buf[:])
s.len = bufSize - copy(dst[full:], s.buf[:])
}
}
func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
if len(dst) != len(src) || len(dst)%blockSize != 0 {
panic("chacha20: internal error: wrong dst and/or src length")
}
// To generate each block of key stream, the initial cipher state
// (represented below) is passed through 20 rounds of shuffling,
// alternatively applying quarterRounds by columns (like 1, 5, 9, 13)
// or by diagonals (like 1, 6, 11, 12).
//
// 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc
// 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk
// 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk
// 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn
//
// c=constant k=key b=blockcount n=nonce
var (
c0, c1, c2, c3 = j0, j1, j2, j3
c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3]
c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7]
_, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2]
)
// Three quarters of the first round don't depend on the counter, so we can
// calculate them here, and reuse them for multiple blocks in the loop, and
// for future XORKeyStream invocations.
if !s.precompDone {
s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13)
s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14)
s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15)
s.precompDone = true
}
for i := 0; i < len(src); i += blockSize {
// The remainder of the first column round.
fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter)
// The second diagonal round.
x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15)
x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12)
x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13)
x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14)
// The remaining 18 rounds.
for i := 0; i < 9; i++ {
// Column round.
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
// Diagonal round.
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
// Finally, add back the initial state to generate the key stream.
x0 += c0
x1 += c1
x2 += c2
x3 += c3
x4 += c4
x5 += c5
x6 += c6
x7 += c7
x8 += c8
x9 += c9
x10 += c10
x11 += c11
x12 += s.counter
x13 += c13
x14 += c14
x15 += c15
s.counter += 1
if s.counter == 0 {
panic("chacha20: internal error: counter overflow")
}
in, out := src[i:], dst[i:]
in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint
// XOR the key stream with the source and write out the result.
xor(out[0:], in[0:], x0)
xor(out[4:], in[4:], x1)
xor(out[8:], in[8:], x2)
xor(out[12:], in[12:], x3)
xor(out[16:], in[16:], x4)
xor(out[20:], in[20:], x5)
xor(out[24:], in[24:], x6)
xor(out[28:], in[28:], x7)
xor(out[32:], in[32:], x8)
xor(out[36:], in[36:], x9)
xor(out[40:], in[40:], x10)
xor(out[44:], in[44:], x11)
xor(out[48:], in[48:], x12)
xor(out[52:], in[52:], x13)
xor(out[56:], in[56:], x14)
xor(out[60:], in[60:], x15)
}
}
// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes
// key and a 16 bytes nonce. It returns an error if key or nonce have any other
// length. It is used as part of the XChaCha20 construction.
func HChaCha20(key, nonce []byte) ([]byte, error) {
// This function is split into a wrapper so that the slice allocation will
// be inlined, and depending on how the caller uses the return value, won't
// escape to the heap.
out := make([]byte, 32)
return hChaCha20(out, key, nonce)
}
func hChaCha20(out, key, nonce []byte) ([]byte, error) {
if len(key) != KeySize {
return nil, errors.New("chacha20: wrong HChaCha20 key size")
}
if len(nonce) != 16 {
return nil, errors.New("chacha20: wrong HChaCha20 nonce size")
}
x0, x1, x2, x3 := j0, j1, j2, j3
x4 := binary.LittleEndian.Uint32(key[0:4])
x5 := binary.LittleEndian.Uint32(key[4:8])
x6 := binary.LittleEndian.Uint32(key[8:12])
x7 := binary.LittleEndian.Uint32(key[12:16])
x8 := binary.LittleEndian.Uint32(key[16:20])
x9 := binary.LittleEndian.Uint32(key[20:24])
x10 := binary.LittleEndian.Uint32(key[24:28])
x11 := binary.LittleEndian.Uint32(key[28:32])
x12 := binary.LittleEndian.Uint32(nonce[0:4])
x13 := binary.LittleEndian.Uint32(nonce[4:8])
x14 := binary.LittleEndian.Uint32(nonce[8:12])
x15 := binary.LittleEndian.Uint32(nonce[12:16])
for i := 0; i < 10; i++ {
// Diagonal round.
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
// Column round.
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
_ = out[31] // bounds check elimination hint
binary.LittleEndian.PutUint32(out[0:4], x0)
binary.LittleEndian.PutUint32(out[4:8], x1)
binary.LittleEndian.PutUint32(out[8:12], x2)
binary.LittleEndian.PutUint32(out[12:16], x3)
binary.LittleEndian.PutUint32(out[16:20], x12)
binary.LittleEndian.PutUint32(out[20:24], x13)
binary.LittleEndian.PutUint32(out[24:28], x14)
binary.LittleEndian.PutUint32(out[28:32], x15)
return out, nil
}
| vendor/golang.org/x/crypto/chacha20/chacha_generic.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.0015809836331754923,
0.0002644601627252996,
0.0001612189516890794,
0.0001714301761239767,
0.000286108028376475
] |
{
"id": 2,
"code_window": [
"\t\t} else {\n",
"\t\t\tif err := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName); err != nil {\n",
"\t\t\t\t_ = gui.surfaceError(err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName, gui.promptUserForCredential)\n",
"\t\t\tgui.handleCredentialsPopup(err)\n"
],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 404
} | package gui
import (
"fmt"
"strings"
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands"
"github.com/jesseduffield/lazygit/pkg/gui/presentation"
"github.com/jesseduffield/lazygit/pkg/utils"
)
// list panel functions
func (gui *Gui) getSelectedBranch() *commands.Branch {
selectedLine := gui.State.Panels.Branches.SelectedLine
if selectedLine == -1 {
return nil
}
return gui.State.Branches[selectedLine]
}
// may want to standardise how these select methods work
func (gui *Gui) handleBranchSelect(g *gocui.Gui, v *gocui.View) error {
if gui.popupPanelFocused() {
return nil
}
gui.State.SplitMainPanel = false
if _, err := gui.g.SetCurrentView(v.Name()); err != nil {
return err
}
gui.getMainView().Title = "Log"
// This really shouldn't happen: there should always be a master branch
if len(gui.State.Branches) == 0 {
return gui.newStringTask("main", gui.Tr.SLocalize("NoBranchesThisRepo"))
}
branch := gui.getSelectedBranch()
v.FocusPoint(0, gui.State.Panels.Branches.SelectedLine)
if gui.inDiffMode() {
return gui.renderDiff()
}
cmd := gui.OSCommand.ExecutableFromString(
gui.GitCommand.GetBranchGraphCmdStr(branch.Name),
)
if err := gui.newCmdTask("main", cmd); err != nil {
gui.Log.Error(err)
}
return nil
}
// gui.refreshStatus is called at the end of this because that's when we can
// be sure there is a state.Branches array to pick the current branch from
func (gui *Gui) refreshBranches() {
reflogCommits := gui.State.FilteredReflogCommits
if gui.inFilterMode() {
// in filter mode we filter our reflog commits to just those containing the path
// however we need all the reflog entries to populate the recencies of our branches
// which allows us to order them correctly. So if we're filtering we'll just
// manually load all the reflog commits here
var err error
reflogCommits, _, err = gui.GitCommand.GetReflogCommits(nil, "")
if err != nil {
gui.Log.Error(err)
}
}
builder, err := commands.NewBranchListBuilder(gui.Log, gui.GitCommand, reflogCommits)
if err != nil {
_ = gui.surfaceError(err)
}
gui.State.Branches = builder.Build()
// TODO: if we're in the remotes view and we've just deleted a remote we need to refresh accordingly
if gui.getBranchesView().Context == "local-branches" {
_ = gui.renderLocalBranchesWithSelection()
}
gui.refreshStatus()
}
func (gui *Gui) renderLocalBranchesWithSelection() error {
branchesView := gui.getBranchesView()
gui.refreshSelectedLine(&gui.State.Panels.Branches.SelectedLine, len(gui.State.Branches))
displayStrings := presentation.GetBranchListDisplayStrings(gui.State.Branches, gui.State.ScreenMode != SCREEN_NORMAL, gui.State.Diff.Ref)
gui.renderDisplayStrings(branchesView, displayStrings)
if gui.g.CurrentView() == branchesView {
if err := gui.handleBranchSelect(gui.g, branchesView); err != nil {
return gui.surfaceError(err)
}
}
return nil
}
// specific functions
func (gui *Gui) handleBranchPress(g *gocui.Gui, v *gocui.View) error {
if gui.State.Panels.Branches.SelectedLine == -1 {
return nil
}
if gui.State.Panels.Branches.SelectedLine == 0 {
return gui.createErrorPanel(gui.Tr.SLocalize("AlreadyCheckedOutBranch"))
}
branch := gui.getSelectedBranch()
return gui.handleCheckoutRef(branch.Name, handleCheckoutRefOptions{})
}
func (gui *Gui) handleCreatePullRequestPress(g *gocui.Gui, v *gocui.View) error {
pullRequest := commands.NewPullRequest(gui.GitCommand)
branch := gui.getSelectedBranch()
if err := pullRequest.Create(branch); err != nil {
return gui.surfaceError(err)
}
return nil
}
func (gui *Gui) handleGitFetch(g *gocui.Gui, v *gocui.View) error {
if err := gui.createLoaderPanel(gui.g, v, gui.Tr.SLocalize("FetchWait")); err != nil {
return err
}
go func() {
err := gui.fetch(true)
gui.handleCredentialsPopup(err)
_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}()
return nil
}
func (gui *Gui) handleForceCheckout(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
message := gui.Tr.SLocalize("SureForceCheckout")
title := gui.Tr.SLocalize("ForceCheckoutBranch")
return gui.createConfirmationPanel(g, v, true, title, message, func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.Checkout(branch.Name, commands.CheckoutOptions{Force: true}); err != nil {
_ = gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}, nil)
}
type handleCheckoutRefOptions struct {
WaitingStatus string
EnvVars []string
onRefNotFound func(ref string) error
}
func (gui *Gui) handleCheckoutRef(ref string, options handleCheckoutRefOptions) error {
waitingStatus := options.WaitingStatus
if waitingStatus == "" {
waitingStatus = gui.Tr.SLocalize("CheckingOutStatus")
}
cmdOptions := commands.CheckoutOptions{Force: false, EnvVars: options.EnvVars}
onSuccess := func() {
gui.State.Panels.Branches.SelectedLine = 0
gui.State.Panels.Commits.SelectedLine = 0
// loading a heap of commits is slow so we limit them whenever doing a reset
gui.State.Panels.Commits.LimitCommits = true
}
return gui.WithWaitingStatus(waitingStatus, func() error {
if err := gui.GitCommand.Checkout(ref, cmdOptions); err != nil {
// note, this will only work for english-language git commands. If we force git to use english, and the error isn't this one, then the user will receive an english command they may not understand. I'm not sure what the best solution to this is. Running the command once in english and a second time in the native language is one option
if options.onRefNotFound != nil && strings.Contains(err.Error(), "did not match any file(s) known to git") {
return options.onRefNotFound(ref)
}
if strings.Contains(err.Error(), "Please commit your changes or stash them before you switch branch") {
// offer to autostash changes
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("AutoStashTitle"), gui.Tr.SLocalize("AutoStashPrompt"), func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.StashSave(gui.Tr.SLocalize("StashPrefix") + ref); err != nil {
return gui.surfaceError(err)
}
if err := gui.GitCommand.Checkout(ref, cmdOptions); err != nil {
return gui.surfaceError(err)
}
onSuccess()
if err := gui.GitCommand.StashDo(0, "pop"); err != nil {
if err := gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI}); err != nil {
return err
}
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI})
}, nil)
}
if err := gui.surfaceError(err); err != nil {
return err
}
}
onSuccess()
return gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI})
})
}
func (gui *Gui) handleCheckoutByName(g *gocui.Gui, v *gocui.View) error {
return gui.createPromptPanel(g, v, gui.Tr.SLocalize("BranchName")+":", "", func(g *gocui.Gui, v *gocui.View) error {
return gui.handleCheckoutRef(gui.trimmedContent(v), handleCheckoutRefOptions{
onRefNotFound: func(ref string) error {
return gui.createConfirmationPanel(gui.g, v, true, gui.Tr.SLocalize("BranchNotFoundTitle"), fmt.Sprintf("%s %s%s", gui.Tr.SLocalize("BranchNotFoundPrompt"), ref, "?"), func(_g *gocui.Gui, _v *gocui.View) error {
return gui.createNewBranchWithName(ref)
}, nil)
},
})
})
}
func (gui *Gui) getCheckedOutBranch() *commands.Branch {
if len(gui.State.Branches) == 0 {
return nil
}
return gui.State.Branches[0]
}
func (gui *Gui) handleNewBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
message := gui.Tr.TemplateLocalize(
"NewBranchNameBranchOff",
Teml{
"branchName": branch.Name,
},
)
return gui.createPromptPanel(g, v, message, "", func(g *gocui.Gui, v *gocui.View) error {
return gui.createNewBranchWithName(gui.trimmedContent(v))
})
}
func (gui *Gui) createNewBranchWithName(newBranchName string) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
if err := gui.GitCommand.NewBranch(newBranchName, branch.Name); err != nil {
return gui.surfaceError(err)
}
gui.State.Panels.Branches.SelectedLine = 0
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}
func (gui *Gui) handleDeleteBranch(g *gocui.Gui, v *gocui.View) error {
return gui.deleteBranch(g, v, false)
}
func (gui *Gui) deleteBranch(g *gocui.Gui, v *gocui.View, force bool) error {
selectedBranch := gui.getSelectedBranch()
if selectedBranch == nil {
return nil
}
checkedOutBranch := gui.getCheckedOutBranch()
if checkedOutBranch.Name == selectedBranch.Name {
return gui.createErrorPanel(gui.Tr.SLocalize("CantDeleteCheckOutBranch"))
}
return gui.deleteNamedBranch(g, v, selectedBranch, force)
}
func (gui *Gui) deleteNamedBranch(g *gocui.Gui, v *gocui.View, selectedBranch *commands.Branch, force bool) error {
title := gui.Tr.SLocalize("DeleteBranch")
var messageID string
if force {
messageID = "ForceDeleteBranchMessage"
} else {
messageID = "DeleteBranchMessage"
}
message := gui.Tr.TemplateLocalize(
messageID,
Teml{
"selectedBranchName": selectedBranch.Name,
},
)
return gui.createConfirmationPanel(g, v, true, title, message, func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.DeleteBranch(selectedBranch.Name, force); err != nil {
errMessage := err.Error()
if !force && strings.Contains(errMessage, "is not fully merged") {
return gui.deleteNamedBranch(g, v, selectedBranch, true)
}
return gui.createErrorPanel(errMessage)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})
}, nil)
}
func (gui *Gui) mergeBranchIntoCheckedOutBranch(branchName string) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
if gui.GitCommand.IsHeadDetached() {
return gui.createErrorPanel("Cannot merge branch in detached head state. You might have checked out a commit directly or a remote branch, in which case you should checkout the local branch you want to be on")
}
checkedOutBranchName := gui.getCheckedOutBranch().Name
if checkedOutBranchName == branchName {
return gui.createErrorPanel(gui.Tr.SLocalize("CantMergeBranchIntoItself"))
}
prompt := gui.Tr.TemplateLocalize(
"ConfirmMerge",
Teml{
"checkedOutBranch": checkedOutBranchName,
"selectedBranch": branchName,
},
)
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("MergingTitle"), prompt,
func(g *gocui.Gui, v *gocui.View) error {
err := gui.GitCommand.Merge(branchName, commands.MergeOpts{})
return gui.handleGenericMergeCommandResult(err)
}, nil)
}
func (gui *Gui) handleMerge(g *gocui.Gui, v *gocui.View) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
selectedBranchName := gui.getSelectedBranch().Name
return gui.mergeBranchIntoCheckedOutBranch(selectedBranchName)
}
func (gui *Gui) handleRebaseOntoLocalBranch(g *gocui.Gui, v *gocui.View) error {
selectedBranchName := gui.getSelectedBranch().Name
return gui.handleRebaseOntoBranch(selectedBranchName)
}
func (gui *Gui) handleRebaseOntoBranch(selectedBranchName string) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
checkedOutBranch := gui.getCheckedOutBranch().Name
if selectedBranchName == checkedOutBranch {
return gui.createErrorPanel(gui.Tr.SLocalize("CantRebaseOntoSelf"))
}
prompt := gui.Tr.TemplateLocalize(
"ConfirmRebase",
Teml{
"checkedOutBranch": checkedOutBranch,
"selectedBranch": selectedBranchName,
},
)
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("RebasingTitle"), prompt,
func(g *gocui.Gui, v *gocui.View) error {
err := gui.GitCommand.RebaseBranch(selectedBranchName)
return gui.handleGenericMergeCommandResult(err)
}, nil)
}
func (gui *Gui) handleFastForward(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
if branch.Pushables == "" {
return nil
}
if branch.Pushables == "?" {
return gui.createErrorPanel(gui.Tr.SLocalize("FwdNoUpstream"))
}
if branch.Pushables != "0" {
return gui.createErrorPanel(gui.Tr.SLocalize("FwdCommitsToPush"))
}
upstream, err := gui.GitCommand.GetUpstreamForBranch(branch.Name)
if err != nil {
return gui.surfaceError(err)
}
split := strings.Split(upstream, "/")
remoteName := split[0]
remoteBranchName := strings.Join(split[1:], "/")
message := gui.Tr.TemplateLocalize(
"Fetching",
Teml{
"from": fmt.Sprintf("%s/%s", remoteName, remoteBranchName),
"to": branch.Name,
},
)
go func() {
_ = gui.createLoaderPanel(gui.g, v, message)
if gui.State.Panels.Branches.SelectedLine == 0 {
_ = gui.pullWithMode("ff-only", PullFilesOptions{})
return
} else {
if err := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName); err != nil {
_ = gui.surfaceError(err)
return
}
_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})
}
_ = gui.closeConfirmationPrompt(gui.g, true)
}()
return nil
}
func (gui *Gui) onBranchesTabClick(tabIndex int) error {
contexts := []string{"local-branches", "remotes", "tags"}
branchesView := gui.getBranchesView()
branchesView.TabIndex = tabIndex
return gui.switchBranchesPanelContext(contexts[tabIndex])
}
func (gui *Gui) switchBranchesPanelContext(context string) error {
branchesView := gui.getBranchesView()
branchesView.Context = context
if err := gui.onSearchEscape(); err != nil {
return err
}
contextTabIndexMap := map[string]int{
"local-branches": 0,
"remotes": 1,
"remote-branches": 1,
"tags": 2,
}
branchesView.TabIndex = contextTabIndexMap[context]
return gui.refreshBranchesViewWithSelection()
}
func (gui *Gui) refreshBranchesViewWithSelection() error {
branchesView := gui.getBranchesView()
switch branchesView.Context {
case "local-branches":
return gui.renderLocalBranchesWithSelection()
case "remotes":
return gui.renderRemotesWithSelection()
case "remote-branches":
return gui.renderRemoteBranchesWithSelection()
case "tags":
return gui.renderTagsWithSelection()
}
return nil
}
func (gui *Gui) handleNextBranchesTab(g *gocui.Gui, v *gocui.View) error {
return gui.onBranchesTabClick(
utils.ModuloWithWrap(v.TabIndex+1, len(v.Tabs)),
)
}
func (gui *Gui) handlePrevBranchesTab(g *gocui.Gui, v *gocui.View) error {
return gui.onBranchesTabClick(
utils.ModuloWithWrap(v.TabIndex-1, len(v.Tabs)),
)
}
func (gui *Gui) handleCreateResetToBranchMenu(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
return gui.createResetMenu(branch.Name)
}
func (gui *Gui) onBranchesPanelSearchSelect(selectedLine int) error {
branchesView := gui.getBranchesView()
switch branchesView.Context {
case "local-branches":
gui.State.Panels.Branches.SelectedLine = selectedLine
return gui.handleBranchSelect(gui.g, branchesView)
case "remotes":
gui.State.Panels.Remotes.SelectedLine = selectedLine
return gui.handleRemoteSelect(gui.g, branchesView)
case "remote-branches":
gui.State.Panels.RemoteBranches.SelectedLine = selectedLine
return gui.handleRemoteBranchSelect(gui.g, branchesView)
}
return nil
}
func (gui *Gui) handleRenameBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
// TODO: find a way to not checkout the branch here if it's not the current branch (i.e. find some
// way to get it to show up in the reflog)
promptForNewName := func() error {
return gui.createPromptPanel(g, v, gui.Tr.SLocalize("NewBranchNamePrompt")+" "+branch.Name+":", "", func(g *gocui.Gui, v *gocui.View) error {
newName := gui.trimmedContent(v)
if err := gui.GitCommand.RenameBranch(branch.Name, newName); err != nil {
return gui.surfaceError(err)
}
// need to checkout so that the branch shows up in our reflog and therefore
// doesn't get lost among all the other branches when we switch to something else
if err := gui.GitCommand.Checkout(newName, commands.CheckoutOptions{Force: false}); err != nil {
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
})
}
// I could do an explicit check here for whether the branch is tracking a remote branch
// but if we've selected it we'll already know that via Pullables and Pullables.
// Bit of a hack but I'm lazy.
notTrackingRemote := branch.Pullables == "?"
if notTrackingRemote {
return promptForNewName()
}
return gui.createConfirmationPanel(gui.g, v, true, gui.Tr.SLocalize("renameBranch"), gui.Tr.SLocalize("RenameBranchWarning"), func(_g *gocui.Gui, _v *gocui.View) error {
return promptForNewName()
}, nil)
}
func (gui *Gui) currentBranch() *commands.Branch {
if len(gui.State.Branches) == 0 {
return nil
}
return gui.State.Branches[0]
}
func (gui *Gui) handleClipboardCopyBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
return gui.OSCommand.CopyToClipboard(branch.Name)
}
| pkg/gui/branches_panel.go | 1 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.998276948928833,
0.02537980116903782,
0.0001654481893638149,
0.002434724709019065,
0.13428211212158203
] |
{
"id": 2,
"code_window": [
"\t\t} else {\n",
"\t\t\tif err := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName); err != nil {\n",
"\t\t\t\t_ = gui.surfaceError(err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName, gui.promptUserForCredential)\n",
"\t\t\tgui.handleCredentialsPopup(err)\n"
],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 404
} | package storer
import "github.com/go-git/go-git/v5/plumbing"
// ShallowStorer is a storage of references to shallow commits by hash,
// meaning that these commits have missing parents because of a shallow fetch.
type ShallowStorer interface {
SetShallow([]plumbing.Hash) error
Shallow() ([]plumbing.Hash, error)
}
| vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.00017221173038706183,
0.00017079256940633059,
0.00016937342297751456,
0.00017079256940633059,
0.0000014191537047736347
] |
{
"id": 2,
"code_window": [
"\t\t} else {\n",
"\t\t\tif err := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName); err != nil {\n",
"\t\t\t\t_ = gui.surfaceError(err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName, gui.promptUserForCredential)\n",
"\t\t\tgui.handleCredentialsPopup(err)\n"
],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 404
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
// +build mips mipsle
// +build !gccgo
#include "textflag.h"
//
// System calls for mips, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
JAL runtime·entersyscall(SB)
MOVW a1+4(FP), R4
MOVW a2+8(FP), R5
MOVW a3+12(FP), R6
MOVW R0, R7
MOVW trap+0(FP), R2 // syscall entry
SYSCALL
MOVW R2, r1+16(FP) // r1
MOVW R3, r2+20(FP) // r2
JAL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVW a1+4(FP), R4
MOVW a2+8(FP), R5
MOVW a3+12(FP), R6
MOVW trap+0(FP), R2 // syscall entry
SYSCALL
MOVW R2, r1+16(FP)
MOVW R3, r2+20(FP)
RET
| vendor/golang.org/x/sys/unix/asm_linux_mipsx.s | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.00017685133207123727,
0.00017060614482033998,
0.00016084920207504183,
0.00017254921840503812,
0.000005681259153789142
] |
{
"id": 2,
"code_window": [
"\t\t} else {\n",
"\t\t\tif err := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName); err != nil {\n",
"\t\t\t\t_ = gui.surfaceError(err)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\terr := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName, gui.promptUserForCredential)\n",
"\t\t\tgui.handleCredentialsPopup(err)\n"
],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 404
} | // Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"strconv"
"strings"
"unicode"
)
const minReaderBufferSize = 4096
var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
type parserOptions struct {
IgnoreContinuation bool
IgnoreInlineComment bool
AllowPythonMultilineValues bool
SpaceBeforeInlineComment bool
UnescapeValueDoubleQuotes bool
UnescapeValueCommentSymbols bool
PreserveSurroundedQuote bool
DebugFunc DebugFunc
ReaderBufferSize int
}
type parser struct {
buf *bufio.Reader
options parserOptions
isEOF bool
count int
comment *bytes.Buffer
}
func (p *parser) debug(format string, args ...interface{}) {
if p.options.DebugFunc != nil {
p.options.DebugFunc(fmt.Sprintf(format, args...))
}
}
func newParser(r io.Reader, opts parserOptions) *parser {
size := opts.ReaderBufferSize
if size < minReaderBufferSize {
size = minReaderBufferSize
}
return &parser{
buf: bufio.NewReaderSize(r, size),
options: opts,
count: 1,
comment: &bytes.Buffer{},
}
}
// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
mask, err := p.buf.Peek(2)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 2 {
return nil
}
switch {
case mask[0] == 254 && mask[1] == 255:
fallthrough
case mask[0] == 255 && mask[1] == 254:
p.buf.Read(mask)
case mask[0] == 239 && mask[1] == 187:
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 3 {
return nil
}
if mask[2] == 191 {
p.buf.Read(mask)
}
}
return nil
}
func (p *parser) readUntil(delim byte) ([]byte, error) {
data, err := p.buf.ReadBytes(delim)
if err != nil {
if err == io.EOF {
p.isEOF = true
} else {
return nil, err
}
}
return data, nil
}
func cleanComment(in []byte) ([]byte, bool) {
i := bytes.IndexAny(in, "#;")
if i == -1 {
return nil, false
}
return in[i:], true
}
func readKeyName(delimiters string, in []byte) (string, int, error) {
line := string(in)
// Check if key name surrounded by quotes.
var keyQuote string
if line[0] == '"' {
if len(line) > 6 && string(line[0:3]) == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
// Get out key name
endIdx := -1
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
pos := strings.Index(line[startIdx:], keyQuote)
if pos == -1 {
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
}
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], delimiters)
if i < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
endIdx = pos + i
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, delimiters)
if endIdx < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := string(data)
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
comment, has := cleanComment([]byte(next[pos:]))
if has {
p.comment.Write(bytes.TrimSpace(comment))
}
break
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
}
}
return val, nil
}
func (p *parser) readContinuationLines(val string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := strings.TrimSpace(string(data))
if len(next) == 0 {
break
}
val += next
if val[len(val)-1] != '\\' {
break
}
val = val[:len(val)-1]
}
return val, nil
}
// hasSurroundedQuote check if and only if the first and last characters
// are quotes \" or \'.
// It returns false if any other parts also contain same kind of quotes.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
return "", nil
}
var valQuote string
if len(line) > 3 && string(line[0:3]) == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
} else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
valQuote = `"`
}
if len(valQuote) > 0 {
startIdx := len(valQuote)
pos := strings.LastIndex(line[startIdx:], valQuote)
// Check for multi-line value
if pos == -1 {
return p.readMultilines(line, line[startIdx:], valQuote)
}
if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
}
return line[startIdx : pos+startIdx], nil
}
lastChar := line[len(line)-1]
// Won't be able to reach here if value only contains whitespace
line = strings.TrimSpace(line)
trimmedLastChar := line[len(line)-1]
// Check continuation lines when desired
if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
// Check if ignore inline comment
if !p.options.IgnoreInlineComment {
var i int
if p.options.SpaceBeforeInlineComment {
i = strings.Index(line, " #")
if i == -1 {
i = strings.Index(line, " ;")
}
} else {
i = strings.IndexAny(line, "#;")
}
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
}
// Trim single and double quotes
if (hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
line = line[1 : len(line)-1]
} else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
if strings.Contains(line, `\;`) {
line = strings.Replace(line, `\;`, ";", -1)
}
if strings.Contains(line, `\#`) {
line = strings.Replace(line, `\#`, "#", -1)
}
} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
return line, nil
}
func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
indentSize := 0
for {
peekData, peekErr := peekBuffer.ReadBytes('\n')
if peekErr != nil {
if peekErr == io.EOF {
p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line)
return line, nil
}
p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
return "", peekErr
}
p.debug("readPythonMultilines: parsing %q", string(peekData))
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
for n, v := range peekMatches {
p.debug(" %d: %q", n, v)
}
// Return if not a Python multiline value.
if len(peekMatches) != 3 {
p.debug("readPythonMultilines: end of value, got: %q", line)
return line, nil
}
// Determine indent size and line prefix.
currentIndentSize := len(peekMatches[1])
if indentSize < 1 {
indentSize = currentIndentSize
p.debug("readPythonMultilines: indent size is %d", indentSize)
}
// Make sure each line is indented at least as far as first line.
if currentIndentSize < indentSize {
p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line)
return line, nil
}
// Advance the parser reader (buffer) in-sync with the peek buffer.
_, err := p.buf.Discard(len(peekData))
if err != nil {
p.debug("readPythonMultilines: failed to skip to the end, returning error")
return "", err
}
// Handle indented empty line.
line += "\n" + peekMatches[1][indentSize:] + peekMatches[2]
}
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader, parserOptions{
IgnoreContinuation: f.options.IgnoreContinuation,
IgnoreInlineComment: f.options.IgnoreInlineComment,
AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
DebugFunc: f.options.DebugFunc,
ReaderBufferSize: f.options.ReaderBufferSize,
})
if err = p.BOM(); err != nil {
return fmt.Errorf("BOM: %v", err)
}
// Ignore error because default section name is never empty string.
name := DefaultSection
if f.options.Insensitive {
name = strings.ToLower(DefaultSection)
}
section, _ := f.NewSection(name)
// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
var isLastValueEmpty bool
var lastRegularKey *Key
var line []byte
var inUnparseableSection bool
// NOTE: Iterate and increase `currentPeekSize` until
// the size of the parser buffer is found.
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
parserBufferSize := 0
// NOTE: Peek 4kb at a time.
currentPeekSize := minReaderBufferSize
if f.options.AllowPythonMultilineValues {
for {
peekBytes, _ := p.buf.Peek(currentPeekSize)
peekBytesLength := len(peekBytes)
if parserBufferSize >= peekBytesLength {
break
}
currentPeekSize *= 2
parserBufferSize = peekBytesLength
}
}
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
return err
}
if f.options.AllowNestedValues &&
isLastValueEmpty && len(line) > 0 {
if line[0] == ' ' || line[0] == '\t' {
lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
continue
}
}
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
if len(line) == 0 {
continue
}
// Comments
if line[0] == '#' || line[0] == ';' {
// Note: we do not care ending line break,
// it is needed for adding second line,
// so just clean it once at the end when set to value.
p.comment.Write(line)
continue
}
// Section
if line[0] == '[' {
// Read to the next ']' (TODO: support quoted strings)
closeIdx := bytes.LastIndexByte(line, ']')
if closeIdx == -1 {
return fmt.Errorf("unclosed section: %s", line)
}
name := string(line[1:closeIdx])
section, err = f.NewSection(name)
if err != nil {
return err
}
comment, has := cleanComment(line[closeIdx+1:])
if has {
p.comment.Write(comment)
}
section.Comment = strings.TrimSpace(p.comment.String())
// Reset aotu-counter and comments
p.comment.Reset()
p.count = 1
inUnparseableSection = false
for i := range f.options.UnparseableSections {
if f.options.UnparseableSections[i] == name ||
(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
inUnparseableSection = true
continue
}
}
continue
}
if inUnparseableSection {
section.isRawSection = true
section.rawBody += string(line)
continue
}
kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) {
switch {
case f.options.AllowBooleanKeys:
kname, err := p.readValue(line, parserBufferSize)
if err != nil {
return err
}
key, err := section.NewBooleanKey(kname)
if err != nil {
return err
}
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
continue
case f.options.SkipUnrecognizableLines:
continue
}
}
return err
}
// Auto increment.
isAutoIncr := false
if kname == "-" {
isAutoIncr = true
kname = "#" + strconv.Itoa(p.count)
p.count++
}
value, err := p.readValue(line[offset:], parserBufferSize)
if err != nil {
return err
}
isLastValueEmpty = len(value) == 0
key, err := section.NewKey(kname, value)
if err != nil {
return err
}
key.isAutoIncrement = isAutoIncr
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
lastRegularKey = key
}
return nil
}
| vendor/gopkg.in/ini.v1/parser.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.002328883158043027,
0.00025929693947546184,
0.00015825834998395294,
0.00017226740601472557,
0.00038384433719329536
] |
{
"id": 3,
"code_window": [
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n",
"\n",
"\t\t_ = gui.closeConfirmationPrompt(gui.g, true)\n",
"\t}()\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 410
} | package gui
import (
"fmt"
"strings"
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands"
"github.com/jesseduffield/lazygit/pkg/gui/presentation"
"github.com/jesseduffield/lazygit/pkg/utils"
)
// list panel functions
func (gui *Gui) getSelectedBranch() *commands.Branch {
selectedLine := gui.State.Panels.Branches.SelectedLine
if selectedLine == -1 {
return nil
}
return gui.State.Branches[selectedLine]
}
// may want to standardise how these select methods work
func (gui *Gui) handleBranchSelect(g *gocui.Gui, v *gocui.View) error {
if gui.popupPanelFocused() {
return nil
}
gui.State.SplitMainPanel = false
if _, err := gui.g.SetCurrentView(v.Name()); err != nil {
return err
}
gui.getMainView().Title = "Log"
// This really shouldn't happen: there should always be a master branch
if len(gui.State.Branches) == 0 {
return gui.newStringTask("main", gui.Tr.SLocalize("NoBranchesThisRepo"))
}
branch := gui.getSelectedBranch()
v.FocusPoint(0, gui.State.Panels.Branches.SelectedLine)
if gui.inDiffMode() {
return gui.renderDiff()
}
cmd := gui.OSCommand.ExecutableFromString(
gui.GitCommand.GetBranchGraphCmdStr(branch.Name),
)
if err := gui.newCmdTask("main", cmd); err != nil {
gui.Log.Error(err)
}
return nil
}
// gui.refreshStatus is called at the end of this because that's when we can
// be sure there is a state.Branches array to pick the current branch from
func (gui *Gui) refreshBranches() {
reflogCommits := gui.State.FilteredReflogCommits
if gui.inFilterMode() {
// in filter mode we filter our reflog commits to just those containing the path
// however we need all the reflog entries to populate the recencies of our branches
// which allows us to order them correctly. So if we're filtering we'll just
// manually load all the reflog commits here
var err error
reflogCommits, _, err = gui.GitCommand.GetReflogCommits(nil, "")
if err != nil {
gui.Log.Error(err)
}
}
builder, err := commands.NewBranchListBuilder(gui.Log, gui.GitCommand, reflogCommits)
if err != nil {
_ = gui.surfaceError(err)
}
gui.State.Branches = builder.Build()
// TODO: if we're in the remotes view and we've just deleted a remote we need to refresh accordingly
if gui.getBranchesView().Context == "local-branches" {
_ = gui.renderLocalBranchesWithSelection()
}
gui.refreshStatus()
}
func (gui *Gui) renderLocalBranchesWithSelection() error {
branchesView := gui.getBranchesView()
gui.refreshSelectedLine(&gui.State.Panels.Branches.SelectedLine, len(gui.State.Branches))
displayStrings := presentation.GetBranchListDisplayStrings(gui.State.Branches, gui.State.ScreenMode != SCREEN_NORMAL, gui.State.Diff.Ref)
gui.renderDisplayStrings(branchesView, displayStrings)
if gui.g.CurrentView() == branchesView {
if err := gui.handleBranchSelect(gui.g, branchesView); err != nil {
return gui.surfaceError(err)
}
}
return nil
}
// specific functions
func (gui *Gui) handleBranchPress(g *gocui.Gui, v *gocui.View) error {
if gui.State.Panels.Branches.SelectedLine == -1 {
return nil
}
if gui.State.Panels.Branches.SelectedLine == 0 {
return gui.createErrorPanel(gui.Tr.SLocalize("AlreadyCheckedOutBranch"))
}
branch := gui.getSelectedBranch()
return gui.handleCheckoutRef(branch.Name, handleCheckoutRefOptions{})
}
func (gui *Gui) handleCreatePullRequestPress(g *gocui.Gui, v *gocui.View) error {
pullRequest := commands.NewPullRequest(gui.GitCommand)
branch := gui.getSelectedBranch()
if err := pullRequest.Create(branch); err != nil {
return gui.surfaceError(err)
}
return nil
}
func (gui *Gui) handleGitFetch(g *gocui.Gui, v *gocui.View) error {
if err := gui.createLoaderPanel(gui.g, v, gui.Tr.SLocalize("FetchWait")); err != nil {
return err
}
go func() {
err := gui.fetch(true)
gui.handleCredentialsPopup(err)
_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}()
return nil
}
func (gui *Gui) handleForceCheckout(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
message := gui.Tr.SLocalize("SureForceCheckout")
title := gui.Tr.SLocalize("ForceCheckoutBranch")
return gui.createConfirmationPanel(g, v, true, title, message, func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.Checkout(branch.Name, commands.CheckoutOptions{Force: true}); err != nil {
_ = gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}, nil)
}
type handleCheckoutRefOptions struct {
WaitingStatus string
EnvVars []string
onRefNotFound func(ref string) error
}
func (gui *Gui) handleCheckoutRef(ref string, options handleCheckoutRefOptions) error {
waitingStatus := options.WaitingStatus
if waitingStatus == "" {
waitingStatus = gui.Tr.SLocalize("CheckingOutStatus")
}
cmdOptions := commands.CheckoutOptions{Force: false, EnvVars: options.EnvVars}
onSuccess := func() {
gui.State.Panels.Branches.SelectedLine = 0
gui.State.Panels.Commits.SelectedLine = 0
// loading a heap of commits is slow so we limit them whenever doing a reset
gui.State.Panels.Commits.LimitCommits = true
}
return gui.WithWaitingStatus(waitingStatus, func() error {
if err := gui.GitCommand.Checkout(ref, cmdOptions); err != nil {
// note, this will only work for english-language git commands. If we force git to use english, and the error isn't this one, then the user will receive an english command they may not understand. I'm not sure what the best solution to this is. Running the command once in english and a second time in the native language is one option
if options.onRefNotFound != nil && strings.Contains(err.Error(), "did not match any file(s) known to git") {
return options.onRefNotFound(ref)
}
if strings.Contains(err.Error(), "Please commit your changes or stash them before you switch branch") {
// offer to autostash changes
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("AutoStashTitle"), gui.Tr.SLocalize("AutoStashPrompt"), func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.StashSave(gui.Tr.SLocalize("StashPrefix") + ref); err != nil {
return gui.surfaceError(err)
}
if err := gui.GitCommand.Checkout(ref, cmdOptions); err != nil {
return gui.surfaceError(err)
}
onSuccess()
if err := gui.GitCommand.StashDo(0, "pop"); err != nil {
if err := gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI}); err != nil {
return err
}
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI})
}, nil)
}
if err := gui.surfaceError(err); err != nil {
return err
}
}
onSuccess()
return gui.refreshSidePanels(refreshOptions{mode: BLOCK_UI})
})
}
func (gui *Gui) handleCheckoutByName(g *gocui.Gui, v *gocui.View) error {
return gui.createPromptPanel(g, v, gui.Tr.SLocalize("BranchName")+":", "", func(g *gocui.Gui, v *gocui.View) error {
return gui.handleCheckoutRef(gui.trimmedContent(v), handleCheckoutRefOptions{
onRefNotFound: func(ref string) error {
return gui.createConfirmationPanel(gui.g, v, true, gui.Tr.SLocalize("BranchNotFoundTitle"), fmt.Sprintf("%s %s%s", gui.Tr.SLocalize("BranchNotFoundPrompt"), ref, "?"), func(_g *gocui.Gui, _v *gocui.View) error {
return gui.createNewBranchWithName(ref)
}, nil)
},
})
})
}
func (gui *Gui) getCheckedOutBranch() *commands.Branch {
if len(gui.State.Branches) == 0 {
return nil
}
return gui.State.Branches[0]
}
func (gui *Gui) handleNewBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
message := gui.Tr.TemplateLocalize(
"NewBranchNameBranchOff",
Teml{
"branchName": branch.Name,
},
)
return gui.createPromptPanel(g, v, message, "", func(g *gocui.Gui, v *gocui.View) error {
return gui.createNewBranchWithName(gui.trimmedContent(v))
})
}
func (gui *Gui) createNewBranchWithName(newBranchName string) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
if err := gui.GitCommand.NewBranch(newBranchName, branch.Name); err != nil {
return gui.surfaceError(err)
}
gui.State.Panels.Branches.SelectedLine = 0
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
}
func (gui *Gui) handleDeleteBranch(g *gocui.Gui, v *gocui.View) error {
return gui.deleteBranch(g, v, false)
}
func (gui *Gui) deleteBranch(g *gocui.Gui, v *gocui.View, force bool) error {
selectedBranch := gui.getSelectedBranch()
if selectedBranch == nil {
return nil
}
checkedOutBranch := gui.getCheckedOutBranch()
if checkedOutBranch.Name == selectedBranch.Name {
return gui.createErrorPanel(gui.Tr.SLocalize("CantDeleteCheckOutBranch"))
}
return gui.deleteNamedBranch(g, v, selectedBranch, force)
}
func (gui *Gui) deleteNamedBranch(g *gocui.Gui, v *gocui.View, selectedBranch *commands.Branch, force bool) error {
title := gui.Tr.SLocalize("DeleteBranch")
var messageID string
if force {
messageID = "ForceDeleteBranchMessage"
} else {
messageID = "DeleteBranchMessage"
}
message := gui.Tr.TemplateLocalize(
messageID,
Teml{
"selectedBranchName": selectedBranch.Name,
},
)
return gui.createConfirmationPanel(g, v, true, title, message, func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.DeleteBranch(selectedBranch.Name, force); err != nil {
errMessage := err.Error()
if !force && strings.Contains(errMessage, "is not fully merged") {
return gui.deleteNamedBranch(g, v, selectedBranch, true)
}
return gui.createErrorPanel(errMessage)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})
}, nil)
}
func (gui *Gui) mergeBranchIntoCheckedOutBranch(branchName string) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
if gui.GitCommand.IsHeadDetached() {
return gui.createErrorPanel("Cannot merge branch in detached head state. You might have checked out a commit directly or a remote branch, in which case you should checkout the local branch you want to be on")
}
checkedOutBranchName := gui.getCheckedOutBranch().Name
if checkedOutBranchName == branchName {
return gui.createErrorPanel(gui.Tr.SLocalize("CantMergeBranchIntoItself"))
}
prompt := gui.Tr.TemplateLocalize(
"ConfirmMerge",
Teml{
"checkedOutBranch": checkedOutBranchName,
"selectedBranch": branchName,
},
)
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("MergingTitle"), prompt,
func(g *gocui.Gui, v *gocui.View) error {
err := gui.GitCommand.Merge(branchName, commands.MergeOpts{})
return gui.handleGenericMergeCommandResult(err)
}, nil)
}
func (gui *Gui) handleMerge(g *gocui.Gui, v *gocui.View) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
selectedBranchName := gui.getSelectedBranch().Name
return gui.mergeBranchIntoCheckedOutBranch(selectedBranchName)
}
func (gui *Gui) handleRebaseOntoLocalBranch(g *gocui.Gui, v *gocui.View) error {
selectedBranchName := gui.getSelectedBranch().Name
return gui.handleRebaseOntoBranch(selectedBranchName)
}
func (gui *Gui) handleRebaseOntoBranch(selectedBranchName string) error {
if ok, err := gui.validateNotInFilterMode(); err != nil || !ok {
return err
}
checkedOutBranch := gui.getCheckedOutBranch().Name
if selectedBranchName == checkedOutBranch {
return gui.createErrorPanel(gui.Tr.SLocalize("CantRebaseOntoSelf"))
}
prompt := gui.Tr.TemplateLocalize(
"ConfirmRebase",
Teml{
"checkedOutBranch": checkedOutBranch,
"selectedBranch": selectedBranchName,
},
)
return gui.createConfirmationPanel(gui.g, gui.getBranchesView(), true, gui.Tr.SLocalize("RebasingTitle"), prompt,
func(g *gocui.Gui, v *gocui.View) error {
err := gui.GitCommand.RebaseBranch(selectedBranchName)
return gui.handleGenericMergeCommandResult(err)
}, nil)
}
func (gui *Gui) handleFastForward(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
if branch.Pushables == "" {
return nil
}
if branch.Pushables == "?" {
return gui.createErrorPanel(gui.Tr.SLocalize("FwdNoUpstream"))
}
if branch.Pushables != "0" {
return gui.createErrorPanel(gui.Tr.SLocalize("FwdCommitsToPush"))
}
upstream, err := gui.GitCommand.GetUpstreamForBranch(branch.Name)
if err != nil {
return gui.surfaceError(err)
}
split := strings.Split(upstream, "/")
remoteName := split[0]
remoteBranchName := strings.Join(split[1:], "/")
message := gui.Tr.TemplateLocalize(
"Fetching",
Teml{
"from": fmt.Sprintf("%s/%s", remoteName, remoteBranchName),
"to": branch.Name,
},
)
go func() {
_ = gui.createLoaderPanel(gui.g, v, message)
if gui.State.Panels.Branches.SelectedLine == 0 {
_ = gui.pullWithMode("ff-only", PullFilesOptions{})
return
} else {
if err := gui.GitCommand.FastForward(branch.Name, remoteName, remoteBranchName); err != nil {
_ = gui.surfaceError(err)
return
}
_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})
}
_ = gui.closeConfirmationPrompt(gui.g, true)
}()
return nil
}
func (gui *Gui) onBranchesTabClick(tabIndex int) error {
contexts := []string{"local-branches", "remotes", "tags"}
branchesView := gui.getBranchesView()
branchesView.TabIndex = tabIndex
return gui.switchBranchesPanelContext(contexts[tabIndex])
}
func (gui *Gui) switchBranchesPanelContext(context string) error {
branchesView := gui.getBranchesView()
branchesView.Context = context
if err := gui.onSearchEscape(); err != nil {
return err
}
contextTabIndexMap := map[string]int{
"local-branches": 0,
"remotes": 1,
"remote-branches": 1,
"tags": 2,
}
branchesView.TabIndex = contextTabIndexMap[context]
return gui.refreshBranchesViewWithSelection()
}
func (gui *Gui) refreshBranchesViewWithSelection() error {
branchesView := gui.getBranchesView()
switch branchesView.Context {
case "local-branches":
return gui.renderLocalBranchesWithSelection()
case "remotes":
return gui.renderRemotesWithSelection()
case "remote-branches":
return gui.renderRemoteBranchesWithSelection()
case "tags":
return gui.renderTagsWithSelection()
}
return nil
}
func (gui *Gui) handleNextBranchesTab(g *gocui.Gui, v *gocui.View) error {
return gui.onBranchesTabClick(
utils.ModuloWithWrap(v.TabIndex+1, len(v.Tabs)),
)
}
func (gui *Gui) handlePrevBranchesTab(g *gocui.Gui, v *gocui.View) error {
return gui.onBranchesTabClick(
utils.ModuloWithWrap(v.TabIndex-1, len(v.Tabs)),
)
}
func (gui *Gui) handleCreateResetToBranchMenu(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
return gui.createResetMenu(branch.Name)
}
func (gui *Gui) onBranchesPanelSearchSelect(selectedLine int) error {
branchesView := gui.getBranchesView()
switch branchesView.Context {
case "local-branches":
gui.State.Panels.Branches.SelectedLine = selectedLine
return gui.handleBranchSelect(gui.g, branchesView)
case "remotes":
gui.State.Panels.Remotes.SelectedLine = selectedLine
return gui.handleRemoteSelect(gui.g, branchesView)
case "remote-branches":
gui.State.Panels.RemoteBranches.SelectedLine = selectedLine
return gui.handleRemoteBranchSelect(gui.g, branchesView)
}
return nil
}
func (gui *Gui) handleRenameBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
// TODO: find a way to not checkout the branch here if it's not the current branch (i.e. find some
// way to get it to show up in the reflog)
promptForNewName := func() error {
return gui.createPromptPanel(g, v, gui.Tr.SLocalize("NewBranchNamePrompt")+" "+branch.Name+":", "", func(g *gocui.Gui, v *gocui.View) error {
newName := gui.trimmedContent(v)
if err := gui.GitCommand.RenameBranch(branch.Name, newName); err != nil {
return gui.surfaceError(err)
}
// need to checkout so that the branch shows up in our reflog and therefore
// doesn't get lost among all the other branches when we switch to something else
if err := gui.GitCommand.Checkout(newName, commands.CheckoutOptions{Force: false}); err != nil {
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC})
})
}
// I could do an explicit check here for whether the branch is tracking a remote branch
// but if we've selected it we'll already know that via Pullables and Pullables.
// Bit of a hack but I'm lazy.
notTrackingRemote := branch.Pullables == "?"
if notTrackingRemote {
return promptForNewName()
}
return gui.createConfirmationPanel(gui.g, v, true, gui.Tr.SLocalize("renameBranch"), gui.Tr.SLocalize("RenameBranchWarning"), func(_g *gocui.Gui, _v *gocui.View) error {
return promptForNewName()
}, nil)
}
func (gui *Gui) currentBranch() *commands.Branch {
if len(gui.State.Branches) == 0 {
return nil
}
return gui.State.Branches[0]
}
func (gui *Gui) handleClipboardCopyBranch(g *gocui.Gui, v *gocui.View) error {
branch := gui.getSelectedBranch()
if branch == nil {
return nil
}
return gui.OSCommand.CopyToClipboard(branch.Name)
}
| pkg/gui/branches_panel.go | 1 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.9975887537002563,
0.12615418434143066,
0.00016529082495253533,
0.001097103115171194,
0.3239666521549225
] |
{
"id": 3,
"code_window": [
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n",
"\n",
"\t\t_ = gui.closeConfirmationPrompt(gui.g, true)\n",
"\t}()\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 410
} | package gui
import (
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands"
"github.com/jesseduffield/lazygit/pkg/gui/presentation"
)
// list panel functions
func (gui *Gui) getSelectedTag() *commands.Tag {
selectedLine := gui.State.Panels.Tags.SelectedLine
if selectedLine == -1 || len(gui.State.Tags) == 0 {
return nil
}
return gui.State.Tags[selectedLine]
}
func (gui *Gui) handleTagSelect(g *gocui.Gui, v *gocui.View) error {
if gui.popupPanelFocused() {
return nil
}
gui.State.SplitMainPanel = false
if _, err := gui.g.SetCurrentView(v.Name()); err != nil {
return err
}
gui.getMainView().Title = "Tag"
tag := gui.getSelectedTag()
if tag == nil {
return gui.newStringTask("main", "No tags")
}
v.FocusPoint(0, gui.State.Panels.Tags.SelectedLine)
if gui.inDiffMode() {
return gui.renderDiff()
}
cmd := gui.OSCommand.ExecutableFromString(
gui.GitCommand.GetBranchGraphCmdStr(tag.Name),
)
if err := gui.newCmdTask("main", cmd); err != nil {
gui.Log.Error(err)
}
return nil
}
func (gui *Gui) refreshTags() error {
tags, err := gui.GitCommand.GetTags()
if err != nil {
return gui.surfaceError(err)
}
gui.State.Tags = tags
if gui.getBranchesView().Context == "tags" {
return gui.renderTagsWithSelection()
}
return nil
}
func (gui *Gui) renderTagsWithSelection() error {
branchesView := gui.getBranchesView()
gui.refreshSelectedLine(&gui.State.Panels.Tags.SelectedLine, len(gui.State.Tags))
displayStrings := presentation.GetTagListDisplayStrings(gui.State.Tags, gui.State.Diff.Ref)
gui.renderDisplayStrings(branchesView, displayStrings)
if gui.g.CurrentView() == branchesView && branchesView.Context == "tags" {
if err := gui.handleTagSelect(gui.g, branchesView); err != nil {
return gui.surfaceError(err)
}
}
return nil
}
func (gui *Gui) handleCheckoutTag(g *gocui.Gui, v *gocui.View) error {
tag := gui.getSelectedTag()
if tag == nil {
return nil
}
if err := gui.handleCheckoutRef(tag.Name, handleCheckoutRefOptions{}); err != nil {
return err
}
return gui.switchBranchesPanelContext("local-branches")
}
func (gui *Gui) handleDeleteTag(g *gocui.Gui, v *gocui.View) error {
tag := gui.getSelectedTag()
if tag == nil {
return nil
}
prompt := gui.Tr.TemplateLocalize(
"DeleteTagPrompt",
Teml{
"tagName": tag.Name,
},
)
return gui.createConfirmationPanel(gui.g, v, true, gui.Tr.SLocalize("DeleteTagTitle"), prompt, func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.DeleteTag(tag.Name); err != nil {
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{COMMITS, TAGS}})
}, nil)
}
func (gui *Gui) handlePushTag(g *gocui.Gui, v *gocui.View) error {
tag := gui.getSelectedTag()
if tag == nil {
return nil
}
title := gui.Tr.TemplateLocalize(
"PushTagTitle",
Teml{
"tagName": tag.Name,
},
)
return gui.createPromptPanel(gui.g, v, title, "origin", func(g *gocui.Gui, v *gocui.View) error {
if err := gui.GitCommand.PushTag(v.Buffer(), tag.Name); err != nil {
return gui.surfaceError(err)
}
return nil
})
}
func (gui *Gui) handleCreateTag(g *gocui.Gui, v *gocui.View) error {
return gui.createPromptPanel(gui.g, v, gui.Tr.SLocalize("CreateTagTitle"), "", func(g *gocui.Gui, v *gocui.View) error {
// leaving commit SHA blank so that we're just creating the tag for the current commit
tagName := v.Buffer()
if err := gui.GitCommand.CreateLightweightTag(tagName, ""); err != nil {
return gui.surfaceError(err)
}
return gui.refreshSidePanels(refreshOptions{scope: []int{COMMITS, TAGS}, then: func() {
// find the index of the tag and set that as the currently selected line
for i, tag := range gui.State.Tags {
if tag.Name == tagName {
gui.State.Panels.Tags.SelectedLine = i
gui.renderTagsWithSelection()
return
}
}
},
})
})
}
func (gui *Gui) handleCreateResetToTagMenu(g *gocui.Gui, v *gocui.View) error {
tag := gui.getSelectedTag()
if tag == nil {
return nil
}
return gui.createResetMenu(tag.Name)
}
| pkg/gui/tags_panel.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.006434324197471142,
0.0013993419706821442,
0.00016569557192269713,
0.0006411783397197723,
0.0015829929616302252
] |
{
"id": 3,
"code_window": [
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n",
"\n",
"\t\t_ = gui.closeConfirmationPrompt(gui.g, true)\n",
"\t}()\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 410
} | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
//
// Blowfish is a legacy cipher and its short block size makes it vulnerable to
// birthday bound attacks (see https://sweet32.info). It should only be used
// where compatibility with legacy systems, not security, is the goal.
//
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
// golang.org/x/crypto/chacha20poly1305).
package blowfish // import "golang.org/x/crypto/blowfish"
// The code is a port of Bruce Schneier's C implementation.
// See https://www.schneier.com/blowfish.html.
import "strconv"
// The Blowfish block size in bytes.
const BlockSize = 8
// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
p [18]uint32
s0, s1, s2, s3 [256]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, from 1 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
var result Cipher
if k := len(key); k < 1 || k > 56 {
return nil, KeySizeError(k)
}
initCipher(&result)
ExpandKey(key, &result)
return &result, nil
}
// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)
}
initCipher(&result)
expandKeyWithSalt(key, salt, &result)
return &result, nil
}
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = encryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = decryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
func initCipher(c *Cipher) {
copy(c.p[0:], p[0:])
copy(c.s0[0:], s0[0:])
copy(c.s1[0:], s1[0:])
copy(c.s2[0:], s2[0:])
copy(c.s3[0:], s3[0:])
}
| vendor/golang.org/x/crypto/blowfish/cipher.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.00026724295457825065,
0.0001796771102817729,
0.00016350630903616548,
0.00016772575327195227,
0.000029923765396233648
] |
{
"id": 3,
"code_window": [
"\t\t\t_ = gui.refreshSidePanels(refreshOptions{mode: ASYNC, scope: []int{BRANCHES}})\n",
"\t\t}\n",
"\n",
"\t\t_ = gui.closeConfirmationPrompt(gui.g, true)\n",
"\t}()\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/gui/branches_panel.go",
"type": "replace",
"edit_start_line_idx": 410
} | // Package idxfile implements encoding and decoding of packfile idx files.
//
// == Original (version 1) pack-*.idx files have the following format:
//
// - The header consists of 256 4-byte network byte order
// integers. N-th entry of this table records the number of
// objects in the corresponding pack, the first byte of whose
// object name is less than or equal to N. This is called the
// 'first-level fan-out' table.
//
// - The header is followed by sorted 24-byte entries, one entry
// per object in the pack. Each entry is:
//
// 4-byte network byte order integer, recording where the
// object is stored in the packfile as the offset from the
// beginning.
//
// 20-byte object name.
//
// - The file is concluded with a trailer:
//
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
//
// 20-byte SHA1-checksum of all of the above.
//
// Pack Idx file:
//
// -- +--------------------------------+
// fanout | fanout[0] = 2 (for example) |-.
// table +--------------------------------+ |
// | fanout[1] | |
// +--------------------------------+ |
// | fanout[2] | |
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | fanout[255] = total objects |---.
// -- +--------------------------------+ | |
// main | offset | | |
// index | object name 00XXXXXXXXXXXXXXXX | | |
// tab +--------------------------------+ | |
// | offset | | |
// | object name 00XXXXXXXXXXXXXXXX | | |
// +--------------------------------+<+ |
// .-| offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | +--------------------------------+ |
// | | offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | | offset | |
// | | object name FFXXXXXXXXXXXXXXXX | |
// --| +--------------------------------+<--+
// trailer | | packfile checksum |
// | +--------------------------------+
// | | idxfile checksum |
// | +--------------------------------+
// .---------.
// |
// Pack file entry: <+
//
// packed object header:
// 1-byte size extension bit (MSB)
// type (next 3 bit)
// size0 (lower 4-bit)
// n-byte sizeN (as long as MSB is set, each 7-bit)
// size0..sizeN form 4+7+7+..+7 bit integer, size0
// is the least significant part, and sizeN is the
// most significant part.
// packed object data:
// If it is not DELTA, then deflated bytes (the size above
// is the size before compression).
// If it is REF_DELTA, then
// 20-byte base object name SHA1 (the size above is the
// size of the delta data that follows).
// delta data, deflated.
// If it is OFS_DELTA, then
// n-byte offset (see below) interpreted as a negative
// offset from the type-byte of the header of the
// ofs-delta entry (the size above is the size of
// the delta data that follows).
// delta data, deflated.
//
// offset encoding:
// n bytes with MSB set in all but the last one.
// The offset is then the number constructed by
// concatenating the lower 7 bit of each byte, and
// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
// to the result.
//
// == Version 2 pack-*.idx files support packs larger than 4 GiB, and
// have some other reorganizations. They have the format:
//
// - A 4-byte magic number '\377tOc' which is an unreasonable
// fanout[0] value.
//
// - A 4-byte version number (= 2)
//
// - A 256-entry fan-out table just like v1.
//
// - A table of sorted 20-byte SHA1 object names. These are
// packed together without offset values to reduce the cache
// footprint of the binary search for a specific object name.
//
// - A table of 4-byte CRC32 values of the packed object data.
// This is new in v2 so compressed data can be copied directly
// from pack to pack during repacking without undetected
// data corruption.
//
// - A table of 4-byte offset values (in network byte order).
// These are usually 31-bit pack file offsets, but large
// offsets are encoded as an index into the next table with
// the msbit set.
//
// - A table of 8-byte offset entries (empty for pack files less
// than 2 GiB). Pack files are organized with heavily used
// objects toward the front, so most object references should
// not need to refer to this table.
//
// - The same trailer as a v1 pack file:
//
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
//
// 20-byte SHA1-checksum of all of the above.
//
// Source:
// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt
package idxfile
| vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go | 0 | https://github.com/jesseduffield/lazygit/commit/2d18d089ce046e7f858b8d1e41469350f879d49c | [
0.00017635829863138497,
0.00016998214414343238,
0.00016283619333989918,
0.00016948314441833645,
0.000003686554464366054
] |
{
"id": 0,
"code_window": [
"\t\tinput := &logical.LogInput{\n",
"\t\t\tRequest: req,\n",
"\t\t}\n",
"\t\tcore.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tcw := newCopyResponseWriter(w)\n",
"\t\th.ServeHTTP(cw, r)\n",
"\t\tdata := make(map[string]interface{})\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t\treturn\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 237
} | package vault
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"strings"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/salt"
"github.com/hashicorp/vault/sdk/logical"
)
const (
// coreAuditConfigPath is used to store the audit configuration.
// Audit configuration is protected within the Vault itself, which means it
// can only be viewed or modified after an unseal.
coreAuditConfigPath = "core/audit"
// coreLocalAuditConfigPath is used to store audit information for local
// (non-replicated) mounts
coreLocalAuditConfigPath = "core/local-audit"
// auditBarrierPrefix is the prefix to the UUID used in the
// barrier view for the audit backends.
auditBarrierPrefix = "audit/"
// auditTableType is the value we expect to find for the audit table and
// corresponding entries
auditTableType = "audit"
)
var (
// loadAuditFailed if loading audit tables encounters an error
errLoadAuditFailed = errors.New("failed to setup audit table")
)
// enableAudit is used to enable a new audit backend
func (c *Core) enableAudit(ctx context.Context, entry *MountEntry, updateStorage bool) error {
// Ensure we end the path in a slash
if !strings.HasSuffix(entry.Path, "/") {
entry.Path += "/"
}
// Ensure there is a name
if entry.Path == "/" {
return fmt.Errorf("backend path must be specified")
}
// Update the audit table
c.auditLock.Lock()
defer c.auditLock.Unlock()
// Look for matching name
for _, ent := range c.audit.Entries {
switch {
// Existing is sql/mysql/ new is sql/ or
// existing is sql/ and new is sql/mysql/
case strings.HasPrefix(ent.Path, entry.Path):
fallthrough
case strings.HasPrefix(entry.Path, ent.Path):
return fmt.Errorf("path already in use")
}
}
// Generate a new UUID and view
if entry.UUID == "" {
entryUUID, err := uuid.GenerateUUID()
if err != nil {
return err
}
entry.UUID = entryUUID
}
if entry.Accessor == "" {
accessor, err := c.generateMountAccessor("audit_" + entry.Type)
if err != nil {
return err
}
entry.Accessor = accessor
}
viewPath := entry.ViewPath()
view := NewBarrierView(c.barrier, viewPath)
addAuditPathChecker(c, entry, view, viewPath)
origViewReadOnlyErr := view.getReadOnlyErr()
// Mark the view as read-only until the mounting is complete and
// ensure that it is reset after. This ensures that there will be no
// writes during the construction of the backend.
view.setReadOnlyErr(logical.ErrSetupReadOnly)
defer view.setReadOnlyErr(origViewReadOnlyErr)
// Lookup the new backend
backend, err := c.newAuditBackend(ctx, entry, view, entry.Options)
if err != nil {
return err
}
if backend == nil {
return fmt.Errorf("nil audit backend of type %q returned from factory", entry.Type)
}
newTable := c.audit.shallowClone()
newTable.Entries = append(newTable.Entries, entry)
ns, err := namespace.FromContext(ctx)
if err != nil {
return err
}
entry.NamespaceID = ns.ID
entry.namespace = ns
if updateStorage {
if err := c.persistAudit(ctx, newTable, entry.Local); err != nil {
return errors.New("failed to update audit table")
}
}
c.audit = newTable
// Register the backend
c.auditBroker.Register(entry.Path, backend, view, entry.Local)
if c.logger.IsInfo() {
c.logger.Info("enabled audit backend", "path", entry.Path, "type", entry.Type)
}
return nil
}
// disableAudit is used to disable an existing audit backend
func (c *Core) disableAudit(ctx context.Context, path string, updateStorage bool) (bool, error) {
// Ensure we end the path in a slash
if !strings.HasSuffix(path, "/") {
path += "/"
}
// Ensure there is a name
if path == "/" {
return false, fmt.Errorf("backend path must be specified")
}
// Remove the entry from the mount table
c.auditLock.Lock()
defer c.auditLock.Unlock()
newTable := c.audit.shallowClone()
entry, err := newTable.remove(ctx, path)
if err != nil {
return false, err
}
// Ensure there was a match
if entry == nil {
return false, fmt.Errorf("no matching backend")
}
c.removeAuditReloadFunc(entry)
// When unmounting all entries the JSON code will load back up from storage
// as a nil slice, which kills tests...just set it nil explicitly
if len(newTable.Entries) == 0 {
newTable.Entries = nil
}
if updateStorage {
// Update the audit table
if err := c.persistAudit(ctx, newTable, entry.Local); err != nil {
return true, errors.New("failed to update audit table")
}
}
c.audit = newTable
// Unmount the backend
c.auditBroker.Deregister(path)
if c.logger.IsInfo() {
c.logger.Info("disabled audit backend", "path", path)
}
removeAuditPathChecker(c, entry)
return true, nil
}
// loadAudits is invoked as part of postUnseal to load the audit table
func (c *Core) loadAudits(ctx context.Context) error {
auditTable := &MountTable{}
localAuditTable := &MountTable{}
// Load the existing audit table
raw, err := c.barrier.Get(ctx, coreAuditConfigPath)
if err != nil {
c.logger.Error("failed to read audit table", "error", err)
return errLoadAuditFailed
}
rawLocal, err := c.barrier.Get(ctx, coreLocalAuditConfigPath)
if err != nil {
c.logger.Error("failed to read local audit table", "error", err)
return errLoadAuditFailed
}
c.auditLock.Lock()
defer c.auditLock.Unlock()
if raw != nil {
if err := jsonutil.DecodeJSON(raw.Value, auditTable); err != nil {
c.logger.Error("failed to decode audit table", "error", err)
return errLoadAuditFailed
}
c.audit = auditTable
}
var needPersist bool
if c.audit == nil {
c.audit = defaultAuditTable()
needPersist = true
}
if rawLocal != nil {
if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
c.logger.Error("failed to decode local audit table", "error", err)
return errLoadAuditFailed
}
if localAuditTable != nil && len(localAuditTable.Entries) > 0 {
c.audit.Entries = append(c.audit.Entries, localAuditTable.Entries...)
}
}
// Upgrade to typed auth table
if c.audit.Type == "" {
c.audit.Type = auditTableType
needPersist = true
}
// Upgrade to table-scoped entries
for _, entry := range c.audit.Entries {
if entry.Table == "" {
entry.Table = c.audit.Type
needPersist = true
}
if entry.Accessor == "" {
accessor, err := c.generateMountAccessor("audit_" + entry.Type)
if err != nil {
return err
}
entry.Accessor = accessor
needPersist = true
}
if entry.NamespaceID == "" {
entry.NamespaceID = namespace.RootNamespaceID
needPersist = true
}
// Get the namespace from the namespace ID and load it in memory
ns, err := NamespaceByID(ctx, entry.NamespaceID, c)
if err != nil {
return err
}
if ns == nil {
return namespace.ErrNoNamespace
}
entry.namespace = ns
}
if !needPersist || c.perfStandby {
return nil
}
if err := c.persistAudit(ctx, c.audit, false); err != nil {
return errLoadAuditFailed
}
return nil
}
// persistAudit is used to persist the audit table after modification
func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bool) error {
if table.Type != auditTableType {
c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType)
return fmt.Errorf("invalid table type given, not persisting")
}
for _, entry := range table.Entries {
if entry.Table != table.Type {
c.logger.Error("given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
return fmt.Errorf("invalid audit entry found, not persisting")
}
}
nonLocalAudit := &MountTable{
Type: auditTableType,
}
localAudit := &MountTable{
Type: auditTableType,
}
for _, entry := range table.Entries {
if entry.Local {
localAudit.Entries = append(localAudit.Entries, entry)
} else {
nonLocalAudit.Entries = append(nonLocalAudit.Entries, entry)
}
}
if !localOnly {
// Marshal the table
compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAudit, nil)
if err != nil {
c.logger.Error("failed to encode and/or compress audit table", "error", err)
return err
}
// Create an entry
entry := &logical.StorageEntry{
Key: coreAuditConfigPath,
Value: compressedBytes,
}
// Write to the physical backend
if err := c.barrier.Put(ctx, entry); err != nil {
c.logger.Error("failed to persist audit table", "error", err)
return err
}
}
// Repeat with local audit
compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAudit, nil)
if err != nil {
c.logger.Error("failed to encode and/or compress local audit table", "error", err)
return err
}
entry := &logical.StorageEntry{
Key: coreLocalAuditConfigPath,
Value: compressedBytes,
}
if err := c.barrier.Put(ctx, entry); err != nil {
c.logger.Error("failed to persist local audit table", "error", err)
return err
}
return nil
}
// setupAudit is invoked after we've loaded the audit able to
// initialize the audit backends
func (c *Core) setupAudits(ctx context.Context) error {
brokerLogger := c.baseLogger.Named("audit")
c.AddLogger(brokerLogger)
broker := NewAuditBroker(brokerLogger)
c.auditLock.Lock()
defer c.auditLock.Unlock()
var successCount int
for _, entry := range c.audit.Entries {
// Create a barrier view using the UUID
viewPath := entry.ViewPath()
view := NewBarrierView(c.barrier, viewPath)
addAuditPathChecker(c, entry, view, viewPath)
origViewReadOnlyErr := view.getReadOnlyErr()
// Mark the view as read-only until the mounting is complete and
// ensure that it is reset after. This ensures that there will be no
// writes during the construction of the backend.
view.setReadOnlyErr(logical.ErrSetupReadOnly)
c.postUnsealFuncs = append(c.postUnsealFuncs, func() {
view.setReadOnlyErr(origViewReadOnlyErr)
})
// Initialize the backend
backend, err := c.newAuditBackend(ctx, entry, view, entry.Options)
if err != nil {
c.logger.Error("failed to create audit entry", "path", entry.Path, "error", err)
continue
}
if backend == nil {
c.logger.Error("created audit entry was nil", "path", entry.Path, "type", entry.Type)
continue
}
// Mount the backend
broker.Register(entry.Path, backend, view, entry.Local)
successCount++
}
if len(c.audit.Entries) > 0 && successCount == 0 {
return errLoadAuditFailed
}
c.auditBroker = broker
return nil
}
// teardownAudit is used before we seal the vault to reset the audit
// backends to their unloaded state. This is reversed by loadAudits.
func (c *Core) teardownAudits() error {
c.auditLock.Lock()
defer c.auditLock.Unlock()
if c.audit != nil {
for _, entry := range c.audit.Entries {
c.removeAuditReloadFunc(entry)
removeAuditPathChecker(c, entry)
}
}
c.audit = nil
c.auditBroker = nil
return nil
}
// removeAuditReloadFunc removes the reload func from the working set. The
// audit lock needs to be held before calling this.
func (c *Core) removeAuditReloadFunc(entry *MountEntry) {
switch entry.Type {
case "file":
key := "audit_file|" + entry.Path
c.reloadFuncsLock.Lock()
if c.logger.IsDebug() {
c.baseLogger.Named("audit").Debug("removing reload function", "path", entry.Path)
}
delete(c.reloadFuncs, key)
c.reloadFuncsLock.Unlock()
}
}
// newAuditBackend is used to create and configure a new audit backend by name
func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logical.Storage, conf map[string]string) (audit.Backend, error) {
f, ok := c.auditBackends[entry.Type]
if !ok {
return nil, fmt.Errorf("unknown backend type: %q", entry.Type)
}
saltConfig := &salt.Config{
HMAC: sha256.New,
HMACType: "hmac-sha256",
Location: salt.DefaultLocation,
}
be, err := f(ctx, &audit.BackendConfig{
SaltView: view,
SaltConfig: saltConfig,
Config: conf,
})
if err != nil {
return nil, err
}
if be == nil {
return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type)
}
auditLogger := c.baseLogger.Named("audit")
c.AddLogger(auditLogger)
switch entry.Type {
case "file":
key := "audit_file|" + entry.Path
c.reloadFuncsLock.Lock()
if auditLogger.IsDebug() {
auditLogger.Debug("adding reload function", "path", entry.Path)
if entry.Options != nil {
auditLogger.Debug("file backend options", "path", entry.Path, "file_path", entry.Options["file_path"])
}
}
c.reloadFuncs[key] = append(c.reloadFuncs[key], func() error {
if auditLogger.IsInfo() {
auditLogger.Info("reloading file audit backend", "path", entry.Path)
}
return be.Reload(ctx)
})
c.reloadFuncsLock.Unlock()
case "socket":
if auditLogger.IsDebug() {
if entry.Options != nil {
auditLogger.Debug("socket backend options", "path", entry.Path, "address", entry.Options["address"], "socket type", entry.Options["socket_type"])
}
}
case "syslog":
if auditLogger.IsDebug() {
if entry.Options != nil {
auditLogger.Debug("syslog backend options", "path", entry.Path, "facility", entry.Options["facility"], "tag", entry.Options["tag"])
}
}
}
return be, err
}
// defaultAuditTable creates a default audit table
func defaultAuditTable() *MountTable {
table := &MountTable{
Type: auditTableType,
}
return table
}
type AuditLogger interface {
AuditRequest(ctx context.Context, input *logical.LogInput) error
AuditResponse(ctx context.Context, input *logical.LogInput) error
}
type basicAuditor struct {
c *Core
}
func (b *basicAuditor) AuditRequest(ctx context.Context, input *logical.LogInput) error {
return b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)
}
func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {
return b.c.auditBroker.LogResponse(ctx, input, b.c.auditedHeaders)
}
type genericAuditor struct {
c *Core
mountType string
namespace *namespace.Namespace
}
func (g genericAuditor) AuditRequest(ctx context.Context, input *logical.LogInput) error {
ctx = namespace.ContextWithNamespace(ctx, g.namespace)
logInput := *input
logInput.Type = g.mountType + "-request"
return g.c.auditBroker.LogRequest(ctx, &logInput, g.c.auditedHeaders)
}
func (g genericAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {
ctx = namespace.ContextWithNamespace(ctx, g.namespace)
logInput := *input
logInput.Type = g.mountType + "-response"
return g.c.auditBroker.LogResponse(ctx, &logInput, g.c.auditedHeaders)
}
| vault/audit.go | 1 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.03470412641763687,
0.0011692323023453355,
0.00016353485989384353,
0.00018154163262806833,
0.004645257256925106
] |
{
"id": 0,
"code_window": [
"\t\tinput := &logical.LogInput{\n",
"\t\t\tRequest: req,\n",
"\t\t}\n",
"\t\tcore.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tcw := newCopyResponseWriter(w)\n",
"\t\th.ServeHTTP(cw, r)\n",
"\t\tdata := make(map[string]interface{})\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t\treturn\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 237
} | import Component from '@ember/component';
import { computed } from '@ember/object';
import hbs from 'htmlbars-inline-precompile';
/**
* @module DocLink
* `DocLink` components are used to render anchor links to relevant Vault documentation.
*
* @example
* ```js
<DocLink @path="/docs/secrets/kv/kv-v2.html">Learn about KV v2</DocLink>
* ```
*
* @param path="/"{String} - The path to documentation on vaultproject.io that the component should link to.
*
*/
export default Component.extend({
tagName: 'a',
classNames: ['doc-link'],
attributeBindings: ['target', 'rel', 'href'],
layout: hbs`{{yield}}`,
target: '_blank',
rel: 'noreferrer noopener',
host: 'https://www.vaultproject.io',
path: '/',
href: computed('host', 'path', function() {
return `${this.host}${this.path}`;
}),
});
| ui/lib/core/addon/components/doc-link.js | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.000175680237589404,
0.0001726204645819962,
0.00016831810353323817,
0.000173241802258417,
0.000002866205704776803
] |
{
"id": 0,
"code_window": [
"\t\tinput := &logical.LogInput{\n",
"\t\t\tRequest: req,\n",
"\t\t}\n",
"\t\tcore.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tcw := newCopyResponseWriter(w)\n",
"\t\th.ServeHTTP(cw, r)\n",
"\t\tdata := make(map[string]interface{})\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t\treturn\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 237
} | package plugin
import (
"context"
"fmt"
"strings"
"time"
"github.com/go-errors/errors"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
const (
credPrefix = "creds/"
storageKey = "creds"
// Since password TTL can be set to as low as 1 second,
// we can't cache passwords for an entire second.
credCacheCleanup = time.Second / 3
credCacheExpiration = time.Second / 2
)
// deleteCred fulfills the DeleteWatcher interface in roles.
// It allows the roleHandler to let us know when a role's been deleted so we can delete its associated creds too.
func (b *backend) deleteCred(ctx context.Context, storage logical.Storage, roleName string) error {
if err := storage.Delete(ctx, storageKey+"/"+roleName); err != nil {
return err
}
b.credCache.Delete(roleName)
return nil
}
func (b *backend) invalidateCred(ctx context.Context, key string) {
if strings.HasPrefix(key, credPrefix) {
roleName := key[len(credPrefix):]
b.credCache.Delete(roleName)
}
}
func (b *backend) pathCreds() *framework.Path {
return &framework.Path{
Pattern: credPrefix + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": {
Type: framework.TypeString,
Description: "Name of the role",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.credReadOperation,
},
HelpSynopsis: credHelpSynopsis,
HelpDescription: credHelpDescription,
}
}
func (b *backend) credReadOperation(ctx context.Context, req *logical.Request, fieldData *framework.FieldData) (*logical.Response, error) {
cred := make(map[string]interface{})
engineConf, err := readConfig(ctx, req.Storage)
if err != nil {
return nil, err
}
if engineConf == nil {
return nil, errors.New("the config is currently unset")
}
roleName := fieldData.Get("name").(string)
// We act upon quite a few things below that could be racy if not locked:
// - Roles. If a new cred is created, the role is updated to include the new LastVaultRotation time,
// effecting role storage (and the role cache, but that's already thread-safe).
// - Creds. New creds involve writing to cred storage and the cred cache (also already thread-safe).
// Rather than setting read locks of different types, and upgrading them to write locks, let's keep complexity
// low and use one simple mutex.
b.credLock.Lock()
defer b.credLock.Unlock()
role, err := b.readRole(ctx, req.Storage, roleName)
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
b.Logger().Debug(fmt.Sprintf("role is: %+v", role))
var resp *logical.Response
var respErr error
var unset time.Time
switch {
case role.LastVaultRotation == unset:
b.Logger().Info("rotating password for the first time so Vault will know it")
resp, respErr = b.generateAndReturnCreds(ctx, engineConf, req.Storage, roleName, role, cred)
case role.PasswordLastSet.After(role.LastVaultRotation.Add(time.Second * time.Duration(engineConf.LastRotationTolerance))):
b.Logger().Warn(fmt.Sprintf(
"Vault rotated the password at %s, but it was rotated in AD later at %s, so rotating it again so Vault will know it",
role.LastVaultRotation.String(), role.PasswordLastSet.String()),
)
resp, respErr = b.generateAndReturnCreds(ctx, engineConf, req.Storage, roleName, role, cred)
default:
b.Logger().Debug("determining whether to rotate credential")
credIfc, found := b.credCache.Get(roleName)
if found {
b.Logger().Debug("checking cached credential")
cred = credIfc.(map[string]interface{})
} else {
b.Logger().Debug("checking stored credential")
entry, err := req.Storage.Get(ctx, storageKey+"/"+roleName)
if err != nil {
return nil, err
}
if entry == nil {
// If the creds aren't in storage, but roles are and we've created creds before,
// this is an unexpected state and something has gone wrong.
// Let's be explicit and error about this.
return nil, fmt.Errorf("should have the creds for %+v but they're not found", role)
}
if err := entry.DecodeJSON(&cred); err != nil {
return nil, err
}
b.credCache.SetDefault(roleName, cred)
}
now := time.Now().UTC()
shouldBeRolled := role.LastVaultRotation.Add(time.Duration(role.TTL) * time.Second) // already in UTC
if now.After(shouldBeRolled) {
b.Logger().Info(fmt.Sprintf(
"last Vault rotation was at %s, and since the TTL is %d and it's now %s, it's time to rotate it",
role.LastVaultRotation.String(), role.TTL, now.String()),
)
resp, respErr = b.generateAndReturnCreds(ctx, engineConf, req.Storage, roleName, role, cred)
} else {
b.Logger().Debug("returning previous credential")
resp = &logical.Response{
Data: cred,
}
}
}
if respErr != nil {
return nil, respErr
}
return resp, nil
}
func (b *backend) generateAndReturnCreds(ctx context.Context, engineConf *configuration, storage logical.Storage, roleName string, role *backendRole, previousCred map[string]interface{}) (*logical.Response, error) {
newPassword, err := GeneratePassword(ctx, engineConf.PasswordConf, b.System())
if err != nil {
return nil, err
}
if err := b.client.UpdatePassword(engineConf.ADConf, role.ServiceAccountName, newPassword); err != nil {
return nil, err
}
// Time recorded is in UTC for easier user comparison to AD's last rotated time, which is set to UTC by Microsoft.
role.LastVaultRotation = time.Now().UTC()
if err := b.writeRoleToStorage(ctx, storage, roleName, role); err != nil {
return nil, err
}
// Cache the full role to minimize Vault storage calls.
b.roleCache.SetDefault(roleName, role)
// Although a service account name is typically [email protected],
// the username it uses is just my_app, or everything before the @.
var username string
fields := strings.Split(role.ServiceAccountName, "@")
if len(fields) > 0 {
username = fields[0]
} else {
return nil, fmt.Errorf("unable to infer username from service account name: %s", role.ServiceAccountName)
}
cred := map[string]interface{}{
"username": username,
"current_password": newPassword,
}
if previousCred["current_password"] != nil {
cred["last_password"] = previousCred["current_password"]
}
// Cache and save the cred.
entry, err := logical.StorageEntryJSON(storageKey+"/"+roleName, cred)
if err != nil {
return nil, err
}
if err := storage.Put(ctx, entry); err != nil {
return nil, err
}
b.credCache.SetDefault(roleName, cred)
return &logical.Response{
Data: cred,
}, nil
}
const (
credHelpSynopsis = `
Retrieve a role's creds by role name.
`
credHelpDescription = `
Read creds using a role's name to view the login, current password, and last password.
`
)
| vendor/github.com/hashicorp/vault-plugin-secrets-ad/plugin/path_creds.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0175255686044693,
0.0012958229053765535,
0.00016422083717770875,
0.00017204711912199855,
0.003740101819857955
] |
{
"id": 0,
"code_window": [
"\t\tinput := &logical.LogInput{\n",
"\t\t\tRequest: req,\n",
"\t\t}\n",
"\t\tcore.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tcw := newCopyResponseWriter(w)\n",
"\t\th.ServeHTTP(cw, r)\n",
"\t\tdata := make(map[string]interface{})\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditRequest(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t\treturn\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 237
} | // Package jose implements some helper functions and types for the children
// packages, jws, jwt, and jwe.
package jose
| vendor/github.com/briankassouf/jose/doc.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.00017156466492451727,
0.00017156466492451727,
0.00017156466492451727,
0.00017156466492451727,
0
] |
{
"id": 1,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\t// best effort, ignore\n",
"\t\t}\n",
"\t\thttpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}\n",
"\t\tinput.Response = logical.HTTPResponseToLogicalResponse(httpResp)\n",
"\t\tcore.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\treturn\n",
"\t})\n",
"}\n",
"\n",
"// wrapGenericHandler wraps the handler with an extra layer of handler where\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 247
} | package http
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net"
"net/http"
"net/textproto"
"net/url"
"os"
"strings"
"time"
"github.com/NYTimes/gziphandler"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/pathmanager"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
const (
// WrapTTLHeaderName is the name of the header containing a directive to
// wrap the response
WrapTTLHeaderName = "X-Vault-Wrap-TTL"
// WrapFormatHeaderName is the name of the header containing the format to
// wrap in; has no effect if the wrap TTL is not set
WrapFormatHeaderName = "X-Vault-Wrap-Format"
// NoRequestForwardingHeaderName is the name of the header telling Vault
// not to use request forwarding
NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
// MFAHeaderName represents the HTTP header which carries the credentials
// required to perform MFA on any path.
MFAHeaderName = "X-Vault-MFA"
// canonicalMFAHeaderName is the MFA header value's format in the request
// headers. Do not alter the casing of this string.
canonicalMFAHeaderName = "X-Vault-Mfa"
// PolicyOverrideHeaderName is the header set to request overriding
// soft-mandatory Sentinel policies.
PolicyOverrideHeaderName = "X-Vault-Policy-Override"
// DefaultMaxRequestSize is the default maximum accepted request size. This
// is to prevent a denial of service attack where no Content-Length is
// provided and the server is fed ever more data until it exhausts memory.
// Can be overridden per listener.
DefaultMaxRequestSize = 32 * 1024 * 1024
)
var (
// Set to false by stub_asset if the ui build tag isn't enabled
uiBuiltIn = true
// perfStandbyAlwaysForwardPaths is used to check a requested path against
// the always forward list
perfStandbyAlwaysForwardPaths = pathmanager.New()
alwaysRedirectPaths = pathmanager.New()
injectDataIntoTopRoutes = []string{
"/v1/sys/audit",
"/v1/sys/audit/",
"/v1/sys/audit-hash/",
"/v1/sys/auth",
"/v1/sys/auth/",
"/v1/sys/config/cors",
"/v1/sys/config/auditing/request-headers/",
"/v1/sys/config/auditing/request-headers",
"/v1/sys/capabilities",
"/v1/sys/capabilities-accessor",
"/v1/sys/capabilities-self",
"/v1/sys/key-status",
"/v1/sys/mounts",
"/v1/sys/mounts/",
"/v1/sys/policy",
"/v1/sys/policy/",
"/v1/sys/rekey/backup",
"/v1/sys/rekey/recovery-key-backup",
"/v1/sys/remount",
"/v1/sys/rotate",
"/v1/sys/wrapping/wrap",
}
)
func init() {
alwaysRedirectPaths.AddPaths([]string{
"sys/storage/raft/snapshot",
"sys/storage/raft/snapshot-force",
})
}
// Handler returns an http.Handler for the API. This can be used on
// its own to mount the Vault API within another web server.
func Handler(props *vault.HandlerProperties) http.Handler {
core := props.Core
// Create the muxer to handle the actual endpoints
mux := http.NewServeMux()
switch {
case props.RecoveryMode:
raw := vault.NewRawBackend(core)
strategy := vault.GenerateRecoveryTokenStrategy(props.RecoveryToken)
mux.Handle("/v1/sys/raw/", handleLogicalRecovery(raw, props.RecoveryToken))
mux.Handle("/v1/sys/generate-recovery-token/attempt", handleSysGenerateRootAttempt(core, strategy))
mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy))
default:
// Handle non-forwarded paths
mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core))
mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/init", handleSysInit(core))
mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
mux.Handle("/v1/sys/seal", handleSysSeal(core))
mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core)))
mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
mux.Handle("/v1/sys/leader", handleSysLeader(core))
mux.Handle("/v1/sys/health", handleSysHealth(core))
mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core))
mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootUpdate(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false)))
mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false)))
mux.Handle("/v1/sys/rekey/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, false)))
mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true)))
mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core))
mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core))
for _, path := range injectDataIntoTopRoutes {
mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core)))
}
mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core)))
mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core)))
if core.UIEnabled() == true {
if uiBuiltIn {
mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))))
mux.Handle("/robots.txt", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))
} else {
mux.Handle("/ui/", handleUIHeaders(core, handleUIStub()))
}
mux.Handle("/ui", handleUIRedirect())
mux.Handle("/", handleUIRedirect())
}
// Register metrics path without authentication if enabled
if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess {
mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core))
} else {
mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core))
}
additionalRoutes(mux, core)
}
// Wrap the handler in another handler to trigger all help paths.
helpWrappedHandler := wrapHelpHandler(mux, core)
corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core)
genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props)
// Wrap the handler with PrintablePathCheckHandler to check for non-printable
// characters in the request path.
printablePathCheckHandler := genericWrappedHandler
if !props.DisablePrintableCheck {
printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil)
}
return printablePathCheckHandler
}
type copyResponseWriter struct {
wrapped http.ResponseWriter
statusCode int
body *bytes.Buffer
}
// newCopyResponseWriter returns an initialized newCopyResponseWriter
func newCopyResponseWriter(wrapped http.ResponseWriter) *copyResponseWriter {
w := ©ResponseWriter{
wrapped: wrapped,
body: new(bytes.Buffer),
statusCode: 200,
}
return w
}
func (w *copyResponseWriter) Header() http.Header {
return w.wrapped.Header()
}
func (w *copyResponseWriter) Write(buf []byte) (int, error) {
w.body.Write(buf)
return w.wrapped.Write(buf)
}
func (w *copyResponseWriter) WriteHeader(code int) {
w.statusCode = code
w.wrapped.WriteHeader(code)
}
func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
origBody := new(bytes.Buffer)
reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody))
r.Body = reader
req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
if err != nil || status != 0 {
respondError(w, status, err)
return
}
if origBody != nil {
r.Body = ioutil.NopCloser(origBody)
}
input := &logical.LogInput{
Request: req,
}
core.AuditLogger().AuditRequest(r.Context(), input)
cw := newCopyResponseWriter(w)
h.ServeHTTP(cw, r)
data := make(map[string]interface{})
err = jsonutil.DecodeJSON(cw.body.Bytes(), &data)
if err != nil {
// best effort, ignore
}
httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}
input.Response = logical.HTTPResponseToLogicalResponse(httpResp)
core.AuditLogger().AuditResponse(r.Context(), input)
return
})
}
// wrapGenericHandler wraps the handler with an extra layer of handler where
// tasks that should be commonly handled for all the requests and/or responses
// are performed.
func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler {
var maxRequestDuration time.Duration
var maxRequestSize int64
if props.ListenerConfig != nil {
maxRequestDuration = props.ListenerConfig.MaxRequestDuration
maxRequestSize = props.ListenerConfig.MaxRequestSize
}
if maxRequestDuration == 0 {
maxRequestDuration = vault.DefaultMaxRequestDuration
}
if maxRequestSize == 0 {
maxRequestSize = DefaultMaxRequestSize
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Set the Cache-Control header for all the responses returned
// by Vault
w.Header().Set("Cache-Control", "no-store")
// Start with the request context
ctx := r.Context()
var cancelFunc context.CancelFunc
// Add our timeout, but not for the monitor endpoint, as it's streaming
if strings.HasSuffix(r.URL.Path, "sys/monitor") {
ctx, cancelFunc = context.WithCancel(ctx)
} else {
ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration)
}
// Add a size limiter if desired
if maxRequestSize > 0 {
ctx = context.WithValue(ctx, "max_request_size", maxRequestSize)
}
ctx = context.WithValue(ctx, "original_request_path", r.URL.Path)
r = r.WithContext(ctx)
r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace))
switch {
case strings.HasPrefix(r.URL.Path, "/v1/"):
newR, status := adjustRequest(core, r)
if status != 0 {
respondError(w, status, nil)
cancelFunc()
return
}
r = newR
case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/":
default:
respondError(w, http.StatusNotFound, nil)
cancelFunc()
return
}
h.ServeHTTP(w, r)
cancelFunc()
return
})
}
func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler {
rejectNotPresent := l.XForwardedForRejectNotPresent
hopSkips := l.XForwardedForHopSkips
authorizedAddrs := l.XForwardedForAuthorizedAddrs
rejectNotAuthz := l.XForwardedForRejectNotAuthorized
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
if !headersOK || len(headers) == 0 {
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("missing x-forwarded-for header and configured to reject when not present"))
return
}
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
// If not rejecting treat it like we just don't have a valid
// header because we can't do a comparison against an address we
// can't understand
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client hostport: {{err}}", err))
return
}
addr, err := sockaddr.NewIPAddr(host)
if err != nil {
// We treat this the same as the case above
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client address: {{err}}", err))
return
}
var found bool
for _, authz := range authorizedAddrs {
if authz.Contains(addr) {
found = true
break
}
}
if !found {
// If we didn't find it and aren't configured to reject, simply
// don't trust it
if !rejectNotAuthz {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("client address not authorized for x-forwarded-for and configured to reject connection"))
return
}
// At this point we have at least one value and it's authorized
// Split comma separated ones, which are common. This brings it in line
// to the multiple-header case.
var acc []string
for _, header := range headers {
vals := strings.Split(header, ",")
for _, v := range vals {
acc = append(acc, strings.TrimSpace(v))
}
}
indexToUse := int64(len(acc)) - 1 - hopSkips
if indexToUse < 0 {
// This is likely an error in either configuration or other
// infrastructure. We could either deny the request, or we
// could simply not trust the value. Denying the request is
// "safer" since if this logic is configured at all there may
// be an assumption it can always be trusted. Given that we can
// deny accepting the request at all if it's not from an
// authorized address, if we're at this point the address is
// authorized (or we've turned off explicit rejection) and we
// should assume that what comes in should be properly
// formatted.
respondError(w, http.StatusBadRequest, fmt.Errorf("malformed x-forwarded-for configuration or request, hops to skip (%d) would skip before earliest chain link (chain length %d)", hopSkips, len(headers)))
return
}
r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port)
h.ServeHTTP(w, r)
return
})
}
// A lookup on a token that is about to expire returns nil, which means by the
// time we can validate a wrapping token lookup will return nil since it will
// be revoked after the call. So we have to do the validation here.
func wrappingVerificationFunc(ctx context.Context, core *vault.Core, req *logical.Request) error {
if req == nil {
return fmt.Errorf("invalid request")
}
valid, err := core.ValidateWrappingToken(ctx, req)
if err != nil {
return errwrap.Wrapf("error validating wrapping token: {{err}}", err)
}
if !valid {
return consts.ErrInvalidWrappingToken
}
return nil
}
// stripPrefix is a helper to strip a prefix from the path. It will
// return false from the second return value if it the prefix doesn't exist.
func stripPrefix(prefix, path string) (string, bool) {
if !strings.HasPrefix(path, prefix) {
return "", false
}
path = path[len(prefix):]
if path == "" {
return "", false
}
return path, true
}
func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
header := w.Header()
userHeaders, err := core.UIHeaders()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
if userHeaders != nil {
for k := range userHeaders {
v := userHeaders.Get(k)
header.Set(k, v)
}
}
h.ServeHTTP(w, req)
})
}
func handleUI(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
// The fileserver handler strips trailing slashes and does a redirect.
// We don't want the redirect to happen so we preemptively trim the slash
// here.
req.URL.Path = strings.TrimSuffix(req.URL.Path, "/")
h.ServeHTTP(w, req)
return
})
}
func handleUIStub() http.Handler {
stubHTML := `
<!DOCTYPE html>
<html>
<style>
body {
color: #1F2124;
font-family: system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
.wrapper {
display: flex;
justify-content: center;
align-items: center;
height: 500px;
}
.content ul {
line-height: 1.5;
}
a {
color: #1563ff;
text-decoration: none;
}
.header {
display: flex;
color: #6a7786;
align-items: center;
}
.header svg {
padding-right: 12px;
}
.alert {
transform: scale(0.07);
fill: #6a7786;
}
h1 {
font-weight: 500;
}
p {
margin-top: 0px;
}
</style>
<div class="wrapper">
<div class="content">
<div class="header">
<svg width="36px" height="36px" viewBox="0 0 36 36" xmlns="http://www.w3.org/2000/svg">
<path class="alert" d="M476.7 422.2L270.1 72.7c-2.9-5-8.3-8.7-14.1-8.7-5.9 0-11.3 3.7-14.1 8.7L35.3 422.2c-2.8 5-4.8 13-1.9 17.9 2.9 4.9 8.2 7.9 14 7.9h417.1c5.8 0 11.1-3 14-7.9 3-4.9 1-13-1.8-17.9zM288 400h-64v-48h64v48zm0-80h-64V176h64v144z"/>
</svg>
<h1>Vault UI is not available in this binary.</h1>
</div>
<p>To get Vault UI do one of the following:</p>
<ul>
<li><a href="https://www.vaultproject.io/downloads.html">Download an official release</a></li>
<li>Run <code>make bin</code> to create your own release binaries.
<li>Run <code>make dev-ui</code> to create a development binary with the UI.
</ul>
</div>
</div>
</html>
`
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte(stubHTML))
})
}
func handleUIRedirect() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, "/ui/", 307)
return
})
}
type UIAssetWrapper struct {
FileSystem *assetfs.AssetFS
}
func (fs *UIAssetWrapper) Open(name string) (http.File, error) {
file, err := fs.FileSystem.Open(name)
if err == nil {
return file, nil
}
// serve index.html instead of 404ing
if err == os.ErrNotExist {
return fs.FileSystem.Open("index.html")
}
return nil, err
}
func parseQuery(values url.Values) map[string]interface{} {
data := map[string]interface{}{}
for k, v := range values {
// Skip the help key as this is a reserved parameter
if k == "help" {
continue
}
switch {
case len(v) == 0:
case len(v) == 1:
data[k] = v[0]
default:
data[k] = v
}
}
if len(data) > 0 {
return data
}
return nil
}
func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, out interface{}) (io.ReadCloser, error) {
// Limit the maximum number of bytes to MaxRequestSize to protect
// against an indefinite amount of data being read.
reader := r.Body
ctx := r.Context()
maxRequestSize := ctx.Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
reader = http.MaxBytesReader(w, r.Body, max)
}
}
var origBody io.ReadWriter
if perfStandby {
// Since we're checking PerfStandby here we key on origBody being nil
// or not later, so we need to always allocate so it's non-nil
origBody = new(bytes.Buffer)
reader = ioutil.NopCloser(io.TeeReader(reader, origBody))
}
err := jsonutil.DecodeJSONFromReader(reader, out)
if err != nil && err != io.EOF {
return nil, errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
}
if origBody != nil {
return ioutil.NopCloser(origBody), err
}
return nil, err
}
// parseFormRequest parses values from a form POST.
//
// A nil map will be returned if the format is empty or invalid.
func parseFormRequest(r *http.Request) (map[string]interface{}, error) {
maxRequestSize := r.Context().Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
r.Body = ioutil.NopCloser(io.LimitReader(r.Body, max))
}
}
if err := r.ParseForm(); err != nil {
return nil, err
}
var data map[string]interface{}
if len(r.PostForm) != 0 {
data = make(map[string]interface{}, len(r.PostForm))
for k, v := range r.PostForm {
switch len(v) {
case 0:
case 1:
data[k] = v[0]
default:
// Almost anywhere taking in a string list can take in comma
// separated values, and really this is super niche anyways
data[k] = strings.Join(v, ",")
}
}
}
return data, nil
}
// handleRequestForwarding determines whether to forward a request or not,
// falling back on the older behavior of redirecting the client
func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// If we are a performance standby we can handle the request.
if core.PerfStandby() {
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
switch {
case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path):
handler.ServeHTTP(w, r)
return
case strings.HasPrefix(path, "auth/token/create/"):
isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path)
if err == nil && isBatch {
handler.ServeHTTP(w, r)
return
}
}
}
// Note: in an HA setup, this call will also ensure that connections to
// the leader are set up, as that happens once the advertised cluster
// values are read during this function
isLeader, leaderAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve request normally
handler.ServeHTTP(w, r)
return
}
// Some internal error occurred
respondError(w, http.StatusInternalServerError, err)
return
}
if isLeader {
// No forwarding needed, we're leader
handler.ServeHTTP(w, r)
return
}
if leaderAddr == "" {
respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found"))
return
}
forwardRequest(core, w, r)
return
})
}
func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) {
if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
respondStandby(core, w, r.URL)
return
}
if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request")
respondStandby(core, w, r.URL)
return
}
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
if alwaysRedirectPaths.HasPath(path) {
respondStandby(core, w, r.URL)
return
}
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
statusCode, header, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back")
} else {
core.Logger().Error("forward request error", "error", err)
}
// Fall back to redirection
respondStandby(core, w, r.URL)
return
}
if header != nil {
for k, v := range header {
w.Header()[k] = v
}
}
w.WriteHeader(statusCode)
w.Write(retBytes)
}
// request is a helper to perform a request and properly exit in the
// case of an error.
func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) {
resp, err := core.HandleRequest(rawReq.Context(), r)
if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) {
if resp == nil {
resp = &logical.Response{}
}
resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.")
}
if errwrap.Contains(err, consts.ErrStandby.Error()) {
respondStandby(core, w, rawReq.URL)
return resp, false, false
}
if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) {
return nil, false, true
}
if resp != nil && len(resp.Headers) > 0 {
// Set this here so it will take effect regardless of any other type of
// response processing
header := w.Header()
for k, v := range resp.Headers {
for _, h := range v {
header.Add(k, h)
}
}
switch {
case resp.Secret != nil,
resp.Auth != nil,
len(resp.Data) > 0,
resp.Redirect != "",
len(resp.Warnings) > 0,
resp.WrapInfo != nil:
// Nothing, resp has data
default:
// We have an otherwise totally empty response except for headers,
// so nil out the response now that the headers are written out
resp = nil
}
}
// If vault's core has already written to the response writer do not add any
// additional output. Headers have already been sent. If the response writer
// is set but has not been written to it likely means there was some kind of
// error
if r.ResponseWriter != nil && r.ResponseWriter.Written() {
return nil, true, false
}
if respondErrorCommon(w, r, resp, err) {
return resp, false, false
}
return resp, true, false
}
// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
// Request the leader address
_, redirectAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve 503
err = errors.New("node is not active")
respondError(w, http.StatusServiceUnavailable, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
// If there is no leader, generate a 503 error
if redirectAddr == "" {
err = errors.New("no active Vault instance found")
respondError(w, http.StatusServiceUnavailable, err)
return
}
// Parse the redirect location
redirectURL, err := url.Parse(redirectAddr)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
// Generate a redirect URL
finalURL := url.URL{
Scheme: redirectURL.Scheme,
Host: redirectURL.Host,
Path: reqURL.Path,
RawQuery: reqURL.RawQuery,
}
// Ensure there is a scheme, default to https
if finalURL.Scheme == "" {
finalURL.Scheme = "https"
}
// If we have an address, redirect! We use a 307 code
// because we don't actually know if its permanent and
// the request method should be preserved.
w.Header().Set("Location", finalURL.String())
w.WriteHeader(307)
}
// getTokenFromReq parse headers of the incoming request to extract token if
// present it accepts Authorization Bearer (RFC6750) and X-Vault-Token header.
// Returns true if the token was sourced from a Bearer header.
func getTokenFromReq(r *http.Request) (string, bool) {
if token := r.Header.Get(consts.AuthHeaderName); token != "" {
return token, false
}
if headers, ok := r.Header["Authorization"]; ok {
// Reference for Authorization header format: https://tools.ietf.org/html/rfc7236#section-3
// If string does not start by 'Bearer ', it is not one we would use,
// but might be used by plugins
for _, v := range headers {
if !strings.HasPrefix(v, "Bearer ") {
continue
}
return strings.TrimSpace(v[7:]), true
}
}
return "", false
}
// requestAuth adds the token to the logical.Request if it exists.
func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) (*logical.Request, error) {
// Attach the header value if we have it
token, fromAuthzHeader := getTokenFromReq(r)
if token != "" {
req.ClientToken = token
req.ClientTokenSource = logical.ClientTokenFromVaultHeader
if fromAuthzHeader {
req.ClientTokenSource = logical.ClientTokenFromAuthzHeader
}
// Also attach the accessor if we have it. This doesn't fail if it
// doesn't exist because the request may be to an unauthenticated
// endpoint/login endpoint where a bad current token doesn't matter, or
// a token from a Vault version pre-accessors. We ignore errors for
// JWTs.
te, err := core.LookupToken(r.Context(), token)
if err != nil {
dotCount := strings.Count(token, ".")
// If we have two dots but the second char is a dot it's a vault
// token of the form s.SOMETHING.nsid, not a JWT
if dotCount != 2 ||
dotCount == 2 && token[1] == '.' {
return req, err
}
}
if err == nil && te != nil {
req.ClientTokenAccessor = te.Accessor
req.ClientTokenRemainingUses = te.NumUses
req.SetTokenEntry(te)
}
}
return req, nil
}
func requestPolicyOverride(r *http.Request, req *logical.Request) error {
raw := r.Header.Get(PolicyOverrideHeaderName)
if raw == "" {
return nil
}
override, err := parseutil.ParseBool(raw)
if err != nil {
return err
}
req.PolicyOverride = override
return nil
}
// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
// First try for the header value
wrapTTL := r.Header.Get(WrapTTLHeaderName)
if wrapTTL == "" {
return req, nil
}
// If it has an allowed suffix parse as a duration string
dur, err := parseutil.ParseDurationSecond(wrapTTL)
if err != nil {
return req, err
}
if int64(dur) < 0 {
return req, fmt.Errorf("requested wrap ttl cannot be negative")
}
req.WrapInfo = &logical.RequestWrapInfo{
TTL: dur,
}
wrapFormat := r.Header.Get(WrapFormatHeaderName)
switch wrapFormat {
case "jwt":
req.WrapInfo.Format = "jwt"
}
return req, nil
}
// parseMFAHeader parses the MFAHeaderName in the request headers and organizes
// them with MFA method name as the index.
func parseMFAHeader(req *logical.Request) error {
if req == nil {
return fmt.Errorf("request is nil")
}
if req.Headers == nil {
return nil
}
// Reset and initialize the credentials in the request
req.MFACreds = make(map[string][]string)
for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] {
// Skip the header with no value in it
if mfaHeaderValue == "" {
continue
}
// Handle the case where only method name is mentioned and no value
// is supplied
if !strings.Contains(mfaHeaderValue, ":") {
// Mark the presence of method name, but set an empty set to it
// indicating that there were no values supplied for the method
if req.MFACreds[mfaHeaderValue] == nil {
req.MFACreds[mfaHeaderValue] = []string{}
}
continue
}
shardSplits := strings.SplitN(mfaHeaderValue, ":", 2)
if shardSplits[0] == "" {
return fmt.Errorf("invalid data in header %q; missing method name", MFAHeaderName)
}
if shardSplits[1] == "" {
return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName)
}
req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1])
}
return nil
}
// isForm tries to determine whether the request should be
// processed as a form or as JSON.
//
// Virtually all existing use cases have assumed processing as JSON,
// and there has not been a Content-Type requirement in the API. In order to
// maintain backwards compatibility, this will err on the side of JSON.
// The request will be considered a form only if:
//
// 1. The content type is "application/x-www-form-urlencoded"
// 2. The start of the request doesn't look like JSON. For this test we
// we expect the body to begin with { or [, ignoring leading whitespace.
func isForm(head []byte, contentType string) bool {
contentType, _, err := mime.ParseMediaType(contentType)
if err != nil || contentType != "application/x-www-form-urlencoded" {
return false
}
// Look for the start of JSON or not-JSON, skipping any insignificant
// whitespace (per https://tools.ietf.org/html/rfc7159#section-2).
for _, c := range head {
switch c {
case ' ', '\t', '\n', '\r':
continue
case '[', '{': // JSON
return false
default: // not JSON
return true
}
}
return true
}
func respondError(w http.ResponseWriter, status int, err error) {
logical.RespondError(w, status, err)
}
func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool {
statusCode, newErr := logical.RespondErrorCommon(req, resp, err)
if newErr == nil && statusCode == 0 {
return false
}
respondError(w, statusCode, newErr)
return true
}
func respondOk(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
if body == nil {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
enc.Encode(body)
}
}
| http/handler.go | 1 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.998231828212738,
0.010104850865900517,
0.00015994074055925012,
0.00019112108566332608,
0.09509864449501038
] |
{
"id": 1,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\t// best effort, ignore\n",
"\t\t}\n",
"\t\thttpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}\n",
"\t\tinput.Response = logical.HTTPResponseToLogicalResponse(httpResp)\n",
"\t\tcore.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\treturn\n",
"\t})\n",
"}\n",
"\n",
"// wrapGenericHandler wraps the handler with an extra layer of handler where\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 247
} | package client // import "github.com/docker/docker/client"
import "context"
// SecretRemove removes a Secret.
func (cli *Client) SecretRemove(ctx context.Context, id string) error {
if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
return err
}
resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
defer ensureReaderClosed(resp)
return wrapResponseError(err, resp, "secret", id)
}
| vendor/github.com/docker/docker/client/secret_remove.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0001665976451477036,
0.00016544447862543166,
0.00016429131210315973,
0.00016544447862543166,
0.0000011531665222719312
] |
{
"id": 1,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\t// best effort, ignore\n",
"\t\t}\n",
"\t\thttpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}\n",
"\t\tinput.Response = logical.HTTPResponseToLogicalResponse(httpResp)\n",
"\t\tcore.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\treturn\n",
"\t})\n",
"}\n",
"\n",
"// wrapGenericHandler wraps the handler with an extra layer of handler where\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 247
} | // Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Metric Cluster API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/metric_cluster
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// MetricQuery object
type MetricQuery struct {
Query string `json:"query"`
Type string `json:"type"`
}
// MetricCluster defines a metric cluster. See https://login.circonus.com/resources/api/calls/metric_cluster for more information.
type MetricCluster struct {
CID string `json:"_cid,omitempty"` // string
Description string `json:"description"` // string
MatchingMetrics []string `json:"_matching_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set)
MatchingUUIDMetrics map[string][]string `json:"_matching_uuid_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set)
Name string `json:"name"` // string
Queries []MetricQuery `json:"queries"` // [] len >= 1
Tags []string `json:"tags"` // [] len >= 0
}
// NewMetricCluster returns a new MetricCluster (with defaults, if applicable)
func NewMetricCluster() *MetricCluster {
return &MetricCluster{}
}
// FetchMetricCluster retrieves metric cluster with passed cid.
func (a *API) FetchMetricCluster(cid CIDType, extras string) (*MetricCluster, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid metric cluster CID [none]")
}
clusterCID := string(*cid)
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
}
reqURL := url.URL{
Path: clusterCID,
}
extra := ""
switch extras {
case "metrics":
extra = "_matching_metrics"
case "uuids":
extra = "_matching_uuid_metrics"
}
if extra != "" {
q := url.Values{}
q.Set("extra", extra)
reqURL.RawQuery = q.Encode()
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch metric cluster, received JSON: %s", string(result))
}
cluster := &MetricCluster{}
if err := json.Unmarshal(result, cluster); err != nil {
return nil, err
}
return cluster, nil
}
// FetchMetricClusters retrieves all metric clusters available to API Token.
func (a *API) FetchMetricClusters(extras string) (*[]MetricCluster, error) {
reqURL := url.URL{
Path: config.MetricClusterPrefix,
}
extra := ""
switch extras {
case "metrics":
extra = "_matching_metrics"
case "uuids":
extra = "_matching_uuid_metrics"
}
if extra != "" {
q := url.Values{}
q.Set("extra", extra)
reqURL.RawQuery = q.Encode()
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, err
}
var clusters []MetricCluster
if err := json.Unmarshal(result, &clusters); err != nil {
return nil, err
}
return &clusters, nil
}
// UpdateMetricCluster updates passed metric cluster.
func (a *API) UpdateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid metric cluster config [nil]")
}
clusterCID := string(cfg.CID)
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update metric cluster, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(clusterCID, jsonCfg)
if err != nil {
return nil, err
}
cluster := &MetricCluster{}
if err := json.Unmarshal(result, cluster); err != nil {
return nil, err
}
return cluster, nil
}
// CreateMetricCluster creates a new metric cluster.
func (a *API) CreateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid metric cluster config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create metric cluster, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.MetricClusterPrefix, jsonCfg)
if err != nil {
return nil, err
}
cluster := &MetricCluster{}
if err := json.Unmarshal(result, cluster); err != nil {
return nil, err
}
return cluster, nil
}
// DeleteMetricCluster deletes passed metric cluster.
func (a *API) DeleteMetricCluster(cfg *MetricCluster) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid metric cluster config [nil]")
}
return a.DeleteMetricClusterByCID(CIDType(&cfg.CID))
}
// DeleteMetricClusterByCID deletes metric cluster with passed cid.
func (a *API) DeleteMetricClusterByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid metric cluster CID [none]")
}
clusterCID := string(*cid)
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
}
_, err = a.Delete(clusterCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchMetricClusters returns metric clusters matching the specified
// search query and/or filter. If nil is passed for both parameters
// all metric clusters will be returned.
func (a *API) SearchMetricClusters(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]MetricCluster, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchMetricClusters("")
}
reqURL := url.URL{
Path: config.MetricClusterPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var clusters []MetricCluster
if err := json.Unmarshal(result, &clusters); err != nil {
return nil, err
}
return &clusters, nil
}
| vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0004097174387425184,
0.00018235220341011882,
0.00016023839998524636,
0.00017227463831659406,
0.000046539509639842436
] |
{
"id": 1,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\t// best effort, ignore\n",
"\t\t}\n",
"\t\thttpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}\n",
"\t\tinput.Response = logical.HTTPResponseToLogicalResponse(httpResp)\n",
"\t\tcore.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\treturn\n",
"\t})\n",
"}\n",
"\n",
"// wrapGenericHandler wraps the handler with an extra layer of handler where\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr = core.AuditLogger().AuditResponse(r.Context(), input)\n",
"\t\tif err != nil {\n",
"\t\t\trespondError(w, status, err)\n",
"\t\t}\n"
],
"file_path": "http/handler.go",
"type": "replace",
"edit_start_line_idx": 247
} | package protocol
import (
"strings"
"github.com/aws/aws-sdk-go/aws/request"
)
// ValidateEndpointHostHandler is a request handler that will validate the
// request endpoint's hosts is a valid RFC 3986 host.
var ValidateEndpointHostHandler = request.NamedHandler{
Name: "awssdk.protocol.ValidateEndpointHostHandler",
Fn: func(r *request.Request) {
err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
if err != nil {
r.Error = err
}
},
}
// ValidateEndpointHost validates that the host string passed in is a valid RFC
// 3986 host. Returns error if the host is not valid.
func ValidateEndpointHost(opName, host string) error {
paramErrs := request.ErrInvalidParams{Context: opName}
labels := strings.Split(host, ".")
for i, label := range labels {
if i == len(labels)-1 && len(label) == 0 {
// Allow trailing dot for FQDN hosts.
continue
}
if !ValidHostLabel(label) {
paramErrs.Add(request.NewErrParamFormat(
"endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
}
}
if len(host) > 255 {
paramErrs.Add(request.NewErrParamMaxLen(
"endpoint host", 255, host,
))
}
if paramErrs.Len() > 0 {
return paramErrs
}
return nil
}
// ValidHostLabel returns if the label is a valid RFC 3986 host label.
func ValidHostLabel(label string) bool {
if l := len(label); l == 0 || l > 63 {
return false
}
for _, r := range label {
switch {
case r >= '0' && r <= '9':
case r >= 'A' && r <= 'Z':
case r >= 'a' && r <= 'z':
case r == '-':
default:
return false
}
}
return true
}
| vendor/github.com/aws/aws-sdk-go/private/protocol/host.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0016614715568721294,
0.0006195927271619439,
0.00016185831918846816,
0.00027756745112128556,
0.0005529874470084906
] |
{
"id": 2,
"code_window": [
"\tuuid \"github.com/hashicorp/go-uuid\"\n",
"\t\"github.com/hashicorp/vault/audit\"\n",
"\t\"github.com/hashicorp/vault/helper/namespace\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/jsonutil\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/salt\"\n",
"\t\"github.com/hashicorp/vault/sdk/logical\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/hashicorp/vault/sdk/helper/consts\"\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 12
} | package http
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net"
"net/http"
"net/textproto"
"net/url"
"os"
"strings"
"time"
"github.com/NYTimes/gziphandler"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/pathmanager"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
const (
// WrapTTLHeaderName is the name of the header containing a directive to
// wrap the response
WrapTTLHeaderName = "X-Vault-Wrap-TTL"
// WrapFormatHeaderName is the name of the header containing the format to
// wrap in; has no effect if the wrap TTL is not set
WrapFormatHeaderName = "X-Vault-Wrap-Format"
// NoRequestForwardingHeaderName is the name of the header telling Vault
// not to use request forwarding
NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
// MFAHeaderName represents the HTTP header which carries the credentials
// required to perform MFA on any path.
MFAHeaderName = "X-Vault-MFA"
// canonicalMFAHeaderName is the MFA header value's format in the request
// headers. Do not alter the casing of this string.
canonicalMFAHeaderName = "X-Vault-Mfa"
// PolicyOverrideHeaderName is the header set to request overriding
// soft-mandatory Sentinel policies.
PolicyOverrideHeaderName = "X-Vault-Policy-Override"
// DefaultMaxRequestSize is the default maximum accepted request size. This
// is to prevent a denial of service attack where no Content-Length is
// provided and the server is fed ever more data until it exhausts memory.
// Can be overridden per listener.
DefaultMaxRequestSize = 32 * 1024 * 1024
)
var (
// Set to false by stub_asset if the ui build tag isn't enabled
uiBuiltIn = true
// perfStandbyAlwaysForwardPaths is used to check a requested path against
// the always forward list
perfStandbyAlwaysForwardPaths = pathmanager.New()
alwaysRedirectPaths = pathmanager.New()
injectDataIntoTopRoutes = []string{
"/v1/sys/audit",
"/v1/sys/audit/",
"/v1/sys/audit-hash/",
"/v1/sys/auth",
"/v1/sys/auth/",
"/v1/sys/config/cors",
"/v1/sys/config/auditing/request-headers/",
"/v1/sys/config/auditing/request-headers",
"/v1/sys/capabilities",
"/v1/sys/capabilities-accessor",
"/v1/sys/capabilities-self",
"/v1/sys/key-status",
"/v1/sys/mounts",
"/v1/sys/mounts/",
"/v1/sys/policy",
"/v1/sys/policy/",
"/v1/sys/rekey/backup",
"/v1/sys/rekey/recovery-key-backup",
"/v1/sys/remount",
"/v1/sys/rotate",
"/v1/sys/wrapping/wrap",
}
)
func init() {
alwaysRedirectPaths.AddPaths([]string{
"sys/storage/raft/snapshot",
"sys/storage/raft/snapshot-force",
})
}
// Handler returns an http.Handler for the API. This can be used on
// its own to mount the Vault API within another web server.
func Handler(props *vault.HandlerProperties) http.Handler {
core := props.Core
// Create the muxer to handle the actual endpoints
mux := http.NewServeMux()
switch {
case props.RecoveryMode:
raw := vault.NewRawBackend(core)
strategy := vault.GenerateRecoveryTokenStrategy(props.RecoveryToken)
mux.Handle("/v1/sys/raw/", handleLogicalRecovery(raw, props.RecoveryToken))
mux.Handle("/v1/sys/generate-recovery-token/attempt", handleSysGenerateRootAttempt(core, strategy))
mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy))
default:
// Handle non-forwarded paths
mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core))
mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/init", handleSysInit(core))
mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
mux.Handle("/v1/sys/seal", handleSysSeal(core))
mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core)))
mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
mux.Handle("/v1/sys/leader", handleSysLeader(core))
mux.Handle("/v1/sys/health", handleSysHealth(core))
mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core))
mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootUpdate(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false)))
mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false)))
mux.Handle("/v1/sys/rekey/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, false)))
mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true)))
mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core))
mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core))
for _, path := range injectDataIntoTopRoutes {
mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core)))
}
mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core)))
mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core)))
if core.UIEnabled() == true {
if uiBuiltIn {
mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))))
mux.Handle("/robots.txt", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))
} else {
mux.Handle("/ui/", handleUIHeaders(core, handleUIStub()))
}
mux.Handle("/ui", handleUIRedirect())
mux.Handle("/", handleUIRedirect())
}
// Register metrics path without authentication if enabled
if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess {
mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core))
} else {
mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core))
}
additionalRoutes(mux, core)
}
// Wrap the handler in another handler to trigger all help paths.
helpWrappedHandler := wrapHelpHandler(mux, core)
corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core)
genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props)
// Wrap the handler with PrintablePathCheckHandler to check for non-printable
// characters in the request path.
printablePathCheckHandler := genericWrappedHandler
if !props.DisablePrintableCheck {
printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil)
}
return printablePathCheckHandler
}
type copyResponseWriter struct {
wrapped http.ResponseWriter
statusCode int
body *bytes.Buffer
}
// newCopyResponseWriter returns an initialized newCopyResponseWriter
func newCopyResponseWriter(wrapped http.ResponseWriter) *copyResponseWriter {
w := ©ResponseWriter{
wrapped: wrapped,
body: new(bytes.Buffer),
statusCode: 200,
}
return w
}
func (w *copyResponseWriter) Header() http.Header {
return w.wrapped.Header()
}
func (w *copyResponseWriter) Write(buf []byte) (int, error) {
w.body.Write(buf)
return w.wrapped.Write(buf)
}
func (w *copyResponseWriter) WriteHeader(code int) {
w.statusCode = code
w.wrapped.WriteHeader(code)
}
func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
origBody := new(bytes.Buffer)
reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody))
r.Body = reader
req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
if err != nil || status != 0 {
respondError(w, status, err)
return
}
if origBody != nil {
r.Body = ioutil.NopCloser(origBody)
}
input := &logical.LogInput{
Request: req,
}
core.AuditLogger().AuditRequest(r.Context(), input)
cw := newCopyResponseWriter(w)
h.ServeHTTP(cw, r)
data := make(map[string]interface{})
err = jsonutil.DecodeJSON(cw.body.Bytes(), &data)
if err != nil {
// best effort, ignore
}
httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}
input.Response = logical.HTTPResponseToLogicalResponse(httpResp)
core.AuditLogger().AuditResponse(r.Context(), input)
return
})
}
// wrapGenericHandler wraps the handler with an extra layer of handler where
// tasks that should be commonly handled for all the requests and/or responses
// are performed.
func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler {
var maxRequestDuration time.Duration
var maxRequestSize int64
if props.ListenerConfig != nil {
maxRequestDuration = props.ListenerConfig.MaxRequestDuration
maxRequestSize = props.ListenerConfig.MaxRequestSize
}
if maxRequestDuration == 0 {
maxRequestDuration = vault.DefaultMaxRequestDuration
}
if maxRequestSize == 0 {
maxRequestSize = DefaultMaxRequestSize
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Set the Cache-Control header for all the responses returned
// by Vault
w.Header().Set("Cache-Control", "no-store")
// Start with the request context
ctx := r.Context()
var cancelFunc context.CancelFunc
// Add our timeout, but not for the monitor endpoint, as it's streaming
if strings.HasSuffix(r.URL.Path, "sys/monitor") {
ctx, cancelFunc = context.WithCancel(ctx)
} else {
ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration)
}
// Add a size limiter if desired
if maxRequestSize > 0 {
ctx = context.WithValue(ctx, "max_request_size", maxRequestSize)
}
ctx = context.WithValue(ctx, "original_request_path", r.URL.Path)
r = r.WithContext(ctx)
r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace))
switch {
case strings.HasPrefix(r.URL.Path, "/v1/"):
newR, status := adjustRequest(core, r)
if status != 0 {
respondError(w, status, nil)
cancelFunc()
return
}
r = newR
case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/":
default:
respondError(w, http.StatusNotFound, nil)
cancelFunc()
return
}
h.ServeHTTP(w, r)
cancelFunc()
return
})
}
func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler {
rejectNotPresent := l.XForwardedForRejectNotPresent
hopSkips := l.XForwardedForHopSkips
authorizedAddrs := l.XForwardedForAuthorizedAddrs
rejectNotAuthz := l.XForwardedForRejectNotAuthorized
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
if !headersOK || len(headers) == 0 {
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("missing x-forwarded-for header and configured to reject when not present"))
return
}
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
// If not rejecting treat it like we just don't have a valid
// header because we can't do a comparison against an address we
// can't understand
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client hostport: {{err}}", err))
return
}
addr, err := sockaddr.NewIPAddr(host)
if err != nil {
// We treat this the same as the case above
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client address: {{err}}", err))
return
}
var found bool
for _, authz := range authorizedAddrs {
if authz.Contains(addr) {
found = true
break
}
}
if !found {
// If we didn't find it and aren't configured to reject, simply
// don't trust it
if !rejectNotAuthz {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("client address not authorized for x-forwarded-for and configured to reject connection"))
return
}
// At this point we have at least one value and it's authorized
// Split comma separated ones, which are common. This brings it in line
// to the multiple-header case.
var acc []string
for _, header := range headers {
vals := strings.Split(header, ",")
for _, v := range vals {
acc = append(acc, strings.TrimSpace(v))
}
}
indexToUse := int64(len(acc)) - 1 - hopSkips
if indexToUse < 0 {
// This is likely an error in either configuration or other
// infrastructure. We could either deny the request, or we
// could simply not trust the value. Denying the request is
// "safer" since if this logic is configured at all there may
// be an assumption it can always be trusted. Given that we can
// deny accepting the request at all if it's not from an
// authorized address, if we're at this point the address is
// authorized (or we've turned off explicit rejection) and we
// should assume that what comes in should be properly
// formatted.
respondError(w, http.StatusBadRequest, fmt.Errorf("malformed x-forwarded-for configuration or request, hops to skip (%d) would skip before earliest chain link (chain length %d)", hopSkips, len(headers)))
return
}
r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port)
h.ServeHTTP(w, r)
return
})
}
// A lookup on a token that is about to expire returns nil, which means by the
// time we can validate a wrapping token lookup will return nil since it will
// be revoked after the call. So we have to do the validation here.
func wrappingVerificationFunc(ctx context.Context, core *vault.Core, req *logical.Request) error {
if req == nil {
return fmt.Errorf("invalid request")
}
valid, err := core.ValidateWrappingToken(ctx, req)
if err != nil {
return errwrap.Wrapf("error validating wrapping token: {{err}}", err)
}
if !valid {
return consts.ErrInvalidWrappingToken
}
return nil
}
// stripPrefix is a helper to strip a prefix from the path. It will
// return false from the second return value if it the prefix doesn't exist.
func stripPrefix(prefix, path string) (string, bool) {
if !strings.HasPrefix(path, prefix) {
return "", false
}
path = path[len(prefix):]
if path == "" {
return "", false
}
return path, true
}
func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
header := w.Header()
userHeaders, err := core.UIHeaders()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
if userHeaders != nil {
for k := range userHeaders {
v := userHeaders.Get(k)
header.Set(k, v)
}
}
h.ServeHTTP(w, req)
})
}
func handleUI(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
// The fileserver handler strips trailing slashes and does a redirect.
// We don't want the redirect to happen so we preemptively trim the slash
// here.
req.URL.Path = strings.TrimSuffix(req.URL.Path, "/")
h.ServeHTTP(w, req)
return
})
}
func handleUIStub() http.Handler {
stubHTML := `
<!DOCTYPE html>
<html>
<style>
body {
color: #1F2124;
font-family: system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
.wrapper {
display: flex;
justify-content: center;
align-items: center;
height: 500px;
}
.content ul {
line-height: 1.5;
}
a {
color: #1563ff;
text-decoration: none;
}
.header {
display: flex;
color: #6a7786;
align-items: center;
}
.header svg {
padding-right: 12px;
}
.alert {
transform: scale(0.07);
fill: #6a7786;
}
h1 {
font-weight: 500;
}
p {
margin-top: 0px;
}
</style>
<div class="wrapper">
<div class="content">
<div class="header">
<svg width="36px" height="36px" viewBox="0 0 36 36" xmlns="http://www.w3.org/2000/svg">
<path class="alert" d="M476.7 422.2L270.1 72.7c-2.9-5-8.3-8.7-14.1-8.7-5.9 0-11.3 3.7-14.1 8.7L35.3 422.2c-2.8 5-4.8 13-1.9 17.9 2.9 4.9 8.2 7.9 14 7.9h417.1c5.8 0 11.1-3 14-7.9 3-4.9 1-13-1.8-17.9zM288 400h-64v-48h64v48zm0-80h-64V176h64v144z"/>
</svg>
<h1>Vault UI is not available in this binary.</h1>
</div>
<p>To get Vault UI do one of the following:</p>
<ul>
<li><a href="https://www.vaultproject.io/downloads.html">Download an official release</a></li>
<li>Run <code>make bin</code> to create your own release binaries.
<li>Run <code>make dev-ui</code> to create a development binary with the UI.
</ul>
</div>
</div>
</html>
`
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte(stubHTML))
})
}
func handleUIRedirect() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, "/ui/", 307)
return
})
}
type UIAssetWrapper struct {
FileSystem *assetfs.AssetFS
}
func (fs *UIAssetWrapper) Open(name string) (http.File, error) {
file, err := fs.FileSystem.Open(name)
if err == nil {
return file, nil
}
// serve index.html instead of 404ing
if err == os.ErrNotExist {
return fs.FileSystem.Open("index.html")
}
return nil, err
}
func parseQuery(values url.Values) map[string]interface{} {
data := map[string]interface{}{}
for k, v := range values {
// Skip the help key as this is a reserved parameter
if k == "help" {
continue
}
switch {
case len(v) == 0:
case len(v) == 1:
data[k] = v[0]
default:
data[k] = v
}
}
if len(data) > 0 {
return data
}
return nil
}
func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, out interface{}) (io.ReadCloser, error) {
// Limit the maximum number of bytes to MaxRequestSize to protect
// against an indefinite amount of data being read.
reader := r.Body
ctx := r.Context()
maxRequestSize := ctx.Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
reader = http.MaxBytesReader(w, r.Body, max)
}
}
var origBody io.ReadWriter
if perfStandby {
// Since we're checking PerfStandby here we key on origBody being nil
// or not later, so we need to always allocate so it's non-nil
origBody = new(bytes.Buffer)
reader = ioutil.NopCloser(io.TeeReader(reader, origBody))
}
err := jsonutil.DecodeJSONFromReader(reader, out)
if err != nil && err != io.EOF {
return nil, errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
}
if origBody != nil {
return ioutil.NopCloser(origBody), err
}
return nil, err
}
// parseFormRequest parses values from a form POST.
//
// A nil map will be returned if the format is empty or invalid.
func parseFormRequest(r *http.Request) (map[string]interface{}, error) {
maxRequestSize := r.Context().Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
r.Body = ioutil.NopCloser(io.LimitReader(r.Body, max))
}
}
if err := r.ParseForm(); err != nil {
return nil, err
}
var data map[string]interface{}
if len(r.PostForm) != 0 {
data = make(map[string]interface{}, len(r.PostForm))
for k, v := range r.PostForm {
switch len(v) {
case 0:
case 1:
data[k] = v[0]
default:
// Almost anywhere taking in a string list can take in comma
// separated values, and really this is super niche anyways
data[k] = strings.Join(v, ",")
}
}
}
return data, nil
}
// handleRequestForwarding determines whether to forward a request or not,
// falling back on the older behavior of redirecting the client
func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// If we are a performance standby we can handle the request.
if core.PerfStandby() {
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
switch {
case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path):
handler.ServeHTTP(w, r)
return
case strings.HasPrefix(path, "auth/token/create/"):
isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path)
if err == nil && isBatch {
handler.ServeHTTP(w, r)
return
}
}
}
// Note: in an HA setup, this call will also ensure that connections to
// the leader are set up, as that happens once the advertised cluster
// values are read during this function
isLeader, leaderAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve request normally
handler.ServeHTTP(w, r)
return
}
// Some internal error occurred
respondError(w, http.StatusInternalServerError, err)
return
}
if isLeader {
// No forwarding needed, we're leader
handler.ServeHTTP(w, r)
return
}
if leaderAddr == "" {
respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found"))
return
}
forwardRequest(core, w, r)
return
})
}
func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) {
if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
respondStandby(core, w, r.URL)
return
}
if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request")
respondStandby(core, w, r.URL)
return
}
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
if alwaysRedirectPaths.HasPath(path) {
respondStandby(core, w, r.URL)
return
}
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
statusCode, header, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back")
} else {
core.Logger().Error("forward request error", "error", err)
}
// Fall back to redirection
respondStandby(core, w, r.URL)
return
}
if header != nil {
for k, v := range header {
w.Header()[k] = v
}
}
w.WriteHeader(statusCode)
w.Write(retBytes)
}
// request is a helper to perform a request and properly exit in the
// case of an error.
func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) {
resp, err := core.HandleRequest(rawReq.Context(), r)
if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) {
if resp == nil {
resp = &logical.Response{}
}
resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.")
}
if errwrap.Contains(err, consts.ErrStandby.Error()) {
respondStandby(core, w, rawReq.URL)
return resp, false, false
}
if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) {
return nil, false, true
}
if resp != nil && len(resp.Headers) > 0 {
// Set this here so it will take effect regardless of any other type of
// response processing
header := w.Header()
for k, v := range resp.Headers {
for _, h := range v {
header.Add(k, h)
}
}
switch {
case resp.Secret != nil,
resp.Auth != nil,
len(resp.Data) > 0,
resp.Redirect != "",
len(resp.Warnings) > 0,
resp.WrapInfo != nil:
// Nothing, resp has data
default:
// We have an otherwise totally empty response except for headers,
// so nil out the response now that the headers are written out
resp = nil
}
}
// If vault's core has already written to the response writer do not add any
// additional output. Headers have already been sent. If the response writer
// is set but has not been written to it likely means there was some kind of
// error
if r.ResponseWriter != nil && r.ResponseWriter.Written() {
return nil, true, false
}
if respondErrorCommon(w, r, resp, err) {
return resp, false, false
}
return resp, true, false
}
// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
// Request the leader address
_, redirectAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve 503
err = errors.New("node is not active")
respondError(w, http.StatusServiceUnavailable, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
// If there is no leader, generate a 503 error
if redirectAddr == "" {
err = errors.New("no active Vault instance found")
respondError(w, http.StatusServiceUnavailable, err)
return
}
// Parse the redirect location
redirectURL, err := url.Parse(redirectAddr)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
// Generate a redirect URL
finalURL := url.URL{
Scheme: redirectURL.Scheme,
Host: redirectURL.Host,
Path: reqURL.Path,
RawQuery: reqURL.RawQuery,
}
// Ensure there is a scheme, default to https
if finalURL.Scheme == "" {
finalURL.Scheme = "https"
}
// If we have an address, redirect! We use a 307 code
// because we don't actually know if its permanent and
// the request method should be preserved.
w.Header().Set("Location", finalURL.String())
w.WriteHeader(307)
}
// getTokenFromReq parse headers of the incoming request to extract token if
// present it accepts Authorization Bearer (RFC6750) and X-Vault-Token header.
// Returns true if the token was sourced from a Bearer header.
func getTokenFromReq(r *http.Request) (string, bool) {
if token := r.Header.Get(consts.AuthHeaderName); token != "" {
return token, false
}
if headers, ok := r.Header["Authorization"]; ok {
// Reference for Authorization header format: https://tools.ietf.org/html/rfc7236#section-3
// If string does not start by 'Bearer ', it is not one we would use,
// but might be used by plugins
for _, v := range headers {
if !strings.HasPrefix(v, "Bearer ") {
continue
}
return strings.TrimSpace(v[7:]), true
}
}
return "", false
}
// requestAuth adds the token to the logical.Request if it exists.
func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) (*logical.Request, error) {
// Attach the header value if we have it
token, fromAuthzHeader := getTokenFromReq(r)
if token != "" {
req.ClientToken = token
req.ClientTokenSource = logical.ClientTokenFromVaultHeader
if fromAuthzHeader {
req.ClientTokenSource = logical.ClientTokenFromAuthzHeader
}
// Also attach the accessor if we have it. This doesn't fail if it
// doesn't exist because the request may be to an unauthenticated
// endpoint/login endpoint where a bad current token doesn't matter, or
// a token from a Vault version pre-accessors. We ignore errors for
// JWTs.
te, err := core.LookupToken(r.Context(), token)
if err != nil {
dotCount := strings.Count(token, ".")
// If we have two dots but the second char is a dot it's a vault
// token of the form s.SOMETHING.nsid, not a JWT
if dotCount != 2 ||
dotCount == 2 && token[1] == '.' {
return req, err
}
}
if err == nil && te != nil {
req.ClientTokenAccessor = te.Accessor
req.ClientTokenRemainingUses = te.NumUses
req.SetTokenEntry(te)
}
}
return req, nil
}
func requestPolicyOverride(r *http.Request, req *logical.Request) error {
raw := r.Header.Get(PolicyOverrideHeaderName)
if raw == "" {
return nil
}
override, err := parseutil.ParseBool(raw)
if err != nil {
return err
}
req.PolicyOverride = override
return nil
}
// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
// First try for the header value
wrapTTL := r.Header.Get(WrapTTLHeaderName)
if wrapTTL == "" {
return req, nil
}
// If it has an allowed suffix parse as a duration string
dur, err := parseutil.ParseDurationSecond(wrapTTL)
if err != nil {
return req, err
}
if int64(dur) < 0 {
return req, fmt.Errorf("requested wrap ttl cannot be negative")
}
req.WrapInfo = &logical.RequestWrapInfo{
TTL: dur,
}
wrapFormat := r.Header.Get(WrapFormatHeaderName)
switch wrapFormat {
case "jwt":
req.WrapInfo.Format = "jwt"
}
return req, nil
}
// parseMFAHeader parses the MFAHeaderName in the request headers and organizes
// them with MFA method name as the index.
func parseMFAHeader(req *logical.Request) error {
if req == nil {
return fmt.Errorf("request is nil")
}
if req.Headers == nil {
return nil
}
// Reset and initialize the credentials in the request
req.MFACreds = make(map[string][]string)
for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] {
// Skip the header with no value in it
if mfaHeaderValue == "" {
continue
}
// Handle the case where only method name is mentioned and no value
// is supplied
if !strings.Contains(mfaHeaderValue, ":") {
// Mark the presence of method name, but set an empty set to it
// indicating that there were no values supplied for the method
if req.MFACreds[mfaHeaderValue] == nil {
req.MFACreds[mfaHeaderValue] = []string{}
}
continue
}
shardSplits := strings.SplitN(mfaHeaderValue, ":", 2)
if shardSplits[0] == "" {
return fmt.Errorf("invalid data in header %q; missing method name", MFAHeaderName)
}
if shardSplits[1] == "" {
return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName)
}
req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1])
}
return nil
}
// isForm tries to determine whether the request should be
// processed as a form or as JSON.
//
// Virtually all existing use cases have assumed processing as JSON,
// and there has not been a Content-Type requirement in the API. In order to
// maintain backwards compatibility, this will err on the side of JSON.
// The request will be considered a form only if:
//
// 1. The content type is "application/x-www-form-urlencoded"
// 2. The start of the request doesn't look like JSON. For this test we
// we expect the body to begin with { or [, ignoring leading whitespace.
func isForm(head []byte, contentType string) bool {
contentType, _, err := mime.ParseMediaType(contentType)
if err != nil || contentType != "application/x-www-form-urlencoded" {
return false
}
// Look for the start of JSON or not-JSON, skipping any insignificant
// whitespace (per https://tools.ietf.org/html/rfc7159#section-2).
for _, c := range head {
switch c {
case ' ', '\t', '\n', '\r':
continue
case '[', '{': // JSON
return false
default: // not JSON
return true
}
}
return true
}
func respondError(w http.ResponseWriter, status int, err error) {
logical.RespondError(w, status, err)
}
func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool {
statusCode, newErr := logical.RespondErrorCommon(req, resp, err)
if newErr == nil && statusCode == 0 {
return false
}
respondError(w, statusCode, newErr)
return true
}
func respondOk(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
if body == nil {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
enc.Encode(body)
}
}
| http/handler.go | 1 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.02734464965760708,
0.0008646283531561494,
0.00016153040633071214,
0.0001711643417365849,
0.0028744966257363558
] |
{
"id": 2,
"code_window": [
"\tuuid \"github.com/hashicorp/go-uuid\"\n",
"\t\"github.com/hashicorp/vault/audit\"\n",
"\t\"github.com/hashicorp/vault/helper/namespace\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/jsonutil\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/salt\"\n",
"\t\"github.com/hashicorp/vault/sdk/logical\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/hashicorp/vault/sdk/helper/consts\"\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 12
} | package command
import (
"fmt"
"strings"
"github.com/mitchellh/cli"
"github.com/posener/complete"
)
var _ cli.Command = (*KVListCommand)(nil)
var _ cli.CommandAutocomplete = (*KVListCommand)(nil)
type KVListCommand struct {
*BaseCommand
}
func (c *KVListCommand) Synopsis() string {
return "List data or secrets"
}
func (c *KVListCommand) Help() string {
helpText := `
Usage: vault kv list [options] PATH
Lists data from Vault's key-value store at the given path.
List values under the "my-app" folder of the key-value store:
$ vault kv list secret/my-app/
Additional flags and more advanced use cases are detailed below.
` + c.Flags().Help()
return strings.TrimSpace(helpText)
}
func (c *KVListCommand) Flags() *FlagSets {
return c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
}
func (c *KVListCommand) AutocompleteArgs() complete.Predictor {
return c.PredictVaultFolders()
}
func (c *KVListCommand) AutocompleteFlags() complete.Flags {
return c.Flags().Completions()
}
func (c *KVListCommand) Run(args []string) int {
f := c.Flags()
if err := f.Parse(args); err != nil {
c.UI.Error(err.Error())
return 1
}
args = f.Args()
switch {
case len(args) < 1:
c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
return 1
case len(args) > 1:
c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
return 1
}
client, err := c.Client()
if err != nil {
c.UI.Error(err.Error())
return 2
}
path := ensureTrailingSlash(sanitizePath(args[0]))
mountPath, v2, err := isKVv2(path, client)
if err != nil {
c.UI.Error(err.Error())
return 2
}
if v2 {
path = addPrefixToVKVPath(path, mountPath, "metadata")
if err != nil {
c.UI.Error(err.Error())
return 2
}
}
secret, err := client.Logical().List(path)
if err != nil {
c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err))
return 2
}
_, ok := extractListData(secret)
if Format(c.UI) != "table" {
if secret == nil || secret.Data == nil || !ok {
OutputData(c.UI, map[string]interface{}{})
return 2
}
}
if secret == nil || secret.Data == nil {
c.UI.Error(fmt.Sprintf("No value found at %s", path))
return 2
}
// If the secret is wrapped, return the wrapped response.
if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 {
return OutputSecret(c.UI, secret)
}
if !ok {
c.UI.Error(fmt.Sprintf("No entries found at %s", path))
return 2
}
return OutputList(c.UI, secret)
}
| command/kv_list.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0002293601428391412,
0.0001790945971151814,
0.00016617737128399312,
0.00017117364041041583,
0.000017088947060983628
] |
{
"id": 2,
"code_window": [
"\tuuid \"github.com/hashicorp/go-uuid\"\n",
"\t\"github.com/hashicorp/vault/audit\"\n",
"\t\"github.com/hashicorp/vault/helper/namespace\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/jsonutil\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/salt\"\n",
"\t\"github.com/hashicorp/vault/sdk/logical\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/hashicorp/vault/sdk/helper/consts\"\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 12
} | // Package endpoints provides the types and functionality for defining regions
// and endpoints, as well as querying those definitions.
//
// The SDK's Regions and Endpoints metadata is code generated into the endpoints
// package, and is accessible via the DefaultResolver function. This function
// returns a endpoint Resolver will search the metadata and build an associated
// endpoint if one is found. The default resolver will search all partitions
// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and
// AWS GovCloud (US) (aws-us-gov).
// .
//
// Enumerating Regions and Endpoint Metadata
//
// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface
// will allow you to get access to the list of underlying Partitions with the
// Partitions method. This is helpful if you want to limit the SDK's endpoint
// resolving to a single partition, or enumerate regions, services, and endpoints
// in the partition.
//
// resolver := endpoints.DefaultResolver()
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
//
// for _, p := range partitions {
// fmt.Println("Regions for", p.ID())
// for id, _ := range p.Regions() {
// fmt.Println("*", id)
// }
//
// fmt.Println("Services for", p.ID())
// for id, _ := range p.Services() {
// fmt.Println("*", id)
// }
// }
//
// Using Custom Endpoints
//
// The endpoints package also gives you the ability to use your own logic how
// endpoints are resolved. This is a great way to define a custom endpoint
// for select services, without passing that logic down through your code.
//
// If a type implements the Resolver interface it can be used to resolve
// endpoints. To use this with the SDK's Session and Config set the value
// of the type to the EndpointsResolver field of aws.Config when initializing
// the session, or service client.
//
// In addition the ResolverFunc is a wrapper for a func matching the signature
// of Resolver.EndpointFor, converting it to a type that satisfies the
// Resolver interface.
//
//
// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
// if service == endpoints.S3ServiceID {
// return endpoints.ResolvedEndpoint{
// URL: "s3.custom.endpoint.com",
// SigningRegion: "custom-signing-region",
// }, nil
// }
//
// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
// }
//
// sess := session.Must(session.NewSession(&aws.Config{
// Region: aws.String("us-west-2"),
// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
// }))
package endpoints
| vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.00017259657033719122,
0.00016741738363634795,
0.00016337541455868632,
0.00016514731396455318,
0.00000402325849790941
] |
{
"id": 2,
"code_window": [
"\tuuid \"github.com/hashicorp/go-uuid\"\n",
"\t\"github.com/hashicorp/vault/audit\"\n",
"\t\"github.com/hashicorp/vault/helper/namespace\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/jsonutil\"\n",
"\t\"github.com/hashicorp/vault/sdk/helper/salt\"\n",
"\t\"github.com/hashicorp/vault/sdk/logical\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/hashicorp/vault/sdk/helper/consts\"\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 12
} | package api
import (
"errors"
"math/rand"
"sync"
"time"
)
var (
ErrLifetimeWatcherMissingInput = errors.New("missing input")
ErrLifetimeWatcherMissingSecret = errors.New("missing secret")
ErrLifetimeWatcherNotRenewable = errors.New("secret is not renewable")
ErrLifetimeWatcherNoSecretData = errors.New("returned empty secret data")
// Deprecated; kept for compatibility
ErrRenewerMissingInput = errors.New("missing input to renewer")
ErrRenewerMissingSecret = errors.New("missing secret to renew")
ErrRenewerNotRenewable = errors.New("secret is not renewable")
ErrRenewerNoSecretData = errors.New("returned empty secret data")
// DefaultLifetimeWatcherRenewBuffer is the default size of the buffer for renew
// messages on the channel.
DefaultLifetimeWatcherRenewBuffer = 5
// Deprecated: kept for backwards compatibility
DefaultRenewerRenewBuffer = 5
)
type RenewBehavior uint
const (
// RenewBehaviorIgnoreErrors means we will attempt to keep renewing until
// we hit the lifetime threshold. It also ignores errors stemming from
// passing a non-renewable lease in. In practice, this means you simply
// reauthenticate/refetch credentials when the watcher exits. This is the
// default.
RenewBehaviorIgnoreErrors RenewBehavior = iota
// RenewBehaviorRenewDisabled turns off renewal attempts entirely. This
// allows you to simply watch lifetime and have the watcher return at a
// reasonable threshold without actually making Vault calls.
RenewBehaviorRenewDisabled
// RenewBehaviorErrorOnErrors is the "legacy" behavior which always exits
// on some kind of error
RenewBehaviorErrorOnErrors
)
// LifetimeWatcher is a process for watching lifetime of a secret.
//
// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{
// Secret: mySecret,
// })
// go watcher.Start()
// defer watcher.Stop()
//
// for {
// select {
// case err := <-watcher.DoneCh():
// if err != nil {
// log.Fatal(err)
// }
//
// // Renewal is now over
// case renewal := <-watcher.RenewCh():
// log.Printf("Successfully renewed: %#v", renewal)
// }
// }
//
//
// `DoneCh` will return if renewal fails, or if the remaining lease duration is
// under a built-in threshold and either renewing is not extending it or
// renewing is disabled. In both cases, the caller should attempt a re-read of
// the secret. Clients should check the return value of the channel to see if
// renewal was successful.
type LifetimeWatcher struct {
l sync.Mutex
client *Client
secret *Secret
grace time.Duration
random *rand.Rand
increment int
doneCh chan error
renewCh chan *RenewOutput
renewBehavior RenewBehavior
stopped bool
stopCh chan struct{}
errLifetimeWatcherNotRenewable error
errLifetimeWatcherNoSecretData error
}
// LifetimeWatcherInput is used as input to the renew function.
type LifetimeWatcherInput struct {
// Secret is the secret to renew
Secret *Secret
// DEPRECATED: this does not do anything.
Grace time.Duration
// Rand is the randomizer to use for underlying randomization. If not
// provided, one will be generated and seeded automatically. If provided, it
// is assumed to have already been seeded.
Rand *rand.Rand
// RenewBuffer is the size of the buffered channel where renew messages are
// dispatched.
RenewBuffer int
// The new TTL, in seconds, that should be set on the lease. The TTL set
// here may or may not be honored by the vault server, based on Vault
// configuration or any associated max TTL values.
Increment int
// RenewBehavior controls what happens when a renewal errors or the
// passed-in secret is not renewable.
RenewBehavior RenewBehavior
}
// RenewOutput is the metadata returned to the client (if it's listening) to
// renew messages.
type RenewOutput struct {
// RenewedAt is the timestamp when the renewal took place (UTC).
RenewedAt time.Time
// Secret is the underlying renewal data. It's the same struct as all data
// that is returned from Vault, but since this is renewal data, it will not
// usually include the secret itself.
Secret *Secret
}
// NewLifetimeWatcher creates a new renewer from the given input.
func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, error) {
if i == nil {
return nil, ErrLifetimeWatcherMissingInput
}
secret := i.Secret
if secret == nil {
return nil, ErrLifetimeWatcherMissingSecret
}
random := i.Rand
if random == nil {
random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
}
renewBuffer := i.RenewBuffer
if renewBuffer == 0 {
renewBuffer = DefaultLifetimeWatcherRenewBuffer
}
return &LifetimeWatcher{
client: c,
secret: secret,
increment: i.Increment,
random: random,
doneCh: make(chan error, 1),
renewCh: make(chan *RenewOutput, renewBuffer),
renewBehavior: i.RenewBehavior,
stopped: false,
stopCh: make(chan struct{}),
errLifetimeWatcherNotRenewable: ErrLifetimeWatcherNotRenewable,
errLifetimeWatcherNoSecretData: ErrLifetimeWatcherNoSecretData,
}, nil
}
// Deprecated: exists only for backwards compatibility. Calls
// NewLifetimeWatcher, and sets compatibility flags.
func (c *Client) NewRenewer(i *LifetimeWatcherInput) (*LifetimeWatcher, error) {
if i == nil {
return nil, ErrRenewerMissingInput
}
secret := i.Secret
if secret == nil {
return nil, ErrRenewerMissingSecret
}
renewer, err := c.NewLifetimeWatcher(i)
if err != nil {
return nil, err
}
renewer.renewBehavior = RenewBehaviorErrorOnErrors
renewer.errLifetimeWatcherNotRenewable = ErrRenewerNotRenewable
renewer.errLifetimeWatcherNoSecretData = ErrRenewerNoSecretData
return renewer, err
}
// DoneCh returns the channel where the renewer will publish when renewal stops.
// If there is an error, this will be an error.
func (r *LifetimeWatcher) DoneCh() <-chan error {
return r.doneCh
}
// RenewCh is a channel that receives a message when a successful renewal takes
// place and includes metadata about the renewal.
func (r *LifetimeWatcher) RenewCh() <-chan *RenewOutput {
return r.renewCh
}
// Stop stops the renewer.
func (r *LifetimeWatcher) Stop() {
r.l.Lock()
defer r.l.Unlock()
if !r.stopped {
close(r.stopCh)
r.stopped = true
}
}
// Start starts a background process for watching the lifetime of this secret.
// If renewal is enabled, when the secret has auth data, this attempts to renew
// the auth (token); When the secret has a lease, this attempts to renew the
// lease.
func (r *LifetimeWatcher) Start() {
r.doneCh <- r.doRenew()
}
// Renew is for comnpatibility with the legacy api.Renewer. Calling Renew
// simply chains to Start.
func (r *LifetimeWatcher) Renew() {
r.Start()
}
// renewAuth is a helper for renewing authentication.
func (r *LifetimeWatcher) doRenew() error {
var nonRenewable bool
var tokenMode bool
var initLeaseDuration int
var credString string
var renewFunc func(string, int) (*Secret, error)
switch {
case r.secret.Auth != nil:
tokenMode = true
nonRenewable = !r.secret.Auth.Renewable
initLeaseDuration = r.secret.Auth.LeaseDuration
credString = r.secret.Auth.ClientToken
renewFunc = r.client.Auth().Token().RenewTokenAsSelf
default:
nonRenewable = !r.secret.Renewable
initLeaseDuration = r.secret.LeaseDuration
credString = r.secret.LeaseID
renewFunc = r.client.Sys().Renew
}
if credString == "" ||
(nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) {
return r.errLifetimeWatcherNotRenewable
}
initialTime := time.Now()
priorDuration := time.Duration(initLeaseDuration) * time.Second
r.calculateGrace(priorDuration)
for {
// Check if we are stopped.
select {
case <-r.stopCh:
return nil
default:
}
var leaseDuration time.Duration
fallbackLeaseDuration := initialTime.Add(priorDuration).Sub(time.Now())
switch {
case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled:
// Can't or won't renew, just keep the same expiration so we exit
// when it's reauthentication time
leaseDuration = fallbackLeaseDuration
default:
// Renew the token
renewal, err := renewFunc(credString, r.increment)
if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) {
if r.renewBehavior == RenewBehaviorErrorOnErrors {
if err != nil {
return err
}
if renewal == nil || (tokenMode && renewal.Auth == nil) {
return r.errLifetimeWatcherNoSecretData
}
}
leaseDuration = fallbackLeaseDuration
break
}
// Push a message that a renewal took place.
select {
case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}:
default:
}
// Possibly error if we are not renewable
if ((tokenMode && !renewal.Auth.Renewable) || (!tokenMode && !renewal.Renewable)) &&
r.renewBehavior == RenewBehaviorErrorOnErrors {
return r.errLifetimeWatcherNotRenewable
}
// Grab the lease duration
newDuration := renewal.LeaseDuration
if tokenMode {
newDuration = renewal.Auth.LeaseDuration
}
leaseDuration = time.Duration(newDuration) * time.Second
}
// We keep evaluating a new grace period so long as the lease is
// extending. Once it stops extending, we've hit the max and need to
// rely on the grace duration.
if leaseDuration > priorDuration {
r.calculateGrace(leaseDuration)
}
priorDuration = leaseDuration
// The sleep duration is set to 2/3 of the current lease duration plus
// 1/3 of the current grace period, which adds jitter.
sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
// If we are within grace, return now; or, if the amount of time we
// would sleep would land us in the grace period. This helps with short
// tokens; for example, you don't want a current lease duration of 4
// seconds, a grace period of 3 seconds, and end up sleeping for more
// than three of those seconds and having a very small budget of time
// to renew.
if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace {
return nil
}
select {
case <-r.stopCh:
return nil
case <-time.After(sleepDuration):
continue
}
}
}
// sleepDuration calculates the time to sleep given the base lease duration. The
// base is the resulting lease duration. It will be reduced to 1/3 and
// multiplied by a random float between 0.0 and 1.0. This extra randomness
// prevents multiple clients from all trying to renew simultaneously.
func (r *LifetimeWatcher) sleepDuration(base time.Duration) time.Duration {
sleep := float64(base)
// Renew at 1/3 the remaining lease. This will give us an opportunity to retry
// at least one more time should the first renewal fail.
sleep = sleep / 3.0
// Use a randomness so many clients do not hit Vault simultaneously.
sleep = sleep * (r.random.Float64() + 1) / 2.0
return time.Duration(sleep)
}
// calculateGrace calculates the grace period based on a reasonable set of
// assumptions given the total lease time; it also adds some jitter to not have
// clients be in sync.
func (r *LifetimeWatcher) calculateGrace(leaseDuration time.Duration) {
if leaseDuration == 0 {
r.grace = 0
return
}
leaseNanos := float64(leaseDuration.Nanoseconds())
jitterMax := 0.1 * leaseNanos
// For a given lease duration, we want to allow 80-90% of that to elapse,
// so the remaining amount is the grace period
r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax))
}
type Renewer = LifetimeWatcher
type RenewerInput = LifetimeWatcherInput
| api/lifetime_watcher.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0005619421717710793,
0.000183089665370062,
0.00016296218382194638,
0.0001694487000349909,
0.00006297437357716262
] |
{
"id": 3,
"code_window": [
"\tc *Core\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditRequest(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 518
} | package http
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net"
"net/http"
"net/textproto"
"net/url"
"os"
"strings"
"time"
"github.com/NYTimes/gziphandler"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/pathmanager"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
const (
// WrapTTLHeaderName is the name of the header containing a directive to
// wrap the response
WrapTTLHeaderName = "X-Vault-Wrap-TTL"
// WrapFormatHeaderName is the name of the header containing the format to
// wrap in; has no effect if the wrap TTL is not set
WrapFormatHeaderName = "X-Vault-Wrap-Format"
// NoRequestForwardingHeaderName is the name of the header telling Vault
// not to use request forwarding
NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
// MFAHeaderName represents the HTTP header which carries the credentials
// required to perform MFA on any path.
MFAHeaderName = "X-Vault-MFA"
// canonicalMFAHeaderName is the MFA header value's format in the request
// headers. Do not alter the casing of this string.
canonicalMFAHeaderName = "X-Vault-Mfa"
// PolicyOverrideHeaderName is the header set to request overriding
// soft-mandatory Sentinel policies.
PolicyOverrideHeaderName = "X-Vault-Policy-Override"
// DefaultMaxRequestSize is the default maximum accepted request size. This
// is to prevent a denial of service attack where no Content-Length is
// provided and the server is fed ever more data until it exhausts memory.
// Can be overridden per listener.
DefaultMaxRequestSize = 32 * 1024 * 1024
)
var (
// Set to false by stub_asset if the ui build tag isn't enabled
uiBuiltIn = true
// perfStandbyAlwaysForwardPaths is used to check a requested path against
// the always forward list
perfStandbyAlwaysForwardPaths = pathmanager.New()
alwaysRedirectPaths = pathmanager.New()
injectDataIntoTopRoutes = []string{
"/v1/sys/audit",
"/v1/sys/audit/",
"/v1/sys/audit-hash/",
"/v1/sys/auth",
"/v1/sys/auth/",
"/v1/sys/config/cors",
"/v1/sys/config/auditing/request-headers/",
"/v1/sys/config/auditing/request-headers",
"/v1/sys/capabilities",
"/v1/sys/capabilities-accessor",
"/v1/sys/capabilities-self",
"/v1/sys/key-status",
"/v1/sys/mounts",
"/v1/sys/mounts/",
"/v1/sys/policy",
"/v1/sys/policy/",
"/v1/sys/rekey/backup",
"/v1/sys/rekey/recovery-key-backup",
"/v1/sys/remount",
"/v1/sys/rotate",
"/v1/sys/wrapping/wrap",
}
)
func init() {
alwaysRedirectPaths.AddPaths([]string{
"sys/storage/raft/snapshot",
"sys/storage/raft/snapshot-force",
})
}
// Handler returns an http.Handler for the API. This can be used on
// its own to mount the Vault API within another web server.
func Handler(props *vault.HandlerProperties) http.Handler {
core := props.Core
// Create the muxer to handle the actual endpoints
mux := http.NewServeMux()
switch {
case props.RecoveryMode:
raw := vault.NewRawBackend(core)
strategy := vault.GenerateRecoveryTokenStrategy(props.RecoveryToken)
mux.Handle("/v1/sys/raw/", handleLogicalRecovery(raw, props.RecoveryToken))
mux.Handle("/v1/sys/generate-recovery-token/attempt", handleSysGenerateRootAttempt(core, strategy))
mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy))
default:
// Handle non-forwarded paths
mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core))
mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/init", handleSysInit(core))
mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
mux.Handle("/v1/sys/seal", handleSysSeal(core))
mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core)))
mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
mux.Handle("/v1/sys/leader", handleSysLeader(core))
mux.Handle("/v1/sys/health", handleSysHealth(core))
mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core))
mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootUpdate(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false)))
mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false)))
mux.Handle("/v1/sys/rekey/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, false)))
mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true)))
mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core))
mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core))
for _, path := range injectDataIntoTopRoutes {
mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core)))
}
mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core)))
mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core)))
if core.UIEnabled() == true {
if uiBuiltIn {
mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))))
mux.Handle("/robots.txt", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))
} else {
mux.Handle("/ui/", handleUIHeaders(core, handleUIStub()))
}
mux.Handle("/ui", handleUIRedirect())
mux.Handle("/", handleUIRedirect())
}
// Register metrics path without authentication if enabled
if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess {
mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core))
} else {
mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core))
}
additionalRoutes(mux, core)
}
// Wrap the handler in another handler to trigger all help paths.
helpWrappedHandler := wrapHelpHandler(mux, core)
corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core)
genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props)
// Wrap the handler with PrintablePathCheckHandler to check for non-printable
// characters in the request path.
printablePathCheckHandler := genericWrappedHandler
if !props.DisablePrintableCheck {
printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil)
}
return printablePathCheckHandler
}
type copyResponseWriter struct {
wrapped http.ResponseWriter
statusCode int
body *bytes.Buffer
}
// newCopyResponseWriter returns an initialized newCopyResponseWriter
func newCopyResponseWriter(wrapped http.ResponseWriter) *copyResponseWriter {
w := ©ResponseWriter{
wrapped: wrapped,
body: new(bytes.Buffer),
statusCode: 200,
}
return w
}
func (w *copyResponseWriter) Header() http.Header {
return w.wrapped.Header()
}
func (w *copyResponseWriter) Write(buf []byte) (int, error) {
w.body.Write(buf)
return w.wrapped.Write(buf)
}
func (w *copyResponseWriter) WriteHeader(code int) {
w.statusCode = code
w.wrapped.WriteHeader(code)
}
func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
origBody := new(bytes.Buffer)
reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody))
r.Body = reader
req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
if err != nil || status != 0 {
respondError(w, status, err)
return
}
if origBody != nil {
r.Body = ioutil.NopCloser(origBody)
}
input := &logical.LogInput{
Request: req,
}
core.AuditLogger().AuditRequest(r.Context(), input)
cw := newCopyResponseWriter(w)
h.ServeHTTP(cw, r)
data := make(map[string]interface{})
err = jsonutil.DecodeJSON(cw.body.Bytes(), &data)
if err != nil {
// best effort, ignore
}
httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}
input.Response = logical.HTTPResponseToLogicalResponse(httpResp)
core.AuditLogger().AuditResponse(r.Context(), input)
return
})
}
// wrapGenericHandler wraps the handler with an extra layer of handler where
// tasks that should be commonly handled for all the requests and/or responses
// are performed.
func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler {
var maxRequestDuration time.Duration
var maxRequestSize int64
if props.ListenerConfig != nil {
maxRequestDuration = props.ListenerConfig.MaxRequestDuration
maxRequestSize = props.ListenerConfig.MaxRequestSize
}
if maxRequestDuration == 0 {
maxRequestDuration = vault.DefaultMaxRequestDuration
}
if maxRequestSize == 0 {
maxRequestSize = DefaultMaxRequestSize
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Set the Cache-Control header for all the responses returned
// by Vault
w.Header().Set("Cache-Control", "no-store")
// Start with the request context
ctx := r.Context()
var cancelFunc context.CancelFunc
// Add our timeout, but not for the monitor endpoint, as it's streaming
if strings.HasSuffix(r.URL.Path, "sys/monitor") {
ctx, cancelFunc = context.WithCancel(ctx)
} else {
ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration)
}
// Add a size limiter if desired
if maxRequestSize > 0 {
ctx = context.WithValue(ctx, "max_request_size", maxRequestSize)
}
ctx = context.WithValue(ctx, "original_request_path", r.URL.Path)
r = r.WithContext(ctx)
r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace))
switch {
case strings.HasPrefix(r.URL.Path, "/v1/"):
newR, status := adjustRequest(core, r)
if status != 0 {
respondError(w, status, nil)
cancelFunc()
return
}
r = newR
case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/":
default:
respondError(w, http.StatusNotFound, nil)
cancelFunc()
return
}
h.ServeHTTP(w, r)
cancelFunc()
return
})
}
func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler {
rejectNotPresent := l.XForwardedForRejectNotPresent
hopSkips := l.XForwardedForHopSkips
authorizedAddrs := l.XForwardedForAuthorizedAddrs
rejectNotAuthz := l.XForwardedForRejectNotAuthorized
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
if !headersOK || len(headers) == 0 {
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("missing x-forwarded-for header and configured to reject when not present"))
return
}
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
// If not rejecting treat it like we just don't have a valid
// header because we can't do a comparison against an address we
// can't understand
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client hostport: {{err}}", err))
return
}
addr, err := sockaddr.NewIPAddr(host)
if err != nil {
// We treat this the same as the case above
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client address: {{err}}", err))
return
}
var found bool
for _, authz := range authorizedAddrs {
if authz.Contains(addr) {
found = true
break
}
}
if !found {
// If we didn't find it and aren't configured to reject, simply
// don't trust it
if !rejectNotAuthz {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("client address not authorized for x-forwarded-for and configured to reject connection"))
return
}
// At this point we have at least one value and it's authorized
// Split comma separated ones, which are common. This brings it in line
// to the multiple-header case.
var acc []string
for _, header := range headers {
vals := strings.Split(header, ",")
for _, v := range vals {
acc = append(acc, strings.TrimSpace(v))
}
}
indexToUse := int64(len(acc)) - 1 - hopSkips
if indexToUse < 0 {
// This is likely an error in either configuration or other
// infrastructure. We could either deny the request, or we
// could simply not trust the value. Denying the request is
// "safer" since if this logic is configured at all there may
// be an assumption it can always be trusted. Given that we can
// deny accepting the request at all if it's not from an
// authorized address, if we're at this point the address is
// authorized (or we've turned off explicit rejection) and we
// should assume that what comes in should be properly
// formatted.
respondError(w, http.StatusBadRequest, fmt.Errorf("malformed x-forwarded-for configuration or request, hops to skip (%d) would skip before earliest chain link (chain length %d)", hopSkips, len(headers)))
return
}
r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port)
h.ServeHTTP(w, r)
return
})
}
// A lookup on a token that is about to expire returns nil, which means by the
// time we can validate a wrapping token lookup will return nil since it will
// be revoked after the call. So we have to do the validation here.
func wrappingVerificationFunc(ctx context.Context, core *vault.Core, req *logical.Request) error {
if req == nil {
return fmt.Errorf("invalid request")
}
valid, err := core.ValidateWrappingToken(ctx, req)
if err != nil {
return errwrap.Wrapf("error validating wrapping token: {{err}}", err)
}
if !valid {
return consts.ErrInvalidWrappingToken
}
return nil
}
// stripPrefix is a helper to strip a prefix from the path. It will
// return false from the second return value if it the prefix doesn't exist.
func stripPrefix(prefix, path string) (string, bool) {
if !strings.HasPrefix(path, prefix) {
return "", false
}
path = path[len(prefix):]
if path == "" {
return "", false
}
return path, true
}
func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
header := w.Header()
userHeaders, err := core.UIHeaders()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
if userHeaders != nil {
for k := range userHeaders {
v := userHeaders.Get(k)
header.Set(k, v)
}
}
h.ServeHTTP(w, req)
})
}
func handleUI(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
// The fileserver handler strips trailing slashes and does a redirect.
// We don't want the redirect to happen so we preemptively trim the slash
// here.
req.URL.Path = strings.TrimSuffix(req.URL.Path, "/")
h.ServeHTTP(w, req)
return
})
}
func handleUIStub() http.Handler {
stubHTML := `
<!DOCTYPE html>
<html>
<style>
body {
color: #1F2124;
font-family: system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
.wrapper {
display: flex;
justify-content: center;
align-items: center;
height: 500px;
}
.content ul {
line-height: 1.5;
}
a {
color: #1563ff;
text-decoration: none;
}
.header {
display: flex;
color: #6a7786;
align-items: center;
}
.header svg {
padding-right: 12px;
}
.alert {
transform: scale(0.07);
fill: #6a7786;
}
h1 {
font-weight: 500;
}
p {
margin-top: 0px;
}
</style>
<div class="wrapper">
<div class="content">
<div class="header">
<svg width="36px" height="36px" viewBox="0 0 36 36" xmlns="http://www.w3.org/2000/svg">
<path class="alert" d="M476.7 422.2L270.1 72.7c-2.9-5-8.3-8.7-14.1-8.7-5.9 0-11.3 3.7-14.1 8.7L35.3 422.2c-2.8 5-4.8 13-1.9 17.9 2.9 4.9 8.2 7.9 14 7.9h417.1c5.8 0 11.1-3 14-7.9 3-4.9 1-13-1.8-17.9zM288 400h-64v-48h64v48zm0-80h-64V176h64v144z"/>
</svg>
<h1>Vault UI is not available in this binary.</h1>
</div>
<p>To get Vault UI do one of the following:</p>
<ul>
<li><a href="https://www.vaultproject.io/downloads.html">Download an official release</a></li>
<li>Run <code>make bin</code> to create your own release binaries.
<li>Run <code>make dev-ui</code> to create a development binary with the UI.
</ul>
</div>
</div>
</html>
`
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte(stubHTML))
})
}
func handleUIRedirect() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, "/ui/", 307)
return
})
}
type UIAssetWrapper struct {
FileSystem *assetfs.AssetFS
}
func (fs *UIAssetWrapper) Open(name string) (http.File, error) {
file, err := fs.FileSystem.Open(name)
if err == nil {
return file, nil
}
// serve index.html instead of 404ing
if err == os.ErrNotExist {
return fs.FileSystem.Open("index.html")
}
return nil, err
}
func parseQuery(values url.Values) map[string]interface{} {
data := map[string]interface{}{}
for k, v := range values {
// Skip the help key as this is a reserved parameter
if k == "help" {
continue
}
switch {
case len(v) == 0:
case len(v) == 1:
data[k] = v[0]
default:
data[k] = v
}
}
if len(data) > 0 {
return data
}
return nil
}
func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, out interface{}) (io.ReadCloser, error) {
// Limit the maximum number of bytes to MaxRequestSize to protect
// against an indefinite amount of data being read.
reader := r.Body
ctx := r.Context()
maxRequestSize := ctx.Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
reader = http.MaxBytesReader(w, r.Body, max)
}
}
var origBody io.ReadWriter
if perfStandby {
// Since we're checking PerfStandby here we key on origBody being nil
// or not later, so we need to always allocate so it's non-nil
origBody = new(bytes.Buffer)
reader = ioutil.NopCloser(io.TeeReader(reader, origBody))
}
err := jsonutil.DecodeJSONFromReader(reader, out)
if err != nil && err != io.EOF {
return nil, errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
}
if origBody != nil {
return ioutil.NopCloser(origBody), err
}
return nil, err
}
// parseFormRequest parses values from a form POST.
//
// A nil map will be returned if the format is empty or invalid.
func parseFormRequest(r *http.Request) (map[string]interface{}, error) {
maxRequestSize := r.Context().Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
r.Body = ioutil.NopCloser(io.LimitReader(r.Body, max))
}
}
if err := r.ParseForm(); err != nil {
return nil, err
}
var data map[string]interface{}
if len(r.PostForm) != 0 {
data = make(map[string]interface{}, len(r.PostForm))
for k, v := range r.PostForm {
switch len(v) {
case 0:
case 1:
data[k] = v[0]
default:
// Almost anywhere taking in a string list can take in comma
// separated values, and really this is super niche anyways
data[k] = strings.Join(v, ",")
}
}
}
return data, nil
}
// handleRequestForwarding determines whether to forward a request or not,
// falling back on the older behavior of redirecting the client
func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// If we are a performance standby we can handle the request.
if core.PerfStandby() {
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
switch {
case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path):
handler.ServeHTTP(w, r)
return
case strings.HasPrefix(path, "auth/token/create/"):
isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path)
if err == nil && isBatch {
handler.ServeHTTP(w, r)
return
}
}
}
// Note: in an HA setup, this call will also ensure that connections to
// the leader are set up, as that happens once the advertised cluster
// values are read during this function
isLeader, leaderAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve request normally
handler.ServeHTTP(w, r)
return
}
// Some internal error occurred
respondError(w, http.StatusInternalServerError, err)
return
}
if isLeader {
// No forwarding needed, we're leader
handler.ServeHTTP(w, r)
return
}
if leaderAddr == "" {
respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found"))
return
}
forwardRequest(core, w, r)
return
})
}
func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) {
if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
respondStandby(core, w, r.URL)
return
}
if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request")
respondStandby(core, w, r.URL)
return
}
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
if alwaysRedirectPaths.HasPath(path) {
respondStandby(core, w, r.URL)
return
}
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
statusCode, header, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back")
} else {
core.Logger().Error("forward request error", "error", err)
}
// Fall back to redirection
respondStandby(core, w, r.URL)
return
}
if header != nil {
for k, v := range header {
w.Header()[k] = v
}
}
w.WriteHeader(statusCode)
w.Write(retBytes)
}
// request is a helper to perform a request and properly exit in the
// case of an error.
func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) {
resp, err := core.HandleRequest(rawReq.Context(), r)
if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) {
if resp == nil {
resp = &logical.Response{}
}
resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.")
}
if errwrap.Contains(err, consts.ErrStandby.Error()) {
respondStandby(core, w, rawReq.URL)
return resp, false, false
}
if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) {
return nil, false, true
}
if resp != nil && len(resp.Headers) > 0 {
// Set this here so it will take effect regardless of any other type of
// response processing
header := w.Header()
for k, v := range resp.Headers {
for _, h := range v {
header.Add(k, h)
}
}
switch {
case resp.Secret != nil,
resp.Auth != nil,
len(resp.Data) > 0,
resp.Redirect != "",
len(resp.Warnings) > 0,
resp.WrapInfo != nil:
// Nothing, resp has data
default:
// We have an otherwise totally empty response except for headers,
// so nil out the response now that the headers are written out
resp = nil
}
}
// If vault's core has already written to the response writer do not add any
// additional output. Headers have already been sent. If the response writer
// is set but has not been written to it likely means there was some kind of
// error
if r.ResponseWriter != nil && r.ResponseWriter.Written() {
return nil, true, false
}
if respondErrorCommon(w, r, resp, err) {
return resp, false, false
}
return resp, true, false
}
// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
// Request the leader address
_, redirectAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve 503
err = errors.New("node is not active")
respondError(w, http.StatusServiceUnavailable, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
// If there is no leader, generate a 503 error
if redirectAddr == "" {
err = errors.New("no active Vault instance found")
respondError(w, http.StatusServiceUnavailable, err)
return
}
// Parse the redirect location
redirectURL, err := url.Parse(redirectAddr)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
// Generate a redirect URL
finalURL := url.URL{
Scheme: redirectURL.Scheme,
Host: redirectURL.Host,
Path: reqURL.Path,
RawQuery: reqURL.RawQuery,
}
// Ensure there is a scheme, default to https
if finalURL.Scheme == "" {
finalURL.Scheme = "https"
}
// If we have an address, redirect! We use a 307 code
// because we don't actually know if its permanent and
// the request method should be preserved.
w.Header().Set("Location", finalURL.String())
w.WriteHeader(307)
}
// getTokenFromReq parse headers of the incoming request to extract token if
// present it accepts Authorization Bearer (RFC6750) and X-Vault-Token header.
// Returns true if the token was sourced from a Bearer header.
func getTokenFromReq(r *http.Request) (string, bool) {
if token := r.Header.Get(consts.AuthHeaderName); token != "" {
return token, false
}
if headers, ok := r.Header["Authorization"]; ok {
// Reference for Authorization header format: https://tools.ietf.org/html/rfc7236#section-3
// If string does not start by 'Bearer ', it is not one we would use,
// but might be used by plugins
for _, v := range headers {
if !strings.HasPrefix(v, "Bearer ") {
continue
}
return strings.TrimSpace(v[7:]), true
}
}
return "", false
}
// requestAuth adds the token to the logical.Request if it exists.
func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) (*logical.Request, error) {
// Attach the header value if we have it
token, fromAuthzHeader := getTokenFromReq(r)
if token != "" {
req.ClientToken = token
req.ClientTokenSource = logical.ClientTokenFromVaultHeader
if fromAuthzHeader {
req.ClientTokenSource = logical.ClientTokenFromAuthzHeader
}
// Also attach the accessor if we have it. This doesn't fail if it
// doesn't exist because the request may be to an unauthenticated
// endpoint/login endpoint where a bad current token doesn't matter, or
// a token from a Vault version pre-accessors. We ignore errors for
// JWTs.
te, err := core.LookupToken(r.Context(), token)
if err != nil {
dotCount := strings.Count(token, ".")
// If we have two dots but the second char is a dot it's a vault
// token of the form s.SOMETHING.nsid, not a JWT
if dotCount != 2 ||
dotCount == 2 && token[1] == '.' {
return req, err
}
}
if err == nil && te != nil {
req.ClientTokenAccessor = te.Accessor
req.ClientTokenRemainingUses = te.NumUses
req.SetTokenEntry(te)
}
}
return req, nil
}
func requestPolicyOverride(r *http.Request, req *logical.Request) error {
raw := r.Header.Get(PolicyOverrideHeaderName)
if raw == "" {
return nil
}
override, err := parseutil.ParseBool(raw)
if err != nil {
return err
}
req.PolicyOverride = override
return nil
}
// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
// First try for the header value
wrapTTL := r.Header.Get(WrapTTLHeaderName)
if wrapTTL == "" {
return req, nil
}
// If it has an allowed suffix parse as a duration string
dur, err := parseutil.ParseDurationSecond(wrapTTL)
if err != nil {
return req, err
}
if int64(dur) < 0 {
return req, fmt.Errorf("requested wrap ttl cannot be negative")
}
req.WrapInfo = &logical.RequestWrapInfo{
TTL: dur,
}
wrapFormat := r.Header.Get(WrapFormatHeaderName)
switch wrapFormat {
case "jwt":
req.WrapInfo.Format = "jwt"
}
return req, nil
}
// parseMFAHeader parses the MFAHeaderName in the request headers and organizes
// them with MFA method name as the index.
func parseMFAHeader(req *logical.Request) error {
if req == nil {
return fmt.Errorf("request is nil")
}
if req.Headers == nil {
return nil
}
// Reset and initialize the credentials in the request
req.MFACreds = make(map[string][]string)
for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] {
// Skip the header with no value in it
if mfaHeaderValue == "" {
continue
}
// Handle the case where only method name is mentioned and no value
// is supplied
if !strings.Contains(mfaHeaderValue, ":") {
// Mark the presence of method name, but set an empty set to it
// indicating that there were no values supplied for the method
if req.MFACreds[mfaHeaderValue] == nil {
req.MFACreds[mfaHeaderValue] = []string{}
}
continue
}
shardSplits := strings.SplitN(mfaHeaderValue, ":", 2)
if shardSplits[0] == "" {
return fmt.Errorf("invalid data in header %q; missing method name", MFAHeaderName)
}
if shardSplits[1] == "" {
return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName)
}
req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1])
}
return nil
}
// isForm tries to determine whether the request should be
// processed as a form or as JSON.
//
// Virtually all existing use cases have assumed processing as JSON,
// and there has not been a Content-Type requirement in the API. In order to
// maintain backwards compatibility, this will err on the side of JSON.
// The request will be considered a form only if:
//
// 1. The content type is "application/x-www-form-urlencoded"
// 2. The start of the request doesn't look like JSON. For this test we
// we expect the body to begin with { or [, ignoring leading whitespace.
func isForm(head []byte, contentType string) bool {
contentType, _, err := mime.ParseMediaType(contentType)
if err != nil || contentType != "application/x-www-form-urlencoded" {
return false
}
// Look for the start of JSON or not-JSON, skipping any insignificant
// whitespace (per https://tools.ietf.org/html/rfc7159#section-2).
for _, c := range head {
switch c {
case ' ', '\t', '\n', '\r':
continue
case '[', '{': // JSON
return false
default: // not JSON
return true
}
}
return true
}
func respondError(w http.ResponseWriter, status int, err error) {
logical.RespondError(w, status, err)
}
func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool {
statusCode, newErr := logical.RespondErrorCommon(req, resp, err)
if newErr == nil && statusCode == 0 {
return false
}
respondError(w, statusCode, newErr)
return true
}
func respondOk(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
if body == nil {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
enc.Encode(body)
}
}
| http/handler.go | 1 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.9990609288215637,
0.027422800660133362,
0.00016339067951776087,
0.00018384240684099495,
0.15265142917633057
] |
{
"id": 3,
"code_window": [
"\tc *Core\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditRequest(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 518
} | package api
import (
"encoding/json"
"fmt"
"io"
"net"
"strconv"
"sync"
"time"
)
const (
// OriginStart and OriginEnd are the available parameters for the origin
// argument when streaming a file. They respectively offset from the start
// and end of a file.
OriginStart = "start"
OriginEnd = "end"
)
// AllocFileInfo holds information about a file inside the AllocDir
type AllocFileInfo struct {
Name string
IsDir bool
Size int64
FileMode string
ModTime time.Time
ContentType string
}
// StreamFrame is used to frame data of a file when streaming
type StreamFrame struct {
Offset int64 `json:",omitempty"`
Data []byte `json:",omitempty"`
File string `json:",omitempty"`
FileEvent string `json:",omitempty"`
}
// IsHeartbeat returns if the frame is a heartbeat frame
func (s *StreamFrame) IsHeartbeat() bool {
return len(s.Data) == 0 && s.FileEvent == "" && s.File == "" && s.Offset == 0
}
// AllocFS is used to introspect an allocation directory on a Nomad client
type AllocFS struct {
client *Client
}
// AllocFS returns an handle to the AllocFS endpoints
func (c *Client) AllocFS() *AllocFS {
return &AllocFS{client: c}
}
// List is used to list the files at a given path of an allocation directory
func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*AllocFileInfo, *QueryMeta, error) {
if q == nil {
q = &QueryOptions{}
}
if q.Params == nil {
q.Params = make(map[string]string)
}
q.Params["path"] = path
var resp []*AllocFileInfo
qm, err := a.client.query(fmt.Sprintf("/v1/client/fs/ls/%s", alloc.ID), &resp, q)
if err != nil {
return nil, nil, err
}
return resp, qm, nil
}
// Stat is used to stat a file at a given path of an allocation directory
func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocFileInfo, *QueryMeta, error) {
if q == nil {
q = &QueryOptions{}
}
if q.Params == nil {
q.Params = make(map[string]string)
}
q.Params["path"] = path
var resp AllocFileInfo
qm, err := a.client.query(fmt.Sprintf("/v1/client/fs/stat/%s", alloc.ID), &resp, q)
if err != nil {
return nil, nil, err
}
return &resp, qm, nil
}
// ReadAt is used to read bytes at a given offset until limit at the given path
// in an allocation directory. If limit is <= 0, there is no limit.
func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.ReadCloser, error) {
reqPath := fmt.Sprintf("/v1/client/fs/readat/%s", alloc.ID)
return queryClientNode(a.client, alloc, reqPath, q,
func(q *QueryOptions) {
q.Params["path"] = path
q.Params["offset"] = strconv.FormatInt(offset, 10)
q.Params["limit"] = strconv.FormatInt(limit, 10)
})
}
// Cat is used to read contents of a file at the given path in an allocation
// directory
func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadCloser, error) {
reqPath := fmt.Sprintf("/v1/client/fs/cat/%s", alloc.ID)
return queryClientNode(a.client, alloc, reqPath, q,
func(q *QueryOptions) {
q.Params["path"] = path
})
}
// Stream streams the content of a file blocking on EOF.
// The parameters are:
// * path: path to file to stream.
// * offset: The offset to start streaming data at.
// * origin: Either "start" or "end" and defines from where the offset is applied.
// * cancel: A channel that when closed, streaming will end.
//
// The return value is a channel that will emit StreamFrames as they are read.
func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64,
cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
errCh := make(chan error, 1)
reqPath := fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID)
r, err := queryClientNode(a.client, alloc, reqPath, q,
func(q *QueryOptions) {
q.Params["path"] = path
q.Params["offset"] = strconv.FormatInt(offset, 10)
q.Params["origin"] = origin
})
if err != nil {
errCh <- err
return nil, errCh
}
// Create the output channel
frames := make(chan *StreamFrame, 10)
go func() {
// Close the body
defer r.Close()
// Create a decoder
dec := json.NewDecoder(r)
for {
// Check if we have been cancelled
select {
case <-cancel:
return
default:
}
// Decode the next frame
var frame StreamFrame
if err := dec.Decode(&frame); err != nil {
errCh <- err
close(frames)
return
}
// Discard heartbeat frames
if frame.IsHeartbeat() {
continue
}
frames <- &frame
}
}()
return frames, errCh
}
func queryClientNode(c *Client, alloc *Allocation, reqPath string, q *QueryOptions, customizeQ func(*QueryOptions)) (io.ReadCloser, error) {
nodeClient, _ := c.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q)
if q == nil {
q = &QueryOptions{}
}
if q.Params == nil {
q.Params = make(map[string]string)
}
if customizeQ != nil {
customizeQ(q)
}
var r io.ReadCloser
var err error
if nodeClient != nil {
r, err = nodeClient.rawQuery(reqPath, q)
if _, ok := err.(net.Error); err != nil && !ok {
// found a non networking error talking to client directly
return nil, err
}
}
// failed to query node, access through server directly
// or network error when talking to the client directly
if r == nil {
return c.rawQuery(reqPath, q)
}
return r, err
}
// Logs streams the content of a tasks logs blocking on EOF.
// The parameters are:
// * allocation: the allocation to stream from.
// * follow: Whether the logs should be followed.
// * task: the tasks name to stream logs for.
// * logType: Either "stdout" or "stderr"
// * origin: Either "start" or "end" and defines from where the offset is applied.
// * offset: The offset to start streaming data at.
// * cancel: A channel that when closed, streaming will end.
//
// The return value is a channel that will emit StreamFrames as they are read.
// The chan will be closed when follow=false and the end of the file is
// reached.
//
// Unexpected (non-EOF) errors will be sent on the error chan.
func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin string,
offset int64, cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) {
errCh := make(chan error, 1)
reqPath := fmt.Sprintf("/v1/client/fs/logs/%s", alloc.ID)
r, err := queryClientNode(a.client, alloc, reqPath, q,
func(q *QueryOptions) {
q.Params["follow"] = strconv.FormatBool(follow)
q.Params["task"] = task
q.Params["type"] = logType
q.Params["origin"] = origin
q.Params["offset"] = strconv.FormatInt(offset, 10)
})
if err != nil {
errCh <- err
return nil, errCh
}
// Create the output channel
frames := make(chan *StreamFrame, 10)
go func() {
// Close the body
defer r.Close()
// Create a decoder
dec := json.NewDecoder(r)
for {
// Check if we have been cancelled
select {
case <-cancel:
return
default:
}
// Decode the next frame
var frame StreamFrame
if err := dec.Decode(&frame); err != nil {
if err == io.EOF || err == io.ErrClosedPipe {
close(frames)
} else {
errCh <- err
}
return
}
// Discard heartbeat frames
if frame.IsHeartbeat() {
continue
}
frames <- &frame
}
}()
return frames, errCh
}
// FrameReader is used to convert a stream of frames into a read closer.
type FrameReader struct {
frames <-chan *StreamFrame
errCh <-chan error
cancelCh chan struct{}
closedLock sync.Mutex
closed bool
unblockTime time.Duration
frame *StreamFrame
frameOffset int
byteOffset int
}
// NewFrameReader takes a channel of frames and returns a FrameReader which
// implements io.ReadCloser
func NewFrameReader(frames <-chan *StreamFrame, errCh <-chan error, cancelCh chan struct{}) *FrameReader {
return &FrameReader{
frames: frames,
errCh: errCh,
cancelCh: cancelCh,
}
}
// SetUnblockTime sets the time to unblock and return zero bytes read. If the
// duration is unset or is zero or less, the read will block until data is read.
func (f *FrameReader) SetUnblockTime(d time.Duration) {
f.unblockTime = d
}
// Offset returns the offset into the stream.
func (f *FrameReader) Offset() int {
return f.byteOffset
}
// Read reads the data of the incoming frames into the bytes buffer. Returns EOF
// when there are no more frames.
func (f *FrameReader) Read(p []byte) (n int, err error) {
f.closedLock.Lock()
closed := f.closed
f.closedLock.Unlock()
if closed {
return 0, io.EOF
}
if f.frame == nil {
var unblock <-chan time.Time
if f.unblockTime.Nanoseconds() > 0 {
unblock = time.After(f.unblockTime)
}
select {
case frame, ok := <-f.frames:
if !ok {
return 0, io.EOF
}
f.frame = frame
// Store the total offset into the file
f.byteOffset = int(f.frame.Offset)
case <-unblock:
return 0, nil
case err := <-f.errCh:
return 0, err
case <-f.cancelCh:
return 0, io.EOF
}
}
// Copy the data out of the frame and update our offset
n = copy(p, f.frame.Data[f.frameOffset:])
f.frameOffset += n
// Clear the frame and its offset once we have read everything
if len(f.frame.Data) == f.frameOffset {
f.frame = nil
f.frameOffset = 0
}
return n, nil
}
// Close cancels the stream of frames
func (f *FrameReader) Close() error {
f.closedLock.Lock()
defer f.closedLock.Unlock()
if f.closed {
return nil
}
close(f.cancelCh)
f.closed = true
return nil
}
| vendor/github.com/hashicorp/nomad/api/fs.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0015910358633846045,
0.00022434123093262315,
0.0001662620052229613,
0.000170828789123334,
0.00022707972675561905
] |
{
"id": 3,
"code_window": [
"\tc *Core\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditRequest(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 518
} | package ioutils // import "github.com/ory/dockertest/docker/pkg/ioutils"
import "io"
// NopWriter represents a type which write operation is nop.
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
// NopFlusher represents a type which flush operation is nop.
type NopFlusher struct{}
// Flush is a nop operation.
func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
io.Writer
closer func() error
}
func (r *writeCloserWrapper) Close() error {
return r.closer()
}
// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
return &writeCloserWrapper{
Writer: r,
closer: closer,
}
}
// WriteCounter wraps a concrete io.Writer and hold a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when write return is masked
// (e.g., json.Encoder.Encode())
type WriteCounter struct {
Count int64
Writer io.Writer
}
// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,
}
}
func (wc *WriteCounter) Write(p []byte) (count int, err error) {
count, err = wc.Writer.Write(p)
wc.Count += int64(count)
return
}
| vendor/github.com/ory/dockertest/docker/pkg/ioutils/writers.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0002110447530867532,
0.0001751812087604776,
0.00016560175572521985,
0.00016959775530267507,
0.000014800795725022908
] |
{
"id": 3,
"code_window": [
"\tc *Core\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditRequest(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 518
} | package cf
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
"github.com/hashicorp/vault-plugin-auth-cf/signatures"
"github.com/hashicorp/vault/api"
)
type CLIHandler struct{}
func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
mount, ok := m["mount"]
if !ok {
mount = "cf"
}
role := m["role"]
if role == "" {
return nil, errors.New(`"role" is required`)
}
pathToInstanceCert := m["cf_instance_cert"]
if pathToInstanceCert == "" {
pathToInstanceCert = os.Getenv(EnvVarInstanceCertificate)
}
if pathToInstanceCert == "" {
return nil, errors.New(`"cf_instance_cert" is required`)
}
pathToInstanceKey := m["cf_instance_key"]
if pathToInstanceKey == "" {
pathToInstanceKey = os.Getenv(EnvVarInstanceKey)
}
if pathToInstanceKey == "" {
return nil, errors.New(`"cf_instance_key" is required`)
}
certBytes, err := ioutil.ReadFile(pathToInstanceCert)
if err != nil {
return nil, err
}
cfInstanceCertContents := string(certBytes)
signingTime := time.Now().UTC()
signatureData := &signatures.SignatureData{
SigningTime: signingTime,
Role: role,
CFInstanceCertContents: cfInstanceCertContents,
}
signature, err := signatures.Sign(pathToInstanceKey, signatureData)
if err != nil {
return nil, err
}
loginData := map[string]interface{}{
"role": role,
"cf_instance_cert": cfInstanceCertContents,
"signing_time": signingTime.Format(signatures.TimeFormat),
"signature": signature,
}
path := fmt.Sprintf("auth/%s/login", mount)
secret, err := c.Logical().Write(path, loginData)
if err != nil {
return nil, err
}
if secret == nil {
return nil, errors.New("empty response from credential provider")
}
return secret, nil
}
func (h *CLIHandler) Help() string {
help := `
Usage: vault login -method=cf [CONFIG K=V...]
The CF auth method allows users to authenticate using CF's instance identity service.
The CF credentials may be specified explicitly via the command line:
$ vault login -method=cf role=...
This will automatically pull from the CF_INSTANCE_CERT and CF_INSTANCE_KEY values
in your local environment. If they're not available or you wish to override them,
they may also be supplied explicitly:
$ vault login -method=cf role=... cf_instance_cert=... cf_instance_key=...
Configuration:
cf_instance_cert=<string>
Explicit value to use for the path to the CF instance certificate.
cf_instance_key=<string>
Explicit value to use for the path to the CF instance key.
mount=<string>
Path where the CF credential method is mounted. This is usually provided
via the -path flag in the "vault login" command, but it can be specified
here as well. If specified here, it takes precedence over the value for
-path. The default value is "cf".
role=<string>
Name of the role to request a token against
`
return strings.TrimSpace(help)
}
| vendor/github.com/hashicorp/vault-plugin-auth-cf/cli.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.00034140943898819387,
0.00018863759760279208,
0.0001667281030677259,
0.00017076623043976724,
0.000047375247959280387
] |
{
"id": 4,
"code_window": [
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogResponse(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 522
} | package http
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net"
"net/http"
"net/textproto"
"net/url"
"os"
"strings"
"time"
"github.com/NYTimes/gziphandler"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/pathmanager"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
const (
// WrapTTLHeaderName is the name of the header containing a directive to
// wrap the response
WrapTTLHeaderName = "X-Vault-Wrap-TTL"
// WrapFormatHeaderName is the name of the header containing the format to
// wrap in; has no effect if the wrap TTL is not set
WrapFormatHeaderName = "X-Vault-Wrap-Format"
// NoRequestForwardingHeaderName is the name of the header telling Vault
// not to use request forwarding
NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
// MFAHeaderName represents the HTTP header which carries the credentials
// required to perform MFA on any path.
MFAHeaderName = "X-Vault-MFA"
// canonicalMFAHeaderName is the MFA header value's format in the request
// headers. Do not alter the casing of this string.
canonicalMFAHeaderName = "X-Vault-Mfa"
// PolicyOverrideHeaderName is the header set to request overriding
// soft-mandatory Sentinel policies.
PolicyOverrideHeaderName = "X-Vault-Policy-Override"
// DefaultMaxRequestSize is the default maximum accepted request size. This
// is to prevent a denial of service attack where no Content-Length is
// provided and the server is fed ever more data until it exhausts memory.
// Can be overridden per listener.
DefaultMaxRequestSize = 32 * 1024 * 1024
)
var (
// Set to false by stub_asset if the ui build tag isn't enabled
uiBuiltIn = true
// perfStandbyAlwaysForwardPaths is used to check a requested path against
// the always forward list
perfStandbyAlwaysForwardPaths = pathmanager.New()
alwaysRedirectPaths = pathmanager.New()
injectDataIntoTopRoutes = []string{
"/v1/sys/audit",
"/v1/sys/audit/",
"/v1/sys/audit-hash/",
"/v1/sys/auth",
"/v1/sys/auth/",
"/v1/sys/config/cors",
"/v1/sys/config/auditing/request-headers/",
"/v1/sys/config/auditing/request-headers",
"/v1/sys/capabilities",
"/v1/sys/capabilities-accessor",
"/v1/sys/capabilities-self",
"/v1/sys/key-status",
"/v1/sys/mounts",
"/v1/sys/mounts/",
"/v1/sys/policy",
"/v1/sys/policy/",
"/v1/sys/rekey/backup",
"/v1/sys/rekey/recovery-key-backup",
"/v1/sys/remount",
"/v1/sys/rotate",
"/v1/sys/wrapping/wrap",
}
)
func init() {
alwaysRedirectPaths.AddPaths([]string{
"sys/storage/raft/snapshot",
"sys/storage/raft/snapshot-force",
})
}
// Handler returns an http.Handler for the API. This can be used on
// its own to mount the Vault API within another web server.
func Handler(props *vault.HandlerProperties) http.Handler {
core := props.Core
// Create the muxer to handle the actual endpoints
mux := http.NewServeMux()
switch {
case props.RecoveryMode:
raw := vault.NewRawBackend(core)
strategy := vault.GenerateRecoveryTokenStrategy(props.RecoveryToken)
mux.Handle("/v1/sys/raw/", handleLogicalRecovery(raw, props.RecoveryToken))
mux.Handle("/v1/sys/generate-recovery-token/attempt", handleSysGenerateRootAttempt(core, strategy))
mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy))
default:
// Handle non-forwarded paths
mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core))
mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/init", handleSysInit(core))
mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
mux.Handle("/v1/sys/seal", handleSysSeal(core))
mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core)))
mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
mux.Handle("/v1/sys/leader", handleSysLeader(core))
mux.Handle("/v1/sys/health", handleSysHealth(core))
mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core))
mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootUpdate(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false)))
mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false)))
mux.Handle("/v1/sys/rekey/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, false)))
mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true)))
mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core))
mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core))
for _, path := range injectDataIntoTopRoutes {
mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core)))
}
mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core)))
mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core)))
if core.UIEnabled() == true {
if uiBuiltIn {
mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))))
mux.Handle("/robots.txt", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))
} else {
mux.Handle("/ui/", handleUIHeaders(core, handleUIStub()))
}
mux.Handle("/ui", handleUIRedirect())
mux.Handle("/", handleUIRedirect())
}
// Register metrics path without authentication if enabled
if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess {
mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core))
} else {
mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core))
}
additionalRoutes(mux, core)
}
// Wrap the handler in another handler to trigger all help paths.
helpWrappedHandler := wrapHelpHandler(mux, core)
corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core)
genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props)
// Wrap the handler with PrintablePathCheckHandler to check for non-printable
// characters in the request path.
printablePathCheckHandler := genericWrappedHandler
if !props.DisablePrintableCheck {
printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil)
}
return printablePathCheckHandler
}
type copyResponseWriter struct {
wrapped http.ResponseWriter
statusCode int
body *bytes.Buffer
}
// newCopyResponseWriter returns an initialized newCopyResponseWriter
func newCopyResponseWriter(wrapped http.ResponseWriter) *copyResponseWriter {
w := ©ResponseWriter{
wrapped: wrapped,
body: new(bytes.Buffer),
statusCode: 200,
}
return w
}
func (w *copyResponseWriter) Header() http.Header {
return w.wrapped.Header()
}
func (w *copyResponseWriter) Write(buf []byte) (int, error) {
w.body.Write(buf)
return w.wrapped.Write(buf)
}
func (w *copyResponseWriter) WriteHeader(code int) {
w.statusCode = code
w.wrapped.WriteHeader(code)
}
func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
origBody := new(bytes.Buffer)
reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody))
r.Body = reader
req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
if err != nil || status != 0 {
respondError(w, status, err)
return
}
if origBody != nil {
r.Body = ioutil.NopCloser(origBody)
}
input := &logical.LogInput{
Request: req,
}
core.AuditLogger().AuditRequest(r.Context(), input)
cw := newCopyResponseWriter(w)
h.ServeHTTP(cw, r)
data := make(map[string]interface{})
err = jsonutil.DecodeJSON(cw.body.Bytes(), &data)
if err != nil {
// best effort, ignore
}
httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}
input.Response = logical.HTTPResponseToLogicalResponse(httpResp)
core.AuditLogger().AuditResponse(r.Context(), input)
return
})
}
// wrapGenericHandler wraps the handler with an extra layer of handler where
// tasks that should be commonly handled for all the requests and/or responses
// are performed.
func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler {
var maxRequestDuration time.Duration
var maxRequestSize int64
if props.ListenerConfig != nil {
maxRequestDuration = props.ListenerConfig.MaxRequestDuration
maxRequestSize = props.ListenerConfig.MaxRequestSize
}
if maxRequestDuration == 0 {
maxRequestDuration = vault.DefaultMaxRequestDuration
}
if maxRequestSize == 0 {
maxRequestSize = DefaultMaxRequestSize
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Set the Cache-Control header for all the responses returned
// by Vault
w.Header().Set("Cache-Control", "no-store")
// Start with the request context
ctx := r.Context()
var cancelFunc context.CancelFunc
// Add our timeout, but not for the monitor endpoint, as it's streaming
if strings.HasSuffix(r.URL.Path, "sys/monitor") {
ctx, cancelFunc = context.WithCancel(ctx)
} else {
ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration)
}
// Add a size limiter if desired
if maxRequestSize > 0 {
ctx = context.WithValue(ctx, "max_request_size", maxRequestSize)
}
ctx = context.WithValue(ctx, "original_request_path", r.URL.Path)
r = r.WithContext(ctx)
r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace))
switch {
case strings.HasPrefix(r.URL.Path, "/v1/"):
newR, status := adjustRequest(core, r)
if status != 0 {
respondError(w, status, nil)
cancelFunc()
return
}
r = newR
case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/":
default:
respondError(w, http.StatusNotFound, nil)
cancelFunc()
return
}
h.ServeHTTP(w, r)
cancelFunc()
return
})
}
func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler {
rejectNotPresent := l.XForwardedForRejectNotPresent
hopSkips := l.XForwardedForHopSkips
authorizedAddrs := l.XForwardedForAuthorizedAddrs
rejectNotAuthz := l.XForwardedForRejectNotAuthorized
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
if !headersOK || len(headers) == 0 {
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("missing x-forwarded-for header and configured to reject when not present"))
return
}
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
// If not rejecting treat it like we just don't have a valid
// header because we can't do a comparison against an address we
// can't understand
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client hostport: {{err}}", err))
return
}
addr, err := sockaddr.NewIPAddr(host)
if err != nil {
// We treat this the same as the case above
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error parsing client address: {{err}}", err))
return
}
var found bool
for _, authz := range authorizedAddrs {
if authz.Contains(addr) {
found = true
break
}
}
if !found {
// If we didn't find it and aren't configured to reject, simply
// don't trust it
if !rejectNotAuthz {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("client address not authorized for x-forwarded-for and configured to reject connection"))
return
}
// At this point we have at least one value and it's authorized
// Split comma separated ones, which are common. This brings it in line
// to the multiple-header case.
var acc []string
for _, header := range headers {
vals := strings.Split(header, ",")
for _, v := range vals {
acc = append(acc, strings.TrimSpace(v))
}
}
indexToUse := int64(len(acc)) - 1 - hopSkips
if indexToUse < 0 {
// This is likely an error in either configuration or other
// infrastructure. We could either deny the request, or we
// could simply not trust the value. Denying the request is
// "safer" since if this logic is configured at all there may
// be an assumption it can always be trusted. Given that we can
// deny accepting the request at all if it's not from an
// authorized address, if we're at this point the address is
// authorized (or we've turned off explicit rejection) and we
// should assume that what comes in should be properly
// formatted.
respondError(w, http.StatusBadRequest, fmt.Errorf("malformed x-forwarded-for configuration or request, hops to skip (%d) would skip before earliest chain link (chain length %d)", hopSkips, len(headers)))
return
}
r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port)
h.ServeHTTP(w, r)
return
})
}
// A lookup on a token that is about to expire returns nil, which means by the
// time we can validate a wrapping token lookup will return nil since it will
// be revoked after the call. So we have to do the validation here.
func wrappingVerificationFunc(ctx context.Context, core *vault.Core, req *logical.Request) error {
if req == nil {
return fmt.Errorf("invalid request")
}
valid, err := core.ValidateWrappingToken(ctx, req)
if err != nil {
return errwrap.Wrapf("error validating wrapping token: {{err}}", err)
}
if !valid {
return consts.ErrInvalidWrappingToken
}
return nil
}
// stripPrefix is a helper to strip a prefix from the path. It will
// return false from the second return value if it the prefix doesn't exist.
func stripPrefix(prefix, path string) (string, bool) {
if !strings.HasPrefix(path, prefix) {
return "", false
}
path = path[len(prefix):]
if path == "" {
return "", false
}
return path, true
}
func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
header := w.Header()
userHeaders, err := core.UIHeaders()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
if userHeaders != nil {
for k := range userHeaders {
v := userHeaders.Get(k)
header.Set(k, v)
}
}
h.ServeHTTP(w, req)
})
}
func handleUI(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
// The fileserver handler strips trailing slashes and does a redirect.
// We don't want the redirect to happen so we preemptively trim the slash
// here.
req.URL.Path = strings.TrimSuffix(req.URL.Path, "/")
h.ServeHTTP(w, req)
return
})
}
func handleUIStub() http.Handler {
stubHTML := `
<!DOCTYPE html>
<html>
<style>
body {
color: #1F2124;
font-family: system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
.wrapper {
display: flex;
justify-content: center;
align-items: center;
height: 500px;
}
.content ul {
line-height: 1.5;
}
a {
color: #1563ff;
text-decoration: none;
}
.header {
display: flex;
color: #6a7786;
align-items: center;
}
.header svg {
padding-right: 12px;
}
.alert {
transform: scale(0.07);
fill: #6a7786;
}
h1 {
font-weight: 500;
}
p {
margin-top: 0px;
}
</style>
<div class="wrapper">
<div class="content">
<div class="header">
<svg width="36px" height="36px" viewBox="0 0 36 36" xmlns="http://www.w3.org/2000/svg">
<path class="alert" d="M476.7 422.2L270.1 72.7c-2.9-5-8.3-8.7-14.1-8.7-5.9 0-11.3 3.7-14.1 8.7L35.3 422.2c-2.8 5-4.8 13-1.9 17.9 2.9 4.9 8.2 7.9 14 7.9h417.1c5.8 0 11.1-3 14-7.9 3-4.9 1-13-1.8-17.9zM288 400h-64v-48h64v48zm0-80h-64V176h64v144z"/>
</svg>
<h1>Vault UI is not available in this binary.</h1>
</div>
<p>To get Vault UI do one of the following:</p>
<ul>
<li><a href="https://www.vaultproject.io/downloads.html">Download an official release</a></li>
<li>Run <code>make bin</code> to create your own release binaries.
<li>Run <code>make dev-ui</code> to create a development binary with the UI.
</ul>
</div>
</div>
</html>
`
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte(stubHTML))
})
}
func handleUIRedirect() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, "/ui/", 307)
return
})
}
type UIAssetWrapper struct {
FileSystem *assetfs.AssetFS
}
func (fs *UIAssetWrapper) Open(name string) (http.File, error) {
file, err := fs.FileSystem.Open(name)
if err == nil {
return file, nil
}
// serve index.html instead of 404ing
if err == os.ErrNotExist {
return fs.FileSystem.Open("index.html")
}
return nil, err
}
func parseQuery(values url.Values) map[string]interface{} {
data := map[string]interface{}{}
for k, v := range values {
// Skip the help key as this is a reserved parameter
if k == "help" {
continue
}
switch {
case len(v) == 0:
case len(v) == 1:
data[k] = v[0]
default:
data[k] = v
}
}
if len(data) > 0 {
return data
}
return nil
}
func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, out interface{}) (io.ReadCloser, error) {
// Limit the maximum number of bytes to MaxRequestSize to protect
// against an indefinite amount of data being read.
reader := r.Body
ctx := r.Context()
maxRequestSize := ctx.Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
reader = http.MaxBytesReader(w, r.Body, max)
}
}
var origBody io.ReadWriter
if perfStandby {
// Since we're checking PerfStandby here we key on origBody being nil
// or not later, so we need to always allocate so it's non-nil
origBody = new(bytes.Buffer)
reader = ioutil.NopCloser(io.TeeReader(reader, origBody))
}
err := jsonutil.DecodeJSONFromReader(reader, out)
if err != nil && err != io.EOF {
return nil, errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
}
if origBody != nil {
return ioutil.NopCloser(origBody), err
}
return nil, err
}
// parseFormRequest parses values from a form POST.
//
// A nil map will be returned if the format is empty or invalid.
func parseFormRequest(r *http.Request) (map[string]interface{}, error) {
maxRequestSize := r.Context().Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
r.Body = ioutil.NopCloser(io.LimitReader(r.Body, max))
}
}
if err := r.ParseForm(); err != nil {
return nil, err
}
var data map[string]interface{}
if len(r.PostForm) != 0 {
data = make(map[string]interface{}, len(r.PostForm))
for k, v := range r.PostForm {
switch len(v) {
case 0:
case 1:
data[k] = v[0]
default:
// Almost anywhere taking in a string list can take in comma
// separated values, and really this is super niche anyways
data[k] = strings.Join(v, ",")
}
}
}
return data, nil
}
// handleRequestForwarding determines whether to forward a request or not,
// falling back on the older behavior of redirecting the client
func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// If we are a performance standby we can handle the request.
if core.PerfStandby() {
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
switch {
case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path):
handler.ServeHTTP(w, r)
return
case strings.HasPrefix(path, "auth/token/create/"):
isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path)
if err == nil && isBatch {
handler.ServeHTTP(w, r)
return
}
}
}
// Note: in an HA setup, this call will also ensure that connections to
// the leader are set up, as that happens once the advertised cluster
// values are read during this function
isLeader, leaderAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve request normally
handler.ServeHTTP(w, r)
return
}
// Some internal error occurred
respondError(w, http.StatusInternalServerError, err)
return
}
if isLeader {
// No forwarding needed, we're leader
handler.ServeHTTP(w, r)
return
}
if leaderAddr == "" {
respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found"))
return
}
forwardRequest(core, w, r)
return
})
}
func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) {
if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
respondStandby(core, w, r.URL)
return
}
if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request")
respondStandby(core, w, r.URL)
return
}
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
if alwaysRedirectPaths.HasPath(path) {
respondStandby(core, w, r.URL)
return
}
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
statusCode, header, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back")
} else {
core.Logger().Error("forward request error", "error", err)
}
// Fall back to redirection
respondStandby(core, w, r.URL)
return
}
if header != nil {
for k, v := range header {
w.Header()[k] = v
}
}
w.WriteHeader(statusCode)
w.Write(retBytes)
}
// request is a helper to perform a request and properly exit in the
// case of an error.
func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) {
resp, err := core.HandleRequest(rawReq.Context(), r)
if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) {
if resp == nil {
resp = &logical.Response{}
}
resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.")
}
if errwrap.Contains(err, consts.ErrStandby.Error()) {
respondStandby(core, w, rawReq.URL)
return resp, false, false
}
if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) {
return nil, false, true
}
if resp != nil && len(resp.Headers) > 0 {
// Set this here so it will take effect regardless of any other type of
// response processing
header := w.Header()
for k, v := range resp.Headers {
for _, h := range v {
header.Add(k, h)
}
}
switch {
case resp.Secret != nil,
resp.Auth != nil,
len(resp.Data) > 0,
resp.Redirect != "",
len(resp.Warnings) > 0,
resp.WrapInfo != nil:
// Nothing, resp has data
default:
// We have an otherwise totally empty response except for headers,
// so nil out the response now that the headers are written out
resp = nil
}
}
// If vault's core has already written to the response writer do not add any
// additional output. Headers have already been sent. If the response writer
// is set but has not been written to it likely means there was some kind of
// error
if r.ResponseWriter != nil && r.ResponseWriter.Written() {
return nil, true, false
}
if respondErrorCommon(w, r, resp, err) {
return resp, false, false
}
return resp, true, false
}
// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
// Request the leader address
_, redirectAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve 503
err = errors.New("node is not active")
respondError(w, http.StatusServiceUnavailable, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
// If there is no leader, generate a 503 error
if redirectAddr == "" {
err = errors.New("no active Vault instance found")
respondError(w, http.StatusServiceUnavailable, err)
return
}
// Parse the redirect location
redirectURL, err := url.Parse(redirectAddr)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
// Generate a redirect URL
finalURL := url.URL{
Scheme: redirectURL.Scheme,
Host: redirectURL.Host,
Path: reqURL.Path,
RawQuery: reqURL.RawQuery,
}
// Ensure there is a scheme, default to https
if finalURL.Scheme == "" {
finalURL.Scheme = "https"
}
// If we have an address, redirect! We use a 307 code
// because we don't actually know if its permanent and
// the request method should be preserved.
w.Header().Set("Location", finalURL.String())
w.WriteHeader(307)
}
// getTokenFromReq parse headers of the incoming request to extract token if
// present it accepts Authorization Bearer (RFC6750) and X-Vault-Token header.
// Returns true if the token was sourced from a Bearer header.
func getTokenFromReq(r *http.Request) (string, bool) {
if token := r.Header.Get(consts.AuthHeaderName); token != "" {
return token, false
}
if headers, ok := r.Header["Authorization"]; ok {
// Reference for Authorization header format: https://tools.ietf.org/html/rfc7236#section-3
// If string does not start by 'Bearer ', it is not one we would use,
// but might be used by plugins
for _, v := range headers {
if !strings.HasPrefix(v, "Bearer ") {
continue
}
return strings.TrimSpace(v[7:]), true
}
}
return "", false
}
// requestAuth adds the token to the logical.Request if it exists.
func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) (*logical.Request, error) {
// Attach the header value if we have it
token, fromAuthzHeader := getTokenFromReq(r)
if token != "" {
req.ClientToken = token
req.ClientTokenSource = logical.ClientTokenFromVaultHeader
if fromAuthzHeader {
req.ClientTokenSource = logical.ClientTokenFromAuthzHeader
}
// Also attach the accessor if we have it. This doesn't fail if it
// doesn't exist because the request may be to an unauthenticated
// endpoint/login endpoint where a bad current token doesn't matter, or
// a token from a Vault version pre-accessors. We ignore errors for
// JWTs.
te, err := core.LookupToken(r.Context(), token)
if err != nil {
dotCount := strings.Count(token, ".")
// If we have two dots but the second char is a dot it's a vault
// token of the form s.SOMETHING.nsid, not a JWT
if dotCount != 2 ||
dotCount == 2 && token[1] == '.' {
return req, err
}
}
if err == nil && te != nil {
req.ClientTokenAccessor = te.Accessor
req.ClientTokenRemainingUses = te.NumUses
req.SetTokenEntry(te)
}
}
return req, nil
}
func requestPolicyOverride(r *http.Request, req *logical.Request) error {
raw := r.Header.Get(PolicyOverrideHeaderName)
if raw == "" {
return nil
}
override, err := parseutil.ParseBool(raw)
if err != nil {
return err
}
req.PolicyOverride = override
return nil
}
// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
// First try for the header value
wrapTTL := r.Header.Get(WrapTTLHeaderName)
if wrapTTL == "" {
return req, nil
}
// If it has an allowed suffix parse as a duration string
dur, err := parseutil.ParseDurationSecond(wrapTTL)
if err != nil {
return req, err
}
if int64(dur) < 0 {
return req, fmt.Errorf("requested wrap ttl cannot be negative")
}
req.WrapInfo = &logical.RequestWrapInfo{
TTL: dur,
}
wrapFormat := r.Header.Get(WrapFormatHeaderName)
switch wrapFormat {
case "jwt":
req.WrapInfo.Format = "jwt"
}
return req, nil
}
// parseMFAHeader parses the MFAHeaderName in the request headers and organizes
// them with MFA method name as the index.
func parseMFAHeader(req *logical.Request) error {
if req == nil {
return fmt.Errorf("request is nil")
}
if req.Headers == nil {
return nil
}
// Reset and initialize the credentials in the request
req.MFACreds = make(map[string][]string)
for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] {
// Skip the header with no value in it
if mfaHeaderValue == "" {
continue
}
// Handle the case where only method name is mentioned and no value
// is supplied
if !strings.Contains(mfaHeaderValue, ":") {
// Mark the presence of method name, but set an empty set to it
// indicating that there were no values supplied for the method
if req.MFACreds[mfaHeaderValue] == nil {
req.MFACreds[mfaHeaderValue] = []string{}
}
continue
}
shardSplits := strings.SplitN(mfaHeaderValue, ":", 2)
if shardSplits[0] == "" {
return fmt.Errorf("invalid data in header %q; missing method name", MFAHeaderName)
}
if shardSplits[1] == "" {
return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName)
}
req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1])
}
return nil
}
// isForm tries to determine whether the request should be
// processed as a form or as JSON.
//
// Virtually all existing use cases have assumed processing as JSON,
// and there has not been a Content-Type requirement in the API. In order to
// maintain backwards compatibility, this will err on the side of JSON.
// The request will be considered a form only if:
//
// 1. The content type is "application/x-www-form-urlencoded"
// 2. The start of the request doesn't look like JSON. For this test we
// we expect the body to begin with { or [, ignoring leading whitespace.
func isForm(head []byte, contentType string) bool {
contentType, _, err := mime.ParseMediaType(contentType)
if err != nil || contentType != "application/x-www-form-urlencoded" {
return false
}
// Look for the start of JSON or not-JSON, skipping any insignificant
// whitespace (per https://tools.ietf.org/html/rfc7159#section-2).
for _, c := range head {
switch c {
case ' ', '\t', '\n', '\r':
continue
case '[', '{': // JSON
return false
default: // not JSON
return true
}
}
return true
}
func respondError(w http.ResponseWriter, status int, err error) {
logical.RespondError(w, status, err)
}
func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool {
statusCode, newErr := logical.RespondErrorCommon(req, resp, err)
if newErr == nil && statusCode == 0 {
return false
}
respondError(w, statusCode, newErr)
return true
}
func respondOk(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
if body == nil {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
enc.Encode(body)
}
}
| http/handler.go | 1 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.9991000890731812,
0.018987329676747322,
0.00016358913853764534,
0.00017218546418007463,
0.13371026515960693
] |
{
"id": 4,
"code_window": [
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogResponse(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 522
} | // Copyright 2013 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"fmt"
"time"
)
// IssueComment represents a comment left on an issue.
type IssueComment struct {
ID *int64 `json:"id,omitempty"`
Body *string `json:"body,omitempty"`
User *User `json:"user,omitempty"`
Reactions *Reactions `json:"reactions,omitempty"`
CreatedAt *time.Time `json:"created_at,omitempty"`
UpdatedAt *time.Time `json:"updated_at,omitempty"`
// AuthorAssociation is the comment author's relationship to the issue's repository.
// Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE".
AuthorAssociation *string `json:"author_association,omitempty"`
URL *string `json:"url,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
IssueURL *string `json:"issue_url,omitempty"`
}
func (i IssueComment) String() string {
return Stringify(i)
}
// IssueListCommentsOptions specifies the optional parameters to the
// IssuesService.ListComments method.
type IssueListCommentsOptions struct {
// Sort specifies how to sort comments. Possible values are: created, updated.
Sort string `url:"sort,omitempty"`
// Direction in which to sort comments. Possible values are: asc, desc.
Direction string `url:"direction,omitempty"`
// Since filters comments by time.
Since time.Time `url:"since,omitempty"`
ListOptions
}
// ListComments lists all comments on the specified issue. Specifying an issue
// number of 0 will return all comments on all issues for the repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
func (s *IssuesService) ListComments(ctx context.Context, owner string, repo string, number int, opt *IssueListCommentsOptions) ([]*IssueComment, *Response, error) {
var u string
if number == 0 {
u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo)
} else {
u = fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
}
u, err := addOptions(u, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeReactionsPreview)
var comments []*IssueComment
resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
return nil, resp, err
}
return comments, resp, nil
}
// GetComment fetches the specified issue comment.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#get-a-single-comment
func (s *IssuesService) GetComment(ctx context.Context, owner string, repo string, commentID int64) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeReactionsPreview)
comment := new(IssueComment)
resp, err := s.client.Do(ctx, req, comment)
if err != nil {
return nil, resp, err
}
return comment, resp, nil
}
// CreateComment creates a new comment on the specified issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#create-a-comment
func (s *IssuesService) CreateComment(ctx context.Context, owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
req, err := s.client.NewRequest("POST", u, comment)
if err != nil {
return nil, nil, err
}
c := new(IssueComment)
resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
return c, resp, nil
}
// EditComment updates an issue comment.
// A non-nil comment.Body must be provided. Other comment fields should be left nil.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#edit-a-comment
func (s *IssuesService) EditComment(ctx context.Context, owner string, repo string, commentID int64, comment *IssueComment) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID)
req, err := s.client.NewRequest("PATCH", u, comment)
if err != nil {
return nil, nil, err
}
c := new(IssueComment)
resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
return c, resp, nil
}
// DeleteComment deletes an issue comment.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#delete-a-comment
func (s *IssuesService) DeleteComment(ctx context.Context, owner string, repo string, commentID int64) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}
| vendor/github.com/google/go-github/github/issues_comments.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0006932620890438557,
0.00024794272030703723,
0.00016645551659166813,
0.00017532444326207042,
0.00015363821876235306
] |
{
"id": 4,
"code_window": [
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogResponse(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 522
} | /*
* Copyright 2018 - Present Okta, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
package okta
import (
"context"
"fmt"
"time"
)
type PolicyRuleResource resource
type PolicyRule struct {
Created *time.Time `json:"created,omitempty"`
Id string `json:"id,omitempty"`
LastUpdated *time.Time `json:"lastUpdated,omitempty"`
Priority int64 `json:"priority,omitempty"`
Status string `json:"status,omitempty"`
System *bool `json:"system,omitempty"`
Type string `json:"type,omitempty"`
}
// Updates a policy rule.
func (m *PolicyRuleResource) UpdatePolicyRule(ctx context.Context, policyId string, ruleId string, body PolicyRule) (*PolicyRule, *Response, error) {
url := fmt.Sprintf("/api/v1/policies/%v/rules/%v", policyId, ruleId)
req, err := m.client.requestExecutor.WithAccept("application/json").WithContentType("application/json").NewRequest("PUT", url, body)
if err != nil {
return nil, nil, err
}
var policyRule *PolicyRule
resp, err := m.client.requestExecutor.Do(ctx, req, &policyRule)
if err != nil {
return nil, resp, err
}
return policyRule, resp, nil
}
// Removes a policy rule.
func (m *PolicyRuleResource) DeletePolicyRule(ctx context.Context, policyId string, ruleId string) (*Response, error) {
url := fmt.Sprintf("/api/v1/policies/%v/rules/%v", policyId, ruleId)
req, err := m.client.requestExecutor.WithAccept("application/json").WithContentType("application/json").NewRequest("DELETE", url, nil)
if err != nil {
return nil, err
}
resp, err := m.client.requestExecutor.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
| vendor/github.com/okta/okta-sdk-golang/v2/okta/policyRule.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.000572191143874079,
0.0002317718754056841,
0.0001673709775786847,
0.0001761077728588134,
0.00013098913768772036
] |
{
"id": 4,
"code_window": [
"\treturn b.c.auditBroker.LogRequest(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n",
"func (b *basicAuditor) AuditResponse(ctx context.Context, input *logical.LogInput) error {\n",
"\treturn b.c.auditBroker.LogResponse(ctx, input, b.c.auditedHeaders)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif b.c.auditBroker == nil {\n",
"\t\treturn consts.ErrSealed\n",
"\t}\n"
],
"file_path": "vault/audit.go",
"type": "add",
"edit_start_line_idx": 522
} | // Copyright (c) 2016, 2018, 2019, Oracle and/or its affiliates. All rights reserved.
// Code generated. DO NOT EDIT.
package objectstorage
import (
"github.com/oracle/oci-go-sdk/common"
"net/http"
)
// HeadObjectRequest wrapper for the HeadObject operation
type HeadObjectRequest struct {
// The Object Storage namespace used for the request.
NamespaceName *string `mandatory:"true" contributesTo:"path" name:"namespaceName"`
// The name of the bucket. Avoid entering confidential information.
// Example: `my-new-bucket1`
BucketName *string `mandatory:"true" contributesTo:"path" name:"bucketName"`
// The name of the object. Avoid entering confidential information.
// Example: `test/object1.log`
ObjectName *string `mandatory:"true" contributesTo:"path" name:"objectName"`
// The entity tag (ETag) to match. For creating and committing a multipart upload to an object, this is the entity tag of the target object.
// For uploading a part, this is the entity tag of the target part.
IfMatch *string `mandatory:"false" contributesTo:"header" name:"if-match"`
// The entity tag (ETag) to avoid matching. The only valid value is '*', which indicates that the request should fail if the object
// already exists. For creating and committing a multipart upload, this is the entity tag of the target object. For uploading a
// part, this is the entity tag of the target part.
IfNoneMatch *string `mandatory:"false" contributesTo:"header" name:"if-none-match"`
// The client request ID for tracing.
OpcClientRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-client-request-id"`
// Metadata about the request. This information will not be transmitted to the service, but
// represents information that the SDK will consume to drive retry behavior.
RequestMetadata common.RequestMetadata
}
func (request HeadObjectRequest) String() string {
return common.PointerString(request)
}
// HTTPRequest implements the OCIRequest interface
func (request HeadObjectRequest) HTTPRequest(method, path string) (http.Request, error) {
return common.MakeDefaultHTTPRequestWithTaggedStruct(method, path, request)
}
// RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request HeadObjectRequest) RetryPolicy() *common.RetryPolicy {
return request.RequestMetadata.RetryPolicy
}
// HeadObjectResponse wrapper for the HeadObject operation
type HeadObjectResponse struct {
// The underlying http response
RawResponse *http.Response
// Echoes back the value passed in the opc-client-request-id header, for use by clients when debugging.
OpcClientRequestId *string `presentIn:"header" name:"opc-client-request-id"`
// Unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular
// request, provide this request ID.
OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
// The entity tag (ETag) for the object.
ETag *string `presentIn:"header" name:"etag"`
// The user-defined metadata for the object.
OpcMeta map[string]string `presentIn:"header-collection" prefix:"opc-meta-"`
// The object size in bytes.
ContentLength *int64 `presentIn:"header" name:"content-length"`
// Content-MD5 header, as described in RFC 2616 (https://tools.ietf.org/rfc/rfc2616), section 14.15.
// Unavailable for objects uploaded using multipart upload.
ContentMd5 *string `presentIn:"header" name:"content-md5"`
// Only applicable to objects uploaded using multipart upload.
// Base-64 representation of the multipart object hash.
// The multipart object hash is calculated by taking the MD5 hashes of the parts,
// concatenating the binary representation of those hashes in order of their part numbers,
// and then calculating the MD5 hash of the concatenated values.
OpcMultipartMd5 *string `presentIn:"header" name:"opc-multipart-md5"`
// Content-Type header, as described in RFC 2616 (https://tools.ietf.org/rfc/rfc2616), section 14.17.
ContentType *string `presentIn:"header" name:"content-type"`
// Content-Language header, as described in RFC 2616 (https://tools.ietf.org/rfc/rfc2616), section 14.12.
ContentLanguage *string `presentIn:"header" name:"content-language"`
// Content-Encoding header, as described in RFC 2616 (https://tools.ietf.org/rfc/rfc2616), section 14.11.
ContentEncoding *string `presentIn:"header" name:"content-encoding"`
// The object modification time, as described in RFC 2616 (https://tools.ietf.org/rfc/rfc2616), section 14.29.
LastModified *common.SDKTime `presentIn:"header" name:"last-modified"`
// The current state of the object.
ArchivalState HeadObjectArchivalStateEnum `presentIn:"header" name:"archival-state"`
// Time that the object is returned to the archived state. This field is only present for restored objects.
TimeOfArchival *common.SDKTime `presentIn:"header" name:"time-of-archival"`
// Flag to indicate whether or not the object was modified. If this is true,
// the getter for the object itself will return null. Callers should check this
// if they specified one of the request params that might result in a conditional
// response (like 'if-match'/'if-none-match').
IsNotModified bool
}
func (response HeadObjectResponse) String() string {
return common.PointerString(response)
}
// HTTPResponse implements the OCIResponse interface
func (response HeadObjectResponse) HTTPResponse() *http.Response {
return response.RawResponse
}
// HeadObjectArchivalStateEnum Enum with underlying type: string
type HeadObjectArchivalStateEnum string
// Set of constants representing the allowable values for HeadObjectArchivalStateEnum
const (
HeadObjectArchivalStateAvailable HeadObjectArchivalStateEnum = "AVAILABLE"
HeadObjectArchivalStateArchived HeadObjectArchivalStateEnum = "ARCHIVED"
HeadObjectArchivalStateRestoring HeadObjectArchivalStateEnum = "RESTORING"
HeadObjectArchivalStateRestored HeadObjectArchivalStateEnum = "RESTORED"
)
var mappingHeadObjectArchivalState = map[string]HeadObjectArchivalStateEnum{
"AVAILABLE": HeadObjectArchivalStateAvailable,
"ARCHIVED": HeadObjectArchivalStateArchived,
"RESTORING": HeadObjectArchivalStateRestoring,
"RESTORED": HeadObjectArchivalStateRestored,
}
// GetHeadObjectArchivalStateEnumValues Enumerates the set of values for HeadObjectArchivalStateEnum
func GetHeadObjectArchivalStateEnumValues() []HeadObjectArchivalStateEnum {
values := make([]HeadObjectArchivalStateEnum, 0)
for _, v := range mappingHeadObjectArchivalState {
values = append(values, v)
}
return values
}
| vendor/github.com/oracle/oci-go-sdk/objectstorage/head_object_request_response.go | 0 | https://github.com/hashicorp/vault/commit/a50eac1d44fa6f94c138e97214c4f5365011dbc6 | [
0.0018286644481122494,
0.00041727127972990274,
0.00016333881649188697,
0.00016945663082879037,
0.0005150164943188429
] |
{
"id": 0,
"code_window": [
"\t\tcmd.Stdin = os.Stdin\n",
"\t\tcmd.Stdout = os.Stdout\n",
"\t\tcmd.Stderr = os.Stderr\n",
"\t\tif err := cmd.Run(); err != nil {\n",
"\t\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n",
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tfmt.Fprintln(os.Stderr, exiterr.Error())\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 194
} | /*
Copyright (c) 2020 Docker Inc.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package main
import (
"fmt"
"os"
"os/exec"
"testing"
"time"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/suite"
. "github.com/docker/api/tests/framework"
)
type E2eSuite struct {
Suite
}
func (s *E2eSuite) TestContextHelp() {
It("ensures context command includes azure-login and aci-create", func() {
output := s.NewDockerCommand("context", "create", "--help").ExecOrDie()
Expect(output).To(ContainSubstring("docker context create CONTEXT BACKEND [OPTIONS] [flags]"))
Expect(output).To(ContainSubstring("--aci-location"))
Expect(output).To(ContainSubstring("--aci-subscription-id"))
Expect(output).To(ContainSubstring("--aci-resource-group"))
})
}
func (s *E2eSuite) TestContextDefault() {
It("should be initialized with default context", func() {
s.NewDockerCommand("context", "use", "default").ExecOrDie()
output := s.NewDockerCommand("context", "show").ExecOrDie()
Expect(output).To(ContainSubstring("default"))
output = s.NewCommand("docker", "context", "ls").ExecOrDie()
Expect(output).To(Not(ContainSubstring("test-example")))
Expect(output).To(ContainSubstring("default *"))
})
}
func (s *E2eSuite) TestLegacy() {
It("should list all legacy commands", func() {
output := s.NewDockerCommand("--help").ExecOrDie()
Expect(output).To(ContainSubstring("swarm"))
})
It("should execute legacy commands", func() {
output, _ := s.NewDockerCommand("swarm", "join").Exec()
Expect(output).To(ContainSubstring("\"docker swarm join\" requires exactly 1 argument."))
})
It("should run local container in less than 10 secs", func() {
s.NewDockerCommand("pull", "hello-world").ExecOrDie()
output := s.NewDockerCommand("run", "--rm", "hello-world").WithTimeout(time.NewTimer(10 * time.Second).C).ExecOrDie()
Expect(output).To(ContainSubstring("Hello from Docker!"))
})
}
func (s *E2eSuite) TestMockBackend() {
It("creates a new test context to hardcoded example backend", func() {
s.NewDockerCommand("context", "create", "test-example", "example").ExecOrDie()
// Expect(output).To(ContainSubstring("test-example context acitest created"))
})
It("uses the test context", func() {
currentContext := s.NewDockerCommand("context", "use", "test-example").ExecOrDie()
Expect(currentContext).To(ContainSubstring("test-example"))
output := s.NewDockerCommand("context", "ls").ExecOrDie()
Expect(output).To(ContainSubstring("test-example *"))
output = s.NewDockerCommand("context", "show").ExecOrDie()
Expect(output).To(ContainSubstring("test-example"))
})
It("can run ps command", func() {
output := s.NewDockerCommand("ps").ExecOrDie()
lines := Lines(output)
Expect(len(lines)).To(Equal(3))
Expect(lines[2]).To(ContainSubstring("1234 alpine"))
})
It("can run quiet ps command", func() {
output := s.NewDockerCommand("ps", "-q").ExecOrDie()
lines := Lines(output)
Expect(len(lines)).To(Equal(2))
Expect(lines[0]).To(Equal("id"))
Expect(lines[1]).To(Equal("1234"))
})
It("can run ps command with all ", func() {
output := s.NewDockerCommand("ps", "-q", "--all").ExecOrDie()
lines := Lines(output)
Expect(len(lines)).To(Equal(3))
Expect(lines[0]).To(Equal("id"))
Expect(lines[1]).To(Equal("1234"))
Expect(lines[2]).To(Equal("stopped"))
})
It("can run 'run' command", func() {
output := s.NewDockerCommand("run", "nginx", "-p", "80:80").ExecOrDie()
Expect(output).To(ContainSubstring("Running container \"nginx\" with name"))
})
}
func (s *E2eSuite) TestAPIServer() {
_, err := exec.LookPath("yarn")
if err != nil || os.Getenv("SKIP_NODE") != "" {
s.T().Skip("skipping, yarn not installed")
}
It("can run 'serve' command", func() {
cName := "test-example"
s.NewDockerCommand("context", "create", cName, "example").ExecOrDie()
sPath := fmt.Sprintf("unix:///%s/docker.sock", s.ConfigDir)
server, err := serveAPI(s.ConfigDir, sPath)
Expect(err).To(BeNil())
defer killProcess(server)
s.NewCommand("yarn", "install").WithinDirectory("../node-client").ExecOrDie()
output := s.NewCommand("yarn", "run", "start", cName, sPath).WithinDirectory("../node-client").ExecOrDie()
Expect(output).To(ContainSubstring("nginx"))
})
}
func TestE2e(t *testing.T) {
suite.Run(t, new(E2eSuite))
}
func killProcess(process *os.Process) {
err := process.Kill()
Expect(err).To(BeNil())
}
func serveAPI(configDir string, address string) (*os.Process, error) {
cmd := exec.Command("../../bin/docker", "--config", configDir, "serve", "--address", address)
err := cmd.Start()
if err != nil {
return nil, err
}
return cmd.Process, nil
}
| tests/e2e/e2e_test.go | 1 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.021084245294332504,
0.001411955920048058,
0.00016527995467185974,
0.0001711398654151708,
0.004918254446238279
] |
{
"id": 0,
"code_window": [
"\t\tcmd.Stdin = os.Stdin\n",
"\t\tcmd.Stdout = os.Stdout\n",
"\t\tcmd.Stderr = os.Stderr\n",
"\t\tif err := cmd.Run(); err != nil {\n",
"\t\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n",
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tfmt.Fprintln(os.Stderr, exiterr.Error())\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 194
} | /*
Copyright (c) 2020 Docker Inc.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package context
import (
"context"
"fmt"
"os"
"sort"
"strings"
"text/tabwriter"
"github.com/spf13/cobra"
apicontext "github.com/docker/api/context"
"github.com/docker/api/context/store"
)
func listCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "List available contexts",
Aliases: []string{"ls"},
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runList(cmd.Context())
},
}
return cmd
}
func runList(ctx context.Context) error {
currentContext := apicontext.CurrentContext(ctx)
s := store.ContextStore(ctx)
contexts, err := s.List()
if err != nil {
return err
}
sort.Slice(contexts, func(i, j int) bool {
return strings.Compare(contexts[i].Name, contexts[j].Name) == -1
})
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, "NAME\tTYPE\tDESCRIPTION\tDOCKER ENPOINT\tKUBERNETES ENDPOINT\tORCHESTRATOR")
format := "%s\t%s\t%s\t%s\t%s\t%s\n"
for _, c := range contexts {
contextName := c.Name
if c.Name == currentContext {
contextName += " *"
}
fmt.Fprintf(w,
format,
contextName,
c.Type,
c.Metadata.Description,
getEndpoint("docker", c.Endpoints),
getEndpoint("kubernetes", c.Endpoints),
c.Metadata.StackOrchestrator)
}
return w.Flush()
}
func getEndpoint(name string, meta map[string]interface{}) string {
endpoints, ok := meta[name]
if !ok {
return ""
}
data, ok := endpoints.(store.Endpoint)
if !ok {
return ""
}
result := data.Host
if data.DefaultNamespace != "" {
result += fmt.Sprintf(" (%s)", data.DefaultNamespace)
}
return result
}
| cli/cmd/context/ls.go | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.0834910199046135,
0.008549579419195652,
0.00017095213115680963,
0.00017336104065179825,
0.023758895695209503
] |
{
"id": 0,
"code_window": [
"\t\tcmd.Stdin = os.Stdin\n",
"\t\tcmd.Stdout = os.Stdout\n",
"\t\tcmd.Stderr = os.Stderr\n",
"\t\tif err := cmd.Run(); err != nil {\n",
"\t\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n",
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tfmt.Fprintln(os.Stderr, exiterr.Error())\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 194
} | name: releaser
on:
push:
tags:
- 'v*'
jobs:
upload-release:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
id: go
- name: Checkout code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build
run: make -f builder.Makefile cross
- uses: ncipollo/release-action@v1
with:
artifacts: "bin/*"
prerelease: true
token: ${{ secrets.GITHUB_TOKEN }}
| .github/workflows/release.yaml | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.00017783870862331241,
0.00017505421419627964,
0.000172154774190858,
0.0001751116942614317,
0.0000020143320398346987
] |
{
"id": 0,
"code_window": [
"\t\tcmd.Stdin = os.Stdin\n",
"\t\tcmd.Stdout = os.Stdout\n",
"\t\tcmd.Stderr = os.Stderr\n",
"\t\tif err := cmd.Run(); err != nil {\n",
"\t\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n",
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tfmt.Fprintln(os.Stderr, exiterr.Error())\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 194
} | <!DOCTYPE html>
<html lang="en" ng-app="lab">
<head>
<meta charset="utf-8">
<title>Docker Compose demo</title>
<link rel="stylesheet" href="style.css">
</head>
<body>
<div class="header logo"><img src="images/dockercon-logo-2020.png" /></div>
<div class="sentence" ng-controller="LabCtrl">
<div class="line line1 slide-in">
<span class="result adjective slide-in">
<span class="word slide-in" ng-bind="adjective1.word"></span>
<span class="hostname" ng-bind="adjective1.hostname"></span>
</span>
<span class="result noun slide-in">
<span class="word" ng-bind="noun1.word"></span>
<span class="hostname" ng-bind="noun1.hostname"></span>
</span>
</div>
<div class="line line2 slide-in">
<span class="result verb slide-in">
<span class="word" ng-bind="verb.word"></span>
<span class="hostname" ng-bind="verb.hostname"></span>
</span>
</div>
<div class="line line3 slide-in">
<span class="result adjective slide-in">
<span class="word" ng-bind="adjective2.word"></span>
<span class="hostname" ng-bind="adjective2.hostname"></span>
</span>
<span class="result noun slide-in">
<span class="word" ng-bind="noun2.word"></span>
<span class="hostname" ng-bind="noun2.hostname"></span>
</span>
</div>
</div>
<div class="footer"><img src="images/homes.png" /></div>
</body>
<script src="angular.min.js"></script>
<script src="app.js"></script>
</html>
| tests/composefiles/aci-demo/web/static/index.html | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.0001734870602376759,
0.0001721886219456792,
0.00017094839131459594,
0.00017227693751920015,
9.119077049035695e-7
] |
{
"id": 1,
"code_window": [
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n",
"\t\t\tos.Exit(1)\n",
"\t\t}\n",
"\t\tos.Exit(0)\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfmt.Fprintln(os.Stderr, err)\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 196
} | /*
Copyright (c) 2020 Docker Inc.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package main
import (
"context"
"fmt"
"math/rand"
"os"
"os/exec"
"os/signal"
"path/filepath"
"syscall"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
// Backend registrations
_ "github.com/docker/api/azure"
_ "github.com/docker/api/example"
_ "github.com/docker/api/moby"
"github.com/docker/api/cli/cmd"
"github.com/docker/api/cli/cmd/compose"
contextcmd "github.com/docker/api/cli/cmd/context"
"github.com/docker/api/cli/cmd/run"
cliconfig "github.com/docker/api/cli/config"
cliopts "github.com/docker/api/cli/options"
apicontext "github.com/docker/api/context"
"github.com/docker/api/context/store"
)
var (
runningOwnCommand bool
)
func init() {
// initial hack to get the path of the project's bin dir
// into the env of this cli for development
path, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
fatal(errors.Wrap(err, "unable to get absolute bin path"))
}
if err := os.Setenv("PATH", fmt.Sprintf("%s:%s", os.Getenv("PATH"), path)); err != nil {
panic(err)
}
// Seed random
rand.Seed(time.Now().UnixNano())
}
func isOwnCommand(cmd *cobra.Command) bool {
if cmd == nil {
return false
}
if cmd.Name() == "context" || cmd.Name() == "serve" {
return true
}
return isOwnCommand(cmd.Parent())
}
func main() {
var opts cliopts.GlobalOpts
root := &cobra.Command{
Use: "docker",
Long: "docker for the 2020s",
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
runningOwnCommand = isOwnCommand(cmd)
if !runningOwnCommand {
execMoby(cmd.Context())
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
root.AddCommand(
contextcmd.Command(),
cmd.PsCommand(),
cmd.ServeCommand(),
run.Command(),
cmd.ExecCommand(),
cmd.LogsCommand(),
cmd.RmCommand(),
compose.Command(),
)
helpFunc := root.HelpFunc()
root.SetHelpFunc(func(cmd *cobra.Command, args []string) {
runningOwnCommand = isOwnCommand(cmd)
if !runningOwnCommand {
execMoby(cmd.Context())
}
helpFunc(cmd, args)
})
root.PersistentFlags().BoolVarP(&opts.Debug, "debug", "d", false, "enable debug output in the logs")
opts.AddConfigFlags(root.PersistentFlags())
opts.AddContextFlags(root.PersistentFlags())
// populate the opts with the global flags
_ = root.PersistentFlags().Parse(os.Args[1:])
if opts.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
ctx, cancel := newSigContext()
defer cancel()
if opts.Config == "" {
fatal(errors.New("config path cannot be empty"))
}
configDir := opts.Config
ctx = cliconfig.WithDir(ctx, configDir)
currentContext, err := determineCurrentContext(opts.Context, configDir)
if err != nil {
fatal(errors.New("unable to determine current context"))
}
s, err := store.New(store.WithRoot(configDir))
if err != nil {
fatal(errors.Wrap(err, "unable to create context store"))
}
ctx = apicontext.WithCurrentContext(ctx, currentContext)
ctx = store.WithContextStore(ctx, s)
if err = root.ExecuteContext(ctx); err != nil {
// Context should always be handled by new CLI
if runningOwnCommand {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
execMoby(ctx)
fmt.Println(err)
os.Exit(1)
}
}
func newSigContext() (context.Context, func()) {
ctx, cancel := context.WithCancel(context.Background())
s := make(chan os.Signal)
signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-s
cancel()
}()
return ctx, cancel
}
func execMoby(ctx context.Context) {
currentContext := apicontext.CurrentContext(ctx)
s := store.ContextStore(ctx)
_, err := s.Get(currentContext)
// Only run original docker command if the current context is not
// ours.
if err != nil {
cmd := exec.Command("docker-classic", os.Args[1:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
os.Exit(exiterr.ExitCode())
}
os.Exit(1)
}
os.Exit(0)
}
}
func determineCurrentContext(flag string, configDir string) (string, error) {
res := flag
if res == "" {
config, err := cliconfig.LoadFile(configDir)
if err != nil {
return "", err
}
res = config.CurrentContext
}
if res == "" {
res = "default"
}
return res, nil
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
| cli/main.go | 1 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.16193759441375732,
0.007208079565316439,
0.0001670325145823881,
0.00017111783381551504,
0.032988447695970535
] |
{
"id": 1,
"code_window": [
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n",
"\t\t\tos.Exit(1)\n",
"\t\t}\n",
"\t\tos.Exit(0)\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfmt.Fprintln(os.Stderr, err)\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 196
} | # Docker API
[](https://github.com/docker/api/actions)
## Dev Setup
The recommended way is to use the main `Makefile` that runs everything inside a container.
If you don't have or want to use Docker for building you need to make sure you have all the needed tools installed locally:
* go 1.14
* [protoc](https://github.com/protocolbuffers/protobuf)
* `go get github.com/golang/protobuf/[email protected]`
* `go get golang.org/x/tools/cmd/goimports`
* `go get github.com/golangci/golangci-lint/cmd/[email protected]`
And then you can call the same make targets but you need to pass it the `builder.Makefile` (`make -f builder.Makefile`).
The new CLI delegates to the classic docker for default contexts ; delegation is done to `docker-classic`.
* `make classic-link` will create a `docker-classic` link in `/usr/local/bin` if you don't already have it from Docker Desktop
## Building the project
```bash
$ make
```
If you make changes to the `.proto` files, make sure to `make protos` to generate go code.
## Tests
To run unit tests:
```
make test
```
If you need to update a golden file simply do `go test ./... -test.update-golden`.
| README.md | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.0001709588395897299,
0.00016708241309970617,
0.00016490010602865368,
0.00016623534611426294,
0.000002322692125744652
] |
{
"id": 1,
"code_window": [
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n",
"\t\t\tos.Exit(1)\n",
"\t\t}\n",
"\t\tos.Exit(0)\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfmt.Fprintln(os.Stderr, err)\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 196
} | # Copyright (c) 2020 Docker Inc.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
PROTOS=$(shell find protos -name \*.proto)
EXTENSION :=
ifeq ($(GOOS),windows)
EXTENSION := .exe
endif
STATIC_FLAGS=CGO_ENABLED=0
LDFLAGS := "-s -w"
GO_BUILD = $(STATIC_FLAGS) go build -trimpath -ldflags=$(LDFLAGS)
BINARY=bin/docker
BINARY_WITH_EXTENSION=$(BINARY)$(EXTENSION)
all: cli
protos:
@protoc -I. --go_out=plugins=grpc,paths=source_relative:. ${PROTOS}
cli:
GOOS=${GOOS} GOARCH=${GOARCH} $(GO_BUILD) -o $(BINARY_WITH_EXTENSION) ./cli
cross:
@GOOS=linux GOARCH=amd64 $(GO_BUILD) -o $(BINARY)-linux-amd64 ./cli
@GOOS=darwin GOARCH=amd64 $(GO_BUILD) -o $(BINARY)-darwin-amd64 ./cli
@GOOS=windows GOARCH=amd64 $(GO_BUILD) -o $(BINARY)-windows-amd64.exe ./cli
test:
@go test -cover $(shell go list ./... | grep -vE 'e2e')
lint:
golangci-lint run --timeout 10m0s ./...
FORCE:
.PHONY: all protos cli cross test lint
| builder.Makefile | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.005821295082569122,
0.0011116089299321175,
0.00016554177273064852,
0.0001737307320581749,
0.0019471264677122235
] |
{
"id": 1,
"code_window": [
"\t\t\t\tos.Exit(exiterr.ExitCode())\n",
"\t\t\t}\n",
"\t\t\tos.Exit(1)\n",
"\t\t}\n",
"\t\tos.Exit(0)\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfmt.Fprintln(os.Stderr, err)\n"
],
"file_path": "cli/main.go",
"type": "add",
"edit_start_line_idx": 196
} | package compose
import (
"context"
)
// Service manages a compose project
type Service interface {
// Up executes the equivalent to a `compose up`
Up(ctx context.Context, opts ProjectOptions) error
// Down executes the equivalent to a `compose down`
Down(ctx context.Context, opts ProjectOptions) error
}
| compose/api.go | 0 | https://github.com/docker/compose/commit/66a83410dd768b25a93a8028c900b2dce10a6bac | [
0.00017102468700613827,
0.00016869718092493713,
0.00016636968939565122,
0.00016869718092493713,
0.000002327498805243522
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.