hunk: dict
file: string, lengths 0–11.8M
file_path: string, lengths 2–234
label: int64, values 0–1
commit_url: string, lengths 74–103
dependency_score: sequence, lengths 5–5
{ "id": 0, "code_window": [ "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(5) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection\",\n", "\t\t},\n", "\t\t// Test Limit + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,s.a)->Limit->Projection\",\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5, 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(10) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(5)->Projection\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1000 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package plan import ( "github.com/pingcap/tidb/context" "github.com/pingcap/tidb/expression" ) // pushDownTopNOptimizer pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase. type pushDownTopNOptimizer struct { } func (s *pushDownTopNOptimizer) optimize(p LogicalPlan, ctx context.Context, allocator *idAllocator) (LogicalPlan, error) { return p.pushDownTopN(Sort{}.init(allocator, ctx)), nil } func (s *baseLogicalPlan) pushDownTopN(topN *Sort) LogicalPlan { p := s.basePlan.self.(LogicalPlan) for i, child := range p.Children() { p.Children()[i] = child.(LogicalPlan).pushDownTopN(Sort{}.init(topN.allocator, topN.ctx)) p.Children()[i].SetParents(p) } return topN.setChild(p) } func (s *Sort) isEmpty() bool { return s.ExecLimit == nil && len(s.ByItems) == 0 } func (s *Sort) isLimit() bool { return len(s.ByItems) == 0 && s.ExecLimit != nil } func (s *Sort) isTopN() bool { return len(s.ByItems) != 0 && s.ExecLimit != nil } func (s *Sort) setChild(p LogicalPlan) LogicalPlan { if s.isEmpty() { return p } else if s.isLimit() { limit := Limit{Count: s.ExecLimit.Count, Offset: s.ExecLimit.Offset}.init(s.allocator, s.ctx) limit.SetChildren(p) p.SetParents(limit) limit.SetSchema(p.Schema().Clone()) return limit } // Then s must be topN. s.SetChildren(p) p.SetParents(s) s.SetSchema(p.Schema().Clone()) return s } func (s *Sort) pushDownTopN(topN *Sort) LogicalPlan { if topN.isLimit() { s.ExecLimit = topN.ExecLimit // If a Limit is pushed down, the Sort should be converted to topN and be pushed again. return s.children[0].(LogicalPlan).pushDownTopN(s) } else if topN.isEmpty() { // If nothing is pushed down, just continue to push nothing to its child. return s.baseLogicalPlan.pushDownTopN(topN) } // If a TopN is pushed down, this sort is useless. 
return s.children[0].(LogicalPlan).pushDownTopN(topN) } func (p *Limit) pushDownTopN(topN *Sort) LogicalPlan { child := p.children[0].(LogicalPlan).pushDownTopN(Sort{ExecLimit: p}.init(p.allocator, p.ctx)) return topN.setChild(child) } func (p *Union) pushDownTopN(topN *Sort) LogicalPlan { for i, child := range p.children { newTopN := Sort{}.init(p.allocator, p.ctx) for _, by := range topN.ByItems { newExpr := expression.ColumnSubstitute(by.Expr, p.schema, expression.Column2Exprs(child.Schema().Columns)) newTopN.ByItems = append(newTopN.ByItems, &ByItems{newExpr, by.Desc}) } if !topN.isEmpty() { newTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count} } p.children[i] = child.(LogicalPlan).pushDownTopN(newTopN) p.children[i].SetParents(p) } return topN.setChild(p) } func (p *Projection) pushDownTopN(topN *Sort) LogicalPlan { for _, by := range topN.ByItems { by.Expr = expression.ColumnSubstitute(by.Expr, p.schema, p.Exprs) } child := p.children[0].(LogicalPlan).pushDownTopN(topN) p.SetChildren(child) child.SetParents(p) return p } func (p *LogicalJoin) pushDownTopNToChild(topN *Sort, idx int) LogicalPlan { canPush := true for _, by := range topN.ByItems { cols := expression.ExtractColumns(by.Expr) if len(p.children[1-idx].Schema().ColumnsIndices(cols)) != 0 { canPush = false break } } newTopN := Sort{}.init(topN.allocator, topN.ctx) if canPush { if !topN.isEmpty() { newTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count} } newTopN.ByItems = make([]*ByItems, len(topN.ByItems)) copy(newTopN.ByItems, topN.ByItems) } return p.children[idx].(LogicalPlan).pushDownTopN(newTopN) } func (p *LogicalJoin) pushDownTopN(topN *Sort) LogicalPlan { var leftChild, rightChild LogicalPlan emptySort := Sort{}.init(p.allocator, p.ctx) switch p.JoinType { case LeftOuterJoin, LeftOuterSemiJoin: leftChild = p.pushDownTopNToChild(topN, 0) rightChild = p.children[1].(LogicalPlan).pushDownTopN(emptySort) case RightOuterJoin: leftChild = p.children[0].(LogicalPlan).pushDownTopN(emptySort) rightChild = p.pushDownTopNToChild(topN, 1) default: return p.baseLogicalPlan.pushDownTopN(topN) } p.SetChildren(leftChild, rightChild) // The LogicalJoin may be also a LogicalApply. So we must use self to set parents. self := p.self.(LogicalPlan) leftChild.SetParents(self) rightChild.SetParents(self) return topN.setChild(self) }
plan/topn_push_down.go
1
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0005938230897299945, 0.0002185661724070087, 0.00016429224342573434, 0.00017184432363137603, 0.0001221588609041646 ]
{ "id": 0, "code_window": [ "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(5) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection\",\n", "\t\t},\n", "\t\t// Test Limit + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,s.a)->Limit->Projection\",\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5, 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(10) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(5)->Projection\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1000 }
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package expression import ( "unicode" "github.com/juju/errors" "github.com/pingcap/tidb/ast" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/types" ) // ExtractColumns extracts all columns from an expression. func ExtractColumns(expr Expression) (cols []*Column) { switch v := expr.(type) { case *Column: return []*Column{v} case *ScalarFunction: for _, arg := range v.GetArgs() { cols = append(cols, ExtractColumns(arg)...) } } return } // ColumnSubstitute substitutes the columns in filter to expressions in select fields. // e.g. select * from (select b as a from t) k where a < 10 => select * from (select b as a from t where b < 10) k. func ColumnSubstitute(expr Expression, schema *Schema, newExprs []Expression) Expression { switch v := expr.(type) { case *Column: id := schema.ColumnIndex(v) if id == -1 { return v } return newExprs[id].Clone() case *ScalarFunction: if v.FuncName.L == ast.Cast { newFunc := v.Clone().(*ScalarFunction) newFunc.GetArgs()[0] = ColumnSubstitute(newFunc.GetArgs()[0], schema, newExprs) return newFunc } newArgs := make([]Expression, 0, len(v.GetArgs())) for _, arg := range v.GetArgs() { newArgs = append(newArgs, ColumnSubstitute(arg, schema, newExprs)) } fun, _ := NewFunction(v.GetCtx(), v.FuncName.L, v.RetType, newArgs...) return fun } return expr } func datumsToConstants(datums []types.Datum) []Expression { constants := make([]Expression, 0, len(datums)) for _, d := range datums { constants = append(constants, &Constant{Value: d}) } return constants } // calculateSum adds v to sum. func calculateSum(sc *variable.StatementContext, sum, v types.Datum) (data types.Datum, err error) { // for avg and sum calculation // avg and sum use decimal for integer and decimal type, use float for others // see https://dev.mysql.com/doc/refman/5.7/en/group-by-functions.html switch v.Kind() { case types.KindNull: case types.KindInt64, types.KindUint64: var d *types.MyDecimal d, err = v.ToDecimal(sc) if err == nil { data = types.NewDecimalDatum(d) } case types.KindMysqlDecimal: data = v default: var f float64 f, err = v.ToFloat64(sc) if err == nil { data = types.NewFloat64Datum(f) } } if err != nil { return data, errors.Trace(err) } if data.IsNull() { return sum, nil } switch sum.Kind() { case types.KindNull: return data, nil case types.KindFloat64, types.KindMysqlDecimal: return types.ComputePlus(sum, data) default: return data, errors.Errorf("invalid value %v for aggregate", sum.Kind()) } } // getValidPrefix gets a prefix of string which can parsed to a number with base. the minimum base is 2 and the maximum is 36. 
func getValidPrefix(s string, base int64) string { var ( validLen int upper rune ) switch { case base >= 2 && base <= 9: upper = rune('0' + base) case base <= 36: upper = rune('A' + base - 10) default: return "" } Loop: for i := 0; i < len(s); i++ { c := rune(s[i]) switch { case unicode.IsDigit(c) || unicode.IsLower(c) || unicode.IsUpper(c): c = unicode.ToUpper(c) if c < upper { validLen = i + 1 } else { break Loop } case c == '+' || c == '-': if i != 0 { break Loop } default: break Loop } } if validLen > 1 && s[0] == '+' { return s[1:validLen] } return s[:validLen] } // createDistinctChecker creates a new distinct checker. func createDistinctChecker() *distinctChecker { return &distinctChecker{ existingKeys: make(map[string]bool), } } // Checker stores existing keys and checks if given data is distinct. type distinctChecker struct { existingKeys map[string]bool } // Check checks if values is distinct. func (d *distinctChecker) Check(values []interface{}) (bool, error) { bs, err := codec.EncodeValue([]byte{}, types.MakeDatums(values...)...) if err != nil { return false, errors.Trace(err) } key := string(bs) _, ok := d.existingKeys[key] if ok { return false, nil } d.existingKeys[key] = true return true, nil } // SubstituteCorCol2Constant will substitute correlated column to constant value which it contains. // If the args of one scalar function are all constant, we will substitute it to constant. func SubstituteCorCol2Constant(expr Expression) (Expression, error) { switch x := expr.(type) { case *ScalarFunction: allConstant := true newArgs := make([]Expression, 0, len(x.GetArgs())) for _, arg := range x.GetArgs() { newArg, err := SubstituteCorCol2Constant(arg) if err != nil { return nil, errors.Trace(err) } _, ok := newArg.(*Constant) newArgs = append(newArgs, newArg) allConstant = allConstant && ok } if allConstant { val, err := x.Eval(nil) if err != nil { return nil, errors.Trace(err) } return &Constant{Value: val}, nil } var newSf Expression if x.FuncName.L == ast.Cast { newSf = NewCastFunc(x.RetType, newArgs[0], x.GetCtx()) } else { newSf, _ = NewFunction(x.GetCtx(), x.FuncName.L, x.GetType(), newArgs...) } return newSf, nil case *CorrelatedColumn: return &Constant{Value: *x.Data, RetType: x.GetType()}, nil default: return x.Clone(), nil } } // ConvertCol2CorCol will convert the column in the condition which can be found in outerSchema to a correlated column whose // Column is this column. And please make sure the outerSchema.Columns[i].Equal(corCols[i].Column)) holds when you call this. func ConvertCol2CorCol(cond Expression, corCols []*CorrelatedColumn, outerSchema *Schema) Expression { switch x := cond.(type) { case *ScalarFunction: newArgs := make([]Expression, 0, len(x.GetArgs())) for _, arg := range x.GetArgs() { newArg := ConvertCol2CorCol(arg, corCols, outerSchema) newArgs = append(newArgs, newArg) } var newSf Expression if x.FuncName.L == ast.Cast { newSf = NewCastFunc(x.RetType, newArgs[0], x.GetCtx()) } else { newSf, _ = NewFunction(x.GetCtx(), x.FuncName.L, x.GetType(), newArgs...) } return newSf case *Column: if pos := outerSchema.ColumnIndex(x); pos >= 0 { return corCols[pos] } } return cond }
expression/util.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.00017848049174062908, 0.00017088418826460838, 0.00015927385538816452, 0.00017180843860842288, 0.000004790405910171103 ]
{ "id": 0, "code_window": [ "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(5) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection\",\n", "\t\t},\n", "\t\t// Test Limit + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,s.a)->Limit->Projection\",\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5, 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(10) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(5)->Projection\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1000 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package statistics import ( "fmt" "sort" "strings" "github.com/juju/errors" "github.com/pingcap/tidb/context" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/types" ) // Column represents statistics for a column. type Column struct { ID int64 // Column ID. NDV int64 // Number of distinct values. Buckets []bucket } // bucket is an element of histogram. // // A bucket count is the number of items stored in all previous buckets and the current bucket. // bucket numbers are always in increasing order. // // A bucket value is the greatest item value stored in the bucket. // // Repeat is the number of repeats of the bucket value, it can be used to find popular values. // type bucket struct { Count int64 Value types.Datum Repeats int64 } func (c *Column) saveToStorage(ctx context.Context, tableID int64, isIndex int) error { insertSQL := fmt.Sprintf("insert into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count) values (%d, %d, %d, %d)", tableID, isIndex, c.ID, c.NDV) _, err := ctx.(sqlexec.SQLExecutor).Execute(insertSQL) if err != nil { return errors.Trace(err) } for i, bucket := range c.Buckets { var count int64 if i == 0 { count = bucket.Count } else { count = bucket.Count - c.Buckets[i-1].Count } val, err := bucket.Value.ConvertTo(ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) if err != nil { return errors.Trace(err) } insertSQL = fmt.Sprintf("insert into mysql.stats_buckets values(%d, %d, %d, %d, %d, %d, X'%X')", tableID, isIndex, c.ID, i, count, bucket.Repeats, val.GetBytes()) _, err = ctx.(sqlexec.SQLExecutor).Execute(insertSQL) if err != nil { return errors.Trace(err) } } return nil } func colStatsFromStorage(ctx context.Context, tableID int64, colID int64, tp *types.FieldType, distinct int64, isIndex int) (*Column, error) { selSQL := fmt.Sprintf("select bucket_id, count, repeats, value from mysql.stats_buckets where table_id = %d and is_index = %d and hist_id = %d", tableID, isIndex, colID) rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, selSQL) if err != nil { return nil, errors.Trace(err) } bucketSize := len(rows) colStats := &Column{ ID: colID, NDV: distinct, Buckets: make([]bucket, bucketSize), } for i := 0; i < bucketSize; i++ { bucketID := rows[i].Data[0].GetInt64() count := rows[i].Data[1].GetInt64() repeats := rows[i].Data[2].GetInt64() var value types.Datum if isIndex == 1 { value = rows[i].Data[3] } else { value, err = rows[i].Data[3].ConvertTo(ctx.GetSessionVars().StmtCtx, tp) if err != nil { return nil, errors.Trace(err) } } colStats.Buckets[bucketID] = bucket{ Count: count, Value: value, Repeats: repeats, } } for i := 1; i < bucketSize; i++ { colStats.Buckets[i].Count += colStats.Buckets[i-1].Count } return colStats, nil } func (c *Column) String() string { strs := make([]string, 0, len(c.Buckets)+1) strs = append(strs, fmt.Sprintf("column:%d ndv:%d", c.ID, c.NDV)) for _, bucket := range c.Buckets { strVal, _ := 
bucket.Value.ToString() strs = append(strs, fmt.Sprintf("num: %d\tvalue: %s\trepeats: %d", bucket.Count, strVal, bucket.Repeats)) } return strings.Join(strs, "\n") } // EqualRowCount estimates the row count where the column equals to value. func (c *Column) EqualRowCount(sc *variable.StatementContext, value types.Datum) (int64, error) { if len(c.Buckets) == 0 { return pseudoRowCount / pseudoEqualRate, nil } index, match, err := c.lowerBound(sc, value) if err != nil { return 0, errors.Trace(err) } if index == len(c.Buckets) { return 0, nil } if match { return c.Buckets[index].Repeats, nil } return c.totalRowCount() / c.NDV, nil } // GreaterRowCount estimates the row count where the column greater than value. func (c *Column) GreaterRowCount(sc *variable.StatementContext, value types.Datum) (int64, error) { if len(c.Buckets) == 0 { return pseudoRowCount / pseudoLessRate, nil } lessCount, err := c.LessRowCount(sc, value) if err != nil { return 0, errors.Trace(err) } eqCount, err := c.EqualRowCount(sc, value) if err != nil { return 0, errors.Trace(err) } gtCount := c.totalRowCount() - lessCount - eqCount if gtCount < 0 { gtCount = 0 } return gtCount, nil } // LessRowCount estimates the row count where the column less than value. func (c *Column) LessRowCount(sc *variable.StatementContext, value types.Datum) (int64, error) { if len(c.Buckets) == 0 { return pseudoRowCount / pseudoLessRate, nil } index, match, err := c.lowerBound(sc, value) if err != nil { return 0, errors.Trace(err) } if index == len(c.Buckets) { return c.totalRowCount(), nil } curCount := c.Buckets[index].Count prevCount := int64(0) if index > 0 { prevCount = c.Buckets[index-1].Count } lessThanBucketValueCount := curCount - c.Buckets[index].Repeats if match { return lessThanBucketValueCount, nil } return (prevCount + lessThanBucketValueCount) / 2, nil } // BetweenRowCount estimates the row count where column greater or equal to a and less than b. func (c *Column) BetweenRowCount(sc *variable.StatementContext, a, b types.Datum) (int64, error) { if len(c.Buckets) == 0 { return pseudoRowCount / pseudoBetweenRate, nil } lessCountA, err := c.LessRowCount(sc, a) if err != nil { return 0, errors.Trace(err) } lessCountB, err := c.LessRowCount(sc, b) if err != nil { return 0, errors.Trace(err) } if lessCountA >= lessCountB { return c.inBucketBetweenCount(), nil } return lessCountB - lessCountA, nil } func (c *Column) totalRowCount() int64 { return c.Buckets[len(c.Buckets)-1].Count } func (c *Column) bucketRowCount() int64 { return c.totalRowCount() / int64(len(c.Buckets)) } func (c *Column) inBucketBetweenCount() int64 { // TODO: Make this estimation more accurate using uniform spread assumption. return c.bucketRowCount()/3 + 1 } func (c *Column) lowerBound(sc *variable.StatementContext, target types.Datum) (index int, match bool, err error) { index = sort.Search(len(c.Buckets), func(i int) bool { cmp, err1 := c.Buckets[i].Value.CompareDatum(sc, target) if err1 != nil { err = errors.Trace(err1) return false } if cmp == 0 { match = true } return cmp >= 0 }) return } // mergeBuckets is used to merge every two neighbor buckets. func (c *Column) mergeBuckets(bucketIdx int64) { curBuck := 0 for i := int64(0); i+1 <= bucketIdx; i += 2 { c.Buckets[curBuck] = bucket{ Count: c.Buckets[i+1].Count, Value: c.Buckets[i+1].Value, Repeats: c.Buckets[i+1].Repeats, } curBuck++ } if bucketIdx%2 == 0 { c.Buckets[curBuck] = c.Buckets[bucketIdx] curBuck++ } c.Buckets = c.Buckets[:curBuck] return }
statistics/column.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0001785591448424384, 0.00017086410662159324, 0.000164319048053585, 0.00017145600577350706, 0.0000033623575745878043 ]
{ "id": 0, "code_window": [ "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(5) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection\",\n", "\t\t},\n", "\t\t// Test Limit + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a limit 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,s.a)->Limit->Projection\",\n", "\t\t},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + Left Join + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t left outer join t s on t.a = s.a order by t.a limit 5, 5\",\n", "\t\t\tbest: \"Join{DataScan(t)->Sort + Limit(10) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(5)->Projection\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1000 }
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package expression import ( "sort" "strings" . "github.com/pingcap/check" "github.com/pingcap/tidb/ast" "github.com/pingcap/tidb/model" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tidb/util/types" ) var _ = Suite(&testExpressionSuite{}) type testExpressionSuite struct{} func newColumn(name string) *Column { return &Column{ FromID: name, ColName: model.NewCIStr(name), TblName: model.NewCIStr("t"), DBName: model.NewCIStr("test"), RetType: types.NewFieldType(mysql.TypeLonglong), } } func newLonglong(value int64) *Constant { return &Constant{ Value: types.NewIntDatum(value), RetType: types.NewFieldType(mysql.TypeLonglong), } } func newFunction(funcName string, args ...Expression) Expression { typeLong := types.NewFieldType(mysql.TypeLonglong) newFunc, _ := NewFunction(mock.NewContext(), funcName, typeLong, args...) return newFunc } func (*testExpressionSuite) TestConstantPropagation(c *C) { defer testleak.AfterTest(c)() nullValue := &Constant{Value: types.Datum{}} cases := []struct { conditions []Expression result string }{ { conditions: []Expression{ newFunction(ast.EQ, newColumn("a"), newColumn("b")), newFunction(ast.EQ, newColumn("b"), newColumn("c")), newFunction(ast.EQ, newColumn("c"), newColumn("d")), newFunction(ast.EQ, newColumn("d"), newLonglong(1)), newFunction(ast.OrOr, newLonglong(1), newColumn("a")), }, result: "eq(test.t.a, 1), eq(test.t.b, 1), eq(test.t.c, 1), eq(test.t.d, 1), or(1, 1)", }, { conditions: []Expression{ newFunction(ast.EQ, newColumn("a"), newColumn("b")), newFunction(ast.EQ, newColumn("b"), newLonglong(1)), newFunction(ast.EQ, newColumn("a"), nullValue), newFunction(ast.NE, newColumn("c"), newLonglong(2)), }, result: "0", }, { conditions: []Expression{ newFunction(ast.EQ, newColumn("a"), newColumn("b")), newFunction(ast.EQ, newColumn("b"), newLonglong(1)), newFunction(ast.EQ, newColumn("c"), newColumn("d")), newFunction(ast.GE, newColumn("c"), newLonglong(2)), newFunction(ast.NE, newColumn("c"), newLonglong(4)), newFunction(ast.NE, newColumn("d"), newLonglong(5)), }, result: "eq(test.t.a, 1), eq(test.t.b, 1), eq(test.t.c, test.t.d), ge(test.t.c, 2), ge(test.t.d, 2), ne(test.t.c, 4), ne(test.t.c, 5), ne(test.t.d, 4), ne(test.t.d, 5)", }, { conditions: []Expression{ newFunction(ast.EQ, newColumn("a"), newColumn("b")), newFunction(ast.EQ, newColumn("a"), newColumn("c")), newFunction(ast.GE, newColumn("b"), newLonglong(0)), }, result: "eq(test.t.a, test.t.b), eq(test.t.a, test.t.c), ge(test.t.a, 0), ge(test.t.b, 0), ge(test.t.c, 0)", }, { conditions: []Expression{ newFunction(ast.EQ, newColumn("a"), newColumn("b")), newFunction(ast.GT, newColumn("a"), newLonglong(2)), newFunction(ast.GT, newColumn("b"), newLonglong(3)), newFunction(ast.LT, newColumn("a"), newLonglong(1)), newFunction(ast.GT, newLonglong(2), newColumn("b")), }, result: "eq(test.t.a, test.t.b), gt(2, test.t.a), gt(2, test.t.b), gt(test.t.a, 2), gt(test.t.a, 3), gt(test.t.b, 2), gt(test.t.b, 3), lt(test.t.a, 1), 
lt(test.t.b, 1)", }, { conditions: []Expression{ newFunction(ast.EQ, newLonglong(1), newColumn("a")), newLonglong(0), }, result: "0", }, } for _, ca := range cases { ctx := mock.NewContext() newConds := PropagateConstant(ctx, ca.conditions) var result []string for _, v := range newConds { result = append(result, v.String()) } sort.Strings(result) c.Assert(strings.Join(result, ", "), Equals, ca.result, Commentf("different for expr %s", ca.conditions)) } } func (*testExpressionSuite) TestConstantFolding(c *C) { defer testleak.AfterTest(c)() cases := []struct { condition Expression result string }{ { condition: newFunction(ast.LT, newColumn("a"), newFunction(ast.Plus, newLonglong(1), newLonglong(2))), result: "lt(test.t.a, 3)", }, { condition: newFunction(ast.LT, newColumn("a"), newFunction(ast.Greatest, newLonglong(1), newLonglong(2))), result: "lt(test.t.a, 2)", }, { condition: newFunction(ast.EQ, newColumn("a"), newFunction(ast.Rand)), result: "eq(test.t.a, rand())", }, { condition: newFunction(ast.In, newColumn("a"), newLonglong(1), newLonglong(2), newLonglong(3)), result: "in(test.t.a, 1, 2, 3)", }, { condition: newFunction(ast.IsNull, newLonglong(1)), result: "0", }, { condition: newFunction(ast.EQ, newColumn("a"), newFunction(ast.UnaryNot, newFunction(ast.Plus, newLonglong(1), newLonglong(1)))), result: "eq(test.t.a, 0)", }, { condition: newFunction(ast.LT, newColumn("a"), newFunction(ast.Plus, newColumn("b"), newFunction(ast.Plus, newLonglong(2), newLonglong(1)))), result: "lt(test.t.a, plus(test.t.b, 3))", }, } for _, ca := range cases { newConds := FoldConstant(ca.condition) c.Assert(newConds.String(), Equals, ca.result, Commentf("different for expr %s", ca.condition)) } }
expression/constant_test.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.00017848049174062908, 0.0001696057297522202, 0.0001638142130104825, 0.00016929057892411947, 0.000004218584308546269 ]
{ "id": 1, "code_window": [ "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(5) + Offset(0)->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection}->Sort + Limit(5) + Offset(0)\",\n", "\t\t},\n", "\t\t// Test Limit + UA + Proj + Sort.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s order by a) limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Limit->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection->Projection}->Limit\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + UA + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5, 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(10) + Offset(0)->Projection->DataScan(s)->Sort + Limit(10) + Offset(0)->Projection}->Sort + Limit(5) + Offset(5)\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1040 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package plan import ( "github.com/pingcap/tidb/context" "github.com/pingcap/tidb/expression" ) // pushDownTopNOptimizer pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase. type pushDownTopNOptimizer struct { } func (s *pushDownTopNOptimizer) optimize(p LogicalPlan, ctx context.Context, allocator *idAllocator) (LogicalPlan, error) { return p.pushDownTopN(Sort{}.init(allocator, ctx)), nil } func (s *baseLogicalPlan) pushDownTopN(topN *Sort) LogicalPlan { p := s.basePlan.self.(LogicalPlan) for i, child := range p.Children() { p.Children()[i] = child.(LogicalPlan).pushDownTopN(Sort{}.init(topN.allocator, topN.ctx)) p.Children()[i].SetParents(p) } return topN.setChild(p) } func (s *Sort) isEmpty() bool { return s.ExecLimit == nil && len(s.ByItems) == 0 } func (s *Sort) isLimit() bool { return len(s.ByItems) == 0 && s.ExecLimit != nil } func (s *Sort) isTopN() bool { return len(s.ByItems) != 0 && s.ExecLimit != nil } func (s *Sort) setChild(p LogicalPlan) LogicalPlan { if s.isEmpty() { return p } else if s.isLimit() { limit := Limit{Count: s.ExecLimit.Count, Offset: s.ExecLimit.Offset}.init(s.allocator, s.ctx) limit.SetChildren(p) p.SetParents(limit) limit.SetSchema(p.Schema().Clone()) return limit } // Then s must be topN. s.SetChildren(p) p.SetParents(s) s.SetSchema(p.Schema().Clone()) return s } func (s *Sort) pushDownTopN(topN *Sort) LogicalPlan { if topN.isLimit() { s.ExecLimit = topN.ExecLimit // If a Limit is pushed down, the Sort should be converted to topN and be pushed again. return s.children[0].(LogicalPlan).pushDownTopN(s) } else if topN.isEmpty() { // If nothing is pushed down, just continue to push nothing to its child. return s.baseLogicalPlan.pushDownTopN(topN) } // If a TopN is pushed down, this sort is useless. 
return s.children[0].(LogicalPlan).pushDownTopN(topN) } func (p *Limit) pushDownTopN(topN *Sort) LogicalPlan { child := p.children[0].(LogicalPlan).pushDownTopN(Sort{ExecLimit: p}.init(p.allocator, p.ctx)) return topN.setChild(child) } func (p *Union) pushDownTopN(topN *Sort) LogicalPlan { for i, child := range p.children { newTopN := Sort{}.init(p.allocator, p.ctx) for _, by := range topN.ByItems { newExpr := expression.ColumnSubstitute(by.Expr, p.schema, expression.Column2Exprs(child.Schema().Columns)) newTopN.ByItems = append(newTopN.ByItems, &ByItems{newExpr, by.Desc}) } if !topN.isEmpty() { newTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count} } p.children[i] = child.(LogicalPlan).pushDownTopN(newTopN) p.children[i].SetParents(p) } return topN.setChild(p) } func (p *Projection) pushDownTopN(topN *Sort) LogicalPlan { for _, by := range topN.ByItems { by.Expr = expression.ColumnSubstitute(by.Expr, p.schema, p.Exprs) } child := p.children[0].(LogicalPlan).pushDownTopN(topN) p.SetChildren(child) child.SetParents(p) return p } func (p *LogicalJoin) pushDownTopNToChild(topN *Sort, idx int) LogicalPlan { canPush := true for _, by := range topN.ByItems { cols := expression.ExtractColumns(by.Expr) if len(p.children[1-idx].Schema().ColumnsIndices(cols)) != 0 { canPush = false break } } newTopN := Sort{}.init(topN.allocator, topN.ctx) if canPush { if !topN.isEmpty() { newTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count} } newTopN.ByItems = make([]*ByItems, len(topN.ByItems)) copy(newTopN.ByItems, topN.ByItems) } return p.children[idx].(LogicalPlan).pushDownTopN(newTopN) } func (p *LogicalJoin) pushDownTopN(topN *Sort) LogicalPlan { var leftChild, rightChild LogicalPlan emptySort := Sort{}.init(p.allocator, p.ctx) switch p.JoinType { case LeftOuterJoin, LeftOuterSemiJoin: leftChild = p.pushDownTopNToChild(topN, 0) rightChild = p.children[1].(LogicalPlan).pushDownTopN(emptySort) case RightOuterJoin: leftChild = p.children[0].(LogicalPlan).pushDownTopN(emptySort) rightChild = p.pushDownTopNToChild(topN, 1) default: return p.baseLogicalPlan.pushDownTopN(topN) } p.SetChildren(leftChild, rightChild) // The LogicalJoin may be also a LogicalApply. So we must use self to set parents. self := p.self.(LogicalPlan) leftChild.SetParents(self) rightChild.SetParents(self) return topN.setChild(self) }
plan/topn_push_down.go
1
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0005130292265675962, 0.0002162406308343634, 0.00016403163317590952, 0.00017405964899808168, 0.00010101230145664886 ]
{ "id": 1, "code_window": [ "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(5) + Offset(0)->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection}->Sort + Limit(5) + Offset(0)\",\n", "\t\t},\n", "\t\t// Test Limit + UA + Proj + Sort.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s order by a) limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Limit->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection->Projection}->Limit\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + UA + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5, 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(10) + Offset(0)->Projection->DataScan(s)->Sort + Limit(10) + Offset(0)->Projection}->Sort + Limit(5) + Offset(5)\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1040 }
package localstore import "github.com/pingcap/tidb/kv" type localPD struct { regions []*regionInfo } type regionInfo struct { startKey kv.Key endKey kv.Key rs *localRegion } func (pd *localPD) GetRegionInfo() []*regionInfo { return pd.regions } func (pd *localPD) SetRegionInfo(regions []*regionInfo) { pd.regions = regions } // ChangeRegionInfo used for test handling region info change. func ChangeRegionInfo(store kv.Storage, regionID int, startKey, endKey []byte) { s := store.(*dbStore) for i, region := range s.pd.regions { if region.rs.id == regionID { newRegionInfo := &regionInfo{ startKey: startKey, endKey: endKey, rs: region.rs, } region.rs.startKey = startKey region.rs.endKey = endKey s.pd.regions[i] = newRegionInfo break } } }
store/localstore/local_pd.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0007662894786335528, 0.0003183616790920496, 0.00016783922910690308, 0.0001696589752100408, 0.00025861363974399865 ]
{ "id": 1, "code_window": [ "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(5) + Offset(0)->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection}->Sort + Limit(5) + Offset(0)\",\n", "\t\t},\n", "\t\t// Test Limit + UA + Proj + Sort.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s order by a) limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Limit->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection->Projection}->Limit\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + UA + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5, 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(10) + Offset(0)->Projection->DataScan(s)->Sort + Limit(10) + Offset(0)->Projection}->Sort + Limit(5) + Offset(5)\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1040 }
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package oracles import ( "sync" "time" "github.com/pingcap/tidb/store/tikv/oracle" "golang.org/x/net/context" ) var _ oracle.Oracle = &localOracle{} type localOracle struct { sync.Mutex lastTimeStampTS uint64 n uint64 } // NewLocalOracle creates an Oracle that uses local time as data source. func NewLocalOracle() oracle.Oracle { return &localOracle{} } func (l *localOracle) IsExpired(lockTS uint64, TTL uint64) bool { return oracle.GetPhysical(time.Now()) >= oracle.ExtractPhysical(lockTS)+int64(TTL) } func (l *localOracle) GetTimestamp(context.Context) (uint64, error) { l.Lock() defer l.Unlock() physical := oracle.GetPhysical(time.Now()) ts := oracle.ComposeTS(physical, 0) if l.lastTimeStampTS == ts { l.n++ return uint64(ts + l.n), nil } l.lastTimeStampTS = ts l.n = 0 return uint64(ts), nil } func (l *localOracle) Close() { }
store/tikv/oracle/oracles/local.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.00017673856928013265, 0.0001687814947217703, 0.00016237942327279598, 0.00016824642079882324, 0.000005010108452552231 ]
{ "id": 1, "code_window": [ "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(5) + Offset(0)->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection}->Sort + Limit(5) + Offset(0)\",\n", "\t\t},\n", "\t\t// Test Limit + UA + Proj + Sort.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s order by a) limit 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Limit->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection->Projection}->Limit\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t// Test TopN + UA + Proj.\n", "\t\t{\n", "\t\t\tsql: \"select * from t union all (select * from t s) order by a,b limit 5, 5\",\n", "\t\t\tbest: \"UnionAll{DataScan(t)->Sort + Limit(10) + Offset(0)->Projection->DataScan(s)->Sort + Limit(10) + Offset(0)->Projection}->Sort + Limit(5) + Offset(5)\",\n", "\t\t},\n" ], "file_path": "plan/logical_plan_test.go", "type": "add", "edit_start_line_idx": 1040 }
// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus // Gauge is a Metric that represents a single numerical value that can // arbitrarily go up and down. // // A Gauge is typically used for measured values like temperatures or current // memory usage, but also "counts" that can go up and down, like the number of // running goroutines. // // To create Gauge instances, use NewGauge. type Gauge interface { Metric Collector // Set sets the Gauge to an arbitrary value. Set(float64) // Inc increments the Gauge by 1. Inc() // Dec decrements the Gauge by 1. Dec() // Add adds the given value to the Gauge. (The value can be // negative, resulting in a decrease of the Gauge.) Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) Sub(float64) } // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts // NewGauge creates a new Gauge based on the provided GaugeOpts. func NewGauge(opts GaugeOpts) Gauge { return newValue(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), GaugeValue, 0) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same // Desc, but have different values for their variable labels. This is used if // you want to count the same thing partitioned by various dimensions // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { *MetricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. At least one label name must be // provided. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, labelNames, opts.ConstLabels, ) return &GaugeVec{ MetricVec: newMetricVec(desc, func(lvs ...string) Metric { return newValue(desc, GaugeValue, 0, lvs...) }), } } // GetMetricWithLabelValues replaces the method of the same name in // MetricVec. The difference is that this method returns a Gauge and not a // Metric so that no type conversion is required. func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } return nil, err } // GetMetricWith replaces the method of the same name in MetricVec. The // difference is that this method returns a Gauge and not a Metric so that no // type conversion is required. func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Gauge), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. 
By not returning an // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { return m.MetricVec.WithLabelValues(lvs...).(Gauge) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) func (m *GaugeVec) With(labels Labels) Gauge { return m.MetricVec.With(labels).(Gauge) } // GaugeFunc is a Gauge whose value is determined at collect time by calling a // provided function. // // To create GaugeFunc instances, use NewGaugeFunc. type GaugeFunc interface { Metric Collector } // NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The // value reported is determined by calling the given function from within the // Write method. Take into account that metric collection may happen // concurrently. If that results in concurrent calls to Write, like in the case // where a GaugeFunc is directly registered with Prometheus, the provided // function must be concurrency-safe. func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), GaugeValue, function) }
_vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0005130296922288835, 0.0001986384449992329, 0.00016368681099265814, 0.00017300421313848346, 0.00008636943675810471 ]
{ "id": 2, "code_window": [ "\t\t\tnewTopN.ByItems = append(newTopN.ByItems, &ByItems{newExpr, by.Desc})\n", "\t\t}\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tp.children[i] = child.(LogicalPlan).pushDownTopN(newTopN)\n", "\t\tp.children[i].SetParents(p)\n", "\t}\n", "\treturn topN.setChild(p)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 92 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package plan import ( "github.com/pingcap/tidb/context" "github.com/pingcap/tidb/expression" ) // pushDownTopNOptimizer pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase. type pushDownTopNOptimizer struct { } func (s *pushDownTopNOptimizer) optimize(p LogicalPlan, ctx context.Context, allocator *idAllocator) (LogicalPlan, error) { return p.pushDownTopN(Sort{}.init(allocator, ctx)), nil } func (s *baseLogicalPlan) pushDownTopN(topN *Sort) LogicalPlan { p := s.basePlan.self.(LogicalPlan) for i, child := range p.Children() { p.Children()[i] = child.(LogicalPlan).pushDownTopN(Sort{}.init(topN.allocator, topN.ctx)) p.Children()[i].SetParents(p) } return topN.setChild(p) } func (s *Sort) isEmpty() bool { return s.ExecLimit == nil && len(s.ByItems) == 0 } func (s *Sort) isLimit() bool { return len(s.ByItems) == 0 && s.ExecLimit != nil } func (s *Sort) isTopN() bool { return len(s.ByItems) != 0 && s.ExecLimit != nil } func (s *Sort) setChild(p LogicalPlan) LogicalPlan { if s.isEmpty() { return p } else if s.isLimit() { limit := Limit{Count: s.ExecLimit.Count, Offset: s.ExecLimit.Offset}.init(s.allocator, s.ctx) limit.SetChildren(p) p.SetParents(limit) limit.SetSchema(p.Schema().Clone()) return limit } // Then s must be topN. s.SetChildren(p) p.SetParents(s) s.SetSchema(p.Schema().Clone()) return s } func (s *Sort) pushDownTopN(topN *Sort) LogicalPlan { if topN.isLimit() { s.ExecLimit = topN.ExecLimit // If a Limit is pushed down, the Sort should be converted to topN and be pushed again. return s.children[0].(LogicalPlan).pushDownTopN(s) } else if topN.isEmpty() { // If nothing is pushed down, just continue to push nothing to its child. return s.baseLogicalPlan.pushDownTopN(topN) } // If a TopN is pushed down, this sort is useless. 
return s.children[0].(LogicalPlan).pushDownTopN(topN) } func (p *Limit) pushDownTopN(topN *Sort) LogicalPlan { child := p.children[0].(LogicalPlan).pushDownTopN(Sort{ExecLimit: p}.init(p.allocator, p.ctx)) return topN.setChild(child) } func (p *Union) pushDownTopN(topN *Sort) LogicalPlan { for i, child := range p.children { newTopN := Sort{}.init(p.allocator, p.ctx) for _, by := range topN.ByItems { newExpr := expression.ColumnSubstitute(by.Expr, p.schema, expression.Column2Exprs(child.Schema().Columns)) newTopN.ByItems = append(newTopN.ByItems, &ByItems{newExpr, by.Desc}) } if !topN.isEmpty() { newTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count} } p.children[i] = child.(LogicalPlan).pushDownTopN(newTopN) p.children[i].SetParents(p) } return topN.setChild(p) } func (p *Projection) pushDownTopN(topN *Sort) LogicalPlan { for _, by := range topN.ByItems { by.Expr = expression.ColumnSubstitute(by.Expr, p.schema, p.Exprs) } child := p.children[0].(LogicalPlan).pushDownTopN(topN) p.SetChildren(child) child.SetParents(p) return p } func (p *LogicalJoin) pushDownTopNToChild(topN *Sort, idx int) LogicalPlan { canPush := true for _, by := range topN.ByItems { cols := expression.ExtractColumns(by.Expr) if len(p.children[1-idx].Schema().ColumnsIndices(cols)) != 0 { canPush = false break } } newTopN := Sort{}.init(topN.allocator, topN.ctx) if canPush { if !topN.isEmpty() { newTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count} } newTopN.ByItems = make([]*ByItems, len(topN.ByItems)) copy(newTopN.ByItems, topN.ByItems) } return p.children[idx].(LogicalPlan).pushDownTopN(newTopN) } func (p *LogicalJoin) pushDownTopN(topN *Sort) LogicalPlan { var leftChild, rightChild LogicalPlan emptySort := Sort{}.init(p.allocator, p.ctx) switch p.JoinType { case LeftOuterJoin, LeftOuterSemiJoin: leftChild = p.pushDownTopNToChild(topN, 0) rightChild = p.children[1].(LogicalPlan).pushDownTopN(emptySort) case RightOuterJoin: leftChild = p.children[0].(LogicalPlan).pushDownTopN(emptySort) rightChild = p.pushDownTopNToChild(topN, 1) default: return p.baseLogicalPlan.pushDownTopN(topN) } p.SetChildren(leftChild, rightChild) // The LogicalJoin may be also a LogicalApply. So we must use self to set parents. self := p.self.(LogicalPlan) leftChild.SetParents(self) rightChild.SetParents(self) return topN.setChild(self) }
plan/topn_push_down.go
1
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.9975411891937256, 0.06996246427297592, 0.00016827591753099114, 0.003843903075903654, 0.23984022438526154 ]
{ "id": 2, "code_window": [ "\t\t\tnewTopN.ByItems = append(newTopN.ByItems, &ByItems{newExpr, by.Desc})\n", "\t\t}\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tp.children[i] = child.(LogicalPlan).pushDownTopN(newTopN)\n", "\t\tp.children[i].SetParents(p)\n", "\t}\n", "\treturn topN.setChild(p)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 92 }
// Copyright 2013 The ql Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSES/QL-LICENSE file. // Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package tidb import ( "fmt" "runtime/debug" "strconv" "strings" "time" "github.com/juju/errors" "github.com/ngaut/log" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/types" ) const ( // CreateUserTable is the SQL statement creates User table in system db. CreateUserTable = `CREATE TABLE if not exists mysql.user ( Host CHAR(64), User CHAR(16), Password CHAR(41), Select_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Insert_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Update_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Delete_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Create_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Drop_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Grant_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Alter_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Show_db_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Super_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Execute_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Index_priv ENUM('N','Y') NOT NULL DEFAULT 'N', Create_user_priv ENUM('N','Y') NOT NULL DEFAULT 'N', PRIMARY KEY (Host, User));` // CreateDBPrivTable is the SQL statement creates DB scope privilege table in system db. CreateDBPrivTable = `CREATE TABLE if not exists mysql.db ( Host CHAR(60), DB CHAR(64), User CHAR(16), Select_priv ENUM('N','Y') Not Null DEFAULT 'N', Insert_priv ENUM('N','Y') Not Null DEFAULT 'N', Update_priv ENUM('N','Y') Not Null DEFAULT 'N', Delete_priv ENUM('N','Y') Not Null DEFAULT 'N', Create_priv ENUM('N','Y') Not Null DEFAULT 'N', Drop_priv ENUM('N','Y') Not Null DEFAULT 'N', Grant_priv ENUM('N','Y') Not Null DEFAULT 'N', Index_priv ENUM('N','Y') Not Null DEFAULT 'N', Alter_priv ENUM('N','Y') Not Null DEFAULT 'N', Execute_priv ENUM('N','Y') Not Null DEFAULT 'N', PRIMARY KEY (Host, DB, User));` // CreateTablePrivTable is the SQL statement creates table scope privilege table in system db. CreateTablePrivTable = `CREATE TABLE if not exists mysql.tables_priv ( Host CHAR(60), DB CHAR(64), User CHAR(16), Table_name CHAR(64), Grantor CHAR(77), Timestamp Timestamp DEFAULT CURRENT_TIMESTAMP, Table_priv SET('Select','Insert','Update','Delete','Create','Drop','Grant', 'Index','Alter'), Column_priv SET('Select','Insert','Update'), PRIMARY KEY (Host, DB, User, Table_name));` // CreateColumnPrivTable is the SQL statement creates column scope privilege table in system db. CreateColumnPrivTable = `CREATE TABLE if not exists mysql.columns_priv( Host CHAR(60), DB CHAR(64), User CHAR(16), Table_name CHAR(64), Column_name CHAR(64), Timestamp Timestamp DEFAULT CURRENT_TIMESTAMP, Column_priv SET('Select','Insert','Update'), PRIMARY KEY (Host, DB, User, Table_name, Column_name));` // CreateGloablVariablesTable is the SQL statement creates global variable table in system db. // TODO: MySQL puts GLOBAL_VARIABLES table in INFORMATION_SCHEMA db. 
// INFORMATION_SCHEMA is a virtual db in TiDB. So we put this table in system db. // Maybe we will put it back to INFORMATION_SCHEMA. CreateGloablVariablesTable = `CREATE TABLE if not exists mysql.GLOBAL_VARIABLES( VARIABLE_NAME VARCHAR(64) Not Null PRIMARY KEY, VARIABLE_VALUE VARCHAR(1024) DEFAULT Null);` // CreateTiDBTable is the SQL statement creates a table in system db. // This table is a key-value struct contains some information used by TiDB. // Currently we only put bootstrapped in it which indicates if the system is already bootstrapped. CreateTiDBTable = `CREATE TABLE if not exists mysql.tidb( VARIABLE_NAME VARCHAR(64) Not Null PRIMARY KEY, VARIABLE_VALUE VARCHAR(1024) DEFAULT Null, COMMENT VARCHAR(1024));` // CreateHelpTopic is the SQL statement creates help_topic table in system db. // See: https://dev.mysql.com/doc/refman/5.5/en/system-database.html#system-database-help-tables CreateHelpTopic = `CREATE TABLE if not exists mysql.help_topic ( help_topic_id int(10) unsigned NOT NULL, name char(64) NOT NULL, help_category_id smallint(5) unsigned NOT NULL, description text NOT NULL, example text NOT NULL, url text NOT NULL, PRIMARY KEY (help_topic_id), UNIQUE KEY name (name) ) ENGINE=InnoDB DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 COMMENT='help topics';` // CreateStatsMetaTable stores the meta of table statistics. CreateStatsMetaTable = `CREATE TABLE if not exists mysql.stats_meta ( version bigint(64) unsigned NOT NULL, table_id bigint(64) NOT NULL, modify_count bigint(64) NOT NULL DEFAULT 0, count bigint(64) unsigned NOT NULL DEFAULT 0, index idx_ver(version), unique index tbl(table_id) );` // CreateStatsColsTable stores the statistics of table columns. CreateStatsColsTable = `CREATE TABLE if not exists mysql.stats_histograms ( table_id bigint(64) NOT NULL, is_index tinyint(2) NOT NULL, hist_id bigint(64) NOT NULL, distinct_count bigint(64) NOT NULL, distinct_ratio double(64) NOT NULL DEFAULT 0, use_count_to_estimate tinyint(2) NOT NULL DEFAULT 0, modify_count bigint(64) NOT NULL DEFAULT 0, version bigint(64) unsigned NOT NULL DEFAULT 0, unique index tbl(table_id, is_index, hist_id) );` // CreateStatsBucketsTable stores the histogram info for every table columns. CreateStatsBucketsTable = `CREATE TABLE if not exists mysql.stats_buckets ( table_id bigint(64) NOT NULL, is_index tinyint(2) NOT NULL, hist_id bigint(64) NOT NULL, bucket_id bigint(64) NOT NULL, count bigint(64) NOT NULL, repeats bigint(64) NOT NULL, value blob NOT NULL, unique index tbl(table_id, is_index, hist_id, bucket_id) );` ) // Bootstrap initiates system DB for a store. func bootstrap(s Session) { b, err := checkBootstrapped(s) if err != nil { log.Fatal(err) } if b { upgrade(s) } doDDLWorks(s) doDMLWorks(s) } const ( // The variable name in mysql.TiDB table. // It is used for checking if the store is boostrapped by any TiDB server. bootstrappedVar = "bootstrapped" // The variable value in mysql.TiDB table for bootstrappedVar. // If the value true, the store is already boostrapped by a TiDB server. bootstrappedVarTrue = "True" // The variable name in mysql.TiDB table. // It is used for getting the version of the TiDB server which bootstrapped the store. tidbServerVersionVar = "tidb_server_version" // // Const for TiDB server version 2. version2 = 2 version3 = 3 version4 = 4 version5 = 5 version6 = 6 ) func checkBootstrapped(s Session) (bool, error) { // Check if system db exists. 
_, err := s.Execute(fmt.Sprintf("USE %s;", mysql.SystemDB)) if err != nil && infoschema.ErrDatabaseNotExists.NotEqual(err) { log.Fatal(err) } // Check bootstrapped variable value in TiDB table. d, err := getTiDBVar(s, bootstrappedVar) if err != nil { if infoschema.ErrTableNotExists.Equal(err) { return false, nil } return false, errors.Trace(err) } isBootstrapped := d.GetString() == bootstrappedVarTrue if isBootstrapped { // Make sure that doesn't affect the following operations. if err = s.CommitTxn(); err != nil { return false, errors.Trace(err) } } return isBootstrapped, nil } // Get variable value from mysql.tidb table. // Those variables are used by TiDB server. func getTiDBVar(s Session, name string) (types.Datum, error) { sql := fmt.Sprintf(`SELECT VARIABLE_VALUE FROM %s.%s WHERE VARIABLE_NAME="%s"`, mysql.SystemDB, mysql.TiDBTable, name) rs, err := s.Execute(sql) if err != nil { return types.Datum{}, errors.Trace(err) } if len(rs) != 1 { return types.Datum{}, errors.New("Wrong number of Recordset") } r := rs[0] defer r.Close() row, err := r.Next() if err != nil || row == nil { return types.Datum{}, errors.Trace(err) } return row.Data[0], nil } // When the system is boostrapped by low version TiDB server, we should do some upgrade works. // For example, add new system variables into mysql.global_variables table. func upgrade(s Session) { ver, err := getBootstrapVersion(s) if err != nil { log.Fatal(errors.Trace(err)) } if ver >= currentBootstrapVersion { // It is already bootstrapped/upgraded by a higher version TiDB server. return } // Do upgrade works then update bootstrap version. if ver < version2 { upgradeToVer2(s) ver = version2 } if ver < version3 { upgradeToVer3(s) } if ver < version4 { upgradeToVer4(s) } if ver < version5 { upgradeToVer5(s) } if ver < version6 { upgradeToVer6(s) } updateBootstrapVer(s) _, err = s.Execute("COMMIT") if err != nil { time.Sleep(1 * time.Second) // Check if TiDB is already upgraded. v, err1 := getBootstrapVersion(s) if err1 != nil { log.Fatal(err1) } if v >= currentBootstrapVersion { // It is already bootstrapped/upgraded by a higher version TiDB server. return } log.Errorf("[Upgrade] upgrade from %d to %d error", ver, currentBootstrapVersion) log.Fatal(err) } return } // Update to version 2. func upgradeToVer2(s Session) { // Version 2 add two system variable for DistSQL concurrency controlling. // Insert distsql related system variable. distSQLVars := []string{variable.TiDBDistSQLScanConcurrency} values := make([]string, 0, len(distSQLVars)) for _, v := range distSQLVars { value := fmt.Sprintf(`("%s", "%s")`, v, variable.SysVars[v].Value) values = append(values, value) } sql := fmt.Sprintf("INSERT IGNORE INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, strings.Join(values, ", ")) mustExecute(s, sql) } // Update to version 3. func upgradeToVer3(s Session) { // Version 3 fix tx_read_only variable value. sql := fmt.Sprintf("UPDATE %s.%s set variable_value = '0' where variable_name = 'tx_read_only';", mysql.SystemDB, mysql.GlobalVariablesTable) mustExecute(s, sql) } // Update to version 4. 
func upgradeToVer4(s Session) { sql := CreateStatsMetaTable mustExecute(s, sql) } func upgradeToVer5(s Session) { mustExecute(s, CreateStatsColsTable) mustExecute(s, CreateStatsBucketsTable) } func upgradeToVer6(s Session) { s.Execute("ALTER TABLE mysql.user ADD COLUMN `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Show_db_priv`") // For reasons of compatibility, set the non-exists privilege column value to 'Y', as TiDB doesn't check them in older versions. s.Execute("UPDATE mysql.user SET Super_priv='Y'") } // Update boostrap version variable in mysql.TiDB table. func updateBootstrapVer(s Session) { // Update bootstrap version. sql := fmt.Sprintf(`INSERT INTO %s.%s VALUES ("%s", "%d", "TiDB bootstrap version.") ON DUPLICATE KEY UPDATE VARIABLE_VALUE="%d"`, mysql.SystemDB, mysql.TiDBTable, tidbServerVersionVar, currentBootstrapVersion, currentBootstrapVersion) mustExecute(s, sql) } // Gets bootstrap version from mysql.tidb table; func getBootstrapVersion(s Session) (int64, error) { d, err := getTiDBVar(s, tidbServerVersionVar) if err != nil { return 0, errors.Trace(err) } if d.IsNull() { return 0, nil } return strconv.ParseInt(d.GetString(), 10, 64) } // Execute DDL statements in bootstrap stage. func doDDLWorks(s Session) { // Create a test database. mustExecute(s, "CREATE DATABASE IF NOT EXISTS test") // Create system db. mustExecute(s, fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", mysql.SystemDB)) // Create user table. mustExecute(s, CreateUserTable) // Create privilege tables. mustExecute(s, CreateDBPrivTable) mustExecute(s, CreateTablePrivTable) mustExecute(s, CreateColumnPrivTable) // Create global system variable table. mustExecute(s, CreateGloablVariablesTable) // Create TiDB table. mustExecute(s, CreateTiDBTable) // Create help table. mustExecute(s, CreateHelpTopic) // Create stats_meta table. mustExecute(s, CreateStatsMetaTable) // Create stats_columns table. mustExecute(s, CreateStatsColsTable) // Create stats_buckets table. mustExecute(s, CreateStatsBucketsTable) } // Execute DML statements in bootstrap stage. // All the statements run in a single transaction. func doDMLWorks(s Session) { mustExecute(s, "BEGIN") // Insert a default user with empty password. mustExecute(s, `INSERT INTO mysql.user VALUES ("%", "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y")`) // Init global system variables table. values := make([]string, 0, len(variable.SysVars)) for k, v := range variable.SysVars { // Session only variable should not be inserted. if v.Scope != variable.ScopeSession { value := fmt.Sprintf(`("%s", "%s")`, strings.ToLower(k), v.Value) values = append(values, value) } } sql := fmt.Sprintf("INSERT INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, strings.Join(values, ", ")) mustExecute(s, sql) sql = fmt.Sprintf(`INSERT INTO %s.%s VALUES("%s", "%s", "Bootstrap flag. Do not delete.") ON DUPLICATE KEY UPDATE VARIABLE_VALUE="%s"`, mysql.SystemDB, mysql.TiDBTable, bootstrappedVar, bootstrappedVarTrue, bootstrappedVarTrue) mustExecute(s, sql) sql = fmt.Sprintf(`INSERT INTO %s.%s VALUES("%s", "%d", "Bootstrap version. Do not delete.")`, mysql.SystemDB, mysql.TiDBTable, tidbServerVersionVar, currentBootstrapVersion) mustExecute(s, sql) _, err := s.Execute("COMMIT") if err != nil { time.Sleep(1 * time.Second) // Check if TiDB is already bootstrapped. 
b, err1 := checkBootstrapped(s)
		if err1 != nil {
			log.Fatal(err1)
		}
		if b {
			return
		}
		log.Fatal(err)
	}
}

// mustExecute runs one bootstrap SQL statement and exits the process
// (after printing a stack trace) on any error, since bootstrap must not
// continue past a failed statement.
func mustExecute(s Session, sql string) {
	_, err := s.Execute(sql)
	if err != nil {
		debug.PrintStack()
		log.Fatal(err)
	}
}
bootstrap.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.00038333964766934514, 0.00017508503515273333, 0.00016022617637645453, 0.00016902945935726166, 0.000032980631658574566 ]
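The bootstrap.go record above hinges on one pattern: a monotonically increasing bootstrap version stored in mysql.tidb, with every historical migration kept in the binary so that an older store replays exactly the steps above its recorded version. Below is a minimal runnable sketch of that gating logic; `upgradeSteps` and the placeholder step bodies are stand-ins for illustration, while the real upgradeToVerN functions execute SQL through a Session and updateBootstrapVer persists the version.

```go
package main

import "fmt"

// currentBootstrapVersion mirrors the highest version constant in the
// record above (version6).
const currentBootstrapVersion = 6

// upgradeSteps[v] brings a store whose recorded version is below v up to v.
// The table is append-only: old stores replay every missed step in order.
var upgradeSteps = map[int64]func(){
	2: func() { fmt.Println("ver2: insert distsql concurrency variables") },
	3: func() { fmt.Println("ver3: fix tx_read_only value") },
	4: func() { fmt.Println("ver4: create mysql.stats_meta") },
	5: func() { fmt.Println("ver5: create stats_histograms and stats_buckets") },
	6: func() { fmt.Println("ver6: add Super_priv to mysql.user") },
}

func upgrade(ver int64) {
	if ver >= currentBootstrapVersion {
		return // already bootstrapped by an equal or newer server
	}
	for v := int64(2); v <= currentBootstrapVersion; v++ {
		if ver < v {
			upgradeSteps[v]()
		}
	}
	// The real code then writes tidb_server_version and commits; on a
	// failed commit it re-reads the version in case a concurrent server
	// finished the same upgrade first.
}

func main() {
	upgrade(3) // replays only ver4..ver6
}
```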
{ "id": 2, "code_window": [ "\t\t\tnewTopN.ByItems = append(newTopN.ByItems, &ByItems{newExpr, by.Desc})\n", "\t\t}\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tp.children[i] = child.(LogicalPlan).pushDownTopN(newTopN)\n", "\t\tp.children[i].SetParents(p)\n", "\t}\n", "\treturn topN.setChild(p)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 92 }
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package tikv import ( "fmt" "math/rand" "net/url" "strings" "sync" "time" "github.com/juju/errors" "github.com/ngaut/log" pb "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/pd/pd-client" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/mock-tikv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/store/tikv/oracle/oracles" goctx "golang.org/x/net/context" ) type storeCache struct { sync.Mutex cache map[string]*tikvStore } var mc storeCache // Driver implements engine Driver. type Driver struct { } // Open opens or creates an TiKV storage with given path. // Path example: tikv://etcd-node1:port,etcd-node2:port?cluster=1&disableGC=false func (d Driver) Open(path string) (kv.Storage, error) { mc.Lock() defer mc.Unlock() etcdAddrs, disableGC, err := parsePath(path) if err != nil { return nil, errors.Trace(err) } pdCli, err := pd.NewClient(etcdAddrs) if err != nil { if strings.Contains(err.Error(), "i/o timeout") { return nil, errors.Annotate(err, txnRetryableMark) } return nil, errors.Trace(err) } // FIXME: uuid will be a very long and ugly string, simplify it. uuid := fmt.Sprintf("tikv-%v", pdCli.GetClusterID(goctx.TODO())) if store, ok := mc.cache[uuid]; ok { return store, nil } s, err := newTikvStore(uuid, &codecPDClient{pdCli}, newRPCClient(), !disableGC) if err != nil { return nil, errors.Trace(err) } mc.cache[uuid] = s return s, nil } // update oracle's lastTS every 2000ms. var oracleUpdateInterval = 2000 type tikvStore struct { clusterID uint64 uuid string oracle oracle.Oracle client Client regionCache *RegionCache lockResolver *LockResolver gcWorker *GCWorker } func newTikvStore(uuid string, pdClient pd.Client, client Client, enableGC bool) (*tikvStore, error) { oracle, err := oracles.NewPdOracle(pdClient, time.Duration(oracleUpdateInterval)*time.Millisecond) if err != nil { return nil, errors.Trace(err) } store := &tikvStore{ clusterID: pdClient.GetClusterID(goctx.TODO()), uuid: uuid, oracle: oracle, client: client, regionCache: NewRegionCache(pdClient), } store.lockResolver = newLockResolver(store) if enableGC { store.gcWorker, err = NewGCWorker(store) if err != nil { return nil, errors.Trace(err) } } return store, nil } // NewMockTikvStore creates a mocked tikv store. func NewMockTikvStore() (kv.Storage, error) { cluster := mocktikv.NewCluster() mocktikv.BootstrapWithSingleStore(cluster) mvccStore := mocktikv.NewMvccStore() client := mocktikv.NewRPCClient(cluster, mvccStore) uuid := fmt.Sprintf("mock-tikv-store-:%v", time.Now().Unix()) pdCli := &codecPDClient{mocktikv.NewPDClient(cluster)} return newTikvStore(uuid, pdCli, client, false) } // NewMockTikvStoreWithCluster creates a mocked tikv store with cluster. 
func NewMockTikvStoreWithCluster(cluster *mocktikv.Cluster) (kv.Storage, error) { mocktikv.BootstrapWithSingleStore(cluster) mvccStore := mocktikv.NewMvccStore() client := mocktikv.NewRPCClient(cluster, mvccStore) uuid := fmt.Sprintf("mock-tikv-store-:%v", time.Now().Unix()) pdCli := &codecPDClient{mocktikv.NewPDClient(cluster)} return newTikvStore(uuid, pdCli, client, false) } // GetMockTiKVClient gets the *mocktikv.RPCClient from a mocktikv store. // Used for test. func GetMockTiKVClient(store kv.Storage) *mocktikv.RPCClient { s := store.(*tikvStore) return s.client.(*mocktikv.RPCClient) } func (s *tikvStore) Begin() (kv.Transaction, error) { txn, err := newTiKVTxn(s) if err != nil { return nil, errors.Trace(err) } txnCounter.Inc() return txn, nil } // BeginWithStartTS begins a transaction with startTS. func (s *tikvStore) BeginWithStartTS(startTS uint64) (kv.Transaction, error) { txn, err := newTikvTxnWithStartTS(s, startTS) if err != nil { return nil, errors.Trace(err) } txnCounter.Inc() return txn, nil } func (s *tikvStore) GetSnapshot(ver kv.Version) (kv.Snapshot, error) { snapshot := newTiKVSnapshot(s, ver) snapshotCounter.Inc() return snapshot, nil } func (s *tikvStore) Close() error { mc.Lock() defer mc.Unlock() delete(mc.cache, s.uuid) s.oracle.Close() if s.gcWorker != nil { s.gcWorker.Close() } // Make sure all connections are put back into the pools. if err := s.client.Close(); err != nil { return errors.Trace(err) } return nil } func (s *tikvStore) UUID() string { return s.uuid } func (s *tikvStore) CurrentVersion() (kv.Version, error) { bo := NewBackoffer(tsoMaxBackoff, goctx.Background()) startTS, err := s.getTimestampWithRetry(bo) if err != nil { return kv.NewVersion(0), errors.Trace(err) } return kv.NewVersion(startTS), nil } func (s *tikvStore) getTimestampWithRetry(bo *Backoffer) (uint64, error) { for { startTS, err := s.oracle.GetTimestamp(bo.ctx) if err == nil { return startTS, nil } err = bo.Backoff(boPDRPC, errors.Errorf("get timestamp failed: %v", err)) if err != nil { return 0, errors.Trace(err) } } } func (s *tikvStore) GetClient() kv.Client { txnCmdCounter.WithLabelValues("get_client").Inc() return &CopClient{ store: s, } } func (s *tikvStore) SendKVReq(bo *Backoffer, req *pb.Request, regionID RegionVerID, timeout time.Duration) (*pb.Response, error) { sender := NewRegionRequestSender(bo, s.regionCache, s.client) return sender.SendKVReq(req, regionID, timeout) } // ParseEtcdAddr parses path to etcd address list func ParseEtcdAddr(path string) (etcdAddrs []string, err error) { etcdAddrs, _, err = parsePath(path) return } func parsePath(path string) (etcdAddrs []string, disableGC bool, err error) { var u *url.URL u, err = url.Parse(path) if err != nil { err = errors.Trace(err) return } if strings.ToLower(u.Scheme) != "tikv" { err = errors.Errorf("Uri scheme expected[tikv] but found [%s]", u.Scheme) log.Error(err) return } switch strings.ToLower(u.Query().Get("disableGC")) { case "true": disableGC = true case "false", "": default: err = errors.New("disableGC flag should be true/false") return } etcdAddrs = strings.Split(u.Host, ",") return } func init() { mc.cache = make(map[string]*tikvStore) rand.Seed(time.Now().UnixNano()) }
store/tikv/kv.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.00017736606241669506, 0.00016928420518524945, 0.00016230357869062573, 0.00016952173609752208, 0.000004099420948477928 ]
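The store/tikv/kv.go record above opens a cluster from a URI of the form `tikv://etcd-node1:port,etcd-node2:port?cluster=1&disableGC=false`. The sketch below is a self-contained rendering of its parsePath logic, renamed `parseTiKVPath` to make clear it is an illustration rather than the package's exported API: the scheme must be `tikv`, the host part is a comma-separated etcd address list, and `disableGC` accepts only `true`, `false`, or empty.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseTiKVPath splits a tikv:// URI into its etcd address list and
// the disableGC flag, mirroring parsePath in the record above.
func parseTiKVPath(path string) (etcdAddrs []string, disableGC bool, err error) {
	u, err := url.Parse(path)
	if err != nil {
		return nil, false, err
	}
	if strings.ToLower(u.Scheme) != "tikv" {
		return nil, false, fmt.Errorf("URI scheme expected [tikv] but found [%s]", u.Scheme)
	}
	switch strings.ToLower(u.Query().Get("disableGC")) {
	case "true":
		disableGC = true
	case "false", "":
		// GC stays enabled.
	default:
		return nil, false, fmt.Errorf("disableGC flag should be true/false")
	}
	return strings.Split(u.Host, ","), disableGC, nil
}

func main() {
	addrs, noGC, err := parseTiKVPath("tikv://etcd-node1:2379,etcd-node2:2379?disableGC=true")
	fmt.Println(addrs, noGC, err) // [etcd-node1:2379 etcd-node2:2379] true <nil>
}
```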
{ "id": 2, "code_window": [ "\t\t\tnewTopN.ByItems = append(newTopN.ByItems, &ByItems{newExpr, by.Desc})\n", "\t\t}\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tp.children[i] = child.(LogicalPlan).pushDownTopN(newTopN)\n", "\t\tp.children[i].SetParents(p)\n", "\t}\n", "\treturn topN.setChild(p)\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 92 }
// Copyright 2013 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "fmt" "regexp" "sort" "strings" ) var ( separator = []byte{0} MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) ) // A Metric is similar to a LabelSet, but the key difference is that a Metric is // a singleton and refers to one and only one stream of samples. type Metric LabelSet // Equal compares the metrics. func (m Metric) Equal(o Metric) bool { return LabelSet(m).Equal(LabelSet(o)) } // Before compares the metrics' underlying label sets. func (m Metric) Before(o Metric) bool { return LabelSet(m).Before(LabelSet(o)) } // Clone returns a copy of the Metric. func (m Metric) Clone() Metric { clone := Metric{} for k, v := range m { clone[k] = v } return clone } func (m Metric) String() string { metricName, hasName := m[MetricNameLabel] numLabels := len(m) - 1 if !hasName { numLabels = len(m) } labelStrings := make([]string, 0, numLabels) for label, value := range m { if label != MetricNameLabel { labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) } } switch numLabels { case 0: if hasName { return string(metricName) } return "{}" default: sort.Strings(labelStrings) return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) } } // Fingerprint returns a Metric's Fingerprint. func (m Metric) Fingerprint() Fingerprint { return LabelSet(m).Fingerprint() } // FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing // algorithm, which is, however, more susceptible to hash collisions. func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } // IsValidMetricName returns true iff name matches the pattern of MetricNameRE. func IsValidMetricName(n LabelValue) bool { if len(n) == 0 { return false } for i, b := range n { if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { return false } } return true }
_vendor/src/github.com/prometheus/common/model/metric.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0017431644955649972, 0.0003269925364293158, 0.00015825880109332502, 0.00017197980196215212, 0.0004720923025161028 ]
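The hunk records in this dump all make the same change to plan/topn_push_down.go: when a TopN is pushed below a Union or an outer Join, the limit handed to the child grows from `Count` to `Count + Offset`. The child cannot apply the parent's offset itself, because rows from the sibling branch may sort in between; it can only guarantee that the overall top `Count+Offset` rows survive, after which the parent re-sorts, skips `Offset`, and keeps `Count`. A small sketch of that arithmetic, with a hypothetical `topN` helper standing in for the executor:

```go
package main

import (
	"fmt"
	"sort"
)

// topN sorts rows ascending, skips offset rows, and keeps count rows.
func topN(rows []int, count, offset int) []int {
	out := append([]int(nil), rows...)
	sort.Ints(out)
	if offset >= len(out) {
		return nil
	}
	if end := offset + count; end < len(out) {
		out = out[:end]
	}
	return out[offset:]
}

func main() {
	const count, offset = 2, 3 // "ORDER BY x LIMIT 3, 2"
	left := []int{9, 1, 5, 7, 4, 11}
	right := []int{2, 8, 3, 6, 0, 10}

	// Each branch keeps count+offset rows with a zero offset...
	partial := append(topN(left, count+offset, 0), topN(right, count+offset, 0)...)

	// ...and the parent re-applies the real offset over the merged rows.
	fmt.Println(topN(partial, count, offset)) // [3 4], same as over all 12 rows

	// Pushing only count=2 per branch would keep {1,4} and {0,2}; the
	// merged offset-3 result would then wrongly be [4].
}
```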
{ "id": 3, "code_window": [ "\t\t}\n", "\t}\n", "\tnewTopN := Sort{}.init(topN.allocator, topN.ctx)\n", "\tif canPush {\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tnewTopN.ByItems = make([]*ByItems, len(topN.ByItems))\n", "\t\tcopy(newTopN.ByItems, topN.ByItems)\n", "\t}\n", "\treturn p.children[idx].(LogicalPlan).pushDownTopN(newTopN)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 122 }
// Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package plan import ( "sort" "testing" . "github.com/pingcap/check" "github.com/pingcap/tidb/ast" "github.com/pingcap/tidb/context" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/model" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/terror" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tidb/util/types" "github.com/pingcap/tipb/go-tipb" goctx "golang.org/x/net/context" ) var _ = Suite(&testPlanSuite{}) func TestT(t *testing.T) { CustomVerboseFlag = true TestingT(t) } type testPlanSuite struct { *parser.Parser } func (s *testPlanSuite) SetUpSuite(c *C) { s.Parser = parser.New() } func newLongType() types.FieldType { return *(types.NewFieldType(mysql.TypeLong)) } func newStringType() types.FieldType { ft := types.NewFieldType(mysql.TypeVarchar) ft.Charset, ft.Collate = types.DefaultCharsetForType(mysql.TypeVarchar) return *ft } func mockResolve(node ast.Node) (infoschema.InfoSchema, error) { indices := []*model.IndexInfo{ { Name: model.NewCIStr("c_d_e"), Columns: []*model.IndexColumn{ { Name: model.NewCIStr("c"), Length: types.UnspecifiedLength, Offset: 1, }, { Name: model.NewCIStr("d"), Length: types.UnspecifiedLength, Offset: 2, }, { Name: model.NewCIStr("e"), Length: types.UnspecifiedLength, Offset: 3, }, }, State: model.StatePublic, Unique: true, }, { Name: model.NewCIStr("e"), Columns: []*model.IndexColumn{ { Name: model.NewCIStr("e"), Length: types.UnspecifiedLength, }, }, State: model.StateWriteOnly, Unique: true, }, { Name: model.NewCIStr("f"), Columns: []*model.IndexColumn{ { Name: model.NewCIStr("f"), Length: types.UnspecifiedLength, Offset: 1, }, }, State: model.StatePublic, Unique: true, }, { Name: model.NewCIStr("g"), Columns: []*model.IndexColumn{ { Name: model.NewCIStr("g"), Length: types.UnspecifiedLength, Offset: 1, }, }, State: model.StatePublic, Unique: true, }, { Name: model.NewCIStr("f_g"), Columns: []*model.IndexColumn{ { Name: model.NewCIStr("f"), Length: types.UnspecifiedLength, Offset: 1, }, { Name: model.NewCIStr("g"), Length: types.UnspecifiedLength, Offset: 2, }, }, State: model.StatePublic, Unique: true, }, { Name: model.NewCIStr("c_d_e_str"), Columns: []*model.IndexColumn{ { Name: model.NewCIStr("c_str"), Length: types.UnspecifiedLength, Offset: 5, }, { Name: model.NewCIStr("d_str"), Length: types.UnspecifiedLength, Offset: 6, }, { Name: model.NewCIStr("e_str"), Length: types.UnspecifiedLength, Offset: 7, }, }, State: model.StatePublic, }, } pkColumn := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("a"), FieldType: newLongType(), ID: 1, } col0 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("b"), FieldType: newLongType(), ID: 2, } col1 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("c"), FieldType: newLongType(), ID: 3, } col2 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("d"), FieldType: newLongType(), ID: 4, } col3 := &model.ColumnInfo{ 
State: model.StatePublic, Name: model.NewCIStr("e"), FieldType: newLongType(), ID: 5, } colStr1 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("c_str"), FieldType: newStringType(), ID: 6, } colStr2 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("d_str"), FieldType: newStringType(), ID: 7, } colStr3 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("e_str"), FieldType: newStringType(), ID: 8, } col4 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("f"), FieldType: newLongType(), ID: 9, } col5 := &model.ColumnInfo{ State: model.StatePublic, Name: model.NewCIStr("g"), FieldType: newLongType(), ID: 10, } pkColumn.Flag = mysql.PriKeyFlag // Column 'b', 'c', 'd', 'f', 'g' is not null. col0.Flag = mysql.NotNullFlag col1.Flag = mysql.NotNullFlag col2.Flag = mysql.NotNullFlag col4.Flag = mysql.NotNullFlag col5.Flag = mysql.NotNullFlag table := &model.TableInfo{ Columns: []*model.ColumnInfo{pkColumn, col0, col1, col2, col3, colStr1, colStr2, colStr3, col4, col5}, Indices: indices, Name: model.NewCIStr("t"), PKIsHandle: true, } is := infoschema.MockInfoSchema([]*model.TableInfo{table}) ctx := mockContext() err := MockResolveName(node, is, "test", ctx) if err != nil { return nil, err } return is, InferType(ctx.GetSessionVars().StmtCtx, node) } func supportExpr(exprType tipb.ExprType) bool { switch exprType { // data type case tipb.ExprType_Null, tipb.ExprType_Int64, tipb.ExprType_Uint64, tipb.ExprType_Float32, tipb.ExprType_Float64, tipb.ExprType_String, tipb.ExprType_Bytes, tipb.ExprType_MysqlDuration, tipb.ExprType_MysqlDecimal, tipb.ExprType_MysqlTime, tipb.ExprType_ColumnRef: return true // logic operators case tipb.ExprType_And, tipb.ExprType_Or, tipb.ExprType_Not, tipb.ExprType_Xor: return true // compare operators case tipb.ExprType_LT, tipb.ExprType_LE, tipb.ExprType_EQ, tipb.ExprType_NE, tipb.ExprType_GE, tipb.ExprType_GT, tipb.ExprType_NullEQ, tipb.ExprType_In, tipb.ExprType_ValueList, tipb.ExprType_Like: return true // arithmetic operators case tipb.ExprType_Plus, tipb.ExprType_Div, tipb.ExprType_Minus, tipb.ExprType_Mul, tipb.ExprType_IntDiv, tipb.ExprType_Mod: return true // aggregate functions case tipb.ExprType_Count, tipb.ExprType_First, tipb.ExprType_Sum, tipb.ExprType_Avg, tipb.ExprType_Max, tipb.ExprType_Min: return true // bitwise operators case tipb.ExprType_BitAnd, tipb.ExprType_BitOr, tipb.ExprType_BitXor, tipb.ExprType_BitNeg: return true // control functions case tipb.ExprType_Case, tipb.ExprType_If, tipb.ExprType_IfNull, tipb.ExprType_NullIf: return true // other functions case tipb.ExprType_Coalesce, tipb.ExprType_IsNull: return true case kv.ReqSubTypeDesc: return true default: return false } } type mockClient struct { } func (c *mockClient) Send(ctx goctx.Context, _ *kv.Request) kv.Response { return nil } func (c *mockClient) SupportRequestType(reqType, subType int64) bool { switch reqType { case kv.ReqTypeSelect, kv.ReqTypeIndex: switch subType { case kv.ReqSubTypeGroupBy, kv.ReqSubTypeBasic, kv.ReqSubTypeTopN: return true default: return supportExpr(tipb.ExprType(subType)) } } return false } type mockStore struct { client *mockClient } func (m *mockStore) GetClient() kv.Client { return m.client } func (m *mockStore) Begin() (kv.Transaction, error) { return nil, nil } // BeginWithStartTS begins with startTS. 
func (m *mockStore) BeginWithStartTS(startTS uint64) (kv.Transaction, error) { return m.Begin() } func (m *mockStore) GetSnapshot(ver kv.Version) (kv.Snapshot, error) { return nil, nil } func (m *mockStore) Close() error { return nil } func (m *mockStore) UUID() string { return "mock" } func (m *mockStore) CurrentVersion() (kv.Version, error) { return kv.Version{}, nil } func mockContext() context.Context { ctx := mock.NewContext() ctx.Store = &mockStore{ client: &mockClient{}, } return ctx } func (s *testPlanSuite) TestPredicatePushDown(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string first string best string }{ { sql: "select count(*) from t a, t b where a.a = b.a", best: "Join{DataScan(a)->DataScan(b)}(a.a,b.a)->Aggr(count(1))->Projection", }, { sql: "select a from (select a from t where d = 0) k where k.a = 5", best: "DataScan(t)->Selection->Projection->Projection", }, { sql: "select a from (select 1+2 as a from t where d = 0) k where k.a = 5", best: "DataScan(t)->Selection->Projection->Projection", }, { sql: "select a from (select d as a from t where d = 0) k where k.a = 5", best: "DataScan(t)->Selection->Projection->Projection", }, { sql: "select * from t ta, t tb where (ta.d, ta.a) = (tb.b, tb.c)", best: "Join{DataScan(ta)->DataScan(tb)}(ta.d,tb.b)(ta.a,tb.c)->Projection", }, { sql: "select * from t t1, t t2 where t1.a = t2.b and t2.b > 0 and t1.a = t1.c and t1.d like 'abc' and t2.d = t1.d", best: "Join{DataScan(t2)->Selection->DataScan(t1)->Selection}(t2.b,t1.a)(t2.d,t1.d)->Projection", }, { sql: "select * from t ta join t tb on ta.d = tb.d and ta.d > 1 where tb.a = 0", best: "Join{DataScan(ta)->Selection->DataScan(tb)->Selection}(ta.d,tb.d)->Projection", }, { sql: "select * from t ta join t tb on ta.d = tb.d where ta.d > 1 and tb.a = 0", best: "Join{DataScan(ta)->Selection->DataScan(tb)->Selection}(ta.d,tb.d)->Projection", }, { sql: "select * from t ta left outer join t tb on ta.d = tb.d and ta.d > 1 where tb.a = 0", best: "Join{DataScan(ta)->Selection->DataScan(tb)->Selection}(ta.d,tb.d)->Projection", }, { sql: "select * from t ta right outer join t tb on ta.d = tb.d and ta.a > 1 where tb.a = 0", best: "Join{DataScan(ta)->Selection->DataScan(tb)->Selection}(ta.d,tb.d)->Projection", }, { sql: "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where ta.d = 0", best: "Join{DataScan(ta)->Selection->DataScan(tb)}(ta.d,tb.d)->Projection", }, { sql: "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where tb.d = 0", best: "Join{DataScan(ta)->Selection->DataScan(tb)->Selection}->Projection", }, { sql: "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where tb.c is not null and tb.c = 0 and ifnull(tb.d, 1)", best: "Join{DataScan(ta)->Selection->DataScan(tb)->Selection}(ta.d,tb.d)->Projection", }, { sql: "select * from t ta left outer join t tb on ta.a = tb.a left outer join t tc on tb.b = tc.b where tc.c > 0", best: "Join{Join{DataScan(ta)->DataScan(tb)}(ta.a,tb.a)->DataScan(tc)->Selection}(tb.b,tc.b)->Projection", }, { sql: "select * from t ta left outer join t tb on ta.a = tb.a left outer join t tc on tc.b = ta.b where tb.c > 0", best: "Join{Join{DataScan(ta)->DataScan(tb)->Selection}(ta.a,tb.a)->DataScan(tc)}(ta.b,tc.b)->Projection", }, { sql: "select * from t as ta left outer join (t as tb left join t as tc on tc.b = tb.b) on tb.a = ta.a where tc.c > 0", best: "Join{DataScan(ta)->Join{DataScan(tb)->DataScan(tc)->Selection}(tb.b,tc.b)}(ta.a,tb.a)->Projection", }, { sql: "select * from ( t as ta left outer join t 
as tb on ta.a = tb.a) join ( t as tc left join t as td on tc.b = td.b) on ta.c = td.c where tb.c = 2 and td.a = 1", best: "Join{Join{DataScan(ta)->DataScan(tb)->Selection}(ta.a,tb.a)->Join{DataScan(tc)->DataScan(td)->Selection}(tc.b,td.b)}(ta.c,td.c)->Projection", }, { sql: "select * from t ta left outer join (t tb left outer join t tc on tc.b = tb.b) on tb.a = ta.a and tc.c = ta.c where tc.d > 0 or ta.d > 0", best: "Join{DataScan(ta)->Join{DataScan(tb)->DataScan(tc)}(tb.b,tc.b)}(ta.a,tb.a)(ta.c,tc.c)->Selection->Projection", }, { sql: "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where ifnull(tb.d, null) or tb.d is null", best: "Join{DataScan(ta)->DataScan(tb)}(ta.d,tb.d)->Selection->Projection", }, { sql: "select a, d from (select * from t union all select * from t union all select * from t) z where a < 10", best: "UnionAll{DataScan(t)->Selection->Projection->DataScan(t)->Selection->Projection->DataScan(t)->Selection->Projection}->Projection", }, { sql: "select (select count(*) from t where t.a = k.a) from t k", best: "Apply{DataScan(k)->DataScan(t)->Selection->Aggr(count(1))->Projection->MaxOneRow}->Projection", }, { sql: "select a from t where exists(select 1 from t as x where x.a < t.a)", best: "Join{DataScan(t)->DataScan(x)}->Projection", }, { sql: "select a from t where exists(select 1 from t as x where x.a = t.a and t.a < 1 and x.a < 1)", best: "Join{DataScan(t)->Selection->DataScan(x)->Selection}(test.t.a,x.a)->Projection", }, { sql: "select a from t where exists(select 1 from t as x where x.a = t.a and x.a < 1) and a < 1", best: "Join{DataScan(t)->Selection->DataScan(x)->Selection}(test.t.a,x.a)->Projection", }, { sql: "select a from t where exists(select 1 from t as x where x.a = t.a) and exists(select 1 from t as x where x.a = t.a)", best: "Join{Join{DataScan(t)->DataScan(x)}(test.t.a,x.a)->DataScan(x)}(test.t.a,x.a)->Projection", }, { sql: "select * from (select a, b, sum(c) as s from t group by a, b) k where k.a > k.b * 2 + 1", best: "DataScan(t)->Selection->Aggr(sum(test.t.c),firstrow(test.t.a),firstrow(test.t.b))->Projection->Projection", }, { sql: "select * from (select a, b, sum(c) as s from t group by a, b) k where k.a > 1 and k.b > 2", best: "DataScan(t)->Selection->Aggr(sum(test.t.c),firstrow(test.t.a),firstrow(test.t.b))->Projection->Projection", }, { sql: "select * from (select k.a, sum(k.s) as ss from (select a, sum(b) as s from t group by a) k group by k.a) l where l.a > 2", best: "DataScan(t)->Selection->Aggr(sum(test.t.b),firstrow(test.t.a))->Projection->Aggr(sum(k.s),firstrow(k.a))->Projection->Projection", }, { sql: "select * from (select a, sum(b) as s from t group by a) k where a > s", best: "DataScan(t)->Aggr(sum(test.t.b),firstrow(test.t.a))->Selection->Projection->Projection", }, { sql: "select * from (select a, sum(b) as s from t group by a + 1) k where a > 1", best: "DataScan(t)->Aggr(sum(test.t.b),firstrow(test.t.a))->Selection->Projection->Projection", }, { sql: "select * from (select a, sum(b) as s from t group by a having 1 = 0) k where a > 1", best: "DataScan(t)->Selection->Aggr(sum(test.t.b),firstrow(test.t.a))->Selection->Projection->Projection", }, { sql: "select a, count(a) cnt from t group by a having cnt < 1", best: "DataScan(t)->Aggr(count(test.t.a),firstrow(test.t.a))->Selection->Projection", }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil, comment) builder := &planBuilder{ 
allocator: new(idAllocator), ctx: mockContext(), is: is, colMapper: make(map[*ast.ColumnNameExpr]int), } p := builder.build(stmt) c.Assert(builder.err, IsNil, comment) c.Assert(builder.optFlag&flagPredicatePushDown, Greater, uint64(0)) p, err = logicalOptimize(flagPredicatePushDown|flagDecorrelate|flagPrunColumns, p.(LogicalPlan), builder.ctx, builder.allocator) c.Assert(err, IsNil) c.Assert(ToString(p), Equals, ca.best, Commentf("for %s", ca.sql)) } } func (s *testPlanSuite) TestPlanBuilder(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string plan string }{ { // This will be resolved as in sub query. sql: "select * from t where 10 in (select b from t s where s.a = t.a)", plan: "Join{DataScan(t)->DataScan(s)}(test.t.a,s.a)->Projection", }, { sql: "select count(c) ,(select b from t s where s.a = t.a) from t", plan: "Join{DataScan(t)->Aggr(count(test.t.c),firstrow(test.t.a))->DataScan(s)}(test.t.a,s.a)->Projection->Projection", }, { sql: "select count(c) ,(select count(s.b) from t s where s.a = t.a) from t", plan: "Join{DataScan(t)->Aggr(count(test.t.c),firstrow(test.t.a))->DataScan(s)}(test.t.a,s.a)->Aggr(firstrow(aggregation_2_col_0),firstrow(test.t.a),count(s.b))->Projection->Projection", }, { // Semi-join with agg cannot decorrelate. sql: "select t.c in (select count(s.b) from t s where s.a = t.a) from t", plan: "Apply{DataScan(t)->DataScan(s)->Selection->Aggr(count(s.b))}->Projection", }, { // Theta-join with agg cannot decorrelate. sql: "select (select count(s.b) k from t s where s.a = t.a having k != 0) from t", plan: "Apply{DataScan(t)->DataScan(s)->Selection->Aggr(count(s.b))}->Projection->Projection", }, { // Relation without keys cannot decorrelate. sql: "select (select count(s.b) k from t s where s.a = t1.a) from t t1, t t2", plan: "Apply{Join{DataScan(t1)->DataScan(t2)}->DataScan(s)->Selection->Aggr(count(s.b))}->Projection->Projection", }, { // Aggregate function like count(1) cannot decorrelate. sql: "select (select count(1) k from t s where s.a = t.a having k != 0) from t", plan: "Apply{DataScan(t)->DataScan(s)->Selection->Aggr(count(1))}->Projection->Projection", }, { sql: "select a from t where a in (select a from t s group by t.b)", plan: "Join{DataScan(t)->DataScan(s)->Aggr(firstrow(s.a))->Projection}(test.t.a,a)->Projection", }, { // This will be resolved as in sub query. sql: "select * from t where 10 in (((select b from t s where s.a = t.a)))", plan: "Join{DataScan(t)->DataScan(s)}(test.t.a,s.a)->Projection", }, { // This will be resolved as in function. sql: "select * from t where 10 in (((select b from t s where s.a = t.a)), 10)", plan: "Join{DataScan(t)->DataScan(s)}(test.t.a,s.a)->Projection->Selection->Projection", }, { sql: "select * from t where exists (select s.a from t s having sum(s.a) = t.a )", plan: "Join{DataScan(t)->DataScan(s)->Aggr(sum(s.a))->Projection}(test.t.a,sel_agg_1)->Projection", }, { // Test Nested sub query. 
sql: "select * from t where exists (select s.a from t s where s.c in (select c from t as k where k.d = s.d) having sum(s.a) = t.a )", plan: "Join{DataScan(t)->Join{DataScan(s)->DataScan(k)}(s.d,k.d)(s.c,k.c)->Aggr(sum(s.a))->Projection}(test.t.a,sel_agg_1)->Projection", }, { sql: "select * from t for update", plan: "DataScan(t)->Lock->Projection", }, { sql: "update t set t.a = t.a * 1.5 where t.a >= 1000 order by t.a desc limit 10", plan: "DataScan(t)->Selection->Sort->Limit->*plan.Update", }, { sql: "delete from t where t.a >= 1000 order by t.a desc limit 10", plan: "DataScan(t)->Selection->Sort->Limit->*plan.Delete", }, { sql: "explain select * from t union all select * from t limit 1, 1", plan: "UnionAll{Table(t)->Table(t)->Limit}->*plan.Explain", }, { sql: "insert into t select * from t", plan: "DataScan(t)->Projection->*plan.Insert", }, { sql: "show columns from t where `Key` = 'pri' like 't*'", plan: "*plan.Show->Selection", }, { sql: "do sleep(5)", plan: "Dual->Projection", }, { sql: "select substr(\"abc\", 1)", plan: "Dual->Projection", }, { sql: "analyze table t, t", plan: "*plan.Analyze->*plan.Analyze->*plan.Analyze", }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ allocator: new(idAllocator), ctx: mockContext(), colMapper: make(map[*ast.ColumnNameExpr]int), is: is, } p := builder.build(stmt) if lp, ok := p.(LogicalPlan); ok { p, err = logicalOptimize(flagBuildKeyInfo|flagDecorrelate|flagPrunColumns, lp.(LogicalPlan), builder.ctx, builder.allocator) } c.Assert(builder.err, IsNil) c.Assert(ToString(p), Equals, ca.plan, Commentf("for %s", ca.sql)) } } func (s *testPlanSuite) TestJoinReOrder(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string best string }{ { sql: "select * from t t1, t t2, t t3, t t4, t t5, t t6 where t1.a = t2.b and t2.a = t3.b and t3.c = t4.a and t4.d = t2.c and t5.d = t6.d", best: "Join{Join{Join{Join{DataScan(t1)->DataScan(t2)}(t1.a,t2.b)->DataScan(t3)}(t2.a,t3.b)->DataScan(t4)}(t3.c,t4.a)(t2.c,t4.d)->Join{DataScan(t5)->DataScan(t6)}(t5.d,t6.d)}->Projection", }, { sql: "select * from t t1, t t2, t t3, t t4, t t5, t t6, t t7, t t8 where t1.a = t8.a", best: "Join{Join{Join{Join{DataScan(t1)->DataScan(t8)}(t1.a,t8.a)->DataScan(t2)}->Join{DataScan(t3)->DataScan(t4)}}->Join{Join{DataScan(t5)->DataScan(t6)}->DataScan(t7)}}->Projection", }, { sql: "select * from t t1, t t2, t t3, t t4, t t5 where t1.a = t5.a and t5.a = t4.a and t4.a = t3.a and t3.a = t2.a and t2.a = t1.a and t1.a = t3.a and t2.a = t4.a and t5.b < 8", best: "Join{Join{Join{Join{DataScan(t5)->Selection->DataScan(t1)}(t5.a,t1.a)->DataScan(t2)}(t1.a,t2.a)->DataScan(t3)}(t2.a,t3.a)(t1.a,t3.a)->DataScan(t4)}(t5.a,t4.a)(t3.a,t4.a)(t2.a,t4.a)->Projection", }, { sql: "select * from t t1, t t2, t t3, t t4, t t5 where t1.a = t5.a and t5.a = t4.a and t4.a = t3.a and t3.a = t2.a and t2.a = t1.a and t1.a = t3.a and t2.a = t4.a and t3.b = 1 and t4.a = 1", best: "Join{Join{Join{Join{DataScan(t3)->Selection->DataScan(t4)->Selection}->DataScan(t5)->Selection}->DataScan(t1)->Selection}->DataScan(t2)->Selection}->Projection", }, { sql: "select * from t o where o.b in (select t3.c from t t1, t t2, t t3 where t1.a = t3.a and t2.a = t3.a and t2.a = o.a)", best: "Apply{DataScan(o)->Join{Join{DataScan(t2)->Selection->DataScan(t3)}(t2.a,t3.a)->DataScan(t1)}(t3.a,t1.a)->Projection}->Projection", }, { sql: "select * from t o where o.b in 
(select t3.c from t t1, t t2, t t3 where t1.a = t3.a and t2.a = t3.a and t2.a = o.a and t1.a = 1)", best: "Apply{DataScan(o)->Join{Join{DataScan(t1)->Selection->DataScan(t3)->Selection}->DataScan(t2)->Selection}->Projection}->Projection", }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ allocator: new(idAllocator), ctx: mockContext(), colMapper: make(map[*ast.ColumnNameExpr]int), is: is, } p := builder.build(stmt) c.Assert(builder.err, IsNil) lp := p.(LogicalPlan) p, err = logicalOptimize(flagPredicatePushDown, lp.(LogicalPlan), builder.ctx, builder.allocator) c.Assert(err, IsNil) c.Assert(ToString(lp), Equals, ca.best, Commentf("for %s", ca.sql)) } } func (s *testPlanSuite) TestAggPushDown(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string best string }{ { sql: "select sum(t.a), sum(t.a+1), sum(t.a), count(t.a), sum(t.a) + count(t.a) from t", best: "DataScan(t)->Aggr(sum(test.t.a),sum(plus(test.t.a, 1)),count(test.t.a))->Projection", }, { sql: "select sum(t.a + t.b), sum(t.a + t.c), sum(t.a + t.b), count(t.a) from t having sum(t.a + t.b) > 0 order by sum(t.a + t.c)", best: "DataScan(t)->Aggr(sum(plus(test.t.a, test.t.b)),sum(plus(test.t.a, test.t.c)),count(test.t.a))->Selection->Projection->Sort->Projection", }, { sql: "select sum(a.a) from t a, t b where a.c = b.c", best: "Join{DataScan(a)->Aggr(sum(a.a),firstrow(a.c))->DataScan(b)}(a.c,b.c)->Aggr(sum(join_agg_0))->Projection", }, { sql: "select sum(b.a) from t a, t b where a.c = b.c", best: "Join{DataScan(a)->DataScan(b)->Aggr(sum(b.a),firstrow(b.c))}(a.c,b.c)->Aggr(sum(join_agg_0))->Projection", }, { sql: "select sum(b.a), a.a from t a, t b where a.c = b.c", best: "Join{DataScan(a)->DataScan(b)->Aggr(sum(b.a),firstrow(b.c))}(a.c,b.c)->Aggr(sum(join_agg_0),firstrow(a.a))->Projection", }, { sql: "select sum(a.a), b.a from t a, t b where a.c = b.c", best: "Join{DataScan(a)->Aggr(sum(a.a),firstrow(a.c))->DataScan(b)}(a.c,b.c)->Aggr(sum(join_agg_0),firstrow(b.a))->Projection", }, { sql: "select sum(a.a), sum(b.a) from t a, t b where a.c = b.c", best: "Join{DataScan(a)->DataScan(b)}(a.c,b.c)->Aggr(sum(a.a),sum(b.a))->Projection", }, { sql: "select sum(a.a), max(b.a) from t a, t b where a.c = b.c", best: "Join{DataScan(a)->Aggr(sum(a.a),firstrow(a.c))->DataScan(b)}(a.c,b.c)->Aggr(sum(join_agg_0),max(b.a))->Projection", }, { sql: "select max(a.a), sum(b.a) from t a, t b where a.c = b.c", best: "Join{DataScan(a)->DataScan(b)->Aggr(sum(b.a),firstrow(b.c))}(a.c,b.c)->Aggr(max(a.a),sum(join_agg_0))->Projection", }, { sql: "select sum(a.a) from t a, t b, t c where a.c = b.c and b.c = c.c", best: "Join{Join{DataScan(a)->Aggr(sum(a.a),firstrow(a.c))->DataScan(b)}(a.c,b.c)->Aggr(sum(join_agg_0),firstrow(b.c))->DataScan(c)}(b.c,c.c)->Aggr(sum(join_agg_0))->Projection", }, { sql: "select sum(b.a) from t a left join t b on a.c = b.c", best: "Join{DataScan(a)->DataScan(b)->Aggr(sum(b.a),firstrow(b.c))}(a.c,b.c)->Aggr(sum(join_agg_0))->Projection", }, { sql: "select sum(a.a) from t a left join t b on a.c = b.c", best: "Join{DataScan(a)->Aggr(sum(a.a),firstrow(a.c))->DataScan(b)}(a.c,b.c)->Aggr(sum(join_agg_0))->Projection", }, { sql: "select sum(a.a) from t a right join t b on a.c = b.c", best: "Join{DataScan(a)->Aggr(sum(a.a),firstrow(a.c))->DataScan(b)}(a.c,b.c)->Aggr(sum(join_agg_0))->Projection", }, { sql: "select sum(a) from (select * from t) x", best: 
"DataScan(t)->Aggr(sum(test.t.a))->Projection", }, { sql: "select sum(c1) from (select c c1, d c2 from t a union all select a c1, b c2 from t b union all select b c1, e c2 from t c) x group by c2", best: "UnionAll{DataScan(a)->Aggr(sum(a.c),firstrow(a.d))->DataScan(b)->Aggr(sum(b.a),firstrow(b.b))->DataScan(c)->Aggr(sum(c.b),firstrow(c.e))}->Aggr(sum(join_agg_0))->Projection", }, { sql: "select max(a.b), max(b.b) from t a join t b on a.c = b.c group by a.a", best: "Join{DataScan(a)->DataScan(b)->Aggr(max(b.b),firstrow(b.c))}(a.c,b.c)->Projection->Projection", }, { sql: "select max(a.b), max(b.b) from t a join t b on a.a = b.a group by a.c", best: "Join{DataScan(a)->DataScan(b)}(a.a,b.a)->Aggr(max(a.b),max(b.b))->Projection", }, { sql: "select max(c.b) from (select * from t a union all select * from t b) c group by c.a", best: "UnionAll{DataScan(a)->Projection->Projection->DataScan(b)->Projection->Projection}->Aggr(max(join_agg_0))->Projection", }, { sql: "select max(a.c) from t a join t b on a.a=b.a and a.b=b.b group by a.b", best: "Join{DataScan(a)->DataScan(b)}(a.a,b.a)(a.b,b.b)->Aggr(max(a.c))->Projection", }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ allocator: new(idAllocator), ctx: mockContext(), colMapper: make(map[*ast.ColumnNameExpr]int), is: is, } p := builder.build(stmt) c.Assert(builder.err, IsNil) lp := p.(LogicalPlan) p, err = logicalOptimize(flagBuildKeyInfo|flagPredicatePushDown|flagPrunColumns|flagAggregationOptimize, lp.(LogicalPlan), builder.ctx, builder.allocator) lp.ResolveIndicesAndCorCols() c.Assert(err, IsNil) c.Assert(ToString(lp), Equals, ca.best, Commentf("for %s", ca.sql)) } } func (s *testPlanSuite) TestRefine(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string best string }{ { sql: "select a from t where c is not null", best: "Table(t)->Projection", }, { sql: "select a from t where c >= 4", best: "Index(t.c_d_e)[[4,+inf]]->Projection", }, { sql: "select a from t where c <= 4", best: "Index(t.c_d_e)[[-inf,4]]->Projection", }, { sql: "select a from t where c = 4 and d = 5 and e = 6", best: "Index(t.c_d_e)[[4 5 6,4 5 6]]->Projection", }, { sql: "select a from t where d = 4 and c = 5", best: "Index(t.c_d_e)[[5 4,5 4]]->Projection", }, { sql: "select a from t where c = 4 and e < 5", best: "Index(t.c_d_e)[[4,4]]->Projection", }, { sql: "select a from t where c = 4 and d <= 5 and d > 3", best: "Index(t.c_d_e)[(4 3 +inf,4 5 +inf]]->Projection", }, { sql: "select a from t where d <= 5 and d > 3", best: "Table(t)->Projection", }, { sql: "select a from t where c between 1 and 2", best: "Index(t.c_d_e)[[1,2]]->Projection", }, { sql: "select a from t where c not between 1 and 2", best: "Index(t.c_d_e)[[-inf <nil>,1 <nil>) (2 +inf,+inf +inf]]->Projection", }, { sql: "select a from t where c <= 5 and c >= 3 and d = 1", best: "Index(t.c_d_e)[[3,5]]->Projection", }, { sql: "select a from t where c = 1 or c = 2 or c = 3", best: "Index(t.c_d_e)[[1,1] [2,2] [3,3]]->Projection", }, { sql: "select b from t where c = 1 or c = 2 or c = 3 or c = 4 or c = 5", best: "Index(t.c_d_e)[[1,1] [2,2] [3,3] [4,4] [5,5]]->Projection", }, { sql: "select a from t where c = 5", best: "Index(t.c_d_e)[[5,5]]->Projection", }, { sql: "select a from t where c = 5 and b = 1", best: "Index(t.c_d_e)[[5,5]]->Projection", }, { sql: "select a from t where not a", best: "Table(t)->Projection", }, { sql: "select a from t 
where c in (1)", best: "Index(t.c_d_e)[[1,1]]->Projection", }, { sql: "select a from t where c in (1) and d > 3", best: "Index(t.c_d_e)[(1 3 +inf,1 +inf +inf]]->Projection", }, { sql: "select a from t where c in (1, 2, 3) and (d > 3 and d < 4 or d > 5 and d < 6)", best: "Index(t.c_d_e)[(1 3 +inf,1 4 <nil>) (1 5 +inf,1 6 <nil>) (2 3 +inf,2 4 <nil>) (2 5 +inf,2 6 <nil>) (3 3 +inf,3 4 <nil>) (3 5 +inf,3 6 <nil>)]->Projection", }, { sql: "select a from t where c in (1, 2, 3)", best: "Index(t.c_d_e)[[1,1] [2,2] [3,3]]->Projection", }, { sql: "select a from t where c in (1, 2, 3) and d in (1,2) and e = 1", best: "Index(t.c_d_e)[[1 1 1,1 1 1] [1 2 1,1 2 1] [2 1 1,2 1 1] [2 2 1,2 2 1] [3 1 1,3 1 1] [3 2 1,3 2 1]]->Projection", }, { sql: "select a from t where d in (1, 2, 3)", best: "Table(t)->Projection", }, { sql: "select a from t where c not in (1)", best: "Table(t)->Projection", }, { sql: "select a from t use index(c_d_e) where c != 1", best: "Index(t.c_d_e)[[-inf <nil>,1 <nil>) (1 +inf,+inf +inf]]->Projection", }, { sql: "select a from t where c_str like ''", best: "Index(t.c_d_e_str)[[,]]->Projection", }, { sql: "select a from t where c_str like 'abc'", best: "Index(t.c_d_e_str)[[abc,abc]]->Projection", }, { sql: "select a from t where c_str not like 'abc'", best: "Table(t)->Projection", }, { sql: "select a from t where not (c_str like 'abc' or c_str like 'abd')", best: "Table(t)->Projection", }, { sql: "select a from t where c_str like '_abc'", best: "Table(t)->Selection->Projection", }, { sql: "select a from t where c_str like 'abc%'", best: "Index(t.c_d_e_str)[[abc <nil>,abd <nil>)]->Projection", }, { sql: "select a from t where c_str like 'abc_'", best: "Index(t.c_d_e_str)[(abc +inf,abd <nil>)]->Selection->Projection", }, { sql: "select a from t where c_str like 'abc%af'", best: "Index(t.c_d_e_str)[[abc <nil>,abd <nil>)]->Selection->Projection", }, { sql: `select a from t where c_str like 'abc\\_' escape ''`, best: "Index(t.c_d_e_str)[[abc_,abc_]]->Projection", }, { sql: `select a from t where c_str like 'abc\\_'`, best: "Index(t.c_d_e_str)[[abc_,abc_]]->Projection", }, { sql: `select a from t where c_str like 'abc\\\\_'`, best: "Index(t.c_d_e_str)[(abc\\ +inf,abc] <nil>)]->Selection->Projection", }, { sql: `select a from t where c_str like 'abc\\_%'`, best: "Index(t.c_d_e_str)[[abc_ <nil>,abc` <nil>)]->Projection", }, { sql: `select a from t where c_str like 'abc=_%' escape '='`, best: "Index(t.c_d_e_str)[[abc_ <nil>,abc` <nil>)]->Projection", }, { sql: `select a from t where c_str like 'abc\\__'`, best: "Index(t.c_d_e_str)[(abc_ +inf,abc` <nil>)]->Selection->Projection", }, { // Check that 123 is converted to string '123'. index can be used. sql: `select a from t where c_str like 123`, best: "Index(t.c_d_e_str)[[123,123]]->Projection", }, { // c is not string type, added cast to string during InferType, no index can be used. 
sql: `select a from t where c like '1'`, best: "Table(t)->Selection->Projection", }, { sql: `select a from t where c = 1.1 and d > 3`, best: "Index(t.c_d_e)[]->Projection", }, { sql: `select a from t where c = 1.9 and d > 3`, best: "Index(t.c_d_e)[]->Projection", }, { sql: `select a from t where c < 1.1`, best: "Index(t.c_d_e)[[-inf,1]]->Projection", }, { sql: `select a from t where c <= 1.9`, best: "Index(t.c_d_e)[[-inf <nil>,2 <nil>)]->Projection", }, { sql: `select a from t where c >= 1.1`, best: "Index(t.c_d_e)[(1 +inf,+inf +inf]]->Projection", }, { sql: `select a from t where c > 1.9`, best: "Index(t.c_d_e)[[2,+inf]]->Projection", }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ allocator: new(idAllocator), ctx: mockContext(), is: is, } p := builder.build(stmt).(LogicalPlan) c.Assert(builder.err, IsNil) p, err = logicalOptimize(flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan), builder.ctx, builder.allocator) info, err := p.convert2PhysicalPlan(&requiredProperty{}) c.Assert(err, IsNil) jsonPlan, _ := info.p.MarshalJSON() c.Assert(ToString(info.p), Equals, ca.best, Commentf("for %s, %s", ca.sql, string(jsonPlan))) } } func (s *testPlanSuite) TestColumnPruning(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string ans map[string][]string }{ { sql: "select count(*) from t group by a", ans: map[string][]string{ "TableScan_1": {"a"}, }, }, { sql: "select count(*) from t", ans: map[string][]string{ "TableScan_1": {}, }, }, { sql: "select count(*) from t a join t b where a.a < 1", ans: map[string][]string{ "TableScan_1": {"a"}, "TableScan_2": {}, }, }, { sql: "select count(*) from t a join t b on a.a = b.d", ans: map[string][]string{ "TableScan_1": {"a"}, "TableScan_2": {"d"}, }, }, { sql: "select count(*) from t a join t b on a.a = b.d order by sum(a.d)", ans: map[string][]string{ "TableScan_1": {"a", "d"}, "TableScan_2": {"d"}, }, }, { sql: "select count(b.a) from t a join t b on a.a = b.d group by b.b order by sum(a.d)", ans: map[string][]string{ "TableScan_1": {"a", "d"}, "TableScan_2": {"a", "b", "d"}, }, }, { sql: "select * from (select count(b.a) from t a join t b on a.a = b.d group by b.b having sum(a.d) < 0) tt", ans: map[string][]string{ "TableScan_1": {"a", "d"}, "TableScan_2": {"a", "b", "d"}, }, }, { sql: "select (select count(a) from t where b = k.a) from t k", ans: map[string][]string{ "TableScan_1": {"a"}, "TableScan_3": {"a", "b"}, }, }, { sql: "select exists (select count(*) from t where b = k.a) from t k", ans: map[string][]string{ "TableScan_1": {}, }, }, { sql: "select b = (select count(*) from t where b = k.a) from t k", ans: map[string][]string{ "TableScan_1": {"a", "b"}, "TableScan_3": {"b"}, }, }, { sql: "select exists (select count(a) from t where b = k.a group by b) from t k", ans: map[string][]string{ "TableScan_1": {"a"}, "TableScan_3": {"b"}, }, }, { sql: "select a as c1, b as c2 from t order by 1, c1 + c2 + c", ans: map[string][]string{ "TableScan_1": {"a", "b", "c"}, }, }, { sql: "select a from t where b < any (select c from t)", ans: map[string][]string{ "TableScan_1": {"a", "b"}, "TableScan_3": {"c"}, }, }, { sql: "select a from t where (b,a) != all (select c,d from t)", ans: map[string][]string{ "TableScan_1": {"a", "b"}, "TableScan_3": {"c", "d"}, }, }, { sql: "select a from t where (b,a) in (select c,d from t)", ans: map[string][]string{ "TableScan_1": {"a", "b"}, 
"TableScan_3": {"c", "d"}, }, }, { sql: "select a from t where a in (select a from t s group by t.b)", ans: map[string][]string{ "TableScan_1": {"a"}, "TableScan_3": {"a"}, }, }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil, comment) builder := &planBuilder{ colMapper: make(map[*ast.ColumnNameExpr]int), allocator: new(idAllocator), ctx: mockContext(), is: is, } p := builder.build(stmt).(LogicalPlan) c.Assert(builder.err, IsNil, comment) p, err = logicalOptimize(flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan), builder.ctx, builder.allocator) c.Assert(err, IsNil) checkDataSourceCols(p, c, ca.ans, comment) } } func (s *testPlanSuite) TestAllocID(c *C) { ctx := mockContext() pA := DataSource{}.init(new(idAllocator), ctx) pB := DataSource{}.init(new(idAllocator), ctx) c.Assert(pA.id, Equals, pB.id) } func checkDataSourceCols(p Plan, c *C, ans map[string][]string, comment CommentInterface) { switch p.(type) { case *DataSource: colList, ok := ans[p.ID()] c.Assert(ok, IsTrue, comment) for i, colName := range colList { c.Assert(colName, Equals, p.Schema().Columns[i].ColName.L, comment) } } for _, child := range p.Children() { checkDataSourceCols(child, c, ans, comment) } } func (s *testPlanSuite) TestValidate(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string err *terror.Error }{ { sql: "select date_format((1,2), '%H');", err: ErrOperandColumns, }, { sql: "select cast((1,2) as date)", err: ErrOperandColumns, }, { sql: "select (1,2) between (3,4) and (5,6)", err: ErrOperandColumns, }, { sql: "select (1,2) rlike '1'", err: ErrOperandColumns, }, { sql: "select (1,2) like '1'", err: ErrOperandColumns, }, { sql: "select case(1,2) when(1,2) then true end", err: ErrOperandColumns, }, { sql: "select (1,2) in ((3,4),(5,6))", err: nil, }, { sql: "select row(1,(2,3)) in (select a,b from t)", err: ErrOperandColumns, }, { sql: "select row(1,2) in (select a,b from t)", err: nil, }, { sql: "select (1,2) in ((3,4),5)", err: ErrOperandColumns, }, { sql: "select (1,2) is true", err: ErrOperandColumns, }, { sql: "select (1,2) is null", err: ErrOperandColumns, }, { sql: "select (+(1,2))=(1,2)", err: nil, }, { sql: "select (-(1,2))=(1,2)", err: ErrOperandColumns, }, { sql: "select (1,2)||(1,2)", err: ErrOperandColumns, }, { sql: "select (1,2) < (3,4)", err: nil, }, { sql: "select (1,2) < 3", err: ErrOperandColumns, }, { sql: "select 1, * from t", err: ErrInvalidWildCard, }, { sql: "select *, 1 from t", err: nil, }, { sql: "select 1, t.* from t", err: nil, }, } for _, ca := range cases { sql := ca.sql comment := Commentf("for %s", sql) stmt, err := s.ParseOneStmt(sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ allocator: new(idAllocator), ctx: mockContext(), colMapper: make(map[*ast.ColumnNameExpr]int), is: is, } builder.build(stmt) if ca.err == nil { c.Assert(builder.err, IsNil, comment) } else { c.Assert(ca.err.Equal(builder.err), IsTrue, comment) } } } func checkUniqueKeys(p Plan, c *C, ans map[string][][]string, sql string) { keyList, ok := ans[p.ID()] c.Assert(ok, IsTrue, Commentf("for %s, %v not found", sql, p.ID())) c.Assert(len(p.Schema().Keys), Equals, len(keyList), Commentf("for %s, %v, the number of key doesn't match, the schema is %s", sql, p.ID(), p.Schema())) for i, key := range keyList { c.Assert(len(key), Equals, len(p.Schema().Keys[i]), Commentf("for %s, %v %v, the 
number of column doesn't match", sql, p.ID(), key)) for j, colName := range key { c.Assert(colName, Equals, p.Schema().Keys[i][j].String(), Commentf("for %s, %v %v, column dosen't match", sql, p.ID(), key)) } } for _, child := range p.Children() { checkUniqueKeys(child, c, ans, sql) } } func (s *testPlanSuite) TestUniqueKeyInfo(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string ans map[string][][]string }{ { sql: "select a, sum(e) from t group by b", ans: map[string][][]string{ "TableScan_1": {{"test.t.a"}}, "Aggregation_2": {{"test.t.a"}}, "Projection_3": {{"a"}}, }, }, { sql: "select a, b, sum(f) from t group by b", ans: map[string][][]string{ "TableScan_1": {{"test.t.f"}, {"test.t.a"}}, "Aggregation_2": {{"test.t.a"}, {"test.t.b"}}, "Projection_3": {{"a"}, {"b"}}, }, }, { sql: "select c, d, e, sum(a) from t group by c, d, e", ans: map[string][][]string{ "TableScan_1": {{"test.t.a"}}, "Aggregation_2": {{"test.t.c", "test.t.d", "test.t.e"}}, "Projection_3": {{"c", "d", "e"}}, }, }, { sql: "select f, g, sum(a) from t", ans: map[string][][]string{ "TableScan_1": {{"test.t.f"}, {"test.t.g"}, {"test.t.f", "test.t.g"}, {"test.t.a"}}, "Aggregation_2": {{"test.t.f"}, {"test.t.g"}, {"test.t.f", "test.t.g"}}, "Projection_3": {{"f"}, {"g"}, {"f", "g"}}, }, }, { sql: "select * from t t1 join t t2 on t1.a = t2.e", ans: map[string][][]string{ "TableScan_1": {{"t1.f"}, {"t1.g"}, {"t1.f", "t1.g"}, {"t1.a"}}, "TableScan_2": {{"t2.f"}, {"t2.g"}, {"t2.f", "t2.g"}, {"t2.a"}}, "Join_3": {{"t2.f"}, {"t2.g"}, {"t2.f", "t2.g"}, {"t2.a"}}, "Projection_4": {{"t2.f"}, {"t2.g"}, {"t2.f", "t2.g"}, {"t2.a"}}, }, }, { sql: "select f from t having sum(a) > 0", ans: map[string][][]string{ "TableScan_1": {{"test.t.f"}, {"test.t.a"}}, "Aggregation_2": {{"test.t.f"}}, "Selection_6": {{"test.t.f"}}, "Projection_3": {{"f"}}, "Projection_5": {{"f"}}, }, }, { sql: "select * from t t1 left join t t2 on t1.a = t2.a", ans: map[string][][]string{ "TableScan_1": {{"t1.f"}, {"t1.g"}, {"t1.f", "t1.g"}, {"t1.a"}}, "TableScan_2": {{"t2.f"}, {"t2.g"}, {"t2.f", "t2.g"}, {"t2.a"}}, "Join_3": {{"t1.f"}, {"t1.g"}, {"t1.f", "t1.g"}, {"t1.a"}}, "Projection_4": {{"t1.f"}, {"t1.g"}, {"t1.f", "t1.g"}, {"t1.a"}}, }, }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ colMapper: make(map[*ast.ColumnNameExpr]int), allocator: new(idAllocator), ctx: mockContext(), is: is, } p := builder.build(stmt).(LogicalPlan) c.Assert(builder.err, IsNil, comment) p, err = logicalOptimize(flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo, p.(LogicalPlan), builder.ctx, builder.allocator) checkUniqueKeys(p, c, ca.ans, ca.sql) } } func (s *testPlanSuite) TestAggPrune(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string best string }{ { sql: "select a, count(b) from t group by a", best: "DataScan(t)->Projection->Projection", }, { sql: "select sum(b) from t group by c, d, e", best: "DataScan(t)->Aggr(sum(test.t.b))->Projection", }, { sql: "select t1.a, count(t2.b) from t t1, t t2 where t1.a = t2.a group by t1.a", best: "Join{DataScan(t1)->DataScan(t2)}(t1.a,t2.a)->Projection->Projection", }, { sql: "select tt.a, sum(tt.b) from (select a, b from t) tt group by tt.a", best: "DataScan(t)->Projection->Projection->Projection", }, { sql: "select count(1) from (select count(1), a as b from t group by a) tt group by b", best: 
"DataScan(t)->Projection->Projection->Projection->Projection", }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ allocator: new(idAllocator), ctx: mockContext(), is: is, } p := builder.build(stmt).(LogicalPlan) c.Assert(builder.err, IsNil) p, err = logicalOptimize(flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo|flagAggregationOptimize, p.(LogicalPlan), builder.ctx, builder.allocator) c.Assert(err, IsNil) c.Assert(ToString(p), Equals, ca.best, comment) } } func (s *testPlanSuite) TestVisitInfo(c *C) { defer testleak.AfterTest(c)() cases := []struct { sql string ans []visitInfo }{ { sql: "insert into t values (1)", ans: []visitInfo{ {mysql.InsertPriv, "test", "t", ""}, }, }, { sql: "delete from t where a = 1", ans: []visitInfo{ {mysql.DeletePriv, "test", "t", ""}, {mysql.SelectPriv, "test", "t", ""}, }, }, { sql: "delete from a1 using t as a1 inner join t as a2 where a1.a = a2.a", ans: []visitInfo{ {mysql.DeletePriv, "test", "t", ""}, {mysql.SelectPriv, "test", "t", ""}, }, }, { sql: "update t set a = 7 where a = 1", ans: []visitInfo{ {mysql.UpdatePriv, "test", "t", ""}, {mysql.SelectPriv, "test", "t", ""}, }, }, { sql: "update t, (select * from t) a1 set t.a = a1.a;", ans: []visitInfo{ {mysql.UpdatePriv, "test", "t", ""}, {mysql.SelectPriv, "test", "t", ""}, }, }, { sql: "select a, sum(e) from t group by a", ans: []visitInfo{ {mysql.SelectPriv, "test", "t", ""}, }, }, { sql: "truncate table t", ans: []visitInfo{ {mysql.DeletePriv, "test", "t", ""}, }, }, { sql: "drop table t", ans: []visitInfo{ {mysql.DropPriv, "test", "t", ""}, }, }, { sql: "create table t (a int)", ans: []visitInfo{ {mysql.CreatePriv, "test", "t", ""}, }, }, { sql: "create table t1 like t", ans: []visitInfo{ {mysql.CreatePriv, "test", "t1", ""}, {mysql.SelectPriv, "test", "t", ""}, }, }, { sql: "create database test", ans: []visitInfo{ {mysql.CreatePriv, "test", "", ""}, }, }, { sql: "drop database test", ans: []visitInfo{ {mysql.DropPriv, "test", "", ""}, }, }, { sql: "create index t_1 on t (a)", ans: []visitInfo{ {mysql.IndexPriv, "test", "t", ""}, }, }, { sql: "drop index e on t", ans: []visitInfo{ {mysql.IndexPriv, "test", "t", ""}, }, }, { sql: `create user 'test'@'%' identified by '123456'`, ans: []visitInfo{ {mysql.CreateUserPriv, "", "", ""}, }, }, { sql: `drop user 'test'@'%'`, ans: []visitInfo{ {mysql.CreateUserPriv, "", "", ""}, }, }, { sql: `grant all privileges on test.* to 'test'@'%'`, ans: []visitInfo{ {mysql.SelectPriv, "test", "", ""}, {mysql.InsertPriv, "test", "", ""}, {mysql.UpdatePriv, "test", "", ""}, {mysql.DeletePriv, "test", "", ""}, {mysql.CreatePriv, "test", "", ""}, {mysql.DropPriv, "test", "", ""}, {mysql.GrantPriv, "test", "", ""}, {mysql.AlterPriv, "test", "", ""}, {mysql.ExecutePriv, "test", "", ""}, {mysql.IndexPriv, "test", "", ""}, }, }, { sql: `grant select on test.ttt to 'test'@'%'`, ans: []visitInfo{ {mysql.SelectPriv, "test", "ttt", ""}, {mysql.GrantPriv, "test", "ttt", ""}, }, }, { sql: `revoke all privileges on *.* from 'test'@'%'`, ans: []visitInfo{ {mysql.SuperPriv, "", "", ""}, }, }, { sql: `set password for 'root'@'%' = 'xxxxx'`, ans: []visitInfo{ {mysql.SuperPriv, "", "", ""}, }, }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ 
colMapper: make(map[*ast.ColumnNameExpr]int), allocator: new(idAllocator), ctx: mockContext(), is: is, } builder.build(stmt) c.Assert(builder.err, IsNil, comment) checkVisitInfo(c, builder.visitInfo, ca.ans, comment) } } type visitInfoArray []visitInfo func (v visitInfoArray) Len() int { return len(v) } func (v visitInfoArray) Less(i, j int) bool { if v[i].privilege < v[j].privilege { return true } if v[i].db < v[j].db { return true } if v[i].table < v[j].table { return true } if v[i].column < v[j].column { return true } return false } func (v visitInfoArray) Swap(i, j int) { v[i], v[j] = v[j], v[i] } func unique(v []visitInfo) []visitInfo { repeat := 0 for i := 1; i < len(v); i++ { if v[i] == v[i-1] { repeat++ } else { v[i-repeat] = v[i] } } return v[:len(v)-repeat] } func checkVisitInfo(c *C, v1, v2 []visitInfo, comment CommentInterface) { sort.Sort(visitInfoArray(v1)) sort.Sort(visitInfoArray(v2)) v1 = unique(v1) v2 = unique(v2) c.Assert(len(v1), Equals, len(v2), comment) for i := 0; i < len(v1); i++ { c.Assert(v1[i], Equals, v2[i], comment) } } func (s *testPlanSuite) TestTopNPushDown(c *C) { UseDAGPlanBuilder = true defer func() { testleak.AfterTest(c)() UseDAGPlanBuilder = false }() cases := []struct { sql string best string }{ // Test TopN + Selection. { sql: "select * from t where a < 1 order by b limit 5", best: "DataScan(t)->Selection->Sort + Limit(5) + Offset(0)->Projection", }, // Test Limit + Selection. { sql: "select * from t where a < 1 limit 5", best: "DataScan(t)->Selection->Limit->Projection", }, // Test Limit + Agg + Proj . { sql: "select a, count(b) from t group by b limit 5", best: "DataScan(t)->Aggr(count(test.t.b),firstrow(test.t.a))->Limit->Projection", }, // Test TopN + Agg + Proj . { sql: "select a, count(b) from t group by b order by c limit 5", best: "DataScan(t)->Aggr(count(test.t.b),firstrow(test.t.a),firstrow(test.t.c))->Sort + Limit(5) + Offset(0)->Projection->Projection", }, // Test TopN + Join + Proj. { sql: "select * from t, t s order by t.a limit 5", best: "Join{DataScan(t)->DataScan(s)}->Sort + Limit(5) + Offset(0)->Projection", }, // Test Limit + Join + Proj. { sql: "select * from t, t s limit 5", best: "Join{DataScan(t)->DataScan(s)}->Limit->Projection", }, // Test TopN + Left Join + Proj. { sql: "select * from t left outer join t s on t.a = s.a order by t.a limit 5", best: "Join{DataScan(t)->Sort + Limit(5) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection", }, // Test Limit + Left Join + Proj. { sql: "select * from t left outer join t s on t.a = s.a limit 5", best: "Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,s.a)->Limit->Projection", }, // Test Limit + Left Join Apply + Proj. { sql: "select (select s.a from t s where t.a = s.a) from t limit 5", best: "Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,s.a)->Limit->Projection->Projection", }, // Test TopN + Left Join Apply + Proj. { sql: "select (select s.a from t s where t.a = s.a) from t order by t.a limit 5", best: "Join{DataScan(t)->Sort + Limit(5) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection->Projection->Projection", }, // Test TopN + Left Semi Join Apply + Proj. { sql: "select exists (select s.a from t s where t.a = s.a) from t order by t.a limit 5", best: "Join{DataScan(t)->Sort + Limit(5) + Offset(0)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection->Projection", }, // Test TopN + Semi Join Apply + Proj. 
{ sql: "select * from t where exists (select s.a from t s where t.a = s.a) order by t.a limit 5", best: "Join{DataScan(t)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection", }, // Test TopN + Right Join + Proj. { sql: "select * from t right outer join t s on t.a = s.a order by s.a limit 5", best: "Join{DataScan(t)->DataScan(s)->Sort + Limit(5) + Offset(0)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection", }, // Test Limit + Right Join + Proj. { sql: "select * from t right outer join t s on t.a = s.a order by s.a,t.b limit 5", best: "Join{DataScan(t)->DataScan(s)}(test.t.a,s.a)->Sort + Limit(5) + Offset(0)->Projection", }, // Test TopN + UA + Proj. { sql: "select * from t union all (select * from t s) order by a,b limit 5", best: "UnionAll{DataScan(t)->Sort + Limit(5) + Offset(0)->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection}->Sort + Limit(5) + Offset(0)", }, // Test Limit + UA + Proj + Sort. { sql: "select * from t union all (select * from t s order by a) limit 5", best: "UnionAll{DataScan(t)->Limit->Projection->DataScan(s)->Sort + Limit(5) + Offset(0)->Projection->Projection}->Limit", }, } for _, ca := range cases { comment := Commentf("for %s", ca.sql) stmt, err := s.ParseOneStmt(ca.sql, "", "") c.Assert(err, IsNil, comment) is, err := mockResolve(stmt) c.Assert(err, IsNil) builder := &planBuilder{ allocator: new(idAllocator), ctx: mockContext(), is: is, colMapper: make(map[*ast.ColumnNameExpr]int), } p := builder.build(stmt).(LogicalPlan) c.Assert(builder.err, IsNil) p, err = logicalOptimize(builder.optFlag, p.(LogicalPlan), builder.ctx, builder.allocator) c.Assert(err, IsNil) c.Assert(ToString(p), Equals, ca.best, comment) } }
plan/logical_plan_test.go
1
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0024060590658336878, 0.0002847309224307537, 0.00015686354890931398, 0.00017448593280278146, 0.0004009062540717423 ]
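The test file in the row above is the one the commit's hunk actually edits: its TopN push-down cases pair each query with an expected plan string, and the hunk attached to the rows below rewrites the pushed-down bound from Count to Count + Offset. A minimal standalone sketch of why that sum is the right bound when a TopN is pushed below a join (the limit type and the example query are illustrative stand-ins, not TiDB's actual definitions):

package main

import "fmt"

// limit mirrors the Count/Offset pair a TopN carries; the name and
// fields are illustrative stand-ins for the planner's Limit struct.
type limit struct {
	Count  uint64
	Offset uint64
}

// pushedBound returns the bound a join child must keep so that the
// parent can still apply the full LIMIT Count OFFSET Offset on top.
func pushedBound(l limit) limit {
	// Keeping only l.Count rows would be wrong: the parent discards the
	// first l.Offset rows it sees, so the child has to surface
	// l.Count+l.Offset rows and leave the offset to the parent.
	return limit{Count: l.Count + l.Offset, Offset: 0}
}

func main() {
	// For "order by t.a limit 5, 5" the outer join side keeps its top
	// 10 rows, i.e. Limit(10) + Offset(0) in the plan-string notation
	// used by the tests above, while the parent keeps Limit(5) + Offset(5).
	fmt.Printf("%+v\n", pushedBound(limit{Count: 5, Offset: 5})) // {Count:10 Offset:0}
}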
{ "id": 3, "code_window": [ "\t\t}\n", "\t}\n", "\tnewTopN := Sort{}.init(topN.allocator, topN.ctx)\n", "\tif canPush {\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tnewTopN.ByItems = make([]*ByItems, len(topN.ByItems))\n", "\t\tcopy(newTopN.ByItems, topN.ByItems)\n", "\t}\n", "\treturn p.children[idx].(LogicalPlan).pushDownTopN(newTopN)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 122 }
// Code generated by protoc-gen-gogo. // source: metapb.proto // DO NOT EDIT! /* Package metapb is a generated protocol buffer package. It is generated from these files: metapb.proto It has these top-level messages: Cluster StoreLabel Store RegionEpoch Region Peer */ package metapb import ( "fmt" "io" "math" proto "github.com/golang/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type StoreState int32 const ( StoreState_Up StoreState = 0 StoreState_Offline StoreState = 1 StoreState_Tombstone StoreState = 2 ) var StoreState_name = map[int32]string{ 0: "Up", 1: "Offline", 2: "Tombstone", } var StoreState_value = map[string]int32{ "Up": 0, "Offline": 1, "Tombstone": 2, } func (x StoreState) Enum() *StoreState { p := new(StoreState) *p = x return p } func (x StoreState) String() string { return proto.EnumName(StoreState_name, int32(x)) } func (x *StoreState) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(StoreState_value, data, "StoreState") if err != nil { return err } *x = StoreState(value) return nil } func (StoreState) EnumDescriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{0} } type Cluster struct { Id uint64 `protobuf:"varint,1,opt,name=id" json:"id"` // max peer count for a region. // pd will do the auto-balance if region peer count mismatches. MaxPeerCount uint32 `protobuf:"varint,2,opt,name=max_peer_count,json=maxPeerCount" json:"max_peer_count"` XXX_unrecognized []byte `json:"-"` } func (m *Cluster) Reset() { *m = Cluster{} } func (m *Cluster) String() string { return proto.CompactTextString(m) } func (*Cluster) ProtoMessage() {} func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{0} } func (m *Cluster) GetId() uint64 { if m != nil { return m.Id } return 0 } func (m *Cluster) GetMaxPeerCount() uint32 { if m != nil { return m.MaxPeerCount } return 0 } // Case insensitive key/value for replica constraints. 
type StoreLabel struct { Key string `protobuf:"bytes,1,opt,name=key" json:"key"` Value string `protobuf:"bytes,2,opt,name=value" json:"value"` XXX_unrecognized []byte `json:"-"` } func (m *StoreLabel) Reset() { *m = StoreLabel{} } func (m *StoreLabel) String() string { return proto.CompactTextString(m) } func (*StoreLabel) ProtoMessage() {} func (*StoreLabel) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{1} } func (m *StoreLabel) GetKey() string { if m != nil { return m.Key } return "" } func (m *StoreLabel) GetValue() string { if m != nil { return m.Value } return "" } type Store struct { Id uint64 `protobuf:"varint,1,opt,name=id" json:"id"` Address string `protobuf:"bytes,2,opt,name=address" json:"address"` State StoreState `protobuf:"varint,3,opt,name=state,enum=metapb.StoreState" json:"state"` Labels []*StoreLabel `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Store) Reset() { *m = Store{} } func (m *Store) String() string { return proto.CompactTextString(m) } func (*Store) ProtoMessage() {} func (*Store) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{2} } func (m *Store) GetId() uint64 { if m != nil { return m.Id } return 0 } func (m *Store) GetAddress() string { if m != nil { return m.Address } return "" } func (m *Store) GetState() StoreState { if m != nil { return m.State } return StoreState_Up } func (m *Store) GetLabels() []*StoreLabel { if m != nil { return m.Labels } return nil } type RegionEpoch struct { // Conf change version, auto increment when add or remove peer ConfVer uint64 `protobuf:"varint,1,opt,name=conf_ver,json=confVer" json:"conf_ver"` // Region version, auto increment when split or merge Version uint64 `protobuf:"varint,2,opt,name=version" json:"version"` XXX_unrecognized []byte `json:"-"` } func (m *RegionEpoch) Reset() { *m = RegionEpoch{} } func (m *RegionEpoch) String() string { return proto.CompactTextString(m) } func (*RegionEpoch) ProtoMessage() {} func (*RegionEpoch) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{3} } func (m *RegionEpoch) GetConfVer() uint64 { if m != nil { return m.ConfVer } return 0 } func (m *RegionEpoch) GetVersion() uint64 { if m != nil { return m.Version } return 0 } type Region struct { Id uint64 `protobuf:"varint,1,opt,name=id" json:"id"` // Region key range [start_key, end_key). 
StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey" json:"start_key,omitempty"` EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey" json:"end_key,omitempty"` RegionEpoch *RegionEpoch `protobuf:"bytes,4,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` Peers []*Peer `protobuf:"bytes,5,rep,name=peers" json:"peers,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Region) Reset() { *m = Region{} } func (m *Region) String() string { return proto.CompactTextString(m) } func (*Region) ProtoMessage() {} func (*Region) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{4} } func (m *Region) GetId() uint64 { if m != nil { return m.Id } return 0 } func (m *Region) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *Region) GetEndKey() []byte { if m != nil { return m.EndKey } return nil } func (m *Region) GetRegionEpoch() *RegionEpoch { if m != nil { return m.RegionEpoch } return nil } func (m *Region) GetPeers() []*Peer { if m != nil { return m.Peers } return nil } type Peer struct { Id uint64 `protobuf:"varint,1,opt,name=id" json:"id"` StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId" json:"store_id"` XXX_unrecognized []byte `json:"-"` } func (m *Peer) Reset() { *m = Peer{} } func (m *Peer) String() string { return proto.CompactTextString(m) } func (*Peer) ProtoMessage() {} func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorMetapb, []int{5} } func (m *Peer) GetId() uint64 { if m != nil { return m.Id } return 0 } func (m *Peer) GetStoreId() uint64 { if m != nil { return m.StoreId } return 0 } func init() { proto.RegisterType((*Cluster)(nil), "metapb.Cluster") proto.RegisterType((*StoreLabel)(nil), "metapb.StoreLabel") proto.RegisterType((*Store)(nil), "metapb.Store") proto.RegisterType((*RegionEpoch)(nil), "metapb.RegionEpoch") proto.RegisterType((*Region)(nil), "metapb.Region") proto.RegisterType((*Peer)(nil), "metapb.Peer") proto.RegisterEnum("metapb.StoreState", StoreState_name, StoreState_value) } func (m *Cluster) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0x8 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) dAtA[i] = 0x10 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.MaxPeerCount)) if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *StoreLabel) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *StoreLabel) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintMetapb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) dAtA[i] = 0x12 i++ i = encodeVarintMetapb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Store) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Store) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0x8 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) dAtA[i] = 0x12 i++ i = encodeVarintMetapb(dAtA, i, uint64(len(m.Address))) i += copy(dAtA[i:], m.Address) 
dAtA[i] = 0x18 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.State)) if len(m.Labels) > 0 { for _, msg := range m.Labels { dAtA[i] = 0x22 i++ i = encodeVarintMetapb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RegionEpoch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RegionEpoch) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0x8 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.ConfVer)) dAtA[i] = 0x10 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.Version)) if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Region) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Region) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0x8 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) if m.StartKey != nil { dAtA[i] = 0x12 i++ i = encodeVarintMetapb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if m.EndKey != nil { dAtA[i] = 0x1a i++ i = encodeVarintMetapb(dAtA, i, uint64(len(m.EndKey))) i += copy(dAtA[i:], m.EndKey) } if m.RegionEpoch != nil { dAtA[i] = 0x22 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.RegionEpoch.Size())) n1, err := m.RegionEpoch.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } if len(m.Peers) > 0 { for _, msg := range m.Peers { dAtA[i] = 0x2a i++ i = encodeVarintMetapb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Peer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Peer) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0x8 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) dAtA[i] = 0x10 i++ i = encodeVarintMetapb(dAtA, i, uint64(m.StoreId)) if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func encodeFixed64Metapb(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) dAtA[offset+2] = uint8(v >> 16) dAtA[offset+3] = uint8(v >> 24) dAtA[offset+4] = uint8(v >> 32) dAtA[offset+5] = uint8(v >> 40) dAtA[offset+6] = uint8(v >> 48) dAtA[offset+7] = uint8(v >> 56) return offset + 8 } func encodeFixed32Metapb(dAtA []byte, offset int, v uint32) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) dAtA[offset+2] = uint8(v >> 16) dAtA[offset+3] = uint8(v >> 24) return offset + 4 } func encodeVarintMetapb(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *Cluster) Size() (n int) { var l int _ = l n += 1 + sovMetapb(uint64(m.Id)) n += 1 + sovMetapb(uint64(m.MaxPeerCount)) if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *StoreLabel) Size() (n int) { var l int _ = l l = len(m.Key) n += 1 + l + sovMetapb(uint64(l)) l = len(m.Value) n += 1 + l + sovMetapb(uint64(l)) if m.XXX_unrecognized != nil { n += 
len(m.XXX_unrecognized) } return n } func (m *Store) Size() (n int) { var l int _ = l n += 1 + sovMetapb(uint64(m.Id)) l = len(m.Address) n += 1 + l + sovMetapb(uint64(l)) n += 1 + sovMetapb(uint64(m.State)) if len(m.Labels) > 0 { for _, e := range m.Labels { l = e.Size() n += 1 + l + sovMetapb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RegionEpoch) Size() (n int) { var l int _ = l n += 1 + sovMetapb(uint64(m.ConfVer)) n += 1 + sovMetapb(uint64(m.Version)) if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *Region) Size() (n int) { var l int _ = l n += 1 + sovMetapb(uint64(m.Id)) if m.StartKey != nil { l = len(m.StartKey) n += 1 + l + sovMetapb(uint64(l)) } if m.EndKey != nil { l = len(m.EndKey) n += 1 + l + sovMetapb(uint64(l)) } if m.RegionEpoch != nil { l = m.RegionEpoch.Size() n += 1 + l + sovMetapb(uint64(l)) } if len(m.Peers) > 0 { for _, e := range m.Peers { l = e.Size() n += 1 + l + sovMetapb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *Peer) Size() (n int) { var l int _ = l n += 1 + sovMetapb(uint64(m.Id)) n += 1 + sovMetapb(uint64(m.StoreId)) if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovMetapb(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozMetapb(x uint64) (n int) { return sovMetapb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *Cluster) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Cluster: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } m.Id = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Id |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxPeerCount", wireType) } m.MaxPeerCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MaxPeerCount |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipMetapb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMetapb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *StoreLabel) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StoreLabel: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: StoreLabel: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetapb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMetapb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Store) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Store: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Store: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } m.Id = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Id |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.State |= (StoreState(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Labels = append(m.Labels, &StoreLabel{}) if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetapb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMetapb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RegionEpoch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RegionEpoch: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RegionEpoch: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ConfVer", wireType) } m.ConfVer = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ConfVer |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Version |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipMetapb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMetapb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Region) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Region: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } m.Id = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Id |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) 
if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) if m.EndKey == nil { m.EndKey = []byte{} } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionEpoch == nil { m.RegionEpoch = &RegionEpoch{} } if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Peers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthMetapb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Peers = append(m.Peers, &Peer{}) if err := m.Peers[len(m.Peers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetapb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMetapb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Peer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Peer: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } m.Id = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Id |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StoreId", wireType) } m.StoreId = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetapb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StoreId |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipMetapb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMetapb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipMetapb(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowMetapb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowMetapb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowMetapb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthMetapb } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowMetapb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipMetapb(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthMetapb = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowMetapb = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("metapb.proto", fileDescriptorMetapb) } var fileDescriptorMetapb = []byte{ // 444 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x52, 0xcd, 0x6e, 0xd3, 0x40, 0x10, 0xce, 0x3a, 0xfe, 0x49, 0xc6, 0x6e, 0x15, 0x2d, 0x15, 0x58, 0x45, 0x72, 0x2c, 0x9f, 0xac, 0x1c, 0x02, 0xea, 0x81, 0x1b, 0x12, 0x6a, 0xc5, 0x01, 0x15, 0x01, 0x72, 0x81, 0xab, 0xe5, 0xc4, 0x93, 0x60, 0xe1, 0xec, 0x5a, 0xbb, 0x9b, 0xa8, 0x7d, 0x13, 0x78, 0x0b, 0x1e, 0xa3, 0x47, 0x9e, 0x00, 0xa1, 0xf0, 0x22, 0x68, 0xd7, 0x36, 0x6a, 0x90, 0x72, 0xf3, 0x7c, 0xdf, 0xcc, 0xe7, 0xef, 0x9b, 0x59, 0x08, 0x36, 0xa8, 0x8a, 0x66, 0x31, 0x6f, 0x04, 0x57, 0x9c, 0xba, 0x6d, 0x75, 0x7e, 0xb6, 0xe6, 0x6b, 0x6e, 0xa0, 0x67, 0xfa, 0xab, 0x65, 0x93, 0x6b, 0xf0, 0xae, 0xea, 0xad, 0x54, 0x28, 0xe8, 0x19, 0x58, 0x55, 0x19, 0x92, 0x98, 0xa4, 0xf6, 0xa5, 0x7d, 0xff, 0x6b, 0x3a, 0xc8, 0xac, 0xaa, 0xa4, 0x33, 0x38, 0xdd, 0x14, 0xb7, 0x79, 0x83, 0x28, 0xf2, 0x25, 0xdf, 0x32, 0x15, 0x5a, 0x31, 0x49, 0x4f, 0xba, 0x8e, 0x60, 0x53, 0xdc, 0x7e, 0x40, 0x14, 0x57, 0x9a, 0x49, 0x5e, 0x01, 0xdc, 0x28, 0x2e, 0xf0, 0x6d, 0xb1, 0xc0, 0x9a, 0x3e, 0x86, 0xe1, 0x57, 0xbc, 0x33, 0x82, 0xe3, 0xae, 0x5d, 0x03, 0xf4, 0x1c, 0x9c, 0x5d, 0x51, 0x6f, 0xd1, 0x08, 0xf5, 0x4c, 0x0b, 0x25, 0xdf, 0x09, 0x38, 0x46, 0xe2, 0x88, 0x9b, 0x08, 0xbc, 0xa2, 0x2c, 0x05, 0x4a, 0x79, 0x30, 0xdd, 0x83, 0x74, 0x0e, 0x8e, 0x54, 0x85, 0xc2, 0x70, 0x18, 0x93, 0xf4, 0xf4, 0x82, 0xce, 0xbb, 0x55, 0x18, 0xcd, 0x1b, 0xcd, 0xf4, 0xff, 0x33, 0x6d, 0x74, 0x06, 0x6e, 0xad, 0xcd, 0xca, 0xd0, 0x8e, 0x87, 0xa9, 0xff, 0xdf, 0x80, 0xc9, 0x91, 0x75, 0x1d, 0xc9, 0x3b, 0xf0, 0x33, 0x5c, 0x57, 0x9c, 0xbd, 0x6e, 0xf8, 0xf2, 0x0b, 0x9d, 0xc2, 0x68, 0xc9, 0xd9, 0x2a, 0xdf, 0xa1, 0x38, 0xb0, 0xe9, 0x69, 0xf4, 0x33, 0x0a, 0xed, 0x75, 0x87, 0x42, 0x56, 0x9c, 0x19, 0xaf, 0xff, 0xf8, 0x0e, 0x4c, 0x7e, 0x10, 0x70, 0x5b, 0xc1, 0x23, 0x61, 0x9f, 0xc2, 0x58, 0xaa, 0x42, 0xa8, 0x5c, 0xaf, 0x51, 0x4b, 0x04, 0xd9, 0xc8, 0x00, 0xd7, 0x78, 0x47, 0x9f, 0x80, 0x87, 0xac, 0x34, 0xd4, 0xd0, 0x50, 0x2e, 0xb2, 0x52, 0x13, 0x2f, 0x20, 0x10, 0x46, 0x35, 0x47, 0xed, 0x33, 0xb4, 0x63, 0x92, 0xfa, 0x17, 0x8f, 0xfa, 0x60, 0x0f, 0x22, 0x64, 0xbe, 0x78, 0x90, 0x27, 0x01, 0x47, 0x1f, 0x59, 0x86, 0x8e, 0xd9, 0x44, 0xd0, 0x0f, 0xe8, 0xf3, 0x66, 0x2d, 0x95, 0xbc, 0x04, 0x5b, 0x97, 0x47, 0xfc, 0x4e, 0x61, 0x24, 0xf5, 0xda, 0xf2, 0xaa, 0x3c, 0x4c, 0x6c, 0xd0, 0x37, 0xe5, 0xec, 0x79, 0xf7, 0x3e, 0xcc, 0x21, 0xa8, 0x0b, 0xd6, 0xa7, 0x66, 0x32, 0xa0, 0x3e, 0x78, 0xef, 0x57, 0xab, 0xba, 0x62, 0x38, 0x21, 0xf4, 0x04, 0xc6, 0x1f, 0xf9, 0x66, 0x21, 0x15, 0x67, 0x38, 0xb1, 0x2e, 0x27, 0xf7, 0xfb, 0x88, 0xfc, 0xdc, 0x47, 0xe4, 0xf7, 0x3e, 0x22, 0xdf, 0xfe, 0x44, 0x83, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x55, 0x17, 0x16, 0x3b, 0xdd, 0x02, 0x00, 0x00, }
_vendor/src/github.com/pingcap/kvproto/pkg/metapb/metapb.pb.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.001448467024601996, 0.0001898265181807801, 0.0001599654642632231, 0.00017317781748715788, 0.00012389715993776917 ]
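The generated metapb code in the row above is built almost entirely on protobuf's base-128 varint layout: the Marshal methods emit tags and integer fields through encodeVarintMetapb, and the Unmarshal methods reverse it with shift-accumulate loops. A short self-contained sketch of that encoding, assuming nothing beyond the wire format itself (the helper names here are mine, not the generated ones):

package main

import "fmt"

// putUvarint appends x in protobuf's base-128 varint layout: seven
// payload bits per byte, high bit set on every byte except the last.
func putUvarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

// uvarint decodes the varint at the start of buf, mirroring the
// shift-accumulate loop the generated Unmarshal methods use.
func uvarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, n
		}
	}
}

func main() {
	enc := putUvarint(nil, 300)
	fmt.Printf("% x\n", enc) // ac 02
	v, n := uvarint(enc)
	fmt.Println(v, n) // 300 2
}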
{ "id": 3, "code_window": [ "\t\t}\n", "\t}\n", "\tnewTopN := Sort{}.init(topN.allocator, topN.ctx)\n", "\tif canPush {\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tnewTopN.ByItems = make([]*ByItems, len(topN.ByItems))\n", "\t\tcopy(newTopN.ByItems, topN.ByItems)\n", "\t}\n", "\treturn p.children[idx].(LogicalPlan).pushDownTopN(newTopN)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 122 }
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package tables_test import ( . "github.com/pingcap/check" "github.com/pingcap/tidb" "github.com/pingcap/tidb/context" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/model" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/store/localstore" "github.com/pingcap/tidb/store/localstore/goleveldb" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/util/types" ) var _ = Suite(&testMemoryTableSuite{}) type testMemoryTableSuite struct { store kv.Storage se tidb.Session tbl table.Table } func (ts *testMemoryTableSuite) SetUpSuite(c *C) { driver := localstore.Driver{Driver: goleveldb.MemoryDriver{}} store, err := driver.Open("memory") c.Check(err, IsNil) ts.store = store ts.se, err = tidb.CreateSession(ts.store) c.Assert(err, IsNil) // create table tp1 := types.NewFieldType(mysql.TypeLong) col1 := &model.ColumnInfo{ ID: 1, Name: model.NewCIStr("a"), Offset: 0, FieldType: *tp1, } tp2 := types.NewFieldType(mysql.TypeVarchar) tp2.Flen = 255 col2 := &model.ColumnInfo{ ID: 2, Name: model.NewCIStr("b"), Offset: 1, FieldType: *tp2, } tblInfo := &model.TableInfo{ ID: 100, Name: model.NewCIStr("t"), Columns: []*model.ColumnInfo{col1, col2}, } alloc := autoid.NewMemoryAllocator(int64(10)) ts.tbl, _ = tables.MemoryTableFromMeta(alloc, tblInfo) } func (ts *testMemoryTableSuite) TestMemoryBasic(c *C) { ctx := ts.se.(context.Context) tb := ts.tbl c.Assert(tb.Meta(), NotNil) c.Assert(tb.Meta().ID, Greater, int64(0)) c.Assert(tb.Meta().Name.L, Equals, "t") c.Assert(tb.Indices(), IsNil) c.Assert(string(tb.FirstKey()), Not(Equals), "") c.Assert(string(tb.RecordPrefix()), Not(Equals), "") autoid, err := tb.AllocAutoID() c.Assert(err, IsNil) c.Assert(autoid, Greater, int64(0)) rid, err := tb.AddRecord(ctx, types.MakeDatums(1, "abc")) c.Assert(err, IsNil) row, err := tb.Row(ctx, rid) c.Assert(err, IsNil) c.Assert(len(row), Equals, 2) c.Assert(row[0].GetInt64(), Equals, int64(1)) _, err = tb.AddRecord(ctx, types.MakeDatums(1, "aba")) c.Assert(err, IsNil) _, err = tb.AddRecord(ctx, types.MakeDatums(2, "abc")) c.Assert(err, IsNil) tb.IterRecords(ctx, tb.FirstKey(), tb.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { return true, nil }) // RowWithCols test vals, err := tb.RowWithCols(ctx, rid, tb.Cols()) c.Assert(err, IsNil) c.Assert(vals, HasLen, 2) c.Assert(vals[0].GetInt64(), Equals, int64(1)) cols := []*table.Column{tb.Cols()[1]} vals, err = tb.RowWithCols(ctx, rid, cols) c.Assert(err, IsNil) c.Assert(vals, HasLen, 1) c.Assert(vals[0].GetString(), Equals, "abc") c.Assert(tb.RemoveRecord(ctx, rid, types.MakeDatums(1, "cba")), IsNil) _, err = tb.AddRecord(ctx, types.MakeDatums(1, "abc")) c.Assert(err, IsNil) tb.(*tables.MemoryTable).Truncate() _, err = tb.Row(ctx, rid) c.Assert(err, NotNil) }
table/tables/memory_tables_test.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.00036896418896503747, 0.00018905801698565483, 0.00016515942115802318, 0.00017469485464971513, 0.00005205439811106771 ]
{ "id": 3, "code_window": [ "\t\t}\n", "\t}\n", "\tnewTopN := Sort{}.init(topN.allocator, topN.ctx)\n", "\tif canPush {\n", "\t\tif !topN.isEmpty() {\n", "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count}\n", "\t\t}\n", "\t\tnewTopN.ByItems = make([]*ByItems, len(topN.ByItems))\n", "\t\tcopy(newTopN.ByItems, topN.ByItems)\n", "\t}\n", "\treturn p.children[idx].(LogicalPlan).pushDownTopN(newTopN)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tnewTopN.ExecLimit = &Limit{Count: topN.ExecLimit.Count + topN.ExecLimit.Offset}\n" ], "file_path": "plan/topn_push_down.go", "type": "replace", "edit_start_line_idx": 122 }
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Solaris system calls. // This file is compiled as ordinary Go code, // but it is also input to mksyscall, // which parses the //sys lines and generates system call stubs. // Note that sometimes we use a lowercase //sys name and wrap // it in our own nicer implementation, either here or in // syscall_solaris.go or syscall_unix.go. package unix import ( "sync/atomic" "syscall" "unsafe" ) // Implemented in runtime/syscall_solaris.go. type syscallFunc uintptr func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) type SockaddrDatalink struct { Family uint16 Index uint16 Type uint8 Nlen uint8 Alen uint8 Slen uint8 Data [244]int8 raw RawSockaddrDatalink } func clen(n []byte) int { for i := 0; i < len(n); i++ { if n[i] == 0 { return i } } return len(n) } // ParseDirent parses up to max directory entries in buf, // appending the names to names. It returns the number // bytes consumed from buf, the number of entries added // to names, and the new names slice. func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { origlen := len(buf) for max != 0 && len(buf) > 0 { dirent := (*Dirent)(unsafe.Pointer(&buf[0])) if dirent.Reclen == 0 { buf = nil break } buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. continue } bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) var name = string(bytes[0:clen(bytes[:])]) if name == "." || name == ".." { // Useless names continue } max-- count++ names = append(names, name) } return origlen - len(buf), count, names } //sysnb pipe(p *[2]_C_int) (n int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } var pp [2]_C_int n, err := pipe(&pp) if n != 0 { return err } p[0] = int(pp[0]) p[1] = int(pp[1]) return nil } func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL } sa.raw.Family = AF_INET p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) for i := 0; i < len(sa.Addr); i++ { sa.raw.Addr[i] = sa.Addr[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil } func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL } sa.raw.Family = AF_INET6 p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId for i := 0; i < len(sa.Addr); i++ { sa.raw.Addr[i] = sa.Addr[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil } func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { name := sa.Name n := len(name) if n >= len(sa.raw.Path) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX for i := 0; i < n; i++ { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. sl := _Socklen(2) if n > 0 { sl += _Socklen(n) + 1 } if sa.raw.Path[0] == '@' { sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- } return unsafe.Pointer(&sa.raw), sl, nil } //sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny if err = getsockname(fd, &rsa, &len); err != nil { return } return anyToSockaddr(&rsa) } const ImplementsGetwd = true //sys Getcwd(buf []byte) (n int, err error) func Getwd() (wd string, err error) { var buf [PathMax]byte // Getcwd will return an error if it failed for any reason. _, err = Getcwd(buf[0:]) if err != nil { return "", err } n := clen(buf[:]) if n < 1 { return "", EINVAL } return string(buf[:n]), nil } /* * Wrapped */ //sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error) //sysnb setgroups(ngid int, gid *_Gid_t) (err error) func Getgroups() (gids []int, err error) { n, err := getgroups(0, nil) // Check for error and sanity check group count. Newer versions of // Solaris allow up to 1024 (NGROUPS_MAX). if n < 0 || n > 1024 { if err != nil { return nil, err } return nil, EINVAL } else if n == 0 { return nil, nil } a := make([]_Gid_t, n) n, err = getgroups(n, &a[0]) if n == -1 { return nil, err } gids = make([]int, n) for i, v := range a[0:n] { gids[i] = int(v) } return } func Setgroups(gids []int) (err error) { if len(gids) == 0 { return setgroups(0, nil) } a := make([]_Gid_t, len(gids)) for i, v := range gids { a[i] = _Gid_t(v) } return setgroups(len(a), &a[0]) } func ReadDirent(fd int, buf []byte) (n int, err error) { // Final argument is (basep *uintptr) and the syscall doesn't take nil. // TODO(rsc): Can we use a single global basep for all calls? return Getdents(fd, buf, new(uintptr)) } // Wait status is 7 bits at bottom, either 0 (exited), // 0x7F (stopped), or a signal number that caused an exit. // The 0x80 bit is whether there was a core dump. // An extra number (exit code, signal causing a stop) // is in the high bits. 
type WaitStatus uint32

const (
	mask    = 0x7F
	core    = 0x80
	shift   = 8

	exited  = 0
	stopped = 0x7F
)

func (w WaitStatus) Exited() bool { return w&mask == exited }

func (w WaitStatus) ExitStatus() int {
	if w&mask != exited {
		return -1
	}
	return int(w >> shift)
}

func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 }

func (w WaitStatus) Signal() syscall.Signal {
	sig := syscall.Signal(w & mask)
	if sig == stopped || sig == 0 {
		return -1
	}
	return sig
}

func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }

func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP }

func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP }

func (w WaitStatus) StopSignal() syscall.Signal {
	if !w.Stopped() {
		return -1
	}
	return syscall.Signal(w>>shift) & 0xFF
}

func (w WaitStatus) TrapCause() int { return -1 }

//sys wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error)

func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (int, error) {
	var status _C_int
	rpid, err := wait4(int32(pid), &status, options, rusage)
	wpid := int(rpid)
	if wpid == -1 {
		return wpid, err
	}
	if wstatus != nil {
		*wstatus = WaitStatus(status)
	}
	return wpid, nil
}

//sys gethostname(buf []byte) (n int, err error)

func Gethostname() (name string, err error) {
	var buf [MaxHostNameLen]byte
	n, err := gethostname(buf[:])
	if n != 0 {
		return "", err
	}
	n = clen(buf[:])
	if n < 1 {
		return "", EFAULT
	}
	return string(buf[:n]), nil
}

//sys utimes(path string, times *[2]Timeval) (err error)

func Utimes(path string, tv []Timeval) (err error) {
	if tv == nil {
		return utimes(path, nil)
	}
	if len(tv) != 2 {
		return EINVAL
	}
	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}

//sys utimensat(fd int, path string, times *[2]Timespec, flag int) (err error)

func UtimesNano(path string, ts []Timespec) error {
	if ts == nil {
		return utimensat(AT_FDCWD, path, nil, 0)
	}
	if len(ts) != 2 {
		return EINVAL
	}
	return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
}

func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
	if ts == nil {
		return utimensat(dirfd, path, nil, flags)
	}
	if len(ts) != 2 {
		return EINVAL
	}
	return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
}

//sys fcntl(fd int, cmd int, arg int) (val int, err error)

// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0)
	if e1 != 0 {
		return e1
	}
	return nil
}

//sys futimesat(fildes int, path *byte, times *[2]Timeval) (err error)

func Futimesat(dirfd int, path string, tv []Timeval) error {
	pathp, err := BytePtrFromString(path)
	if err != nil {
		return err
	}
	if tv == nil {
		return futimesat(dirfd, pathp, nil)
	}
	if len(tv) != 2 {
		return EINVAL
	}
	return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}

// Solaris doesn't have an futimes function because it allows NULL to be
// specified as the path for futimesat. However, Go doesn't like
// NULL-style string interfaces, so this simple wrapper is provided.
func Futimes(fd int, tv []Timeval) error {
	if tv == nil {
		return futimesat(fd, nil, nil)
	}
	if len(tv) != 2 {
		return EINVAL
	}
	return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
}

func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
	switch rsa.Addr.Family {
	case AF_UNIX:
		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
		sa := new(SockaddrUnix)
		// Assume path ends at NUL.
		// This is not technically the Solaris semantics for
		// abstract Unix domain sockets -- they are supposed
		// to be uninterpreted fixed-size binary blobs -- but
		// everyone uses this convention.
		n := 0
		for n < len(pp.Path) && pp.Path[n] != 0 {
			n++
		}
		bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
		sa.Name = string(bytes)
		return sa, nil

	case AF_INET:
		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet4)
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil

	case AF_INET6:
		pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet6)
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		sa.ZoneId = pp.Scope_id
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, nil
	}
	return nil, EAFNOSUPPORT
}

//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = libsocket.accept

func Accept(fd int) (nfd int, sa Sockaddr, err error) {
	var rsa RawSockaddrAny
	var len _Socklen = SizeofSockaddrAny
	nfd, err = accept(fd, &rsa, &len)
	if nfd == -1 {
		return
	}
	sa, err = anyToSockaddr(&rsa)
	if err != nil {
		Close(nfd)
		nfd = 0
	}
	return
}

//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg

func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
	var msg Msghdr
	var rsa RawSockaddrAny
	msg.Name = (*byte)(unsafe.Pointer(&rsa))
	msg.Namelen = uint32(SizeofSockaddrAny)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = (*int8)(unsafe.Pointer(&p[0]))
		iov.SetLen(len(p))
	}
	var dummy int8
	if len(oob) > 0 {
		// receive at least one normal byte
		if len(p) == 0 {
			iov.Base = &dummy
			iov.SetLen(1)
		}
		msg.Accrights = (*int8)(unsafe.Pointer(&oob[0]))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = recvmsg(fd, &msg, flags); n == -1 {
		return
	}
	oobn = int(msg.Accrightslen)
	// source address is only specified if the socket is unconnected
	if rsa.Addr.Family != AF_UNSPEC {
		from, err = anyToSockaddr(&rsa)
	}
	return
}

func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
	_, err = SendmsgN(fd, p, oob, to, flags)
	return
}

//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.sendmsg

func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
	var ptr unsafe.Pointer
	var salen _Socklen
	if to != nil {
		ptr, salen, err = to.sockaddr()
		if err != nil {
			return 0, err
		}
	}
	var msg Msghdr
	msg.Name = (*byte)(unsafe.Pointer(ptr))
	msg.Namelen = uint32(salen)
	var iov Iovec
	if len(p) > 0 {
		iov.Base = (*int8)(unsafe.Pointer(&p[0]))
		iov.SetLen(len(p))
	}
	var dummy int8
	if len(oob) > 0 {
		// send at least one normal byte
		if len(p) == 0 {
			iov.Base = &dummy
			iov.SetLen(1)
		}
		msg.Accrights = (*int8)(unsafe.Pointer(&oob[0]))
	}
	msg.Iov = &iov
	msg.Iovlen = 1
	if n, err = sendmsg(fd, &msg, flags); err != nil {
		return 0, err
	}
	if len(oob) > 0 && len(p) == 0 {
		n = 0
	}
	return n, nil
}

//sys acct(path *byte) (err error)

func Acct(path string) (err error) {
	if len(path) == 0 {
		// Assume caller wants to disable accounting.
		return acct(nil)
	}

	pathp, err := BytePtrFromString(path)
	if err != nil {
		return err
	}
	return acct(pathp)
}

/*
 * Expose the ioctl function
 */

//sys ioctl(fd int, req int, arg uintptr) (err error)

func IoctlSetInt(fd int, req int, value int) (err error) {
	return ioctl(fd, req, uintptr(value))
}

func IoctlSetWinsize(fd int, req int, value *Winsize) (err error) {
	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}

func IoctlSetTermios(fd int, req int, value *Termios) (err error) {
	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}

func IoctlSetTermio(fd int, req int, value *Termio) (err error) {
	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}

func IoctlGetInt(fd int, req int) (int, error) {
	var value int
	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
	return value, err
}

func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
	var value Winsize
	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
	return &value, err
}

func IoctlGetTermios(fd int, req int) (*Termios, error) {
	var value Termios
	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
	return &value, err
}

func IoctlGetTermio(fd int, req int) (*Termio, error) {
	var value Termio
	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
	return &value, err
}

/*
 * Exposed directly
 */

//sys Access(path string, mode uint32) (err error)
//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
//sys Chdir(path string) (err error)
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
//sys Close(fd int) (err error)
//sys Creat(path string, mode uint32) (fd int, err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(oldfd int, newfd int) (err error)
//sys Exit(code int)
//sys Fchdir(fd int) (err error)
//sys Fchmod(fd int, mode uint32) (err error)
//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
//sys Fdatasync(fd int) (err error)
//sys Fpathconf(fd int, name int) (val int, err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error)
//sysnb Getgid() (gid int)
//sysnb Getpid() (pid int)
//sysnb Getpgid(pid int) (pgid int, err error)
//sysnb Getpgrp() (pgid int, err error)
//sys Geteuid() (euid int)
//sys Getegid() (egid int)
//sys Getppid() (ppid int)
//sys Getpriority(which int, who int) (n int, err error)
//sysnb Getrlimit(which int, lim *Rlimit) (err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Gettimeofday(tv *Timeval) (err error)
//sysnb Getuid() (uid int)
//sys Kill(pid int, signum syscall.Signal) (err error)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
//sys Listen(s int, backlog int) (err error) = libsocket.listen
//sys Lstat(path string, stat *Stat_t) (err error)
//sys Madvise(b []byte, advice int) (err error)
//sys Mkdir(path string, mode uint32) (err error)
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
//sys Mkfifoat(dirfd int, path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error)
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys Mlock(b []byte) (err error)
//sys Mlockall(flags int) (err error)
//sys Mprotect(b []byte, prot int) (err error)
//sys Munlock(b []byte) (err error)
//sys Munlockall() (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
//sys Pathconf(path string, name int) (val int, err error)
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error)
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
//sys read(fd int, p []byte) (n int, err error)
//sys Readlink(path string, buf []byte) (n int, err error)
//sys Rename(from string, to string) (err error)
//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Rmdir(path string) (err error)
//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek
//sysnb Setegid(egid int) (err error)
//sysnb Seteuid(euid int) (err error)
//sysnb Setgid(gid int) (err error)
//sys Sethostname(p []byte) (err error)
//sysnb Setpgid(pid int, pgid int) (err error)
//sys Setpriority(which int, who int, prio int) (err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sysnb Setrlimit(which int, lim *Rlimit) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Setuid(uid int) (err error)
//sys Shutdown(s int, how int) (err error) = libsocket.shutdown
//sys Stat(path string, stat *Stat_t) (err error)
//sys Symlink(path string, link string) (err error)
//sys Sync() (err error)
//sysnb Times(tms *Tms) (ticks uintptr, err error)
//sys Truncate(path string, length int64) (err error)
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sys Umask(mask int) (oldmask int)
//sysnb Uname(buf *Utsname) (err error)
//sys Unmount(target string, flags int) (err error) = libc.umount
//sys Unlink(path string) (err error)
//sys Unlinkat(dirfd int, path string, flags int) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
//sys Utime(path string, buf *Utimbuf) (err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.bind
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.connect
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.sendto
//sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.socket
//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.socketpair
//sys write(fd int, p []byte) (n int, err error)
//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.getsockopt
//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername
//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom

func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

var mapper = &mmapper{
	active: make(map[*byte][]byte),
	mmap:   mmap,
	munmap: munmap,
}

func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
	return mapper.Mmap(fd, offset, length, prot, flags)
}

func Munmap(b []byte) (err error) {
	return mapper.Munmap(b)
}

//sys sysconf(name int) (n int64, err error)

// pageSize caches the value of Getpagesize, since it can't change
// once the system is booted.
var pageSize int64 // accessed atomically

func Getpagesize() int {
	n := atomic.LoadInt64(&pageSize)
	if n == 0 {
		n, _ = sysconf(_SC_PAGESIZE)
		atomic.StoreInt64(&pageSize, n)
	}
	return int(n)
}
_vendor/src/golang.org/x/sys/unix/syscall_solaris.go
0
https://github.com/pingcap/tidb/commit/3386954d9fa30e4c0c28f8ebab0b9fd543a4fef3
[ 0.0015456259716302156, 0.00021061136794742197, 0.00016189862799365073, 0.00017172849038615823, 0.00019718537805601954 ]
{ "id": 0, "code_window": [ " return\n", " }\n", "\n", " // If successful, add npm\n", " npmv := getNpmVersion(version)\n", " success := web.GetNpm(getNpmVersion(version))\n", " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " success := web.GetNpm(env.root, getNpmVersion(version))\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 213 }
package main

import (
  "fmt"
  "os"
  "os/exec"
  "strings"
  "io/ioutil"
  "regexp"
  "bytes"
  "encoding/json"
  "strconv"
  "./nvm/web"
  "./nvm/arch"
  "./nvm/file"
  "./nvm/node"
  // "./ansi"
)

const (
  NvmVersion = "1.0.6"
)

type Environment struct {
  settings        string
  root            string
  symlink         string
  arch            string
  proxy           string
  originalpath    string
  originalversion string
}

var env = &Environment{
  settings: os.Getenv("NVM_HOME")+"\\settings.txt",
  root: "",
  symlink: os.Getenv("NVM_SYMLINK"),
  arch: os.Getenv("PROCESSOR_ARCHITECTURE"),
  proxy: "none",
  originalpath: "",
  originalversion: "",
}

func main() {
  args := os.Args
  detail := ""
  procarch := arch.Validate(env.arch)

  Setup()

  // Capture any additional arguments
  if len(args) > 2 {
    detail = strings.ToLower(args[2])
  }
  if len(args) > 3 {
    procarch = args[3]
  }
  if len(args) < 2 {
    help()
    return
  }

  // Run the appropriate method
  switch args[1] {
    case "install": install(detail,procarch)
    case "uninstall": uninstall(detail)
    case "use": use(detail,procarch)
    case "list": list(detail)
    case "ls": list(detail)
    case "on": enable()
    case "off": disable()
    case "root":
      if len(args) == 3 {
        updateRootDir(args[2])
      } else {
        fmt.Println("\nCurrent Root: "+env.root)
      }
    case "version": fmt.Println(NvmVersion)
    case "v": fmt.Println(NvmVersion)
    case "arch":
      if strings.Trim(detail," \r\n") != "" {
        detail = strings.Trim(detail," \r\n")
        if detail != "32" && detail != "64" {
          fmt.Println("\""+detail+"\" is an invalid architecture. Use 32 or 64.")
          return
        }
        env.arch = detail
        saveSettings()
        fmt.Println("Default architecture set to "+detail+"-bit.")
        return
      }
      _, a := node.GetCurrentVersion()
      fmt.Println("System Default: "+env.arch+"-bit.")
      fmt.Println("Currently Configured: "+a+"-bit.")
    case "proxy":
      if detail == "" {
        fmt.Println("Current proxy: "+env.proxy)
      } else {
        env.proxy = detail
        saveSettings()
      }
    case "update": update()
    default: help()
  }
}

func update() {
  // cmd := exec.Command("cmd", "/d", "echo", "testing")
  // var output bytes.Buffer
  // var _stderr bytes.Buffer
  // cmd.Stdout = &output
  // cmd.Stderr = &_stderr
  // perr := cmd.Run()
  // if perr != nil {
  //   fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
  //   return
  // }
}

func CheckVersionExceedsLatest(version string) bool{
  content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
  re := regexp.MustCompile("node-v(.+)+msi")
  reg := regexp.MustCompile("node-v|-x.+")
  latest := reg.ReplaceAllString(re.FindString(content),"")
  if version <= latest {
    return false
  } else {
    return true
  }
}

func install(version string, cpuarch string) {
  if version == "" {
    fmt.Println("\nInvalid version.")
    fmt.Println(" ")
    help()
    return
  }

  cpuarch = strings.ToLower(cpuarch)

  if cpuarch != "" {
    if cpuarch != "32" && cpuarch != "64" && cpuarch != "all" {
      fmt.Println("\""+cpuarch+"\" is not a valid CPU architecture. Must be 32 or 64.")
      return
    }
  } else {
    cpuarch = env.arch
  }

  if cpuarch != "all" {
    cpuarch = arch.Validate(cpuarch)
  }

  if CheckVersionExceedsLatest(version) {
    fmt.Println("Node.js v"+version+" is not yet released or available.")
    return
  }

  if cpuarch == "64" && !web.IsNode64bitAvailable(version) {
    fmt.Println("Node.js v"+version+" is only available in 32-bit.")
    return
  }

  // If user specifies "latest" version, find out what version is
  if version == "latest" {
    content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
    re := regexp.MustCompile("node-v(.+)+msi")
    reg := regexp.MustCompile("node-v|-x.+")
    version = reg.ReplaceAllString(re.FindString(content),"")
  }

  // Check to see if the version is already installed
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    if !node.IsVersionAvailable(version){
      fmt.Println("Version "+version+" is not available. If you are attempting to download a \"just released\" version,")
      fmt.Println("it may not be recognized by the nvm service yet (updated hourly). If you feel this is in error and")
      fmt.Println("you know the version exists, please visit http://github.com/coreybutler/nodedistro and submit a PR.")
      return
    }

    // Make the output directories
    os.Mkdir(env.root+"\\v"+version,os.ModeDir)
    os.Mkdir(env.root+"\\v"+version+"\\node_modules",os.ModeDir)

    // Download node
    if (cpuarch == "32" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"32") {
      success := web.GetNodeJS(env.root,version,"32");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 32-bit executable.")
        return
      }
    }
    if (cpuarch == "64" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"64") {
      success := web.GetNodeJS(env.root,version,"64");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 64-bit executable.")
        return
      }
    }

    if file.Exists(env.root+"\\v"+version+"\\node_modules\\npm") {
      return
    }

    // If successful, add npm
    npmv := getNpmVersion(version)
    success := web.GetNpm(getNpmVersion(version))
    if success {
      fmt.Printf("Installing npm v"+npmv+"...")

      // Extract npm to the temp directory
      file.Unzip(os.TempDir()+"\\npm-v"+npmv+".zip",os.TempDir()+"\\nvm-npm")

      // Copy the npm and npm.cmd files to the installation directory
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm",env.root+"\\v"+version+"\\npm")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm.cmd",env.root+"\\v"+version+"\\npm.cmd")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv,env.root+"\\v"+version+"\\node_modules\\npm")

      // Remove the source file
      os.RemoveAll(os.TempDir()+"\\nvm-npm")

      fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use "+version)
    } else {
      fmt.Println("Could not download npm for node v"+version+".")
      fmt.Println("Please visit https://github.com/npm/npm/releases/tag/v"+npmv+" to download npm.")
      fmt.Println("It should be extracted to "+env.root+"\\v"+version)
    }

    // If this is ever shipped for Mac, it should use homebrew.
    // If this ever ships on Linux, it should be on bintray so it can use yum, apt-get, etc.
    return
  } else {
    fmt.Println("Version "+version+" is already installed.")
    return
  }
}

func uninstall(version string) {
  // Make sure a version is specified
  if len(version) == 0 {
    fmt.Println("Provide the version you want to uninstall.")
    help()
    return
  }

  // Determine if the version exists and skip if it doesn't
  if node.IsVersionInstalled(env.root,version,"32") || node.IsVersionInstalled(env.root,version,"64") {
    fmt.Printf("Uninstalling node v"+version+"...")
    v, _ := node.GetCurrentVersion()
    if v == version {
      cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
      cmd.Run()
    }
    e := os.RemoveAll(env.root+"\\v"+version)
    if e != nil {
      fmt.Println("Error removing node v"+version)
      fmt.Println("Manually remove "+env.root+"\\v"+version+".")
    } else {
      fmt.Printf(" done")
    }
  } else {
    fmt.Println("node v"+version+" is not installed. Type \"nvm list\" to see what is installed.")
  }
  return
}

func use(version string, cpuarch string) {
  if version == "32" || version == "64" {
    cpuarch = version
    v, _ := node.GetCurrentVersion()
    version = v
  }

  cpuarch = arch.Validate(cpuarch)

  // Make sure the version is installed. If not, warn.
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    fmt.Println("node v"+version+" ("+cpuarch+"-bit) is not installed.")
    if cpuarch == "32" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    if cpuarch == "64" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    return
  }

  // Create or update the symlink
  sym, _ := os.Stat(env.symlink)
  if sym != nil {
    cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
    var output bytes.Buffer
    var _stderr bytes.Buffer
    cmd.Stdout = &output
    cmd.Stderr = &_stderr
    perr := cmd.Run()
    if perr != nil {
      fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
      return
    }
  }

  c := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "mklink", "/D", env.symlink, env.root+"\\v"+version)
  var out bytes.Buffer
  var stderr bytes.Buffer
  c.Stdout = &out
  c.Stderr = &stderr
  err := c.Run()
  if err != nil {
    fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
    return
  }

  // Use the assigned CPU architecture
  cpuarch = arch.Validate(cpuarch)
  e32 := file.Exists(env.root+"\\v"+version+"\\node32.exe")
  e64 := file.Exists(env.root+"\\v"+version+"\\node64.exe")
  used := file.Exists(env.root+"\\v"+version+"\\node.exe")
  if (e32 || e64) {
    if used {
      if e32 {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node64.exe")
        os.Rename(env.root+"\\v"+version+"\\node32.exe",env.root+"\\v"+version+"\\node.exe")
      } else {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node32.exe")
        os.Rename(env.root+"\\v"+version+"\\node64.exe",env.root+"\\v"+version+"\\node.exe")
      }
    } else if e32 || e64 {
      os.Rename(env.root+"\\v"+version+"\\node"+cpuarch+".exe",env.root+"\\v"+version+"\\node.exe")
    }
  }

  fmt.Println("Now using node v"+version+" ("+cpuarch+"-bit)")
}

func useArchitecture(a string) {
  if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) {
    fmt.Println("This computer only supports 32-bit processing.")
    return
  }
  if a == "32" || a == "64" {
    env.arch = a
    saveSettings()
    fmt.Println("Set to "+a+"-bit mode")
  } else {
    fmt.Println("Cannot set architecture to "+a+". Must be 32 or 64 are acceptable values.")
  }
}

func list(listtype string) {
  if listtype == "" {
    listtype = "installed"
  }

  if listtype != "installed" && listtype != "available" {
    fmt.Println("\nInvalid list option.\n\nPlease use on of the following\n - nvm list\n - nvm list installed\n - nvm list available")
    help()
    return
  }

  if listtype == "installed" {
    fmt.Println("")
    inuse, a := node.GetCurrentVersion()
    v := node.GetInstalled(env.root)
    for i := 0; i < len(v); i++ {
      version := v[i]
      isnode, _ := regexp.MatchString("v",version)
      str := ""
      if isnode {
        if "v"+inuse == version {
          str = str+" * "
        } else {
          str = str+" "
        }
        str = str+regexp.MustCompile("v").ReplaceAllString(version,"")
        if "v"+inuse == version {
          str = str+" (Currently using "+a+"-bit executable)"
          // str = ansi.Color(str,"green:black")
        }
        fmt.Printf(str+"\n")
      }
    }
    if len(v) == 0 {
      fmt.Println("No installations recognized.")
    }
  } else {
    _, stable, unstable := node.GetAvailable()
    releases := 15

    fmt.Println("\nShowing the "+strconv.Itoa(releases)+" latest available releases.\n")
    fmt.Println(" STABLE | UNSTABLE ")
    fmt.Println(" ---------------------------")
    for i := 0; i < releases; i++ {
      str := "v"+stable[i]
      for ii := 10-len(str); ii > 0; ii-- {
        str = " "+str
      }
      str = str+" | "
      str2 := "v"+unstable[i]
      for ii := 10-len(str2); ii > 0; ii-- {
        str2 = " "+str2
      }
      fmt.Println(" "+str+str2)
    }
    fmt.Println("\nFor a complete list, visit http://coreybutler.github.io/nodedistro")
  }
}

func enable() {
  dir := ""
  files, _ := ioutil.ReadDir(env.root)
  for _, f := range files {
    if f.IsDir() {
      isnode, _ := regexp.MatchString("v",f.Name())
      if isnode {
        dir = f.Name()
      }
    }
  }
  fmt.Println("nvm enabled")
  if dir != "" {
    use(strings.Trim(regexp.MustCompile("v").ReplaceAllString(dir,"")," \n\r"),env.arch)
  } else {
    fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest")
  }
}

func disable() {
  cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
  cmd.Run()
  fmt.Println("nvm disabled")
}

func help() {
  fmt.Println("\nRunning version "+NvmVersion+".")
  fmt.Println("\nUsage:")
  fmt.Println(" ")
  fmt.Println(" nvm arch : Show if node is running in 32 or 64 bit mode.")
  fmt.Println(" nvm install <version> [arch] : The version can be a node.js version or \"latest\" for the latest stable version.")
  fmt.Println(" Optionally specify whether to install the 32 or 64 bit version (defaults to system arch).")
  fmt.Println(" Set [arch] to \"all\" to install 32 AND 64 bit versions.")
  fmt.Println(" nvm list [available] : List the node.js installations. Type \"available\" at the end to see what can be installed. Aliased as ls.")
  fmt.Println(" nvm on : Enable node.js version management.")
  fmt.Println(" nvm off : Disable node.js version management.")
  fmt.Println(" nvm proxy [url] : Set a proxy to use for downloads. Leave [url] blank to see the current proxy.")
  fmt.Println(" Set [url] to \"none\" to remove the proxy.")
  fmt.Println(" nvm uninstall <version> : The version must be a specific version.")
  // fmt.Println(" nvm update : Automatically update nvm to the latest version.")
  fmt.Println(" nvm use [version] [arch] : Switch to use the specified version. Optionally specify 32/64bit architecture.")
  fmt.Println(" nvm use <arch> will continue using the selected version, but switch to 32/64 bit mode.")
  fmt.Println(" nvm root [path] : Set the directory where nvm should store different versions of node.js.")
  fmt.Println(" If <path> is not set, the current root will be displayed.")
  fmt.Println(" nvm version : Displays the current running version of nvm for Windows. Aliased as v.")
  fmt.Println(" ")
}

// Given a node.js version, returns the associated npm version
func getNpmVersion(nodeversion string) string {
  // Get raw text
  text := web.GetRemoteTextFile("https://raw.githubusercontent.com/coreybutler/nodedistro/master/nodeversions.json")
  // Parse
  var data interface{}
  json.Unmarshal([]byte(text), &data);
  body := data.(map[string]interface{})
  all := body["all"]
  npm := all.(map[string]interface{})
  return npm[nodeversion].(string)
}

func updateRootDir(path string) {
  _, err := os.Stat(path)
  if err != nil {
    fmt.Println(path+" does not exist or could not be found.")
    return
  }
  env.root = path
  saveSettings()
  fmt.Println("\nRoot has been set to "+path)
}

func saveSettings() {
  content := "root: "+strings.Trim(env.root," \n\r")+"\r\narch: "+strings.Trim(env.arch," \n\r")+"\r\nproxy: "+strings.Trim(env.proxy," \n\r")+"\r\noriginalpath: "+strings.Trim(env.originalpath," \n\r")+"\r\noriginalversion: "+strings.Trim(env.originalversion," \n\r")
  ioutil.WriteFile(env.settings, []byte(content), 0644)
}

func Setup() {
  lines, err := file.ReadLines(env.settings)
  if err != nil {
    fmt.Println("\nERROR",err)
    os.Exit(1)
  }

  // Process each line and extract the value
  for _, line := range lines {
    if strings.Contains(line,"root:") {
      env.root = strings.Trim(regexp.MustCompile("root:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"originalpath:") {
      env.originalpath = strings.Trim(regexp.MustCompile("originalpath:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"originalversion:") {
      env.originalversion = strings.Trim(regexp.MustCompile("originalversion:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"arch:"){
      env.arch = strings.Trim(regexp.MustCompile("arch:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"proxy:"){
      env.proxy = strings.Trim(regexp.MustCompile("proxy:").ReplaceAllString(line,"")," \r\n")
      if env.proxy != "none" && env.proxy != "" {
        if strings.ToLower(env.proxy[0:4]) != "http" {
          env.proxy = "http://"+env.proxy
        }
        web.SetProxy(env.proxy)
      }
    }
  }

  env.arch = arch.Validate(env.arch)

  // Make sure the directories exist
  _, e := os.Stat(env.root)
  if e != nil {
    fmt.Println(env.root+" could not be found or does not exist. Exiting.")
    return
  }
}
src/nvm.go
1
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.9984837174415588, 0.0917591005563736, 0.00016214177594520152, 0.00026750550023280084, 0.2844984829425812 ]
{ "id": 0, "code_window": [ " return\n", " }\n", "\n", " // If successful, add npm\n", " npmv := getNpmVersion(version)\n", " success := web.GetNpm(getNpmVersion(version))\n", " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " success := web.GetNpm(env.root, getNpmVersion(version))\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 213 }
@setlocal
@echo off

set CMD=%*
set APP=%1

start wscript //nologo "%~dpn0.vbs" %*
bin/elevate.cmd
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0001696870313026011, 0.0001696870313026011, 0.0001696870313026011, 0.0001696870313026011, 0 ]
{ "id": 0, "code_window": [ " return\n", " }\n", "\n", " // If successful, add npm\n", " npmv := getNpmVersion(version)\n", " success := web.GetNpm(getNpmVersion(version))\n", " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " success := web.GetNpm(env.root, getNpmVersion(version))\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 213 }
root: C:\Users\Corey\AppData\Roaming\nvm
path: C:\Program Files\nodejs
arch: 64
proxy: none
examples/settings.txt
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00016999342187773436, 0.00016999342187773436, 0.00016999342187773436, 0.00016999342187773436, 0 ]
{ "id": 0, "code_window": [ " return\n", " }\n", "\n", " // If successful, add npm\n", " npmv := getNpmVersion(version)\n", " success := web.GetNpm(getNpmVersion(version))\n", " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ " success := web.GetNpm(env.root, getNpmVersion(version))\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 213 }
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

dist
src/v*
bin/*.exe
!bin/buildtools/*
bin/*.zip
bin/nvm/*
.gitignore
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00016660764231346548, 0.00016484946536365896, 0.0001608840248081833, 0.00016595306806266308, 0.0000023050820345815737 ]
{ "id": 1, "code_window": [ " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n", " // Extract npm to the temp directory\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ " // new temp directory under the nvm root\n", " tempDir := env.root + \"\\\\temp\"\n", "\n" ], "file_path": "src/nvm.go", "type": "add", "edit_start_line_idx": 217 }
package main

import (
  "fmt"
  "os"
  "os/exec"
  "strings"
  "io/ioutil"
  "regexp"
  "bytes"
  "encoding/json"
  "strconv"
  "./nvm/web"
  "./nvm/arch"
  "./nvm/file"
  "./nvm/node"
  // "./ansi"
)

const (
  NvmVersion = "1.0.6"
)

type Environment struct {
  settings        string
  root            string
  symlink         string
  arch            string
  proxy           string
  originalpath    string
  originalversion string
}

var env = &Environment{
  settings: os.Getenv("NVM_HOME")+"\\settings.txt",
  root: "",
  symlink: os.Getenv("NVM_SYMLINK"),
  arch: os.Getenv("PROCESSOR_ARCHITECTURE"),
  proxy: "none",
  originalpath: "",
  originalversion: "",
}

func main() {
  args := os.Args
  detail := ""
  procarch := arch.Validate(env.arch)

  Setup()

  // Capture any additional arguments
  if len(args) > 2 {
    detail = strings.ToLower(args[2])
  }
  if len(args) > 3 {
    procarch = args[3]
  }
  if len(args) < 2 {
    help()
    return
  }

  // Run the appropriate method
  switch args[1] {
    case "install": install(detail,procarch)
    case "uninstall": uninstall(detail)
    case "use": use(detail,procarch)
    case "list": list(detail)
    case "ls": list(detail)
    case "on": enable()
    case "off": disable()
    case "root":
      if len(args) == 3 {
        updateRootDir(args[2])
      } else {
        fmt.Println("\nCurrent Root: "+env.root)
      }
    case "version": fmt.Println(NvmVersion)
    case "v": fmt.Println(NvmVersion)
    case "arch":
      if strings.Trim(detail," \r\n") != "" {
        detail = strings.Trim(detail," \r\n")
        if detail != "32" && detail != "64" {
          fmt.Println("\""+detail+"\" is an invalid architecture. Use 32 or 64.")
          return
        }
        env.arch = detail
        saveSettings()
        fmt.Println("Default architecture set to "+detail+"-bit.")
        return
      }
      _, a := node.GetCurrentVersion()
      fmt.Println("System Default: "+env.arch+"-bit.")
      fmt.Println("Currently Configured: "+a+"-bit.")
    case "proxy":
      if detail == "" {
        fmt.Println("Current proxy: "+env.proxy)
      } else {
        env.proxy = detail
        saveSettings()
      }
    case "update": update()
    default: help()
  }
}

func update() {
  // cmd := exec.Command("cmd", "/d", "echo", "testing")
  // var output bytes.Buffer
  // var _stderr bytes.Buffer
  // cmd.Stdout = &output
  // cmd.Stderr = &_stderr
  // perr := cmd.Run()
  // if perr != nil {
  //   fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
  //   return
  // }
}

func CheckVersionExceedsLatest(version string) bool{
  content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
  re := regexp.MustCompile("node-v(.+)+msi")
  reg := regexp.MustCompile("node-v|-x.+")
  latest := reg.ReplaceAllString(re.FindString(content),"")
  if version <= latest {
    return false
  } else {
    return true
  }
}

func install(version string, cpuarch string) {
  if version == "" {
    fmt.Println("\nInvalid version.")
    fmt.Println(" ")
    help()
    return
  }

  cpuarch = strings.ToLower(cpuarch)

  if cpuarch != "" {
    if cpuarch != "32" && cpuarch != "64" && cpuarch != "all" {
      fmt.Println("\""+cpuarch+"\" is not a valid CPU architecture. Must be 32 or 64.")
      return
    }
  } else {
    cpuarch = env.arch
  }

  if cpuarch != "all" {
    cpuarch = arch.Validate(cpuarch)
  }

  if CheckVersionExceedsLatest(version) {
    fmt.Println("Node.js v"+version+" is not yet released or available.")
    return
  }

  if cpuarch == "64" && !web.IsNode64bitAvailable(version) {
    fmt.Println("Node.js v"+version+" is only available in 32-bit.")
    return
  }

  // If user specifies "latest" version, find out what version is
  if version == "latest" {
    content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
    re := regexp.MustCompile("node-v(.+)+msi")
    reg := regexp.MustCompile("node-v|-x.+")
    version = reg.ReplaceAllString(re.FindString(content),"")
  }

  // Check to see if the version is already installed
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    if !node.IsVersionAvailable(version){
      fmt.Println("Version "+version+" is not available. If you are attempting to download a \"just released\" version,")
      fmt.Println("it may not be recognized by the nvm service yet (updated hourly). If you feel this is in error and")
      fmt.Println("you know the version exists, please visit http://github.com/coreybutler/nodedistro and submit a PR.")
      return
    }

    // Make the output directories
    os.Mkdir(env.root+"\\v"+version,os.ModeDir)
    os.Mkdir(env.root+"\\v"+version+"\\node_modules",os.ModeDir)

    // Download node
    if (cpuarch == "32" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"32") {
      success := web.GetNodeJS(env.root,version,"32");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 32-bit executable.")
        return
      }
    }
    if (cpuarch == "64" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"64") {
      success := web.GetNodeJS(env.root,version,"64");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 64-bit executable.")
        return
      }
    }

    if file.Exists(env.root+"\\v"+version+"\\node_modules\\npm") {
      return
    }

    // If successful, add npm
    npmv := getNpmVersion(version)
    success := web.GetNpm(getNpmVersion(version))
    if success {
      fmt.Printf("Installing npm v"+npmv+"...")

      // Extract npm to the temp directory
      file.Unzip(os.TempDir()+"\\npm-v"+npmv+".zip",os.TempDir()+"\\nvm-npm")

      // Copy the npm and npm.cmd files to the installation directory
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm",env.root+"\\v"+version+"\\npm")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm.cmd",env.root+"\\v"+version+"\\npm.cmd")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv,env.root+"\\v"+version+"\\node_modules\\npm")

      // Remove the source file
      os.RemoveAll(os.TempDir()+"\\nvm-npm")

      fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use "+version)
    } else {
      fmt.Println("Could not download npm for node v"+version+".")
      fmt.Println("Please visit https://github.com/npm/npm/releases/tag/v"+npmv+" to download npm.")
      fmt.Println("It should be extracted to "+env.root+"\\v"+version)
    }

    // If this is ever shipped for Mac, it should use homebrew.
    // If this ever ships on Linux, it should be on bintray so it can use yum, apt-get, etc.
    return
  } else {
    fmt.Println("Version "+version+" is already installed.")
    return
  }
}

func uninstall(version string) {
  // Make sure a version is specified
  if len(version) == 0 {
    fmt.Println("Provide the version you want to uninstall.")
    help()
    return
  }

  // Determine if the version exists and skip if it doesn't
  if node.IsVersionInstalled(env.root,version,"32") || node.IsVersionInstalled(env.root,version,"64") {
    fmt.Printf("Uninstalling node v"+version+"...")
    v, _ := node.GetCurrentVersion()
    if v == version {
      cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
      cmd.Run()
    }
    e := os.RemoveAll(env.root+"\\v"+version)
    if e != nil {
      fmt.Println("Error removing node v"+version)
      fmt.Println("Manually remove "+env.root+"\\v"+version+".")
    } else {
      fmt.Printf(" done")
    }
  } else {
    fmt.Println("node v"+version+" is not installed. Type \"nvm list\" to see what is installed.")
  }
  return
}

func use(version string, cpuarch string) {
  if version == "32" || version == "64" {
    cpuarch = version
    v, _ := node.GetCurrentVersion()
    version = v
  }

  cpuarch = arch.Validate(cpuarch)

  // Make sure the version is installed. If not, warn.
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    fmt.Println("node v"+version+" ("+cpuarch+"-bit) is not installed.")
    if cpuarch == "32" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    if cpuarch == "64" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    return
  }

  // Create or update the symlink
  sym, _ := os.Stat(env.symlink)
  if sym != nil {
    cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
    var output bytes.Buffer
    var _stderr bytes.Buffer
    cmd.Stdout = &output
    cmd.Stderr = &_stderr
    perr := cmd.Run()
    if perr != nil {
      fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
      return
    }
  }

  c := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "mklink", "/D", env.symlink, env.root+"\\v"+version)
  var out bytes.Buffer
  var stderr bytes.Buffer
  c.Stdout = &out
  c.Stderr = &stderr
  err := c.Run()
  if err != nil {
    fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
    return
  }

  // Use the assigned CPU architecture
  cpuarch = arch.Validate(cpuarch)
  e32 := file.Exists(env.root+"\\v"+version+"\\node32.exe")
  e64 := file.Exists(env.root+"\\v"+version+"\\node64.exe")
  used := file.Exists(env.root+"\\v"+version+"\\node.exe")
  if (e32 || e64) {
    if used {
      if e32 {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node64.exe")
        os.Rename(env.root+"\\v"+version+"\\node32.exe",env.root+"\\v"+version+"\\node.exe")
      } else {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node32.exe")
        os.Rename(env.root+"\\v"+version+"\\node64.exe",env.root+"\\v"+version+"\\node.exe")
      }
    } else if e32 || e64 {
      os.Rename(env.root+"\\v"+version+"\\node"+cpuarch+".exe",env.root+"\\v"+version+"\\node.exe")
    }
  }

  fmt.Println("Now using node v"+version+" ("+cpuarch+"-bit)")
}

func useArchitecture(a string) {
  if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) {
    fmt.Println("This computer only supports 32-bit processing.")
    return
  }
  if a == "32" || a == "64" {
    env.arch = a
    saveSettings()
    fmt.Println("Set to "+a+"-bit mode")
  } else {
    fmt.Println("Cannot set architecture to "+a+". Must be 32 or 64 are acceptable values.")
  }
}

func list(listtype string) {
  if listtype == "" {
    listtype = "installed"
  }

  if listtype != "installed" && listtype != "available" {
    fmt.Println("\nInvalid list option.\n\nPlease use on of the following\n - nvm list\n - nvm list installed\n - nvm list available")
    help()
    return
  }

  if listtype == "installed" {
    fmt.Println("")
    inuse, a := node.GetCurrentVersion()
    v := node.GetInstalled(env.root)
    for i := 0; i < len(v); i++ {
      version := v[i]
      isnode, _ := regexp.MatchString("v",version)
      str := ""
      if isnode {
        if "v"+inuse == version {
          str = str+" * "
        } else {
          str = str+" "
        }
        str = str+regexp.MustCompile("v").ReplaceAllString(version,"")
        if "v"+inuse == version {
          str = str+" (Currently using "+a+"-bit executable)"
          // str = ansi.Color(str,"green:black")
        }
        fmt.Printf(str+"\n")
      }
    }
    if len(v) == 0 {
      fmt.Println("No installations recognized.")
    }
  } else {
    _, stable, unstable := node.GetAvailable()
    releases := 15

    fmt.Println("\nShowing the "+strconv.Itoa(releases)+" latest available releases.\n")
    fmt.Println(" STABLE | UNSTABLE ")
    fmt.Println(" ---------------------------")
    for i := 0; i < releases; i++ {
      str := "v"+stable[i]
      for ii := 10-len(str); ii > 0; ii-- {
        str = " "+str
      }
      str = str+" | "
      str2 := "v"+unstable[i]
      for ii := 10-len(str2); ii > 0; ii-- {
        str2 = " "+str2
      }
      fmt.Println(" "+str+str2)
    }
    fmt.Println("\nFor a complete list, visit http://coreybutler.github.io/nodedistro")
  }
}

func enable() {
  dir := ""
  files, _ := ioutil.ReadDir(env.root)
  for _, f := range files {
    if f.IsDir() {
      isnode, _ := regexp.MatchString("v",f.Name())
      if isnode {
        dir = f.Name()
      }
    }
  }
  fmt.Println("nvm enabled")
  if dir != "" {
    use(strings.Trim(regexp.MustCompile("v").ReplaceAllString(dir,"")," \n\r"),env.arch)
  } else {
    fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest")
  }
}

func disable() {
  cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
  cmd.Run()
  fmt.Println("nvm disabled")
}

func help() {
  fmt.Println("\nRunning version "+NvmVersion+".")
  fmt.Println("\nUsage:")
  fmt.Println(" ")
  fmt.Println(" nvm arch : Show if node is running in 32 or 64 bit mode.")
  fmt.Println(" nvm install <version> [arch] : The version can be a node.js version or \"latest\" for the latest stable version.")
  fmt.Println(" Optionally specify whether to install the 32 or 64 bit version (defaults to system arch).")
  fmt.Println(" Set [arch] to \"all\" to install 32 AND 64 bit versions.")
  fmt.Println(" nvm list [available] : List the node.js installations. Type \"available\" at the end to see what can be installed. Aliased as ls.")
  fmt.Println(" nvm on : Enable node.js version management.")
  fmt.Println(" nvm off : Disable node.js version management.")
  fmt.Println(" nvm proxy [url] : Set a proxy to use for downloads. Leave [url] blank to see the current proxy.")
  fmt.Println(" Set [url] to \"none\" to remove the proxy.")
  fmt.Println(" nvm uninstall <version> : The version must be a specific version.")
  // fmt.Println(" nvm update : Automatically update nvm to the latest version.")
  fmt.Println(" nvm use [version] [arch] : Switch to use the specified version. Optionally specify 32/64bit architecture.")
  fmt.Println(" nvm use <arch> will continue using the selected version, but switch to 32/64 bit mode.")
  fmt.Println(" nvm root [path] : Set the directory where nvm should store different versions of node.js.")
  fmt.Println(" If <path> is not set, the current root will be displayed.")
  fmt.Println(" nvm version : Displays the current running version of nvm for Windows. Aliased as v.")
  fmt.Println(" ")
}

// Given a node.js version, returns the associated npm version
func getNpmVersion(nodeversion string) string {
  // Get raw text
  text := web.GetRemoteTextFile("https://raw.githubusercontent.com/coreybutler/nodedistro/master/nodeversions.json")
  // Parse
  var data interface{}
  json.Unmarshal([]byte(text), &data);
  body := data.(map[string]interface{})
  all := body["all"]
  npm := all.(map[string]interface{})
  return npm[nodeversion].(string)
}

func updateRootDir(path string) {
  _, err := os.Stat(path)
  if err != nil {
    fmt.Println(path+" does not exist or could not be found.")
    return
  }
  env.root = path
  saveSettings()
  fmt.Println("\nRoot has been set to "+path)
}

func saveSettings() {
  content := "root: "+strings.Trim(env.root," \n\r")+"\r\narch: "+strings.Trim(env.arch," \n\r")+"\r\nproxy: "+strings.Trim(env.proxy," \n\r")+"\r\noriginalpath: "+strings.Trim(env.originalpath," \n\r")+"\r\noriginalversion: "+strings.Trim(env.originalversion," \n\r")
  ioutil.WriteFile(env.settings, []byte(content), 0644)
}

func Setup() {
  lines, err := file.ReadLines(env.settings)
  if err != nil {
    fmt.Println("\nERROR",err)
    os.Exit(1)
  }

  // Process each line and extract the value
  for _, line := range lines {
    if strings.Contains(line,"root:") {
      env.root = strings.Trim(regexp.MustCompile("root:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"originalpath:") {
      env.originalpath = strings.Trim(regexp.MustCompile("originalpath:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"originalversion:") {
      env.originalversion = strings.Trim(regexp.MustCompile("originalversion:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"arch:"){
      env.arch = strings.Trim(regexp.MustCompile("arch:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"proxy:"){
      env.proxy = strings.Trim(regexp.MustCompile("proxy:").ReplaceAllString(line,"")," \r\n")
      if env.proxy != "none" && env.proxy != "" {
        if strings.ToLower(env.proxy[0:4]) != "http" {
          env.proxy = "http://"+env.proxy
        }
        web.SetProxy(env.proxy)
      }
    }
  }

  env.arch = arch.Validate(env.arch)

  // Make sure the directories exist
  _, e := os.Stat(env.root)
  if e != nil {
    fmt.Println(env.root+" could not be found or does not exist. Exiting.")
    return
  }
}
src/nvm.go
1
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.9506840705871582, 0.018454767763614655, 0.00016330018115695566, 0.00018187637033406645, 0.12689527869224548 ]
{ "id": 1, "code_window": [ " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n", " // Extract npm to the temp directory\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ " // new temp directory under the nvm root\n", " tempDir := env.root + \"\\\\temp\"\n", "\n" ], "file_path": "src/nvm.go", "type": "add", "edit_start_line_idx": 217 }
package file

import(
  "archive/zip"
  "bufio"
  "log"
  "io"
  "os"
  "path/filepath"
  "strings"
)

// Function courtesy http://stackoverflow.com/users/1129149/swtdrgn
func Unzip(src, dest string) error {
  r, err := zip.OpenReader(src)
  if err != nil {
    return err
  }
  defer r.Close()

  for _, f := range r.File {
    rc, err := f.Open()
    if err != nil {
      return err
    }
    defer rc.Close()

    fpath := filepath.Join(dest, f.Name)
    if f.FileInfo().IsDir() {
      os.MkdirAll(fpath, f.Mode())
    } else {
      var fdir string
      if lastIndex := strings.LastIndex(fpath,string(os.PathSeparator)); lastIndex > -1 {
        fdir = fpath[:lastIndex]
      }

      err = os.MkdirAll(fdir, f.Mode())
      if err != nil {
        log.Fatal(err)
        return err
      }
      f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
      if err != nil {
        return err
      }
      defer f.Close()

      _, err = io.Copy(f, rc)
      if err != nil {
        return err
      }
    }
  }
  return nil
}

func ReadLines(path string) ([]string, error) {
  file, err := os.Open(path)
  if err != nil {
    return nil, err
  }
  defer file.Close()

  var lines []string
  scanner := bufio.NewScanner(file)
  for scanner.Scan() {
    lines = append(lines, scanner.Text())
  }
  return lines, scanner.Err()
}

func Exists(filename string) bool {
  _, err := os.Stat(filename);
  return err == nil
}
src/nvm/file/file.go
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00025880883913487196, 0.00018489404465071857, 0.00016615296772215515, 0.00017179874703288078, 0.000029970968171255663 ]
{ "id": 1, "code_window": [ " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n", " // Extract npm to the temp directory\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ " // new temp directory under the nvm root\n", " tempDir := env.root + \"\\\\temp\"\n", "\n" ], "file_path": "src/nvm.go", "type": "add", "edit_start_line_idx": 217 }
; *** Inno Setup version 5.5.3+ English messages ***
;
; To download user-contributed translations of this file, go to:
; http://www.jrsoftware.org/files/istrans/
;
; Note: When translating this text, do not add periods (.) to the end of
; messages that didn't have them already, because on those messages Inno
; Setup adds the periods automatically (appending a period would result in
; two periods being displayed).

[LangOptions]
; The following three entries are very important. Be sure to read and
; understand the '[LangOptions] section' topic in the help file.
LanguageName=English
LanguageID=$0409
LanguageCodePage=0
; If the language you are translating to requires special font faces or
; sizes, uncomment any of the following entries and change them accordingly.
;DialogFontName=
;DialogFontSize=8
;WelcomeFontName=Verdana
;WelcomeFontSize=12
;TitleFontName=Arial
;TitleFontSize=29
;CopyrightFontName=Arial
;CopyrightFontSize=8

[Messages]

; *** Application titles
SetupAppTitle=Setup
SetupWindowTitle=Setup - %1
UninstallAppTitle=Uninstall
UninstallAppFullTitle=%1 Uninstall

; *** Misc. common
InformationTitle=Information
ConfirmTitle=Confirm
ErrorTitle=Error

; *** SetupLdr messages
SetupLdrStartupMessage=This will install %1. Do you wish to continue?
LdrCannotCreateTemp=Unable to create a temporary file. Setup aborted
LdrCannotExecTemp=Unable to execute file in the temporary directory. Setup aborted

; *** Startup error messages
LastErrorMessage=%1.%n%nError %2: %3
SetupFileMissing=The file %1 is missing from the installation directory. Please correct the problem or obtain a new copy of the program.
SetupFileCorrupt=The setup files are corrupted. Please obtain a new copy of the program.
SetupFileCorruptOrWrongVer=The setup files are corrupted, or are incompatible with this version of Setup. Please correct the problem or obtain a new copy of the program.
InvalidParameter=An invalid parameter was passed on the command line:%n%n%1
SetupAlreadyRunning=Setup is already running.
WindowsVersionNotSupported=This program does not support the version of Windows your computer is running.
WindowsServicePackRequired=This program requires %1 Service Pack %2 or later.
NotOnThisPlatform=This program will not run on %1.
OnlyOnThisPlatform=This program must be run on %1.
OnlyOnTheseArchitectures=This program can only be installed on versions of Windows designed for the following processor architectures:%n%n%1
MissingWOW64APIs=The version of Windows you are running does not include functionality required by Setup to perform a 64-bit installation. To correct this problem, please install Service Pack %1.
WinVersionTooLowError=This program requires %1 version %2 or later.
WinVersionTooHighError=This program cannot be installed on %1 version %2 or later.
AdminPrivilegesRequired=You must be logged in as an administrator when installing this program.
PowerUserPrivilegesRequired=You must be logged in as an administrator or as a member of the Power Users group when installing this program.
SetupAppRunningError=Setup has detected that %1 is currently running.%n%nPlease close all instances of it now, then click OK to continue, or Cancel to exit.
UninstallAppRunningError=Uninstall has detected that %1 is currently running.%n%nPlease close all instances of it now, then click OK to continue, or Cancel to exit.

; *** Misc. errors
ErrorCreatingDir=Setup was unable to create the directory "%1"
ErrorTooManyFilesInDir=Unable to create a file in the directory "%1" because it contains too many files

; *** Setup common messages
ExitSetupTitle=Exit Setup
ExitSetupMessage=Setup is not complete. If you exit now, the program will not be installed.%n%nYou may run Setup again at another time to complete the installation.%n%nExit Setup?
AboutSetupMenuItem=&About Setup...
AboutSetupTitle=About Setup
AboutSetupMessage=%1 version %2%n%3%n%n%1 home page:%n%4
AboutSetupNote=
TranslatorNote=

; *** Buttons
ButtonBack=< &Back
ButtonNext=&Next >
ButtonInstall=&Install
ButtonOK=OK
ButtonCancel=Cancel
ButtonYes=&Yes
ButtonYesToAll=Yes to &All
ButtonNo=&No
ButtonNoToAll=N&o to All
ButtonFinish=&Finish
ButtonBrowse=&Browse...
ButtonWizardBrowse=B&rowse...
ButtonNewFolder=&Make New Folder

; *** "Select Language" dialog messages
SelectLanguageTitle=Select Setup Language
SelectLanguageLabel=Select the language to use during the installation:

; *** Common wizard text
ClickNext=Click Next to continue, or Cancel to exit Setup.
BeveledLabel=
BrowseDialogTitle=Browse For Folder
BrowseDialogLabel=Select a folder in the list below, then click OK.
NewFolderName=New Folder

; *** "Welcome" wizard page
WelcomeLabel1=Welcome to the [name] Setup Wizard
WelcomeLabel2=This will install [name/ver] on your computer.%n%nIt is recommended that you close all other applications before continuing.

; *** "Password" wizard page
WizardPassword=Password
PasswordLabel1=This installation is password protected.
PasswordLabel3=Please provide the password, then click Next to continue. Passwords are case-sensitive.
PasswordEditLabel=&Password:
IncorrectPassword=The password you entered is not correct. Please try again.

; *** "License Agreement" wizard page
WizardLicense=License Agreement
LicenseLabel=Please read the following important information before continuing.
LicenseLabel3=Please read the following License Agreement. You must accept the terms of this agreement before continuing with the installation.
LicenseAccepted=I &accept the agreement
LicenseNotAccepted=I &do not accept the agreement

; *** "Information" wizard pages
WizardInfoBefore=Information
InfoBeforeLabel=Please read the following important information before continuing.
InfoBeforeClickLabel=When you are ready to continue with Setup, click Next.
WizardInfoAfter=Information
InfoAfterLabel=Please read the following important information before continuing.
InfoAfterClickLabel=When you are ready to continue with Setup, click Next.

; *** "User Information" wizard page
WizardUserInfo=User Information
UserInfoDesc=Please enter your information.
UserInfoName=&User Name:
UserInfoOrg=&Organization:
UserInfoSerial=&Serial Number:
UserInfoNameRequired=You must enter a name.

; *** "Select Destination Location" wizard page
WizardSelectDir=Select Destination Location
SelectDirDesc=Where should [name] be installed?
SelectDirLabel3=Setup will install [name] into the following folder.
SelectDirBrowseLabel=To continue, click Next. If you would like to select a different folder, click Browse.
DiskSpaceMBLabel=At least [mb] MB of free disk space is required.
CannotInstallToNetworkDrive=Setup cannot install to a network drive.
CannotInstallToUNCPath=Setup cannot install to a UNC path.
InvalidPath=You must enter a full path with drive letter; for example:%n%nC:\APP%n%nor a UNC path in the form:%n%n\\server\share
InvalidDrive=The drive or UNC share you selected does not exist or is not accessible. Please select another.
DiskSpaceWarningTitle=Not Enough Disk Space
DiskSpaceWarning=Setup requires at least %1 KB of free space to install, but the selected drive only has %2 KB available.%n%nDo you want to continue anyway?
DirNameTooLong=The folder name or path is too long.
InvalidDirName=The folder name is not valid.
BadDirName32=Folder names cannot include any of the following characters:%n%n%1
DirExistsTitle=Folder Exists
DirExists=The folder:%n%n%1%n%nalready exists. Would you like to install to that folder anyway?
DirDoesntExistTitle=Folder Does Not Exist
DirDoesntExist=The folder:%n%n%1%n%ndoes not exist. Would you like the folder to be created?

; *** "Select Components" wizard page
WizardSelectComponents=Select Components
SelectComponentsDesc=Which components should be installed?
SelectComponentsLabel2=Select the components you want to install; clear the components you do not want to install. Click Next when you are ready to continue.
FullInstallation=Full installation
; if possible don't translate 'Compact' as 'Minimal' (I mean 'Minimal' in your language)
CompactInstallation=Compact installation
CustomInstallation=Custom installation
NoUninstallWarningTitle=Components Exist
NoUninstallWarning=Setup has detected that the following components are already installed on your computer:%n%n%1%n%nDeselecting these components will not uninstall them.%n%nWould you like to continue anyway?
ComponentSize1=%1 KB
ComponentSize2=%1 MB
ComponentsDiskSpaceMBLabel=Current selection requires at least [mb] MB of disk space.

; *** "Select Additional Tasks" wizard page
WizardSelectTasks=Select Additional Tasks
SelectTasksDesc=Which additional tasks should be performed?
SelectTasksLabel2=Select the additional tasks you would like Setup to perform while installing [name], then click Next.

; *** "Select Start Menu Folder" wizard page
WizardSelectProgramGroup=Select Start Menu Folder
SelectStartMenuFolderDesc=Where should Setup place the program's shortcuts?
SelectStartMenuFolderLabel3=Setup will create the program's shortcuts in the following Start Menu folder.
SelectStartMenuFolderBrowseLabel=To continue, click Next. If you would like to select a different folder, click Browse.
MustEnterGroupName=You must enter a folder name.
GroupNameTooLong=The folder name or path is too long.
InvalidGroupName=The folder name is not valid.
BadGroupName=The folder name cannot include any of the following characters:%n%n%1
NoProgramGroupCheck2=&Don't create a Start Menu folder

; *** "Ready to Install" wizard page
WizardReady=Ready to Install
ReadyLabel1=Setup is now ready to begin installing [name] on your computer.
ReadyLabel2a=Click Install to continue with the installation, or click Back if you want to review or change any settings.
ReadyLabel2b=Click Install to continue with the installation.
ReadyMemoUserInfo=User information:
ReadyMemoDir=Destination location:
ReadyMemoType=Setup type:
ReadyMemoComponents=Selected components:
ReadyMemoGroup=Start Menu folder:
ReadyMemoTasks=Additional tasks:

; *** "Preparing to Install" wizard page
WizardPreparing=Preparing to Install
PreparingDesc=Setup is preparing to install [name] on your computer.
PreviousInstallNotCompleted=The installation/removal of a previous program was not completed. You will need to restart your computer to complete that installation.%n%nAfter restarting your computer, run Setup again to complete the installation of [name].
CannotContinue=Setup cannot continue. Please click Cancel to exit.
ApplicationsFound=The following applications are using files that need to be updated by Setup. It is recommended that you allow Setup to automatically close these applications.
ApplicationsFound2=The following applications are using files that need to be updated by Setup. It is recommended that you allow Setup to automatically close these applications. After the installation has completed, Setup will attempt to restart the applications.
CloseApplications=&Automatically close the applications
DontCloseApplications=&Do not close the applications
ErrorCloseApplications=Setup was unable to automatically close all applications. It is recommended that you close all applications using files that need to be updated by Setup before continuing.

; *** "Installing" wizard page
WizardInstalling=Installing
InstallingLabel=Please wait while Setup installs [name] on your computer.

; *** "Setup Completed" wizard page
FinishedHeadingLabel=Completing the [name] Setup Wizard
FinishedLabelNoIcons=Setup has finished installing [name] on your computer.
FinishedLabel=Setup has finished installing [name] on your computer. The application may be launched by selecting the installed icons.
ClickFinish=Click Finish to exit Setup.
FinishedRestartLabel=To complete the installation of [name], Setup must restart your computer. Would you like to restart now?
FinishedRestartMessage=To complete the installation of [name], Setup must restart your computer.%n%nWould you like to restart now?
ShowReadmeCheck=Yes, I would like to view the README file
YesRadio=&Yes, restart the computer now
NoRadio=&No, I will restart the computer later
; used for example as 'Run MyProg.exe'
RunEntryExec=Run %1
; used for example as 'View Readme.txt'
RunEntryShellExec=View %1

; *** "Setup Needs the Next Disk" stuff
ChangeDiskTitle=Setup Needs the Next Disk
SelectDiskLabel2=Please insert Disk %1 and click OK.%n%nIf the files on this disk can be found in a folder other than the one displayed below, enter the correct path or click Browse.
PathLabel=&Path:
FileNotInDir2=The file "%1" could not be located in "%2". Please insert the correct disk or select another folder.
SelectDirectoryLabel=Please specify the location of the next disk.

; *** Installation phase messages
SetupAborted=Setup was not completed.%n%nPlease correct the problem and run Setup again.
EntryAbortRetryIgnore=Click Retry to try again, Ignore to proceed anyway, or Abort to cancel installation.

; *** Installation status messages
StatusClosingApplications=Closing applications...
StatusCreateDirs=Creating directories...
StatusExtractFiles=Extracting files...
StatusCreateIcons=Creating shortcuts...
StatusCreateIniEntries=Creating INI entries...
StatusCreateRegistryEntries=Creating registry entries...
StatusRegisterFiles=Registering files...
StatusSavingUninstall=Saving uninstall information...
StatusRunProgram=Finishing installation...
StatusRestartingApplications=Restarting applications...
StatusRollback=Rolling back changes...

; *** Misc. errors
ErrorInternal2=Internal error: %1
ErrorFunctionFailedNoCode=%1 failed
ErrorFunctionFailed=%1 failed; code %2
ErrorFunctionFailedWithMessage=%1 failed; code %2.%n%3
ErrorExecutingProgram=Unable to execute file:%n%1

; *** Registry errors
ErrorRegOpenKey=Error opening registry key:%n%1\%2
ErrorRegCreateKey=Error creating registry key:%n%1\%2
ErrorRegWriteKey=Error writing to registry key:%n%1\%2

; *** INI errors
ErrorIniEntry=Error creating INI entry in file "%1".

; *** File copying errors
FileAbortRetryIgnore=Click Retry to try again, Ignore to skip this file (not recommended), or Abort to cancel installation.
FileAbortRetryIgnore2=Click Retry to try again, Ignore to proceed anyway (not recommended), or Abort to cancel installation.
SourceIsCorrupted=The source file is corrupted
SourceDoesntExist=The source file "%1" does not exist
ExistingFileReadOnly=The existing file is marked as read-only.%n%nClick Retry to remove the read-only attribute and try again, Ignore to skip this file, or Abort to cancel installation.
ErrorReadingExistingDest=An error occurred while trying to read the existing file:
FileExists=The file already exists.%n%nWould you like Setup to overwrite it?
ExistingFileNewer=The existing file is newer than the one Setup is trying to install. It is recommended that you keep the existing file.%n%nDo you want to keep the existing file?
ErrorChangingAttr=An error occurred while trying to change the attributes of the existing file:
ErrorCreatingTemp=An error occurred while trying to create a file in the destination directory:
ErrorReadingSource=An error occurred while trying to read the source file:
ErrorCopying=An error occurred while trying to copy a file:
ErrorReplacingExistingFile=An error occurred while trying to replace the existing file:
ErrorRestartReplace=RestartReplace failed:
ErrorRenamingTemp=An error occurred while trying to rename a file in the destination directory:
ErrorRegisterServer=Unable to register the DLL/OCX: %1
ErrorRegSvr32Failed=RegSvr32 failed with exit code %1
ErrorRegisterTypeLib=Unable to register the type library: %1

; *** Post-installation errors
ErrorOpeningReadme=An error occurred while trying to open the README file.
ErrorRestartingComputer=Setup was unable to restart the computer. Please do this manually.

; *** Uninstaller messages
UninstallNotFound=File "%1" does not exist. Cannot uninstall.
UninstallOpenError=File "%1" could not be opened. Cannot uninstall
UninstallUnsupportedVer=The uninstall log file "%1" is in a format not recognized by this version of the uninstaller. Cannot uninstall
UninstallUnknownEntry=An unknown entry (%1) was encountered in the uninstall log
ConfirmUninstall=Are you sure you want to completely remove %1 and all of its components?
UninstallOnlyOnWin64=This installation can only be uninstalled on 64-bit Windows.
OnlyAdminCanUninstall=This installation can only be uninstalled by a user with administrative privileges.
UninstallStatusLabel=Please wait while %1 is removed from your computer.
UninstalledAll=%1 was successfully removed from your computer.
UninstalledMost=%1 uninstall complete.%n%nSome elements could not be removed. These can be removed manually.
UninstalledAndNeedsRestart=To complete the uninstallation of %1, your computer must be restarted.%n%nWould you like to restart now?
UninstallDataCorrupted="%1" file is corrupted. Cannot uninstall

; *** Uninstallation phase messages
ConfirmDeleteSharedFileTitle=Remove Shared File?
ConfirmDeleteSharedFile2=The system indicates that the following shared file is no longer in use by any programs. Would you like for Uninstall to remove this shared file?%n%nIf any programs are still using this file and it is removed, those programs may not function properly. If you are unsure, choose No. Leaving the file on your system will not cause any harm.
SharedFileNameLabel=File name:
SharedFileLocationLabel=Location:
WizardUninstalling=Uninstall Status
StatusUninstalling=Uninstalling %1...

; *** Shutdown block reasons
ShutdownBlockReasonInstallingApp=Installing %1.
ShutdownBlockReasonUninstallingApp=Uninstalling %1.

; The custom messages below aren't used by Setup itself, but if you make
; use of them in your scripts, you'll want to translate them.

[CustomMessages]

NameAndVersion=%1 version %2
AdditionalIcons=Additional icons:
CreateDesktopIcon=Create a &desktop icon
CreateQuickLaunchIcon=Create a &Quick Launch icon
ProgramOnTheWeb=%1 on the Web
UninstallProgram=Uninstall %1
LaunchProgram=Launch %1
AssocFileExtension=&Associate %1 with the %2 file extension
AssocingFileExtension=Associating %1 with the %2 file extension...
AutoStartProgramGroupDescription=Startup:
AutoStartProgram=Automatically start %1
AddonHostProgramNotFound=%1 could not be located in the folder you selected.%n%nDo you want to continue anyway?
buildtools/Default.isl
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0004463444638531655, 0.0002132085501216352, 0.00016493580187670887, 0.00017675114213488996, 0.00007499306957470253 ]
{ "id": 1, "code_window": [ " if success {\n", " fmt.Printf(\"Installing npm v\"+npmv+\"...\")\n", "\n", " // Extract npm to the temp directory\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ " // new temp directory under the nvm root\n", " tempDir := env.root + \"\\\\temp\"\n", "\n" ], "file_path": "src/nvm.go", "type": "add", "edit_start_line_idx": 217 }
Set Shell = CreateObject("Shell.Application")
Set WShell = WScript.CreateObject("WScript.Shell")
Set ProcEnv = WShell.Environment("PROCESS")

cmd = ProcEnv("CMD")
app = ProcEnv("APP")
args= Right(cmd,(Len(cmd)-Len(app)))

If (WScript.Arguments.Count >= 1) Then
  Shell.ShellExecute app, args, "", "runas", 0
Else
  WScript.Quit
End If
bin/elevate.vbs
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00030356610659509897, 0.0002375894400756806, 0.00017161277355626225, 0.0002375894400756806, 0.00006597666651941836 ]
{ "id": 2, "code_window": [ " // Extract npm to the temp directory\n", " file.Unzip(os.TempDir()+\"\\\\npm-v\"+npmv+\".zip\",os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " // Copy the npm and npm.cmd files to the installation directory\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ " file.Unzip(tempDir+\"\\\\npm-v\"+npmv+\".zip\",tempDir+\"\\\\nvm-npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 218 }
package main

import (
  "fmt"
  "os"
  "os/exec"
  "strings"
  "io/ioutil"
  "regexp"
  "bytes"
  "encoding/json"
  "strconv"
  "./nvm/web"
  "./nvm/arch"
  "./nvm/file"
  "./nvm/node"
//  "./ansi"
)

const (
  NvmVersion = "1.0.6"
)

type Environment struct {
  settings        string
  root            string
  symlink         string
  arch            string
  proxy           string
  originalpath    string
  originalversion string
}

var env = &Environment{
  settings: os.Getenv("NVM_HOME")+"\\settings.txt",
  root: "",
  symlink: os.Getenv("NVM_SYMLINK"),
  arch: os.Getenv("PROCESSOR_ARCHITECTURE"),
  proxy: "none",
  originalpath: "",
  originalversion: "",
}

func main() {
  args := os.Args
  detail := ""
  procarch := arch.Validate(env.arch)
  Setup()

  // Capture any additional arguments
  if len(args) > 2 {
    detail = strings.ToLower(args[2])
  }
  if len(args) > 3 {
    procarch = args[3]
  }
  if len(args) < 2 {
    help()
    return
  }

  // Run the appropriate method
  switch args[1] {
    case "install": install(detail,procarch)
    case "uninstall": uninstall(detail)
    case "use": use(detail,procarch)
    case "list": list(detail)
    case "ls": list(detail)
    case "on": enable()
    case "off": disable()
    case "root":
      if len(args) == 3 {
        updateRootDir(args[2])
      } else {
        fmt.Println("\nCurrent Root: "+env.root)
      }
    case "version": fmt.Println(NvmVersion)
    case "v": fmt.Println(NvmVersion)
    case "arch":
      if strings.Trim(detail," \r\n") != "" {
        detail = strings.Trim(detail," \r\n")
        if detail != "32" && detail != "64" {
          fmt.Println("\""+detail+"\" is an invalid architecture. Use 32 or 64.")
          return
        }
        env.arch = detail
        saveSettings()
        fmt.Println("Default architecture set to "+detail+"-bit.")
        return
      }
      _, a := node.GetCurrentVersion()
      fmt.Println("System Default: "+env.arch+"-bit.")
      fmt.Println("Currently Configured: "+a+"-bit.")
    case "proxy":
      if detail == "" {
        fmt.Println("Current proxy: "+env.proxy)
      } else {
        env.proxy = detail
        saveSettings()
      }
    case "update": update()
    default: help()
  }
}

func update() {
//  cmd := exec.Command("cmd", "/d", "echo", "testing")
//  var output bytes.Buffer
//  var _stderr bytes.Buffer
//  cmd.Stdout = &output
//  cmd.Stderr = &_stderr
//  perr := cmd.Run()
//  if perr != nil {
//    fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
//    return
//  }
}

func CheckVersionExceedsLatest(version string) bool{
  content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
  re := regexp.MustCompile("node-v(.+)+msi")
  reg := regexp.MustCompile("node-v|-x.+")
  latest := reg.ReplaceAllString(re.FindString(content),"")
  if version <= latest {
    return false
  } else {
    return true
  }
}

func install(version string, cpuarch string) {
  if version == "" {
    fmt.Println("\nInvalid version.")
    fmt.Println(" ")
    help()
    return
  }

  cpuarch = strings.ToLower(cpuarch)

  if cpuarch != "" {
    if cpuarch != "32" && cpuarch != "64" && cpuarch != "all" {
      fmt.Println("\""+cpuarch+"\" is not a valid CPU architecture. Must be 32 or 64.")
      return
    }
  } else {
    cpuarch = env.arch
  }

  if cpuarch != "all" {
    cpuarch = arch.Validate(cpuarch)
  }

  if CheckVersionExceedsLatest(version) {
    fmt.Println("Node.js v"+version+" is not yet released or available.")
    return
  }

  if cpuarch == "64" && !web.IsNode64bitAvailable(version) {
    fmt.Println("Node.js v"+version+" is only available in 32-bit.")
    return
  }

  // If user specifies "latest" version, find out what version is
  if version == "latest" {
    content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
    re := regexp.MustCompile("node-v(.+)+msi")
    reg := regexp.MustCompile("node-v|-x.+")
    version = reg.ReplaceAllString(re.FindString(content),"")
  }

  // Check to see if the version is already installed
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    if !node.IsVersionAvailable(version){
      fmt.Println("Version "+version+" is not available. If you are attempting to download a \"just released\" version,")
      fmt.Println("it may not be recognized by the nvm service yet (updated hourly). If you feel this is in error and")
      fmt.Println("you know the version exists, please visit http://github.com/coreybutler/nodedistro and submit a PR.")
      return
    }

    // Make the output directories
    os.Mkdir(env.root+"\\v"+version,os.ModeDir)
    os.Mkdir(env.root+"\\v"+version+"\\node_modules",os.ModeDir)

    // Download node
    if (cpuarch == "32" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"32") {
      success := web.GetNodeJS(env.root,version,"32");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 32-bit executable.")
        return
      }
    }
    if (cpuarch == "64" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"64") {
      success := web.GetNodeJS(env.root,version,"64");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 64-bit executable.")
        return
      }
    }

    if file.Exists(env.root+"\\v"+version+"\\node_modules\\npm") {
      return
    }

    // If successful, add npm
    npmv := getNpmVersion(version)
    success := web.GetNpm(getNpmVersion(version))
    if success {
      fmt.Printf("Installing npm v"+npmv+"...")

      // Extract npm to the temp directory
      file.Unzip(os.TempDir()+"\\npm-v"+npmv+".zip",os.TempDir()+"\\nvm-npm")

      // Copy the npm and npm.cmd files to the installation directory
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm",env.root+"\\v"+version+"\\npm")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm.cmd",env.root+"\\v"+version+"\\npm.cmd")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv,env.root+"\\v"+version+"\\node_modules\\npm")

      // Remove the source file
      os.RemoveAll(os.TempDir()+"\\nvm-npm")

      fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use "+version)
    } else {
      fmt.Println("Could not download npm for node v"+version+".")
      fmt.Println("Please visit https://github.com/npm/npm/releases/tag/v"+npmv+" to download npm.")
      fmt.Println("It should be extracted to "+env.root+"\\v"+version)
    }

    // If this is ever shipped for Mac, it should use homebrew.
    // If this ever ships on Linux, it should be on bintray so it can use yum, apt-get, etc.
    return
  } else {
    fmt.Println("Version "+version+" is already installed.")
    return
  }
}

func uninstall(version string) {
  // Make sure a version is specified
  if len(version) == 0 {
    fmt.Println("Provide the version you want to uninstall.")
    help()
    return
  }

  // Determine if the version exists and skip if it doesn't
  if node.IsVersionInstalled(env.root,version,"32") || node.IsVersionInstalled(env.root,version,"64") {
    fmt.Printf("Uninstalling node v"+version+"...")
    v, _ := node.GetCurrentVersion()
    if v == version {
      cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
      cmd.Run()
    }
    e := os.RemoveAll(env.root+"\\v"+version)
    if e != nil {
      fmt.Println("Error removing node v"+version)
      fmt.Println("Manually remove "+env.root+"\\v"+version+".")
    } else {
      fmt.Printf(" done")
    }
  } else {
    fmt.Println("node v"+version+" is not installed. Type \"nvm list\" to see what is installed.")
  }
  return
}

func use(version string, cpuarch string) {
  if version == "32" || version == "64" {
    cpuarch = version
    v, _ := node.GetCurrentVersion()
    version = v
  }

  cpuarch = arch.Validate(cpuarch)

  // Make sure the version is installed. If not, warn.
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    fmt.Println("node v"+version+" ("+cpuarch+"-bit) is not installed.")
    if cpuarch == "32" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    if cpuarch == "64" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    return
  }

  // Create or update the symlink
  sym, _ := os.Stat(env.symlink)
  if sym != nil {
    cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
    var output bytes.Buffer
    var _stderr bytes.Buffer
    cmd.Stdout = &output
    cmd.Stderr = &_stderr
    perr := cmd.Run()
    if perr != nil {
      fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
      return
    }
  }

  c := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "mklink", "/D", env.symlink, env.root+"\\v"+version)
  var out bytes.Buffer
  var stderr bytes.Buffer
  c.Stdout = &out
  c.Stderr = &stderr
  err := c.Run()
  if err != nil {
    fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
    return
  }

  // Use the assigned CPU architecture
  cpuarch = arch.Validate(cpuarch)
  e32 := file.Exists(env.root+"\\v"+version+"\\node32.exe")
  e64 := file.Exists(env.root+"\\v"+version+"\\node64.exe")
  used := file.Exists(env.root+"\\v"+version+"\\node.exe")
  if (e32 || e64) {
    if used {
      if e32 {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node64.exe")
        os.Rename(env.root+"\\v"+version+"\\node32.exe",env.root+"\\v"+version+"\\node.exe")
      } else {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node32.exe")
        os.Rename(env.root+"\\v"+version+"\\node64.exe",env.root+"\\v"+version+"\\node.exe")
      }
    } else if e32 || e64 {
      os.Rename(env.root+"\\v"+version+"\\node"+cpuarch+".exe",env.root+"\\v"+version+"\\node.exe")
    }
  }

  fmt.Println("Now using node v"+version+" ("+cpuarch+"-bit)")
}

func useArchitecture(a string) {
  if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) {
    fmt.Println("This computer only supports 32-bit processing.")
    return
  }
  if a == "32" || a == "64" {
    env.arch = a
    saveSettings()
    fmt.Println("Set to "+a+"-bit mode")
  } else {
    fmt.Println("Cannot set architecture to "+a+". Must be 32 or 64 are acceptable values.")
  }
}

func list(listtype string) {
  if listtype == "" {
    listtype = "installed"
  }
  if listtype != "installed" && listtype != "available" {
    fmt.Println("\nInvalid list option.\n\nPlease use on of the following\n - nvm list\n - nvm list installed\n - nvm list available")
    help()
    return
  }

  if listtype == "installed" {
    fmt.Println("")
    inuse, a := node.GetCurrentVersion()
    v := node.GetInstalled(env.root)
    for i := 0; i < len(v); i++ {
      version := v[i]
      isnode, _ := regexp.MatchString("v",version)
      str := ""
      if isnode {
        if "v"+inuse == version {
          str = str+" * "
        } else {
          str = str+" "
        }
        str = str+regexp.MustCompile("v").ReplaceAllString(version,"")
        if "v"+inuse == version {
          str = str+" (Currently using "+a+"-bit executable)"
          // str = ansi.Color(str,"green:black")
        }
        fmt.Printf(str+"\n")
      }
    }
    if len(v) == 0 {
      fmt.Println("No installations recognized.")
    }
  } else {
    _, stable, unstable := node.GetAvailable()
    releases := 15

    fmt.Println("\nShowing the "+strconv.Itoa(releases)+" latest available releases.\n")
    fmt.Println(" STABLE | UNSTABLE ")
    fmt.Println(" ---------------------------")
    for i := 0; i < releases; i++ {
      str := "v"+stable[i]
      for ii := 10-len(str); ii > 0; ii-- {
        str = " "+str
      }
      str = str+" | "
      str2 := "v"+unstable[i]
      for ii := 10-len(str2); ii > 0; ii-- {
        str2 = " "+str2
      }
      fmt.Println(" "+str+str2)
    }
    fmt.Println("\nFor a complete list, visit http://coreybutler.github.io/nodedistro")
  }
}

func enable() {
  dir := ""
  files, _ := ioutil.ReadDir(env.root)
  for _, f := range files {
    if f.IsDir() {
      isnode, _ := regexp.MatchString("v",f.Name())
      if isnode {
        dir = f.Name()
      }
    }
  }
  fmt.Println("nvm enabled")
  if dir != "" {
    use(strings.Trim(regexp.MustCompile("v").ReplaceAllString(dir,"")," \n\r"),env.arch)
  } else {
    fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest")
  }
}

func disable() {
  cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
  cmd.Run()
  fmt.Println("nvm disabled")
}

func help() {
  fmt.Println("\nRunning version "+NvmVersion+".")
  fmt.Println("\nUsage:")
  fmt.Println(" ")
  fmt.Println(" nvm arch : Show if node is running in 32 or 64 bit mode.")
  fmt.Println(" nvm install <version> [arch] : The version can be a node.js version or \"latest\" for the latest stable version.")
  fmt.Println(" Optionally specify whether to install the 32 or 64 bit version (defaults to system arch).")
  fmt.Println(" Set [arch] to \"all\" to install 32 AND 64 bit versions.")
  fmt.Println(" nvm list [available] : List the node.js installations. Type \"available\" at the end to see what can be installed. Aliased as ls.")
  fmt.Println(" nvm on : Enable node.js version management.")
  fmt.Println(" nvm off : Disable node.js version management.")
  fmt.Println(" nvm proxy [url] : Set a proxy to use for downloads. Leave [url] blank to see the current proxy.")
  fmt.Println(" Set [url] to \"none\" to remove the proxy.")
  fmt.Println(" nvm uninstall <version> : The version must be a specific version.")
//  fmt.Println(" nvm update : Automatically update nvm to the latest version.")
  fmt.Println(" nvm use [version] [arch] : Switch to use the specified version. Optionally specify 32/64bit architecture.")
  fmt.Println(" nvm use <arch> will continue using the selected version, but switch to 32/64 bit mode.")
  fmt.Println(" nvm root [path] : Set the directory where nvm should store different versions of node.js.")
  fmt.Println(" If <path> is not set, the current root will be displayed.")
  fmt.Println(" nvm version : Displays the current running version of nvm for Windows. Aliased as v.")
  fmt.Println(" ")
}

// Given a node.js version, returns the associated npm version
func getNpmVersion(nodeversion string) string {
  // Get raw text
  text := web.GetRemoteTextFile("https://raw.githubusercontent.com/coreybutler/nodedistro/master/nodeversions.json")
  // Parse
  var data interface{}
  json.Unmarshal([]byte(text), &data);
  body := data.(map[string]interface{})
  all := body["all"]
  npm := all.(map[string]interface{})
  return npm[nodeversion].(string)
}

func updateRootDir(path string) {
  _, err := os.Stat(path)
  if err != nil {
    fmt.Println(path+" does not exist or could not be found.")
    return
  }
  env.root = path
  saveSettings()
  fmt.Println("\nRoot has been set to "+path)
}

func saveSettings() {
  content := "root: "+strings.Trim(env.root," \n\r")+"\r\narch: "+strings.Trim(env.arch," \n\r")+"\r\nproxy: "+strings.Trim(env.proxy," \n\r")+"\r\noriginalpath: "+strings.Trim(env.originalpath," \n\r")+"\r\noriginalversion: "+strings.Trim(env.originalversion," \n\r")
  ioutil.WriteFile(env.settings, []byte(content), 0644)
}

func Setup() {
  lines, err := file.ReadLines(env.settings)
  if err != nil {
    fmt.Println("\nERROR",err)
    os.Exit(1)
  }

  // Process each line and extract the value
  for _, line := range lines {
    if strings.Contains(line,"root:") {
      env.root = strings.Trim(regexp.MustCompile("root:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"originalpath:") {
      env.originalpath = strings.Trim(regexp.MustCompile("originalpath:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"originalversion:") {
      env.originalversion = strings.Trim(regexp.MustCompile("originalversion:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"arch:"){
      env.arch = strings.Trim(regexp.MustCompile("arch:").ReplaceAllString(line,"")," \r\n")
    } else if strings.Contains(line,"proxy:"){
      env.proxy = strings.Trim(regexp.MustCompile("proxy:").ReplaceAllString(line,"")," \r\n")
      if env.proxy != "none" && env.proxy != "" {
        if strings.ToLower(env.proxy[0:4]) != "http" {
          env.proxy = "http://"+env.proxy
        }
        web.SetProxy(env.proxy)
      }
    }
  }

  env.arch = arch.Validate(env.arch)

  // Make sure the directories exist
  _, e := os.Stat(env.root)
  if e != nil {
    fmt.Println(env.root+" could not be found or does not exist. Exiting.")
    return
  }
}
src/nvm.go
1
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.9878609776496887, 0.020640695467591286, 0.00016205244173761457, 0.0001737532438710332, 0.1326979398727417 ]
{ "id": 2, "code_window": [ " // Extract npm to the temp directory\n", " file.Unzip(os.TempDir()+\"\\\\npm-v\"+npmv+\".zip\",os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " // Copy the npm and npm.cmd files to the installation directory\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ " file.Unzip(tempDir+\"\\\\npm-v\"+npmv+\".zip\",tempDir+\"\\\\nvm-npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 218 }
@echo off

SET INNOSETUP=%CD%\nvm.iss
SET ORIG=%CD%
SET GOPATH=%CD%\src
SET GOBIN=%CD%\bin
SET GOARCH=386

REM Get the version number from the setup file
for /f "tokens=*" %%i in ('findstr /n . %INNOSETUP% ^| findstr ^4:#define') do set L=%%i
set version=%L:~24,-1%

REM Get the version number from the core executable
for /f "tokens=*" %%i in ('findstr /n . %GOPATH%\nvm.go ^| findstr ^NvmVersion^| findstr ^21^') do set L=%%i
set goversion=%L:~19,-1%

IF NOT %version%==%goversion% GOTO VERSIONMISMATCH

SET DIST=%CD%\dist\%version%

REM Build the executable
echo Building NVM for Windows
rm %GOBIN%\nvm.exe
cd %GOPATH%
goxc -arch="386" -os="windows" -n="nvm" -d="%GOBIN%" -o="%GOBIN%\nvm{{.Ext}}" -tasks-=package
cd %ORIG%
rm %GOBIN%\src.exe
rm %GOPATH%\src.exe
rm %GOPATH%\nvm.exe

REM Clean the dist directory
rm -rf "%DIST%"
mkdir "%DIST%"

REM Create the "noinstall" zip
echo Generating nvm-noinstall.zip
for /d %%a in (%GOBIN%) do (buildtools\zip -j -9 -r "%DIST%\nvm-noinstall.zip" "%CD%\LICENSE" "%%a\*" -x "%GOBIN%\nodejs.ico")

REM Create the installer
echo Generating nvm-setup.zip
buildtools\iscc %INNOSETUP% /o%DIST%
buildtools\zip -j -9 -r "%DIST%\nvm-setup.zip" "%DIST%\nvm-setup.exe"
REM rm "%DIST%\nvm-setup.exe"

echo --------------------------
echo Release %version% available in %DIST%
GOTO COMPLETE

:VERSIONMISMATCH
echo The version number in nvm.iss does not match the version in src\nvm.go
echo - nvm.iss line #4: %version%
echo - nvm.go line #21: %goversion%
EXIT /B

:COMPLETE
@echo on
build.bat
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0003500681195873767, 0.00020212988601997495, 0.00016634794883430004, 0.00017405871767550707, 0.00006633699376834556 ]
{ "id": 2, "code_window": [ " // Extract npm to the temp directory\n", " file.Unzip(os.TempDir()+\"\\\\npm-v\"+npmv+\".zip\",os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " // Copy the npm and npm.cmd files to the installation directory\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ " file.Unzip(tempDir+\"\\\\npm-v\"+npmv+\".zip\",tempDir+\"\\\\nvm-npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 218 }
#define MyAppName "NVM for Windows"
#define MyAppShortName "nvm"
#define MyAppLCShortName "nvm"
#define MyAppVersion "1.0.6"
#define MyAppPublisher "Ecor Ventures, LLC"
#define MyAppURL "http://github.com/coreybutler/nvm"
#define MyAppExeName "nvm.exe"
#define MyIcon "bin\nodejs.ico"
#define ProjectRoot "C:\Users\Corey\Documents\workspace\Applications\nvm"

[Setup]
; NOTE: The value of AppId uniquely identifies this application.
; Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
PrivilegesRequired=admin
AppId=40078385-F676-4C61-9A9C-F9028599D6D3
AppName={#MyAppName}
AppVersion={#MyAppVersion}
AppVerName={#MyAppName} {#MyAppVersion}
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
DefaultDirName={userappdata}\{#MyAppShortName}
DisableDirPage=no
DefaultGroupName={#MyAppName}
AllowNoIcons=yes
LicenseFile={#ProjectRoot}\LICENSE
OutputDir={#ProjectRoot}\dist\{#MyAppVersion}
OutputBaseFilename={#MyAppLCShortName}-setup
SetupIconFile={#ProjectRoot}\{#MyIcon}
Compression=lzma
SolidCompression=yes
ChangesEnvironment=yes
DisableProgramGroupPage=yes
ArchitecturesInstallIn64BitMode=x64 ia64
UninstallDisplayIcon={app}\{#MyIcon}
AppCopyright=Copyright (C) 2014 Corey Butler.

[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"

[Tasks]
Name: "quicklaunchicon"; Description: "{cm:CreateQuickLaunchIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked; OnlyBelowVersion: 0,6.1

[Files]
Source: "{#ProjectRoot}\bin\*"; DestDir: "{app}"; BeforeInstall: PreInstall; Flags: ignoreversion recursesubdirs createallsubdirs; Excludes: "{#ProjectRoot}\bin\install.cmd"

[Icons]
Name: "{group}\{#MyAppShortName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{#MyIcon}"
Name: "{group}\Uninstall {#MyAppShortName}"; Filename: "{uninstallexe}"

[Code]
var
  SymlinkPage: TInputDirWizardPage;

function IsDirEmpty(dir: string): Boolean;
var
  FindRec: TFindRec;
  ct: Integer;
begin
  ct := 0;
  if FindFirst(ExpandConstant(dir + '\*'), FindRec) then
  try
    repeat
      if FindRec.Attributes and FILE_ATTRIBUTE_DIRECTORY = 0 then
        ct := ct+1;
    until not FindNext(FindRec);
  finally
    FindClose(FindRec);
    Result := ct = 0;
  end;
end;

//function getInstalledVErsions(dir: string):

var
  nodeInUse: string;

function TakeControl(np: string; nv: string): string;
var
  path: string;
begin
  // Move the existing node.js installation directory to the nvm root & update the path
  RenameFile(np,ExpandConstant('{app}')+'\'+nv);

  RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path);
  StringChangeEx(path,np+'\','',True);
  StringChangeEx(path,np,'',True);
  StringChangeEx(path,np+';;',';',True);
  RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path);

  RegQueryStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path);
  StringChangeEx(path,np+'\','',True);
  StringChangeEx(path,np,'',True);
  StringChangeEx(path,np+';;',';',True);
  RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path);

  nodeInUse := ExpandConstant('{app}')+'\'+nv;
end;

function Ansi2String(AString:AnsiString):String;
var
  i : Integer;
  iChar : Integer;
  outString : String;
begin
  outString :='';
  for i := 1 to Length(AString) do
  begin
    iChar := Ord(AString[i]); //get int value
    outString := outString + Chr(iChar);
  end;
  Result := outString;
end;

procedure PreInstall();
var
  TmpResultFile, TmpJS, NodeVersion, NodePath: string;
  stdout: Ansistring;
  ResultCode: integer;
  msg1, msg2, msg3, dir1: Boolean;
begin
  // Create a file to check for Node.JS
  TmpJS := ExpandConstant('{tmp}') + '\nvm_check.js';
  SaveStringToFile(TmpJS, 'console.log(require("path").dirname(process.execPath));', False);

  // Execute the node file and save the output temporarily
  TmpResultFile := ExpandConstant('{tmp}') + '\nvm_node_check.txt';
  Exec(ExpandConstant('{cmd}'), '/C node "'+TmpJS+'" > "' + TmpResultFile + '"', '', SW_HIDE, ewWaitUntilTerminated, ResultCode);
  DeleteFile(TmpJS)

  // Process the results
  LoadStringFromFile(TmpResultFile,stdout);
  NodePath := Trim(Ansi2String(stdout));
  if DirExists(NodePath) then begin
    Exec(ExpandConstant('{cmd}'), '/C node -v > "' + TmpResultFile + '"', '', SW_HIDE, ewWaitUntilTerminated, ResultCode);
    LoadStringFromFile(TmpResultFile, stdout);
    NodeVersion := Trim(Ansi2String(stdout));
    msg1 := MsgBox('Node '+NodeVersion+' is already installed. Do you want NVM to control this version?', mbConfirmation, MB_YESNO) = IDNO;
    if msg1 then begin
      msg2 := MsgBox('NVM cannot run in parallel with an existing Node.js installation. Node.js must be uninstalled before NVM can be installed, or you must allow NVM to control the existing installation. Do you want NVM to control node '+NodeVersion+'?', mbConfirmation, MB_YESNO) = IDYES;
      if msg2 then begin
        TakeControl(NodePath, NodeVersion);
      end;
      if not msg2 then begin
        DeleteFile(TmpResultFile);
        WizardForm.Close;
      end;
    end;
    if not msg1 then begin
      TakeControl(NodePath, NodeVersion);
    end;
  end;

  // Make sure the symlink directory doesn't exist
  if DirExists(SymlinkPage.Values[0]) then begin
    // If the directory is empty, just delete it since it will be recreated anyway.
    dir1 := IsDirEmpty(SymlinkPage.Values[0]);
    if dir1 then begin
      RemoveDir(SymlinkPage.Values[0]);
    end;
    if not dir1 then begin
      msg3 := MsgBox(SymlinkPage.Values[0]+' will be overwritten and all contents will be lost. Do you want to proceed?', mbConfirmation, MB_OKCANCEL) = IDOK;
      if msg3 then begin
        RemoveDir(SymlinkPage.Values[0]);
      end;
      if not msg3 then begin
        //RaiseException('The symlink cannot be created due to a conflict with the existing directory at '+SymlinkPage.Values[0]);
        WizardForm.Close;
      end;
    end;
  end;
end;

procedure InitializeWizard;
begin
  SymlinkPage := CreateInputDirPage(wpSelectDir,
    'Set Node.js Symlink', 'The active version of Node.js will always be available here.',
    'Select the folder in which Setup should create the symlink, then click Next.',
    False, '');
  SymlinkPage.Add('This directory will automatically be added to your system path.');
  SymlinkPage.Values[0] := ExpandConstant('{pf}\nodejs');
end;

function InitializeUninstall(): Boolean;
var
  path: string;
  nvm_symlink: string;
begin
  MsgBox('Removing NVM for Windows will remove the nvm command and all versions of node.js, including global npm modules.', mbInformation, MB_OK);

  // Remove the symlink
  RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_SYMLINK', nvm_symlink);
  RemoveDir(nvm_symlink);

  // Clean the registry
  RegDeleteValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_HOME')
  RegDeleteValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_SYMLINK')
  RegDeleteValue(HKEY_CURRENT_USER, 'Environment', 'NVM_HOME')
  RegDeleteValue(HKEY_CURRENT_USER, 'Environment', 'NVM_SYMLINK')

  RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path);
  StringChangeEx(path,'%NVM_HOME%','',True);
  StringChangeEx(path,'%NVM_SYMLINK%','',True);
  StringChangeEx(path,';;',';',True);
  RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path);

  RegQueryStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path);
  StringChangeEx(path,'%NVM_HOME%','',True);
  StringChangeEx(path,'%NVM_SYMLINK%','',True);
  StringChangeEx(path,';;',';',True);
  RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path);

  Result := True;
end;

// Generate the settings file based on user input & update registry
procedure CurStepChanged(CurStep: TSetupStep);
var
  path: string;
begin
  if CurStep = ssPostInstall then
  begin
    SaveStringToFile(ExpandConstant('{app}\settings.txt'), 'root: ' + ExpandConstant('{app}') + #13#10 + 'path: ' + SymlinkPage.Values[0] + #13#10, False);

    // Add Registry settings
    RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_HOME', ExpandConstant('{app}'));
    RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_SYMLINK', SymlinkPage.Values[0]);
    RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'NVM_HOME', ExpandConstant('{app}'));
    RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'NVM_SYMLINK', SymlinkPage.Values[0]);

    // Update system and user PATH if needed
    RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path);
    if Pos('%NVM_HOME%',path) = 0 then begin
      path := path+';%NVM_HOME%';
      StringChangeEx(path,';;',';',True);
      RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path);
    end;
    if Pos('%NVM_SYMLINK%',path) = 0 then begin
      path := path+';%NVM_SYMLINK%';
      StringChangeEx(path,';;',';',True);
      RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path);
    end;
    RegQueryStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path);
    if Pos('%NVM_HOME%',path) = 0 then begin
      path := path+';%NVM_HOME%';
      StringChangeEx(path,';;',';',True);
      RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path);
    end;
    if Pos('%NVM_SYMLINK%',path) = 0 then begin
      path := path+';%NVM_SYMLINK%';
      StringChangeEx(path,';;',';',True);
      RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path);
    end;
  end;
end;

function getSymLink(o: string): string;
begin
  Result := SymlinkPage.Values[0];
end;

function getCurrentVersion(o: string): string;
begin
  Result := nodeInUse;
end;

function isNodeAlreadyInUse(): boolean;
begin
  Result := Length(nodeInUse) > 0;
end;

[Run]
Filename: "{cmd}"; Parameters: "/C ""mklink /D ""{code:getSymLink}"" ""{code:getCurrentVersion}"""" "; Check: isNodeAlreadyInUse; Flags: runhidden;
Filename: "{cmd}"; Parameters: "/K ""set PATH={app};%PATH% && cls && nvm"""; Flags: runasoriginaluser postinstall;

[UninstallDelete]
Type: files; Name: "{app}\nvm.exe";
Type: files; Name: "{app}\elevate.cmd";
Type: files; Name: "{app}\elevate.vbs";
Type: files; Name: "{app}\nodejs.ico";
Type: files; Name: "{app}\settings.txt";
Type: filesandordirs; Name: "{app}";
nvm.iss
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0004624893481377512, 0.00020241716993041337, 0.00016372723621316254, 0.0001705652684904635, 0.00007008604006841779 ]
{ "id": 2, "code_window": [ " // Extract npm to the temp directory\n", " file.Unzip(os.TempDir()+\"\\\\npm-v\"+npmv+\".zip\",os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " // Copy the npm and npm.cmd files to the installation directory\n" ], "labels": [ "keep", "replace", "keep", "keep" ], "after_edit": [ " file.Unzip(tempDir+\"\\\\npm-v\"+npmv+\".zip\",tempDir+\"\\\\nvm-npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 218 }
package file

import(
  "archive/zip"
  "bufio"
  "log"
  "io"
  "os"
  "path/filepath"
  "strings"
)

// Function courtesy http://stackoverflow.com/users/1129149/swtdrgn
func Unzip(src, dest string) error {
  r, err := zip.OpenReader(src)
  if err != nil {
    return err
  }
  defer r.Close()

  for _, f := range r.File {
    rc, err := f.Open()
    if err != nil {
      return err
    }
    defer rc.Close()

    fpath := filepath.Join(dest, f.Name)
    if f.FileInfo().IsDir() {
      os.MkdirAll(fpath, f.Mode())
    } else {
      var fdir string
      if lastIndex := strings.LastIndex(fpath,string(os.PathSeparator)); lastIndex > -1 {
        fdir = fpath[:lastIndex]
      }

      err = os.MkdirAll(fdir, f.Mode())
      if err != nil {
        log.Fatal(err)
        return err
      }
      f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
      if err != nil {
        return err
      }
      defer f.Close()

      _, err = io.Copy(f, rc)
      if err != nil {
        return err
      }
    }
  }
  return nil
}

func ReadLines(path string) ([]string, error) {
  file, err := os.Open(path)
  if err != nil {
    return nil, err
  }
  defer file.Close()

  var lines []string
  scanner := bufio.NewScanner(file)
  for scanner.Scan() {
    lines = append(lines, scanner.Text())
  }
  return lines, scanner.Err()
}

func Exists(filename string) bool {
  _, err := os.Stat(filename);
  return err == nil
}
src/nvm/file/file.go
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0024051708169281483, 0.0005967700853943825, 0.00016670618788339198, 0.00021241312788333744, 0.0007507995469495654 ]
{ "id": 3, "code_window": [ "\n", " // Copy the npm and npm.cmd files to the installation directory\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "replace", "keep" ], "after_edit": [ " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 221 }
package web

import(
  "fmt"
  "net/http"
  "net/url"
  "os"
  "io"
  "io/ioutil"
  "strings"
  "strconv"
  "../arch"
)

var client = &http.Client{}

func SetProxy(p string){
  if p != "" && p != "none" {
    proxyUrl, _ := url.Parse(p)
    client = &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyUrl)}}
  } else {
    client = &http.Client{}
  }
}

func Download(url string, target string) bool {
  output, err := os.Create(target)
  if err != nil {
    fmt.Println("Error while creating", target, "-", err)
  }
  defer output.Close()

  response, err := client.Get(url)
  if err != nil {
    fmt.Println("Error while downloading", url, "-", err)
  }
  defer response.Body.Close()

  _, err = io.Copy(output, response.Body)
  if err != nil {
    fmt.Println("Error while downloading", url, "-", err)
  }

  if response.Status[0:3] != "200" {
    fmt.Println("Download failed. Rolling Back.")
    err := os.Remove(target)
    if err != nil {
      fmt.Println("Rollback failed.",err)
    }
    return false
  }

  return true
}

func GetNodeJS(root string, v string, a string) bool {
  a = arch.Validate(a)

  url := ""
  if a == "32" {
    url = "http://nodejs.org/dist/v"+v+"/node.exe"
  } else {
    if !IsNode64bitAvailable(v) {
      fmt.Println("Node.js v"+v+" is only available in 32-bit.")
      return false
    }
    url = "http://nodejs.org/dist/v"+v+"/x64/node.exe"
  }

  fileName := root+"\\v"+v+"\\node"+a+".exe"

  fmt.Printf("Downloading node.js version "+v+" ("+a+"-bit)... ")

  if Download(url,fileName) {
    fmt.Printf("Complete\n")
    return true
  } else {
    return false
  }
}

func GetNpm(v string) bool {
  url := "https://github.com/npm/npm/archive/v"+v+".zip"
  fileName := os.TempDir()+"\\"+"npm-v"+v+".zip"

  fmt.Printf("Downloading npm version "+v+"... ")
  if Download(url,fileName) {
    fmt.Printf("Complete\n")
    return true
  } else {
    return false
  }
}

func GetRemoteTextFile(url string) string {
  response, httperr := client.Get(url)
  if httperr != nil {
    fmt.Println("\nCould not retrieve "+url+".\n\n")
    fmt.Printf("%s", httperr)
    os.Exit(1)
  } else {
    defer response.Body.Close()
    contents, readerr := ioutil.ReadAll(response.Body)
    if readerr != nil {
      fmt.Printf("%s", readerr)
      os.Exit(1)
    }
    return string(contents)
  }
  os.Exit(1)
  return ""
}

func IsNode64bitAvailable(v string) bool {
  if v == "latest" {
    return true
  }

  // Anything below version 8 doesn't have a 64 bit version
  vers := strings.Fields(strings.Replace(v,"."," ",-1))
  main, _ := strconv.ParseInt(vers[0],0,0)
  minor, _ := strconv.ParseInt(vers[1],0,0)
  if main == 0 && minor < 8 {
    return false
  }

  // Check online to see if a 64 bit version exists
  res, err := client.Head("http://nodejs.org/dist/v"+v+"/x64/node.exe")
  if err != nil {
    return false
  }
  return res.StatusCode == 200
}
src/nvm/web/web.go
1
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.004262424074113369, 0.0004958416684530675, 0.00016570521984249353, 0.00017067568842321634, 0.0010481624631211162 ]
{ "id": 3, "code_window": [ "\n", " // Copy the npm and npm.cmd files to the installation directory\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "replace", "keep" ], "after_edit": [ " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 221 }
[![Tweet This!][1.1] Tweet This!][1]

[1.1]: http://i.imgur.com/wWzX9uB.png (Tweet about NVM for Windows)
[1]: https://twitter.com/intent/tweet?hashtags=nodejs&original_referer=http%3A%2F%2F127.0.0.1%3A91%2F&text=Check%20out%20NVM%20for%20Windows!&tw_p=tweetbutton&url=http%3A%2F%2Fgithub.com%2Fcoreybutler%2Fnvm-windows&via=goldglovecb

# Node Version Manager (nvm) for Windows

[![Gitter](https://badges.gitter.im/Join Chat.svg)](https://gitter.im/coreybutler/nvm-windows?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

Manage multiple installations of node.js on a Windows computer.

**tl;dr** [nvm](https://github.com/creationix/nvm), but for Windows, with an installer. [Download Now](https://github.com/coreybutler/nvm/releases)! (No io.js support, _yet_... see Gitter for details)

![NVM for Windows](http://coreybutler.github.io/nvm-windows/images/installlatest.jpg)

There are situations where the ability to switch between different versions of Node.js can be very useful. For example, if you want to test a module you're developing with the latest bleeding edge version without uninstalling the stable version of node, this utility can help.

![Switch between stable and unstable versions.](http://coreybutler.github.io/nvm-windows/images/use.jpg)

### Installation & Upgrades

It comes with an installer (and uninstaller), because getting it should be easy. Please note, you need to uninstall any existing versions of node.js before installing NVM for Windows.

[Download the latest installer from the releases](https://github.com/coreybutler/nvm/releases).

![NVM for Windows Installer](http://coreybutler.github.io/nvm-windows/images/installer.jpg)

**To upgrade**, run the new installer. It will safely overwrite the files it needs to update without touching your node.js installations. Make sure you use the same installation and symlink folder. If you originally installed to the default locations, you just need to click "next" on each window until it finishes.

### Usage

NVM for Windows is a command line tool. Simply type `nvm` in the console for help. The basic commands are:

- `nvm arch [32|64]`: Show if node is running in 32 or 64 bit mode. Specify 32 or 64 to override the default architecture.
- `nvm install <version> [arch]`: The version can be a node.js version or "latest" for the latest stable version. Optionally specify whether to install the 32 or 64 bit version (defaults to system arch). Set `[arch]` to "all" to install 32 AND 64 bit versions.
- `nvm list [available]`: List the node.js installations. Type `available` at the end to show a list of versions available for download.
- `nvm on`: Enable node.js version management.
- `nvm off`: Disable node.js version management (does not uninstall anything).
- `nvm proxy [url]`: Set a proxy to use for downloads. Leave `[url]` blank to see the current proxy. Set `[url]` to "none" to remove the proxy.
- `nvm uninstall <version>`: Uninstall a specific version.
- `nvm use <version> [arch]`: Switch to use the specified version. Optionally specify 32/64bit architecture. `nvm use <arch>` will continue using the selected version, but switch to 32/64 bit mode based on the value supplied to `<arch>`.
- `nvm root <path>`: Set the directory where nvm should store different versions of node.js. If `<path>` is not set, the current root will be displayed.
- `nvm version`: Displays the current running version of NVM for Windows.

### Gotcha!

Please note that any global npm modules you may have installed are **not** shared between the various versions of node.js you have installed. Additionally, some npm modules may not be supported in the version of node you're using, so be aware of your environment as you work.

---

## Why another version manager?

There are several version managers for node.js. Tools like [nvm](https://github.com/creationix/nvm) and [n](https://github.com/visionmedia/n) only run on Mac OSX and Linux. Windows users are left in the cold? No. [nvmw](https://github.com/hakobera/nvmw) and [nodist](https://github.com/marcelklehr/nodist) are both designed for Windows. So, why another version manager for Windows?

The architecture of most node version managers for Windows rely on `.bat` files, which do some clever tricks to set or mimic environment variables. Some of them use node itself (once it's downloaded), which is admirable, but prone to problems. Right around node 0.10.30, the installation structure changed a little, causing some of these to just stop working with anything new.

Additionally, some users struggle to install these modules since it requires a little more knowledge of node's installation structure. I believe if it were easier for people to switch between versions, people might take the time to test their code on back and future versions... which is just good practice.

## What's the big difference?

First and foremost, this version of nvm has no dependency on node. It's written in [Go](http://golang.org/), which is a much more structured approach than hacking around a limited `.bat` file. It does not rely on having an existing node installation. Plus, should the need arise, Go offers potential for creating a Mac/Linux version on the same code base with a substanially easier migration path than converting a bunch of batch to shell logic. `bat > sh, it crazy, right?`

The control mechanism is also quite different. There are two general ways to support multiple node installations with hot switching capabilities. The first is to modify the system `PATH` any time you switch versions, or bypass it by using a `.bat` file to mimic the node executable and redirect accordingly. This always seemed a little hackish to me, and there are some quirks as a result of this implementation.

The second option is to use a symlink. This concept requires putting the symlink in the system `PATH`, then updating its target to the node installation directory you want to use. This is a straightforward approach, and seems to be what people recommend.... until they realize just how much of a pain symlinks are on Windows. This is why it hasn't happened before.

In order to create/modify a symlink, you must be running as an admin, and you must get around Windows UAC (that annoying prompt). Luckily, this is a challenge I already solved with some helper scripts in [node-windows](http://github.com/coreybutler/node-windows). As a result, NVM for Windows maintains a single symlink that is put in the system `PATH` during installation only. Switching to different versions of node is a matter of switching the symlink target. As a result, this utility does **not** require you to run `nvm use x.x.x` every time you open a console window. When you _do_ run `nvm use x.x.x`, the active version of node is automatically updated across all open console windows. It also persists between system reboots, so you only need to use nvm when you want to make a change.

NVM for Windows comes with an installer, courtesy of a byproduct of my work on [Fenix Web Server](http://fenixwebserver.com).

Overall, this project brings together some ideas, a few battle-hardened pieces of other modules, and support for newer versions of node.

I also wrote a simple [data feed](http://github.com/coreybutler/nodedistro) containing a list of node.js versions and their associated npm version. This is how NVM for Windows recognizes the "latest" stable version. It's free for anyone to use.

## Motivation

I needed it, plain and simple. Additionally, it's apparent that [support for multiple versions](https://github.com/joyent/node/issues/8075) is not coming to node core, or even something they care about. It was also an excuse to play with Go.

## License

MIT.

## Thanks

Thanks to everyone who has submitted issues on and off Github, made suggestions, and generally helped make this a better project. Special thanks to [@vkbansal](https://github.com/vkbansal), who has actively provided feedback throughout the releases.

## Alternatives

- [nvmw](https://github.com/hakobera/nvmw) - Windows Only
- [nodist](https://github.com/marcelklehr/nodist) - Windows Only
- [nvm](https://github.com/creationix/nvm) - Mac/Linux Only
- [n](https://github.com/visionmedia/n) - Mac/Linux Only
README.md
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.004345795139670372, 0.000756499997805804, 0.00016045308439061046, 0.0002425366546958685, 0.0011581673752516508 ]
{ "id": 3, "code_window": [ "\n", " // Copy the npm and npm.cmd files to the installation directory\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "replace", "keep" ], "after_edit": [ " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 221 }
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

dist
src/v*
bin/*.exe
!bin/buildtools/*
bin/*.zip
bin/nvm/*
.gitignore
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00032058879151009023, 0.00020483956905081868, 0.00015844935842324048, 0.0001701600558590144, 0.00006702787504764274 ]
{ "id": 3, "code_window": [ "\n", " // Copy the npm and npm.cmd files to the installation directory\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(os.TempDir()+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "replace", "keep" ], "after_edit": [ " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm\",env.root+\"\\\\v\"+version+\"\\\\npm\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv+\"\\\\bin\\\\npm.cmd\",env.root+\"\\\\v\"+version+\"\\\\npm.cmd\")\n", " os.Rename(tempDir+\"\\\\nvm-npm\\\\npm-\"+npmv,env.root+\"\\\\v\"+version+\"\\\\node_modules\\\\npm\")\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 221 }
Set Shell = CreateObject("Shell.Application")
Set WShell = WScript.CreateObject("WScript.Shell")
Set ProcEnv = WShell.Environment("PROCESS")

cmd = ProcEnv("CMD")
app = ProcEnv("APP")
args= Right(cmd,(Len(cmd)-Len(app)))

If (WScript.Arguments.Count >= 1) Then
  Shell.ShellExecute app, args, "", "runas", 0
Else
  WScript.Quit
End If
bin/elevate.vbs
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0001715836551738903, 0.0001683651644270867, 0.00016514667368028313, 0.0001683651644270867, 0.0000032184907468035817 ]
{ "id": 4, "code_window": [ "\n", " // Remove the source file\n", " os.RemoveAll(os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " fmt.Println(\"\\n\\nInstallation complete. If you want to use this version, type\\n\\nnvm use \"+version)\n", " } else {\n" ], "labels": [ "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Remove the temp directory\n", " // may consider keep the temp files here\n", " os.RemoveAll(tempDir)\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 225 }
package main

import (
  "fmt"
  "os"
  "os/exec"
  "strings"
  "io/ioutil"
  "regexp"
  "bytes"
  "encoding/json"
  "strconv"
  "./nvm/web"
  "./nvm/arch"
  "./nvm/file"
  "./nvm/node"
//  "./ansi"
)

const (
  NvmVersion = "1.0.6"
)

type Environment struct {
  settings        string
  root            string
  symlink         string
  arch            string
  proxy           string
  originalpath    string
  originalversion string
}

var env = &Environment{
  settings: os.Getenv("NVM_HOME")+"\\settings.txt",
  root: "",
  symlink: os.Getenv("NVM_SYMLINK"),
  arch: os.Getenv("PROCESSOR_ARCHITECTURE"),
  proxy: "none",
  originalpath: "",
  originalversion: "",
}

func main() {
  args := os.Args
  detail := ""
  procarch := arch.Validate(env.arch)
  Setup()

  // Capture any additional arguments
  if len(args) > 2 {
    detail = strings.ToLower(args[2])
  }
  if len(args) > 3 {
    procarch = args[3]
  }
  if len(args) < 2 {
    help()
    return
  }

  // Run the appropriate method
  switch args[1] {
    case "install": install(detail,procarch)
    case "uninstall": uninstall(detail)
    case "use": use(detail,procarch)
    case "list": list(detail)
    case "ls": list(detail)
    case "on": enable()
    case "off": disable()
    case "root":
      if len(args) == 3 {
        updateRootDir(args[2])
      } else {
        fmt.Println("\nCurrent Root: "+env.root)
      }
    case "version": fmt.Println(NvmVersion)
    case "v": fmt.Println(NvmVersion)
    case "arch":
      if strings.Trim(detail," \r\n") != "" {
        detail = strings.Trim(detail," \r\n")
        if detail != "32" && detail != "64" {
          fmt.Println("\""+detail+"\" is an invalid architecture. Use 32 or 64.")
          return
        }
        env.arch = detail
        saveSettings()
        fmt.Println("Default architecture set to "+detail+"-bit.")
        return
      }
      _, a := node.GetCurrentVersion()
      fmt.Println("System Default: "+env.arch+"-bit.")
      fmt.Println("Currently Configured: "+a+"-bit.")
    case "proxy":
      if detail == "" {
        fmt.Println("Current proxy: "+env.proxy)
      } else {
        env.proxy = detail
        saveSettings()
      }
    case "update": update()
    default: help()
  }
}

func update() {
//  cmd := exec.Command("cmd", "/d", "echo", "testing")
//  var output bytes.Buffer
//  var _stderr bytes.Buffer
//  cmd.Stdout = &output
//  cmd.Stderr = &_stderr
//  perr := cmd.Run()
//  if perr != nil {
//    fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
//    return
//  }
}

func CheckVersionExceedsLatest(version string) bool{
  content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
  re := regexp.MustCompile("node-v(.+)+msi")
  reg := regexp.MustCompile("node-v|-x.+")
  latest := reg.ReplaceAllString(re.FindString(content),"")
  if version <= latest {
    return false
  } else {
    return true
  }
}

func install(version string, cpuarch string) {
  if version == "" {
    fmt.Println("\nInvalid version.")
    fmt.Println(" ")
    help()
    return
  }

  cpuarch = strings.ToLower(cpuarch)

  if cpuarch != "" {
    if cpuarch != "32" && cpuarch != "64" && cpuarch != "all" {
      fmt.Println("\""+cpuarch+"\" is not a valid CPU architecture. Must be 32 or 64.")
      return
    }
  } else {
    cpuarch = env.arch
  }

  if cpuarch != "all" {
    cpuarch = arch.Validate(cpuarch)
  }

  if CheckVersionExceedsLatest(version) {
    fmt.Println("Node.js v"+version+" is not yet released or available.")
    return
  }

  if cpuarch == "64" && !web.IsNode64bitAvailable(version) {
    fmt.Println("Node.js v"+version+" is only available in 32-bit.")
    return
  }

  // If user specifies "latest" version, find out what version is
  if version == "latest" {
    content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt")
    re := regexp.MustCompile("node-v(.+)+msi")
    reg := regexp.MustCompile("node-v|-x.+")
    version = reg.ReplaceAllString(re.FindString(content),"")
  }

  // Check to see if the version is already installed
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    if !node.IsVersionAvailable(version){
      fmt.Println("Version "+version+" is not available. If you are attempting to download a \"just released\" version,")
      fmt.Println("it may not be recognized by the nvm service yet (updated hourly). If you feel this is in error and")
      fmt.Println("you know the version exists, please visit http://github.com/coreybutler/nodedistro and submit a PR.")
      return
    }

    // Make the output directories
    os.Mkdir(env.root+"\\v"+version,os.ModeDir)
    os.Mkdir(env.root+"\\v"+version+"\\node_modules",os.ModeDir)

    // Download node
    if (cpuarch == "32" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"32") {
      success := web.GetNodeJS(env.root,version,"32");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 32-bit executable.")
        return
      }
    }
    if (cpuarch == "64" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"64") {
      success := web.GetNodeJS(env.root,version,"64");
      if !success {
        os.RemoveAll(env.root+"\\v"+version+"\\node_modules")
        fmt.Println("Could not download node.js v"+version+" 64-bit executable.")
        return
      }
    }

    if file.Exists(env.root+"\\v"+version+"\\node_modules\\npm") {
      return
    }

    // If successful, add npm
    npmv := getNpmVersion(version)
    success := web.GetNpm(getNpmVersion(version))
    if success {
      fmt.Printf("Installing npm v"+npmv+"...")

      // Extract npm to the temp directory
      file.Unzip(os.TempDir()+"\\npm-v"+npmv+".zip",os.TempDir()+"\\nvm-npm")

      // Copy the npm and npm.cmd files to the installation directory
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm",env.root+"\\v"+version+"\\npm")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm.cmd",env.root+"\\v"+version+"\\npm.cmd")
      os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv,env.root+"\\v"+version+"\\node_modules\\npm")

      // Remove the source file
      os.RemoveAll(os.TempDir()+"\\nvm-npm")

      fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use "+version)
    } else {
      fmt.Println("Could not download npm for node v"+version+".")
      fmt.Println("Please visit https://github.com/npm/npm/releases/tag/v"+npmv+" to download npm.")
      fmt.Println("It should be extracted to "+env.root+"\\v"+version)
    }

    // If this is ever shipped for Mac, it should use homebrew.
    // If this ever ships on Linux, it should be on bintray so it can use yum, apt-get, etc.
    return
  } else {
    fmt.Println("Version "+version+" is already installed.")
    return
  }
}

func uninstall(version string) {
  // Make sure a version is specified
  if len(version) == 0 {
    fmt.Println("Provide the version you want to uninstall.")
    help()
    return
  }

  // Determine if the version exists and skip if it doesn't
  if node.IsVersionInstalled(env.root,version,"32") || node.IsVersionInstalled(env.root,version,"64") {
    fmt.Printf("Uninstalling node v"+version+"...")
    v, _ := node.GetCurrentVersion()
    if v == version {
      cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
      cmd.Run()
    }
    e := os.RemoveAll(env.root+"\\v"+version)
    if e != nil {
      fmt.Println("Error removing node v"+version)
      fmt.Println("Manually remove "+env.root+"\\v"+version+".")
    } else {
      fmt.Printf(" done")
    }
  } else {
    fmt.Println("node v"+version+" is not installed. Type \"nvm list\" to see what is installed.")
  }
  return
}

func use(version string, cpuarch string) {
  if version == "32" || version == "64" {
    cpuarch = version
    v, _ := node.GetCurrentVersion()
    version = v
  }

  cpuarch = arch.Validate(cpuarch)

  // Make sure the version is installed. If not, warn.
  if !node.IsVersionInstalled(env.root,version,cpuarch) {
    fmt.Println("node v"+version+" ("+cpuarch+"-bit) is not installed.")
    if cpuarch == "32" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    if cpuarch == "64" {
      if node.IsVersionInstalled(env.root,version,"64") {
        fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
      }
    }
    return
  }

  // Create or update the symlink
  sym, _ := os.Stat(env.symlink)
  if sym != nil {
    cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
    var output bytes.Buffer
    var _stderr bytes.Buffer
    cmd.Stdout = &output
    cmd.Stderr = &_stderr
    perr := cmd.Run()
    if perr != nil {
      fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
      return
    }
  }

  c := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "mklink", "/D", env.symlink, env.root+"\\v"+version)
  var out bytes.Buffer
  var stderr bytes.Buffer
  c.Stdout = &out
  c.Stderr = &stderr
  err := c.Run()
  if err != nil {
    fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
    return
  }

  // Use the assigned CPU architecture
  cpuarch = arch.Validate(cpuarch)
  e32 := file.Exists(env.root+"\\v"+version+"\\node32.exe")
  e64 := file.Exists(env.root+"\\v"+version+"\\node64.exe")
  used := file.Exists(env.root+"\\v"+version+"\\node.exe")
  if (e32 || e64) {
    if used {
      if e32 {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node64.exe")
        os.Rename(env.root+"\\v"+version+"\\node32.exe",env.root+"\\v"+version+"\\node.exe")
      } else {
        os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node32.exe")
        os.Rename(env.root+"\\v"+version+"\\node64.exe",env.root+"\\v"+version+"\\node.exe")
      }
    } else if e32 || e64 {
      os.Rename(env.root+"\\v"+version+"\\node"+cpuarch+".exe",env.root+"\\v"+version+"\\node.exe")
    }
  }

  fmt.Println("Now using node v"+version+" ("+cpuarch+"-bit)")
}

func useArchitecture(a string) {
  if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) {
    fmt.Println("This computer only supports 32-bit processing.")
    return
  }
  if a == "32" || a == "64" {
    env.arch = a
    saveSettings()
    fmt.Println("Set to "+a+"-bit mode")
  } else {
    fmt.Println("Cannot set architecture to "+a+". Must be 32 or 64 are acceptable values.")
  }
}

func list(listtype string) {
  if listtype == "" {
    listtype = "installed"
  }
  if listtype != "installed" && listtype != "available" {
    fmt.Println("\nInvalid list option.\n\nPlease use on of the following\n - nvm list\n - nvm list installed\n - nvm list available")
    help()
    return
  }

  if listtype == "installed" {
    fmt.Println("")
    inuse, a := node.GetCurrentVersion()
    v := node.GetInstalled(env.root)
    for i := 0; i < len(v); i++ {
      version := v[i]
      isnode, _ := regexp.MatchString("v",version)
      str := ""
      if isnode {
        if "v"+inuse == version {
          str = str+" * "
        } else {
          str = str+" "
        }
        str = str+regexp.MustCompile("v").ReplaceAllString(version,"")
        if "v"+inuse == version {
          str = str+" (Currently using "+a+"-bit executable)"
          // str = ansi.Color(str,"green:black")
        }
        fmt.Printf(str+"\n")
      }
    }
    if len(v) == 0 {
      fmt.Println("No installations recognized.")
    }
  } else {
    _, stable, unstable := node.GetAvailable()
    releases := 15

    fmt.Println("\nShowing the "+strconv.Itoa(releases)+" latest available releases.\n")
    fmt.Println(" STABLE | UNSTABLE ")
    fmt.Println(" ---------------------------")
    for i := 0; i < releases; i++ {
      str := "v"+stable[i]
      for ii := 10-len(str); ii > 0; ii-- {
        str = " "+str
      }
      str = str+" | "
      str2 := "v"+unstable[i]
      for ii := 10-len(str2); ii > 0; ii-- {
        str2 = " "+str2
      }
      fmt.Println(" "+str+str2)
    }
    fmt.Println("\nFor a complete list, visit http://coreybutler.github.io/nodedistro")
  }
}

func enable() {
  dir := ""
  files, _ := ioutil.ReadDir(env.root)
  for _, f := range files {
    if f.IsDir() {
      isnode, _ := regexp.MatchString("v",f.Name())
      if isnode {
        dir = f.Name()
      }
    }
  }
  fmt.Println("nvm enabled")
  if dir != "" {
    use(strings.Trim(regexp.MustCompile("v").ReplaceAllString(dir,"")," \n\r"),env.arch)
  } else {
    fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest")
  }
}

func disable() {
  cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink)
  cmd.Run()
  fmt.Println("nvm disabled")
}

func help() {
  fmt.Println("\nRunning version "+NvmVersion+".")
  fmt.Println("\nUsage:")
  fmt.Println(" ")
  fmt.Println(" nvm arch : Show if node is running in 32 or 64 bit mode.")
  fmt.Println(" nvm install <version> [arch] : The version can be a node.js version or \"latest\" for the latest stable version.")
  fmt.Println(" Optionally specify whether to install the 32 or 64 bit version (defaults to system arch).")
  fmt.Println(" Set [arch] to \"all\" to install 32 AND 64 bit versions.")
  fmt.Println(" nvm list [available] : List the node.js installations. Type \"available\" at the end to see what can be installed. Aliased as ls.")
  fmt.Println(" nvm on : Enable node.js version management.")
  fmt.Println(" nvm off : Disable node.js version management.")
  fmt.Println(" nvm proxy [url] : Set a proxy to use for downloads. Leave [url] blank to see the current proxy.")
  fmt.Println(" Set [url] to \"none\" to remove the proxy.")
  fmt.Println(" nvm uninstall <version> : The version must be a specific version.")
//  fmt.Println(" nvm update : Automatically update nvm to the latest version.")
  fmt.Println(" nvm use [version] [arch] : Switch to use the specified version.
Optionally specify 32/64bit architecture.") fmt.Println(" nvm use <arch> will continue using the selected version, but switch to 32/64 bit mode.") fmt.Println(" nvm root [path] : Set the directory where nvm should store different versions of node.js.") fmt.Println(" If <path> is not set, the current root will be displayed.") fmt.Println(" nvm version : Displays the current running version of nvm for Windows. Aliased as v.") fmt.Println(" ") } // Given a node.js version, returns the associated npm version func getNpmVersion(nodeversion string) string { // Get raw text text := web.GetRemoteTextFile("https://raw.githubusercontent.com/coreybutler/nodedistro/master/nodeversions.json") // Parse var data interface{} json.Unmarshal([]byte(text), &data); body := data.(map[string]interface{}) all := body["all"] npm := all.(map[string]interface{}) return npm[nodeversion].(string) } func updateRootDir(path string) { _, err := os.Stat(path) if err != nil { fmt.Println(path+" does not exist or could not be found.") return } env.root = path saveSettings() fmt.Println("\nRoot has been set to "+path) } func saveSettings() { content := "root: "+strings.Trim(env.root," \n\r")+"\r\narch: "+strings.Trim(env.arch," \n\r")+"\r\nproxy: "+strings.Trim(env.proxy," \n\r")+"\r\noriginalpath: "+strings.Trim(env.originalpath," \n\r")+"\r\noriginalversion: "+strings.Trim(env.originalversion," \n\r") ioutil.WriteFile(env.settings, []byte(content), 0644) } func Setup() { lines, err := file.ReadLines(env.settings) if err != nil { fmt.Println("\nERROR",err) os.Exit(1) } // Process each line and extract the value for _, line := range lines { if strings.Contains(line,"root:") { env.root = strings.Trim(regexp.MustCompile("root:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"originalpath:") { env.originalpath = strings.Trim(regexp.MustCompile("originalpath:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"originalversion:") { env.originalversion = strings.Trim(regexp.MustCompile("originalversion:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"arch:"){ env.arch = strings.Trim(regexp.MustCompile("arch:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"proxy:"){ env.proxy = strings.Trim(regexp.MustCompile("proxy:").ReplaceAllString(line,"")," \r\n") if env.proxy != "none" && env.proxy != "" { if strings.ToLower(env.proxy[0:4]) != "http" { env.proxy = "http://"+env.proxy } web.SetProxy(env.proxy) } } } env.arch = arch.Validate(env.arch) // Make sure the directories exist _, e := os.Stat(env.root) if e != nil { fmt.Println(env.root+" could not be found or does not exist. Exiting.") return } }
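For reference, `saveSettings` above writes (and `Setup` re-reads) a plain-text `settings.txt` of the following shape; the root path shown here is illustrative, not a value from the source:

```
root: C:\Users\me\AppData\Roaming\nvm
arch: 64
proxy: none
originalpath:
originalversion:
```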
src/nvm.go
1
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.9980267882347107, 0.024386364966630936, 0.00016373800463043153, 0.001049830112606287, 0.13633739948272705 ]
{ "id": 4, "code_window": [ "\n", " // Remove the source file\n", " os.RemoveAll(os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " fmt.Println(\"\\n\\nInstallation complete. If you want to use this version, type\\n\\nnvm use \"+version)\n", " } else {\n" ], "labels": [ "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Remove the temp directory\n", " // may consider keep the temp files here\n", " os.RemoveAll(tempDir)\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 225 }
[![Tweet This!][1.1] Tweet This!][1]

[1.1]: http://i.imgur.com/wWzX9uB.png (Tweet about NVM for Windows)
[1]: https://twitter.com/intent/tweet?hashtags=nodejs&original_referer=http%3A%2F%2F127.0.0.1%3A91%2F&text=Check%20out%20NVM%20for%20Windows!&tw_p=tweetbutton&url=http%3A%2F%2Fgithub.com%2Fcoreybutler%2Fnvm-windows&via=goldglovecb

# Node Version Manager (nvm) for Windows

[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/coreybutler/nvm-windows?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

Manage multiple installations of node.js on a Windows computer.

**tl;dr** [nvm](https://github.com/creationix/nvm), but for Windows, with an installer. [Download Now](https://github.com/coreybutler/nvm/releases)! (No io.js support, _yet_... see Gitter for details)

![NVM for Windows](http://coreybutler.github.io/nvm-windows/images/installlatest.jpg)

There are situations where the ability to switch between different versions of Node.js can be very useful. For example, if you want to test a module you're developing with the latest bleeding edge version without uninstalling the stable version of node, this utility can help.

![Switch between stable and unstable versions.](http://coreybutler.github.io/nvm-windows/images/use.jpg)

### Installation & Upgrades

It comes with an installer (and uninstaller), because getting it should be easy. Please note, you need to uninstall any existing versions of node.js before installing NVM for Windows.

[Download the latest installer from the releases](https://github.com/coreybutler/nvm/releases).

![NVM for Windows Installer](http://coreybutler.github.io/nvm-windows/images/installer.jpg)

**To upgrade**, run the new installer. It will safely overwrite the files it needs to update without touching your node.js installations. Make sure you use the same installation and symlink folder. If you originally installed to the default locations, you just need to click "next" on each window until it finishes.

### Usage

NVM for Windows is a command line tool. Simply type `nvm` in the console for help. The basic commands are:

- `nvm arch [32|64]`: Show if node is running in 32 or 64 bit mode. Specify 32 or 64 to override the default architecture.
- `nvm install <version> [arch]`: The version can be a node.js version or "latest" for the latest stable version. Optionally specify whether to install the 32 or 64 bit version (defaults to system arch). Set `[arch]` to "all" to install 32 AND 64 bit versions.
- `nvm list [available]`: List the node.js installations. Type `available` at the end to show a list of versions available for download.
- `nvm on`: Enable node.js version management.
- `nvm off`: Disable node.js version management (does not uninstall anything).
- `nvm proxy [url]`: Set a proxy to use for downloads. Leave `[url]` blank to see the current proxy. Set `[url]` to "none" to remove the proxy.
- `nvm uninstall <version>`: Uninstall a specific version.
- `nvm use <version> [arch]`: Switch to use the specified version. Optionally specify 32/64bit architecture. `nvm use <arch>` will continue using the selected version, but switch to 32/64 bit mode based on the value supplied to `<arch>`.
- `nvm root <path>`: Set the directory where nvm should store different versions of node.js. If `<path>` is not set, the current root will be displayed.
- `nvm version`: Displays the current running version of NVM for Windows.
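For example, a typical first session looks something like this (version numbers are illustrative):

```
nvm install 0.10.32 all
nvm use 0.10.32 64
nvm list
```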
### Gotcha!

Please note that any global npm modules you may have installed are **not** shared between the various versions of node.js you have installed. Additionally, some npm modules may not be supported in the version of node you're using, so be aware of your environment as you work.

---

## Why another version manager?

There are several version managers for node.js. Tools like [nvm](https://github.com/creationix/nvm) and [n](https://github.com/visionmedia/n) only run on Mac OSX and Linux. Are Windows users left in the cold? No. [nvmw](https://github.com/hakobera/nvmw) and [nodist](https://github.com/marcelklehr/nodist) are both designed for Windows. So, why another version manager for Windows?

The architecture of most node version managers for Windows relies on `.bat` files, which do some clever tricks to set or mimic environment variables. Some of them use node itself (once it's downloaded), which is admirable, but prone to problems. Right around node 0.10.30, the installation structure changed a little, causing some of these to just stop working with anything new.

Additionally, some users struggle to install these modules since it requires a little more knowledge of node's installation structure. I believe if it were easier for people to switch between versions, people might take the time to test their code on past and future versions... which is just good practice.

## What's the big difference?

First and foremost, this version of nvm has no dependency on node. It's written in [Go](http://golang.org/), which is a much more structured approach than hacking around a limited `.bat` file. It does not rely on having an existing node installation. Plus, should the need arise, Go offers potential for creating a Mac/Linux version on the same code base with a substantially easier migration path than converting a bunch of batch to shell logic. `bat > sh, it's crazy, right?`

The control mechanism is also quite different. There are two general ways to support multiple node installations with hot switching capabilities. The first is to modify the system `PATH` any time you switch versions, or bypass it by using a `.bat` file to mimic the node executable and redirect accordingly. This always seemed a little hackish to me, and there are some quirks as a result of this implementation.

The second option is to use a symlink. This concept requires putting the symlink in the system `PATH`, then updating its target to the node installation directory you want to use. This is a straightforward approach, and seems to be what people recommend... until they realize just how much of a pain symlinks are on Windows. This is why it hasn't happened before.

In order to create/modify a symlink, you must be running as an admin, and you must get around Windows UAC (that annoying prompt). Luckily, this is a challenge I already solved with some helper scripts in [node-windows](http://github.com/coreybutler/node-windows). As a result, NVM for Windows maintains a single symlink that is put in the system `PATH` during installation only. Switching to different versions of node is a matter of switching the symlink target. As a result, this utility does **not** require you to run `nvm use x.x.x` every time you open a console window. When you _do_ run `nvm use x.x.x`, the active version of node is automatically updated across all open console windows. It also persists between system reboots, so you only need to use nvm when you want to make a change.
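To make that concrete, here is a minimal sketch of the switching step. It is not the project's exact code, though it mirrors the `use` logic in `src/nvm.go`; the version number is illustrative:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// switchTo re-points the NVM_SYMLINK directory link at the folder holding
// the requested node.js version, elevating via the bundled elevate.cmd so
// the change gets past Windows UAC.
func switchTo(version string) error {
	root := os.Getenv("NVM_HOME")
	symlink := os.Getenv("NVM_SYMLINK")

	// Remove the existing link first, if there is one.
	if _, err := os.Stat(symlink); err == nil {
		if err := exec.Command(root+"\\elevate.cmd", "cmd", "/C", "rmdir", symlink).Run(); err != nil {
			return err
		}
	}

	// Create a directory link pointing at the selected version.
	return exec.Command(root+"\\elevate.cmd", "cmd", "/C", "mklink", "/D", symlink, root+"\\v"+version).Run()
}

func main() {
	if err := switchTo("0.10.32"); err != nil { // illustrative version
		fmt.Println(err)
	}
}
```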
NVM for Windows comes with an installer, courtesy of a byproduct of my work on [Fenix Web Server](http://fenixwebserver.com).

Overall, this project brings together some ideas, a few battle-hardened pieces of other modules, and support for newer versions of node. I also wrote a simple [data feed](http://github.com/coreybutler/nodedistro) containing a list of node.js versions and their associated npm version (a minimal sketch of reading this feed appears at the end of this README). This is how NVM for Windows recognizes the "latest" stable version. It's free for anyone to use.

## Motivation

I needed it, plain and simple. Additionally, it's apparent that [support for multiple versions](https://github.com/joyent/node/issues/8075) is not coming to node core, or even something they care about. It was also an excuse to play with Go.

## License

MIT.

## Thanks

Thanks to everyone who has submitted issues on and off Github, made suggestions, and generally helped make this a better project. Special thanks to [@vkbansal](https://github.com/vkbansal), who has actively provided feedback throughout the releases.

## Alternatives

- [nvmw](https://github.com/hakobera/nvmw) - Windows Only
- [nodist](https://github.com/marcelklehr/nodist) - Windows Only
- [nvm](https://github.com/creationix/nvm) - Mac/Linux Only
- [n](https://github.com/visionmedia/n) - Mac/Linux Only
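As promised above, here is a minimal sketch of reading the nodedistro feed. It assumes the same `all: {"<node version>": "<npm version>"}` shape that `getNpmVersion` in `src/nvm.go` relies on; the queried version is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// npmVersionFor looks up the npm version paired with a node.js version in
// the coreybutler/nodedistro feed. Error handling is kept minimal for brevity.
func npmVersionFor(nodeVersion string) (string, error) {
	resp, err := http.Get("https://raw.githubusercontent.com/coreybutler/nodedistro/master/nodeversions.json")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	// Decode only the "all" map: node version -> npm version.
	var data struct {
		All map[string]string `json:"all"`
	}
	if err := json.Unmarshal(raw, &data); err != nil {
		return "", err
	}
	return data.All[nodeVersion], nil
}

func main() {
	v, err := npmVersionFor("0.10.32")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("npm", v)
}
```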
README.md
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.010735603049397469, 0.001324907992966473, 0.00016051321290433407, 0.00029830378480255604, 0.0028769117780029774 ]
{ "id": 4, "code_window": [ "\n", " // Remove the source file\n", " os.RemoveAll(os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " fmt.Println(\"\\n\\nInstallation complete. If you want to use this version, type\\n\\nnvm use \"+version)\n", " } else {\n" ], "labels": [ "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Remove the temp directory\n", " // may consider keep the temp files here\n", " os.RemoveAll(tempDir)\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 225 }
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

dist
src/v*
bin/*.exe
!bin/buildtools/*
bin/*.zip
bin/nvm/*
.gitignore
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0003312078770250082, 0.00020875426707789302, 0.00016235497605521232, 0.00017072710033971816, 0.00007078533235471696 ]
{ "id": 4, "code_window": [ "\n", " // Remove the source file\n", " os.RemoveAll(os.TempDir()+\"\\\\nvm-npm\")\n", "\n", " fmt.Println(\"\\n\\nInstallation complete. If you want to use this version, type\\n\\nnvm use \"+version)\n", " } else {\n" ], "labels": [ "keep", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ " // Remove the temp directory\n", " // may consider keep the temp files here\n", " os.RemoveAll(tempDir)\n" ], "file_path": "src/nvm.go", "type": "replace", "edit_start_line_idx": 225 }
; *** Inno Setup version 5.5.3+ English messages *** ; ; To download user-contributed translations of this file, go to: ; http://www.jrsoftware.org/files/istrans/ ; ; Note: When translating this text, do not add periods (.) to the end of ; messages that didn't have them already, because on those messages Inno ; Setup adds the periods automatically (appending a period would result in ; two periods being displayed). [LangOptions] ; The following three entries are very important. Be sure to read and ; understand the '[LangOptions] section' topic in the help file. LanguageName=English LanguageID=$0409 LanguageCodePage=0 ; If the language you are translating to requires special font faces or ; sizes, uncomment any of the following entries and change them accordingly. ;DialogFontName= ;DialogFontSize=8 ;WelcomeFontName=Verdana ;WelcomeFontSize=12 ;TitleFontName=Arial ;TitleFontSize=29 ;CopyrightFontName=Arial ;CopyrightFontSize=8 [Messages] ; *** Application titles SetupAppTitle=Setup SetupWindowTitle=Setup - %1 UninstallAppTitle=Uninstall UninstallAppFullTitle=%1 Uninstall ; *** Misc. common InformationTitle=Information ConfirmTitle=Confirm ErrorTitle=Error ; *** SetupLdr messages SetupLdrStartupMessage=This will install %1. Do you wish to continue? LdrCannotCreateTemp=Unable to create a temporary file. Setup aborted LdrCannotExecTemp=Unable to execute file in the temporary directory. Setup aborted ; *** Startup error messages LastErrorMessage=%1.%n%nError %2: %3 SetupFileMissing=The file %1 is missing from the installation directory. Please correct the problem or obtain a new copy of the program. SetupFileCorrupt=The setup files are corrupted. Please obtain a new copy of the program. SetupFileCorruptOrWrongVer=The setup files are corrupted, or are incompatible with this version of Setup. Please correct the problem or obtain a new copy of the program. InvalidParameter=An invalid parameter was passed on the command line:%n%n%1 SetupAlreadyRunning=Setup is already running. WindowsVersionNotSupported=This program does not support the version of Windows your computer is running. WindowsServicePackRequired=This program requires %1 Service Pack %2 or later. NotOnThisPlatform=This program will not run on %1. OnlyOnThisPlatform=This program must be run on %1. OnlyOnTheseArchitectures=This program can only be installed on versions of Windows designed for the following processor architectures:%n%n%1 MissingWOW64APIs=The version of Windows you are running does not include functionality required by Setup to perform a 64-bit installation. To correct this problem, please install Service Pack %1. WinVersionTooLowError=This program requires %1 version %2 or later. WinVersionTooHighError=This program cannot be installed on %1 version %2 or later. AdminPrivilegesRequired=You must be logged in as an administrator when installing this program. PowerUserPrivilegesRequired=You must be logged in as an administrator or as a member of the Power Users group when installing this program. SetupAppRunningError=Setup has detected that %1 is currently running.%n%nPlease close all instances of it now, then click OK to continue, or Cancel to exit. UninstallAppRunningError=Uninstall has detected that %1 is currently running.%n%nPlease close all instances of it now, then click OK to continue, or Cancel to exit. ; *** Misc. 
errors ErrorCreatingDir=Setup was unable to create the directory "%1" ErrorTooManyFilesInDir=Unable to create a file in the directory "%1" because it contains too many files ; *** Setup common messages ExitSetupTitle=Exit Setup ExitSetupMessage=Setup is not complete. If you exit now, the program will not be installed.%n%nYou may run Setup again at another time to complete the installation.%n%nExit Setup? AboutSetupMenuItem=&About Setup... AboutSetupTitle=About Setup AboutSetupMessage=%1 version %2%n%3%n%n%1 home page:%n%4 AboutSetupNote= TranslatorNote= ; *** Buttons ButtonBack=< &Back ButtonNext=&Next > ButtonInstall=&Install ButtonOK=OK ButtonCancel=Cancel ButtonYes=&Yes ButtonYesToAll=Yes to &All ButtonNo=&No ButtonNoToAll=N&o to All ButtonFinish=&Finish ButtonBrowse=&Browse... ButtonWizardBrowse=B&rowse... ButtonNewFolder=&Make New Folder ; *** "Select Language" dialog messages SelectLanguageTitle=Select Setup Language SelectLanguageLabel=Select the language to use during the installation: ; *** Common wizard text ClickNext=Click Next to continue, or Cancel to exit Setup. BeveledLabel= BrowseDialogTitle=Browse For Folder BrowseDialogLabel=Select a folder in the list below, then click OK. NewFolderName=New Folder ; *** "Welcome" wizard page WelcomeLabel1=Welcome to the [name] Setup Wizard WelcomeLabel2=This will install [name/ver] on your computer.%n%nIt is recommended that you close all other applications before continuing. ; *** "Password" wizard page WizardPassword=Password PasswordLabel1=This installation is password protected. PasswordLabel3=Please provide the password, then click Next to continue. Passwords are case-sensitive. PasswordEditLabel=&Password: IncorrectPassword=The password you entered is not correct. Please try again. ; *** "License Agreement" wizard page WizardLicense=License Agreement LicenseLabel=Please read the following important information before continuing. LicenseLabel3=Please read the following License Agreement. You must accept the terms of this agreement before continuing with the installation. LicenseAccepted=I &accept the agreement LicenseNotAccepted=I &do not accept the agreement ; *** "Information" wizard pages WizardInfoBefore=Information InfoBeforeLabel=Please read the following important information before continuing. InfoBeforeClickLabel=When you are ready to continue with Setup, click Next. WizardInfoAfter=Information InfoAfterLabel=Please read the following important information before continuing. InfoAfterClickLabel=When you are ready to continue with Setup, click Next. ; *** "User Information" wizard page WizardUserInfo=User Information UserInfoDesc=Please enter your information. UserInfoName=&User Name: UserInfoOrg=&Organization: UserInfoSerial=&Serial Number: UserInfoNameRequired=You must enter a name. ; *** "Select Destination Location" wizard page WizardSelectDir=Select Destination Location SelectDirDesc=Where should [name] be installed? SelectDirLabel3=Setup will install [name] into the following folder. SelectDirBrowseLabel=To continue, click Next. If you would like to select a different folder, click Browse. DiskSpaceMBLabel=At least [mb] MB of free disk space is required. CannotInstallToNetworkDrive=Setup cannot install to a network drive. CannotInstallToUNCPath=Setup cannot install to a UNC path. InvalidPath=You must enter a full path with drive letter; for example:%n%nC:\APP%n%nor a UNC path in the form:%n%n\\server\share InvalidDrive=The drive or UNC share you selected does not exist or is not accessible. Please select another. 
DiskSpaceWarningTitle=Not Enough Disk Space DiskSpaceWarning=Setup requires at least %1 KB of free space to install, but the selected drive only has %2 KB available.%n%nDo you want to continue anyway? DirNameTooLong=The folder name or path is too long. InvalidDirName=The folder name is not valid. BadDirName32=Folder names cannot include any of the following characters:%n%n%1 DirExistsTitle=Folder Exists DirExists=The folder:%n%n%1%n%nalready exists. Would you like to install to that folder anyway? DirDoesntExistTitle=Folder Does Not Exist DirDoesntExist=The folder:%n%n%1%n%ndoes not exist. Would you like the folder to be created? ; *** "Select Components" wizard page WizardSelectComponents=Select Components SelectComponentsDesc=Which components should be installed? SelectComponentsLabel2=Select the components you want to install; clear the components you do not want to install. Click Next when you are ready to continue. FullInstallation=Full installation ; if possible don't translate 'Compact' as 'Minimal' (I mean 'Minimal' in your language) CompactInstallation=Compact installation CustomInstallation=Custom installation NoUninstallWarningTitle=Components Exist NoUninstallWarning=Setup has detected that the following components are already installed on your computer:%n%n%1%n%nDeselecting these components will not uninstall them.%n%nWould you like to continue anyway? ComponentSize1=%1 KB ComponentSize2=%1 MB ComponentsDiskSpaceMBLabel=Current selection requires at least [mb] MB of disk space. ; *** "Select Additional Tasks" wizard page WizardSelectTasks=Select Additional Tasks SelectTasksDesc=Which additional tasks should be performed? SelectTasksLabel2=Select the additional tasks you would like Setup to perform while installing [name], then click Next. ; *** "Select Start Menu Folder" wizard page WizardSelectProgramGroup=Select Start Menu Folder SelectStartMenuFolderDesc=Where should Setup place the program's shortcuts? SelectStartMenuFolderLabel3=Setup will create the program's shortcuts in the following Start Menu folder. SelectStartMenuFolderBrowseLabel=To continue, click Next. If you would like to select a different folder, click Browse. MustEnterGroupName=You must enter a folder name. GroupNameTooLong=The folder name or path is too long. InvalidGroupName=The folder name is not valid. BadGroupName=The folder name cannot include any of the following characters:%n%n%1 NoProgramGroupCheck2=&Don't create a Start Menu folder ; *** "Ready to Install" wizard page WizardReady=Ready to Install ReadyLabel1=Setup is now ready to begin installing [name] on your computer. ReadyLabel2a=Click Install to continue with the installation, or click Back if you want to review or change any settings. ReadyLabel2b=Click Install to continue with the installation. ReadyMemoUserInfo=User information: ReadyMemoDir=Destination location: ReadyMemoType=Setup type: ReadyMemoComponents=Selected components: ReadyMemoGroup=Start Menu folder: ReadyMemoTasks=Additional tasks: ; *** "Preparing to Install" wizard page WizardPreparing=Preparing to Install PreparingDesc=Setup is preparing to install [name] on your computer. PreviousInstallNotCompleted=The installation/removal of a previous program was not completed. You will need to restart your computer to complete that installation.%n%nAfter restarting your computer, run Setup again to complete the installation of [name]. CannotContinue=Setup cannot continue. Please click Cancel to exit. 
ApplicationsFound=The following applications are using files that need to be updated by Setup. It is recommended that you allow Setup to automatically close these applications. ApplicationsFound2=The following applications are using files that need to be updated by Setup. It is recommended that you allow Setup to automatically close these applications. After the installation has completed, Setup will attempt to restart the applications. CloseApplications=&Automatically close the applications DontCloseApplications=&Do not close the applications ErrorCloseApplications=Setup was unable to automatically close all applications. It is recommended that you close all applications using files that need to be updated by Setup before continuing. ; *** "Installing" wizard page WizardInstalling=Installing InstallingLabel=Please wait while Setup installs [name] on your computer. ; *** "Setup Completed" wizard page FinishedHeadingLabel=Completing the [name] Setup Wizard FinishedLabelNoIcons=Setup has finished installing [name] on your computer. FinishedLabel=Setup has finished installing [name] on your computer. The application may be launched by selecting the installed icons. ClickFinish=Click Finish to exit Setup. FinishedRestartLabel=To complete the installation of [name], Setup must restart your computer. Would you like to restart now? FinishedRestartMessage=To complete the installation of [name], Setup must restart your computer.%n%nWould you like to restart now? ShowReadmeCheck=Yes, I would like to view the README file YesRadio=&Yes, restart the computer now NoRadio=&No, I will restart the computer later ; used for example as 'Run MyProg.exe' RunEntryExec=Run %1 ; used for example as 'View Readme.txt' RunEntryShellExec=View %1 ; *** "Setup Needs the Next Disk" stuff ChangeDiskTitle=Setup Needs the Next Disk SelectDiskLabel2=Please insert Disk %1 and click OK.%n%nIf the files on this disk can be found in a folder other than the one displayed below, enter the correct path or click Browse. PathLabel=&Path: FileNotInDir2=The file "%1" could not be located in "%2". Please insert the correct disk or select another folder. SelectDirectoryLabel=Please specify the location of the next disk. ; *** Installation phase messages SetupAborted=Setup was not completed.%n%nPlease correct the problem and run Setup again. EntryAbortRetryIgnore=Click Retry to try again, Ignore to proceed anyway, or Abort to cancel installation. ; *** Installation status messages StatusClosingApplications=Closing applications... StatusCreateDirs=Creating directories... StatusExtractFiles=Extracting files... StatusCreateIcons=Creating shortcuts... StatusCreateIniEntries=Creating INI entries... StatusCreateRegistryEntries=Creating registry entries... StatusRegisterFiles=Registering files... StatusSavingUninstall=Saving uninstall information... StatusRunProgram=Finishing installation... StatusRestartingApplications=Restarting applications... StatusRollback=Rolling back changes... ; *** Misc. errors ErrorInternal2=Internal error: %1 ErrorFunctionFailedNoCode=%1 failed ErrorFunctionFailed=%1 failed; code %2 ErrorFunctionFailedWithMessage=%1 failed; code %2.%n%3 ErrorExecutingProgram=Unable to execute file:%n%1 ; *** Registry errors ErrorRegOpenKey=Error opening registry key:%n%1\%2 ErrorRegCreateKey=Error creating registry key:%n%1\%2 ErrorRegWriteKey=Error writing to registry key:%n%1\%2 ; *** INI errors ErrorIniEntry=Error creating INI entry in file "%1". 
; *** File copying errors FileAbortRetryIgnore=Click Retry to try again, Ignore to skip this file (not recommended), or Abort to cancel installation. FileAbortRetryIgnore2=Click Retry to try again, Ignore to proceed anyway (not recommended), or Abort to cancel installation. SourceIsCorrupted=The source file is corrupted SourceDoesntExist=The source file "%1" does not exist ExistingFileReadOnly=The existing file is marked as read-only.%n%nClick Retry to remove the read-only attribute and try again, Ignore to skip this file, or Abort to cancel installation. ErrorReadingExistingDest=An error occurred while trying to read the existing file: FileExists=The file already exists.%n%nWould you like Setup to overwrite it? ExistingFileNewer=The existing file is newer than the one Setup is trying to install. It is recommended that you keep the existing file.%n%nDo you want to keep the existing file? ErrorChangingAttr=An error occurred while trying to change the attributes of the existing file: ErrorCreatingTemp=An error occurred while trying to create a file in the destination directory: ErrorReadingSource=An error occurred while trying to read the source file: ErrorCopying=An error occurred while trying to copy a file: ErrorReplacingExistingFile=An error occurred while trying to replace the existing file: ErrorRestartReplace=RestartReplace failed: ErrorRenamingTemp=An error occurred while trying to rename a file in the destination directory: ErrorRegisterServer=Unable to register the DLL/OCX: %1 ErrorRegSvr32Failed=RegSvr32 failed with exit code %1 ErrorRegisterTypeLib=Unable to register the type library: %1 ; *** Post-installation errors ErrorOpeningReadme=An error occurred while trying to open the README file. ErrorRestartingComputer=Setup was unable to restart the computer. Please do this manually. ; *** Uninstaller messages UninstallNotFound=File "%1" does not exist. Cannot uninstall. UninstallOpenError=File "%1" could not be opened. Cannot uninstall UninstallUnsupportedVer=The uninstall log file "%1" is in a format not recognized by this version of the uninstaller. Cannot uninstall UninstallUnknownEntry=An unknown entry (%1) was encountered in the uninstall log ConfirmUninstall=Are you sure you want to completely remove %1 and all of its components? UninstallOnlyOnWin64=This installation can only be uninstalled on 64-bit Windows. OnlyAdminCanUninstall=This installation can only be uninstalled by a user with administrative privileges. UninstallStatusLabel=Please wait while %1 is removed from your computer. UninstalledAll=%1 was successfully removed from your computer. UninstalledMost=%1 uninstall complete.%n%nSome elements could not be removed. These can be removed manually. UninstalledAndNeedsRestart=To complete the uninstallation of %1, your computer must be restarted.%n%nWould you like to restart now? UninstallDataCorrupted="%1" file is corrupted. Cannot uninstall ; *** Uninstallation phase messages ConfirmDeleteSharedFileTitle=Remove Shared File? ConfirmDeleteSharedFile2=The system indicates that the following shared file is no longer in use by any programs. Would you like for Uninstall to remove this shared file?%n%nIf any programs are still using this file and it is removed, those programs may not function properly. If you are unsure, choose No. Leaving the file on your system will not cause any harm. SharedFileNameLabel=File name: SharedFileLocationLabel=Location: WizardUninstalling=Uninstall Status StatusUninstalling=Uninstalling %1... 
; *** Shutdown block reasons ShutdownBlockReasonInstallingApp=Installing %1. ShutdownBlockReasonUninstallingApp=Uninstalling %1. ; The custom messages below aren't used by Setup itself, but if you make ; use of them in your scripts, you'll want to translate them. [CustomMessages] NameAndVersion=%1 version %2 AdditionalIcons=Additional icons: CreateDesktopIcon=Create a &desktop icon CreateQuickLaunchIcon=Create a &Quick Launch icon ProgramOnTheWeb=%1 on the Web UninstallProgram=Uninstall %1 LaunchProgram=Launch %1 AssocFileExtension=&Associate %1 with the %2 file extension AssocingFileExtension=Associating %1 with the %2 file extension... AutoStartProgramGroupDescription=Startup: AutoStartProgram=Automatically start %1 AddonHostProgramNotFound=%1 could not be located in the folder you selected.%n%nDo you want to continue anyway?
buildtools/Default.isl
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0038810090627521276, 0.0004258584522176534, 0.0001617622037883848, 0.00017192773520946503, 0.0007697630790062249 ]
{ "id": 5, "code_window": [ " \"io/ioutil\"\n", " \"strings\"\n", " \"strconv\"\n", " \"../arch\"\n", ")\n", "\n", "var client = &http.Client{}\n", "\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"../file\"\n" ], "file_path": "src/nvm/web/web.go", "type": "add", "edit_start_line_idx": 12 }
package arch

import (
	"encoding/hex"
	"os"
	"strings"
)

// SearchBytesInFile scans the first `limit` bytes of the file at `path`
// for the byte sequence encoded by the hex string `match`.
func SearchBytesInFile(path string, match string, limit int) bool {
	// Decode the hex string into the byte sequence to look for.
	toMatch, err := hex.DecodeString(match)
	if err != nil {
		return false
	}

	// Open the file, bailing out (and reporting "no match") on any error.
	file, err := os.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()

	// Read one byte at a time, tracking how far into the pattern we are.
	bit := make([]byte, 1)
	j := 0
	for i := 0; i < limit; i++ {
		file.Read(bit)
		if bit[0] != toMatch[j] {
			j = 0
		}
		if bit[0] == toMatch[j] {
			j++
			if j >= len(toMatch) {
				return true
			}
		}
	}
	return false
}

// Bit reports whether the PE executable at `path` is 32- or 64-bit by
// searching its header for the "PE\0\0" signature followed by the machine
// type: 0x8664 (hex "6486", little-endian) is AMD64, while 0x014C
// (leading byte "4C") is i386.
func Bit(path string) string {
	is64 := SearchBytesInFile(path, "504500006486", 400)
	is32 := SearchBytesInFile(path, "504500004C", 400)

	if is64 {
		return "64"
	} else if is32 {
		return "32"
	}

	return "?"
}

// Validate normalizes an architecture string to "32" or "64", falling back
// to the PROCESSOR_ARCHITECTURE environment variable when none is given.
func Validate(str string) string {
	if str == "" {
		str = os.Getenv("PROCESSOR_ARCHITECTURE")
	}

	if strings.ContainsAny("64", str) {
		return "64"
	}
	return "32"
}
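If it helps, here is a tiny test-style sketch of how these helpers behave. It assumes it sits next to `arch.go` in the same package, and the `node.exe` path is hypothetical:

```go
package arch

import "testing"

func TestArchHelpers(t *testing.T) {
	// Validate always normalizes to "32" or "64".
	if v := Validate(""); v != "32" && v != "64" {
		t.Errorf("unexpected architecture: %q", v)
	}

	// Bit returns "32", "64", or "?" (e.g. when the file cannot be opened).
	t.Log(Bit(`C:\Program Files\nodejs\node.exe`))
}
```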
src/nvm/arch/arch.go
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0002079126425087452, 0.00018067115161102265, 0.00017188773199450225, 0.00017407978884875774, 0.000012866745237261057 ]
{ "id": 5, "code_window": [ " \"io/ioutil\"\n", " \"strings\"\n", " \"strconv\"\n", " \"../arch\"\n", ")\n", "\n", "var client = &http.Client{}\n", "\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"../file\"\n" ], "file_path": "src/nvm/web/web.go", "type": "add", "edit_start_line_idx": 12 }
#define MyAppName "NVM for Windows" #define MyAppShortName "nvm" #define MyAppLCShortName "nvm" #define MyAppVersion "1.0.6" #define MyAppPublisher "Ecor Ventures, LLC" #define MyAppURL "http://github.com/coreybutler/nvm" #define MyAppExeName "nvm.exe" #define MyIcon "bin\nodejs.ico" #define ProjectRoot "C:\Users\Corey\Documents\workspace\Applications\nvm" [Setup] ; NOTE: The value of AppId uniquely identifies this application. ; Do not use the same AppId value in installers for other applications. ; (To generate a new GUID, click Tools | Generate GUID inside the IDE.) PrivilegesRequired=admin AppId=40078385-F676-4C61-9A9C-F9028599D6D3 AppName={#MyAppName} AppVersion={#MyAppVersion} AppVerName={#MyAppName} {#MyAppVersion} AppPublisher={#MyAppPublisher} AppPublisherURL={#MyAppURL} AppSupportURL={#MyAppURL} AppUpdatesURL={#MyAppURL} DefaultDirName={userappdata}\{#MyAppShortName} DisableDirPage=no DefaultGroupName={#MyAppName} AllowNoIcons=yes LicenseFile={#ProjectRoot}\LICENSE OutputDir={#ProjectRoot}\dist\{#MyAppVersion} OutputBaseFilename={#MyAppLCShortName}-setup SetupIconFile={#ProjectRoot}\{#MyIcon} Compression=lzma SolidCompression=yes ChangesEnvironment=yes DisableProgramGroupPage=yes ArchitecturesInstallIn64BitMode=x64 ia64 UninstallDisplayIcon={app}\{#MyIcon} AppCopyright=Copyright (C) 2014 Corey Butler. [Languages] Name: "english"; MessagesFile: "compiler:Default.isl" [Tasks] Name: "quicklaunchicon"; Description: "{cm:CreateQuickLaunchIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked; OnlyBelowVersion: 0,6.1 [Files] Source: "{#ProjectRoot}\bin\*"; DestDir: "{app}"; BeforeInstall: PreInstall; Flags: ignoreversion recursesubdirs createallsubdirs; Excludes: "{#ProjectRoot}\bin\install.cmd" [Icons] Name: "{group}\{#MyAppShortName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{#MyIcon}" Name: "{group}\Uninstall {#MyAppShortName}"; Filename: "{uninstallexe}" [Code] var SymlinkPage: TInputDirWizardPage; function IsDirEmpty(dir: string): Boolean; var FindRec: TFindRec; ct: Integer; begin ct := 0; if FindFirst(ExpandConstant(dir + '\*'), FindRec) then try repeat if FindRec.Attributes and FILE_ATTRIBUTE_DIRECTORY = 0 then ct := ct+1; until not FindNext(FindRec); finally FindClose(FindRec); Result := ct = 0; end; end; //function getInstalledVErsions(dir: string): var nodeInUse: string; function TakeControl(np: string; nv: string): string; var path: string; begin // Move the existing node.js installation directory to the nvm root & update the path RenameFile(np,ExpandConstant('{app}')+'\'+nv); RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path); StringChangeEx(path,np+'\','',True); StringChangeEx(path,np,'',True); StringChangeEx(path,np+';;',';',True); RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path); RegQueryStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path); StringChangeEx(path,np+'\','',True); StringChangeEx(path,np,'',True); StringChangeEx(path,np+';;',';',True); RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path); nodeInUse := ExpandConstant('{app}')+'\'+nv; end; function Ansi2String(AString:AnsiString):String; var i : Integer; iChar : Integer; outString : String; begin outString :=''; for i := 1 to Length(AString) do begin iChar := Ord(AString[i]); //get int value outString := outString + Chr(iChar); end; Result := outString; end; procedure PreInstall(); var TmpResultFile, TmpJS, NodeVersion, 
NodePath: string; stdout: Ansistring; ResultCode: integer; msg1, msg2, msg3, dir1: Boolean; begin // Create a file to check for Node.JS TmpJS := ExpandConstant('{tmp}') + '\nvm_check.js'; SaveStringToFile(TmpJS, 'console.log(require("path").dirname(process.execPath));', False); // Execute the node file and save the output temporarily TmpResultFile := ExpandConstant('{tmp}') + '\nvm_node_check.txt'; Exec(ExpandConstant('{cmd}'), '/C node "'+TmpJS+'" > "' + TmpResultFile + '"', '', SW_HIDE, ewWaitUntilTerminated, ResultCode); DeleteFile(TmpJS) // Process the results LoadStringFromFile(TmpResultFile,stdout); NodePath := Trim(Ansi2String(stdout)); if DirExists(NodePath) then begin Exec(ExpandConstant('{cmd}'), '/C node -v > "' + TmpResultFile + '"', '', SW_HIDE, ewWaitUntilTerminated, ResultCode); LoadStringFromFile(TmpResultFile, stdout); NodeVersion := Trim(Ansi2String(stdout)); msg1 := MsgBox('Node '+NodeVersion+' is already installed. Do you want NVM to control this version?', mbConfirmation, MB_YESNO) = IDNO; if msg1 then begin msg2 := MsgBox('NVM cannot run in parallel with an existing Node.js installation. Node.js must be uninstalled before NVM can be installed, or you must allow NVM to control the existing installation. Do you want NVM to control node '+NodeVersion+'?', mbConfirmation, MB_YESNO) = IDYES; if msg2 then begin TakeControl(NodePath, NodeVersion); end; if not msg2 then begin DeleteFile(TmpResultFile); WizardForm.Close; end; end; if not msg1 then begin TakeControl(NodePath, NodeVersion); end; end; // Make sure the symlink directory doesn't exist if DirExists(SymlinkPage.Values[0]) then begin // If the directory is empty, just delete it since it will be recreated anyway. dir1 := IsDirEmpty(SymlinkPage.Values[0]); if dir1 then begin RemoveDir(SymlinkPage.Values[0]); end; if not dir1 then begin msg3 := MsgBox(SymlinkPage.Values[0]+' will be overwritten and all contents will be lost. 
Do you want to proceed?', mbConfirmation, MB_OKCANCEL) = IDOK; if msg3 then begin RemoveDir(SymlinkPage.Values[0]); end; if not msg3 then begin //RaiseException('The symlink cannot be created due to a conflict with the existing directory at '+SymlinkPage.Values[0]); WizardForm.Close; end; end; end; end; procedure InitializeWizard; begin SymlinkPage := CreateInputDirPage(wpSelectDir, 'Set Node.js Symlink', 'The active version of Node.js will always be available here.', 'Select the folder in which Setup should create the symlink, then click Next.', False, ''); SymlinkPage.Add('This directory will automatically be added to your system path.'); SymlinkPage.Values[0] := ExpandConstant('{pf}\nodejs'); end; function InitializeUninstall(): Boolean; var path: string; nvm_symlink: string; begin MsgBox('Removing NVM for Windows will remove the nvm command and all versions of node.js, including global npm modules.', mbInformation, MB_OK); // Remove the symlink RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_SYMLINK', nvm_symlink); RemoveDir(nvm_symlink); // Clean the registry RegDeleteValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_HOME') RegDeleteValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_SYMLINK') RegDeleteValue(HKEY_CURRENT_USER, 'Environment', 'NVM_HOME') RegDeleteValue(HKEY_CURRENT_USER, 'Environment', 'NVM_SYMLINK') RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path); StringChangeEx(path,'%NVM_HOME%','',True); StringChangeEx(path,'%NVM_SYMLINK%','',True); StringChangeEx(path,';;',';',True); RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path); RegQueryStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path); StringChangeEx(path,'%NVM_HOME%','',True); StringChangeEx(path,'%NVM_SYMLINK%','',True); StringChangeEx(path,';;',';',True); RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path); Result := True; end; // Generate the settings file based on user input & update registry procedure CurStepChanged(CurStep: TSetupStep); var path: string; begin if CurStep = ssPostInstall then begin SaveStringToFile(ExpandConstant('{app}\settings.txt'), 'root: ' + ExpandConstant('{app}') + #13#10 + 'path: ' + SymlinkPage.Values[0] + #13#10, False); // Add Registry settings RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_HOME', ExpandConstant('{app}')); RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'NVM_SYMLINK', SymlinkPage.Values[0]); RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'NVM_HOME', ExpandConstant('{app}')); RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'NVM_SYMLINK', SymlinkPage.Values[0]); // Update system and user PATH if needed RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path); if Pos('%NVM_HOME%',path) = 0 then begin path := path+';%NVM_HOME%'; StringChangeEx(path,';;',';',True); RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path); end; if Pos('%NVM_SYMLINK%',path) = 0 then begin path := path+';%NVM_SYMLINK%'; StringChangeEx(path,';;',';',True); RegWriteExpandStringValue(HKEY_LOCAL_MACHINE, 
'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', path); end; RegQueryStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path); if Pos('%NVM_HOME%',path) = 0 then begin path := path+';%NVM_HOME%'; StringChangeEx(path,';;',';',True); RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path); end; if Pos('%NVM_SYMLINK%',path) = 0 then begin path := path+';%NVM_SYMLINK%'; StringChangeEx(path,';;',';',True); RegWriteExpandStringValue(HKEY_CURRENT_USER, 'Environment', 'Path', path); end; end; end; function getSymLink(o: string): string; begin Result := SymlinkPage.Values[0]; end; function getCurrentVersion(o: string): string; begin Result := nodeInUse; end; function isNodeAlreadyInUse(): boolean; begin Result := Length(nodeInUse) > 0; end; [Run] Filename: "{cmd}"; Parameters: "/C ""mklink /D ""{code:getSymLink}"" ""{code:getCurrentVersion}"""" "; Check: isNodeAlreadyInUse; Flags: runhidden; Filename: "{cmd}"; Parameters: "/K ""set PATH={app};%PATH% && cls && nvm"""; Flags: runasoriginaluser postinstall; [UninstallDelete] Type: files; Name: "{app}\nvm.exe"; Type: files; Name: "{app}\elevate.cmd"; Type: files; Name: "{app}\elevate.vbs"; Type: files; Name: "{app}\nodejs.ico"; Type: files; Name: "{app}\settings.txt"; Type: filesandordirs; Name: "{app}";
nvm.iss
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00019672172493301332, 0.00017157997353933752, 0.00016429804963991046, 0.0001718204002827406, 0.000005296105427987641 ]
{ "id": 5, "code_window": [ " \"io/ioutil\"\n", " \"strings\"\n", " \"strconv\"\n", " \"../arch\"\n", ")\n", "\n", "var client = &http.Client{}\n", "\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"../file\"\n" ], "file_path": "src/nvm/web/web.go", "type": "add", "edit_start_line_idx": 12 }
root: C:\Users\Corey\AppData\Roaming\nvm path: C:\Program Files\nodejs arch: 64 proxy: none
examples/settings.txt
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00016889801190700382, 0.00016889801190700382, 0.00016889801190700382, 0.00016889801190700382, 0 ]
{ "id": 6, "code_window": [ " return false\n", " }\n", "\n", "}\n", "\n", "func GetNpm(v string) bool {\n", " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "func GetNpm(root string, v string) bool {\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 83 }
package main import ( "fmt" "os" "os/exec" "strings" "io/ioutil" "regexp" "bytes" "encoding/json" "strconv" "./nvm/web" "./nvm/arch" "./nvm/file" "./nvm/node" // "./ansi" ) const ( NvmVersion = "1.0.6" ) type Environment struct { settings string root string symlink string arch string proxy string originalpath string originalversion string } var env = &Environment{ settings: os.Getenv("NVM_HOME")+"\\settings.txt", root: "", symlink: os.Getenv("NVM_SYMLINK"), arch: os.Getenv("PROCESSOR_ARCHITECTURE"), proxy: "none", originalpath: "", originalversion: "", } func main() { args := os.Args detail := "" procarch := arch.Validate(env.arch) Setup() // Capture any additional arguments if len(args) > 2 { detail = strings.ToLower(args[2]) } if len(args) > 3 { procarch = args[3] } if len(args) < 2 { help() return } // Run the appropriate method switch args[1] { case "install": install(detail,procarch) case "uninstall": uninstall(detail) case "use": use(detail,procarch) case "list": list(detail) case "ls": list(detail) case "on": enable() case "off": disable() case "root": if len(args) == 3 { updateRootDir(args[2]) } else { fmt.Println("\nCurrent Root: "+env.root) } case "version": fmt.Println(NvmVersion) case "v": fmt.Println(NvmVersion) case "arch": if strings.Trim(detail," \r\n") != "" { detail = strings.Trim(detail," \r\n") if detail != "32" && detail != "64" { fmt.Println("\""+detail+"\" is an invalid architecture. Use 32 or 64.") return } env.arch = detail saveSettings() fmt.Println("Default architecture set to "+detail+"-bit.") return } _, a := node.GetCurrentVersion() fmt.Println("System Default: "+env.arch+"-bit.") fmt.Println("Currently Configured: "+a+"-bit.") case "proxy": if detail == "" { fmt.Println("Current proxy: "+env.proxy) } else { env.proxy = detail saveSettings() } case "update": update() default: help() } } func update() { // cmd := exec.Command("cmd", "/d", "echo", "testing") // var output bytes.Buffer // var _stderr bytes.Buffer // cmd.Stdout = &output // cmd.Stderr = &_stderr // perr := cmd.Run() // if perr != nil { // fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String()) // return // } } func CheckVersionExceedsLatest(version string) bool{ content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt") re := regexp.MustCompile("node-v(.+)+msi") reg := regexp.MustCompile("node-v|-x.+") latest := reg.ReplaceAllString(re.FindString(content),"") if version <= latest { return false } else { return true } } func install(version string, cpuarch string) { if version == "" { fmt.Println("\nInvalid version.") fmt.Println(" ") help() return } cpuarch = strings.ToLower(cpuarch) if cpuarch != "" { if cpuarch != "32" && cpuarch != "64" && cpuarch != "all" { fmt.Println("\""+cpuarch+"\" is not a valid CPU architecture. 
Must be 32 or 64.") return } } else { cpuarch = env.arch } if cpuarch != "all" { cpuarch = arch.Validate(cpuarch) } if CheckVersionExceedsLatest(version) { fmt.Println("Node.js v"+version+" is not yet released or available.") return } if cpuarch == "64" && !web.IsNode64bitAvailable(version) { fmt.Println("Node.js v"+version+" is only available in 32-bit.") return } // If user specifies "latest" version, find out what version is if version == "latest" { content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS.txt") re := regexp.MustCompile("node-v(.+)+msi") reg := regexp.MustCompile("node-v|-x.+") version = reg.ReplaceAllString(re.FindString(content),"") } // Check to see if the version is already installed if !node.IsVersionInstalled(env.root,version,cpuarch) { if !node.IsVersionAvailable(version){ fmt.Println("Version "+version+" is not available. If you are attempting to download a \"just released\" version,") fmt.Println("it may not be recognized by the nvm service yet (updated hourly). If you feel this is in error and") fmt.Println("you know the version exists, please visit http://github.com/coreybutler/nodedistro and submit a PR.") return } // Make the output directories os.Mkdir(env.root+"\\v"+version,os.ModeDir) os.Mkdir(env.root+"\\v"+version+"\\node_modules",os.ModeDir) // Download node if (cpuarch == "32" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"32") { success := web.GetNodeJS(env.root,version,"32"); if !success { os.RemoveAll(env.root+"\\v"+version+"\\node_modules") fmt.Println("Could not download node.js v"+version+" 32-bit executable.") return } } if (cpuarch == "64" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"64") { success := web.GetNodeJS(env.root,version,"64"); if !success { os.RemoveAll(env.root+"\\v"+version+"\\node_modules") fmt.Println("Could not download node.js v"+version+" 64-bit executable.") return } } if file.Exists(env.root+"\\v"+version+"\\node_modules\\npm") { return } // If successful, add npm npmv := getNpmVersion(version) success := web.GetNpm(getNpmVersion(version)) if success { fmt.Printf("Installing npm v"+npmv+"...") // Extract npm to the temp directory file.Unzip(os.TempDir()+"\\npm-v"+npmv+".zip",os.TempDir()+"\\nvm-npm") // Copy the npm and npm.cmd files to the installation directory os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm",env.root+"\\v"+version+"\\npm") os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv+"\\bin\\npm.cmd",env.root+"\\v"+version+"\\npm.cmd") os.Rename(os.TempDir()+"\\nvm-npm\\npm-"+npmv,env.root+"\\v"+version+"\\node_modules\\npm") // Remove the source file os.RemoveAll(os.TempDir()+"\\nvm-npm") fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use "+version) } else { fmt.Println("Could not download npm for node v"+version+".") fmt.Println("Please visit https://github.com/npm/npm/releases/tag/v"+npmv+" to download npm.") fmt.Println("It should be extracted to "+env.root+"\\v"+version) } // If this is ever shipped for Mac, it should use homebrew. // If this ever ships on Linux, it should be on bintray so it can use yum, apt-get, etc. 
return } else { fmt.Println("Version "+version+" is already installed.") return } } func uninstall(version string) { // Make sure a version is specified if len(version) == 0 { fmt.Println("Provide the version you want to uninstall.") help() return } // Determine if the version exists and skip if it doesn't if node.IsVersionInstalled(env.root,version,"32") || node.IsVersionInstalled(env.root,version,"64") { fmt.Printf("Uninstalling node v"+version+"...") v, _ := node.GetCurrentVersion() if v == version { cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink) cmd.Run() } e := os.RemoveAll(env.root+"\\v"+version) if e != nil { fmt.Println("Error removing node v"+version) fmt.Println("Manually remove "+env.root+"\\v"+version+".") } else { fmt.Printf(" done") } } else { fmt.Println("node v"+version+" is not installed. Type \"nvm list\" to see what is installed.") } return } func use(version string, cpuarch string) { if version == "32" || version == "64" { cpuarch = version v, _ := node.GetCurrentVersion() version = v } cpuarch = arch.Validate(cpuarch) // Make sure the version is installed. If not, warn. if !node.IsVersionInstalled(env.root,version,cpuarch) { fmt.Println("node v"+version+" ("+cpuarch+"-bit) is not installed.") if cpuarch == "32" { if node.IsVersionInstalled(env.root,version,"64") { fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.") } } if cpuarch == "64" { if node.IsVersionInstalled(env.root,version,"64") { fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.") } } return } // Create or update the symlink sym, _ := os.Stat(env.symlink) if sym != nil { cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink) var output bytes.Buffer var _stderr bytes.Buffer cmd.Stdout = &output cmd.Stderr = &_stderr perr := cmd.Run() if perr != nil { fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String()) return } } c := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "mklink", "/D", env.symlink, env.root+"\\v"+version) var out bytes.Buffer var stderr bytes.Buffer c.Stdout = &out c.Stderr = &stderr err := c.Run() if err != nil { fmt.Println(fmt.Sprint(err) + ": " + stderr.String()) return } // Use the assigned CPU architecture cpuarch = arch.Validate(cpuarch) e32 := file.Exists(env.root+"\\v"+version+"\\node32.exe") e64 := file.Exists(env.root+"\\v"+version+"\\node64.exe") used := file.Exists(env.root+"\\v"+version+"\\node.exe") if (e32 || e64) { if used { if e32 { os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node64.exe") os.Rename(env.root+"\\v"+version+"\\node32.exe",env.root+"\\v"+version+"\\node.exe") } else { os.Rename(env.root+"\\v"+version+"\\node.exe",env.root+"\\v"+version+"\\node32.exe") os.Rename(env.root+"\\v"+version+"\\node64.exe",env.root+"\\v"+version+"\\node.exe") } } else if e32 || e64 { os.Rename(env.root+"\\v"+version+"\\node"+cpuarch+".exe",env.root+"\\v"+version+"\\node.exe") } } fmt.Println("Now using node v"+version+" ("+cpuarch+"-bit)") } func useArchitecture(a string) { if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) { fmt.Println("This computer only supports 32-bit processing.") return } if a == "32" || a == "64" { env.arch = a saveSettings() fmt.Println("Set to "+a+"-bit mode") } else { fmt.Println("Cannot set architecture to "+a+". 
Only 32 or 64 are acceptable values.") } } func list(listtype string) { if listtype == "" { listtype = "installed" } if listtype != "installed" && listtype != "available" { fmt.Println("\nInvalid list option.\n\nPlease use one of the following\n - nvm list\n - nvm list installed\n - nvm list available") help() return } if listtype == "installed" { fmt.Println("") inuse, a := node.GetCurrentVersion() v := node.GetInstalled(env.root) for i := 0; i < len(v); i++ { version := v[i] isnode, _ := regexp.MatchString("v",version) str := "" if isnode { if "v"+inuse == version { str = str+" * " } else { str = str+" " } str = str+regexp.MustCompile("v").ReplaceAllString(version,"") if "v"+inuse == version { str = str+" (Currently using "+a+"-bit executable)" // str = ansi.Color(str,"green:black") } fmt.Printf(str+"\n") } } if len(v) == 0 { fmt.Println("No installations recognized.") } } else { _, stable, unstable := node.GetAvailable() releases := 15 fmt.Println("\nShowing the "+strconv.Itoa(releases)+" latest available releases.\n") fmt.Println(" STABLE | UNSTABLE ") fmt.Println(" ---------------------------") for i := 0; i < releases; i++ { str := "v"+stable[i] for ii := 10-len(str); ii > 0; ii-- { str = " "+str } str = str+" | " str2 := "v"+unstable[i] for ii := 10-len(str2); ii > 0; ii-- { str2 = " "+str2 } fmt.Println(" "+str+str2) } fmt.Println("\nFor a complete list, visit http://coreybutler.github.io/nodedistro") } } func enable() { dir := "" files, _ := ioutil.ReadDir(env.root) for _, f := range files { if f.IsDir() { isnode, _ := regexp.MatchString("v",f.Name()) if isnode { dir = f.Name() } } } fmt.Println("nvm enabled") if dir != "" { use(strings.Trim(regexp.MustCompile("v").ReplaceAllString(dir,"")," \n\r"),env.arch) } else { fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest") } } func disable() { cmd := exec.Command(env.root+"\\elevate.cmd", "cmd", "/C", "rmdir", env.symlink) cmd.Run() fmt.Println("nvm disabled") } func help() { fmt.Println("\nRunning version "+NvmVersion+".") fmt.Println("\nUsage:") fmt.Println(" ") fmt.Println(" nvm arch : Show if node is running in 32 or 64 bit mode.") fmt.Println(" nvm install <version> [arch] : The version can be a node.js version or \"latest\" for the latest stable version.") fmt.Println(" Optionally specify whether to install the 32 or 64 bit version (defaults to system arch).") fmt.Println(" Set [arch] to \"all\" to install 32 AND 64 bit versions.") fmt.Println(" nvm list [available] : List the node.js installations. Type \"available\" at the end to see what can be installed. Aliased as ls.") fmt.Println(" nvm on : Enable node.js version management.") fmt.Println(" nvm off : Disable node.js version management.") fmt.Println(" nvm proxy [url] : Set a proxy to use for downloads. Leave [url] blank to see the current proxy.") fmt.Println(" Set [url] to \"none\" to remove the proxy.") fmt.Println(" nvm uninstall <version> : The version must be a specific version.") // fmt.Println(" nvm update : Automatically update nvm to the latest version.") fmt.Println(" nvm use [version] [arch] : Switch to use the specified version. 
Optionally specify 32/64bit architecture.") fmt.Println(" nvm use <arch> will continue using the selected version, but switch to 32/64 bit mode.") fmt.Println(" nvm root [path] : Set the directory where nvm should store different versions of node.js.") fmt.Println(" If <path> is not set, the current root will be displayed.") fmt.Println(" nvm version : Displays the current running version of nvm for Windows. Aliased as v.") fmt.Println(" ") } // Given a node.js version, returns the associated npm version func getNpmVersion(nodeversion string) string { // Get raw text text := web.GetRemoteTextFile("https://raw.githubusercontent.com/coreybutler/nodedistro/master/nodeversions.json") // Parse var data interface{} json.Unmarshal([]byte(text), &data); body := data.(map[string]interface{}) all := body["all"] npm := all.(map[string]interface{}) return npm[nodeversion].(string) } func updateRootDir(path string) { _, err := os.Stat(path) if err != nil { fmt.Println(path+" does not exist or could not be found.") return } env.root = path saveSettings() fmt.Println("\nRoot has been set to "+path) } func saveSettings() { content := "root: "+strings.Trim(env.root," \n\r")+"\r\narch: "+strings.Trim(env.arch," \n\r")+"\r\nproxy: "+strings.Trim(env.proxy," \n\r")+"\r\noriginalpath: "+strings.Trim(env.originalpath," \n\r")+"\r\noriginalversion: "+strings.Trim(env.originalversion," \n\r") ioutil.WriteFile(env.settings, []byte(content), 0644) } func Setup() { lines, err := file.ReadLines(env.settings) if err != nil { fmt.Println("\nERROR",err) os.Exit(1) } // Process each line and extract the value for _, line := range lines { if strings.Contains(line,"root:") { env.root = strings.Trim(regexp.MustCompile("root:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"originalpath:") { env.originalpath = strings.Trim(regexp.MustCompile("originalpath:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"originalversion:") { env.originalversion = strings.Trim(regexp.MustCompile("originalversion:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"arch:"){ env.arch = strings.Trim(regexp.MustCompile("arch:").ReplaceAllString(line,"")," \r\n") } else if strings.Contains(line,"proxy:"){ env.proxy = strings.Trim(regexp.MustCompile("proxy:").ReplaceAllString(line,"")," \r\n") if env.proxy != "none" && env.proxy != "" { if strings.ToLower(env.proxy[0:4]) != "http" { env.proxy = "http://"+env.proxy } web.SetProxy(env.proxy) } } } env.arch = arch.Validate(env.arch) // Make sure the directories exist _, e := os.Stat(env.root) if e != nil { fmt.Println(env.root+" could not be found or does not exist. Exiting.") return } }
src/nvm.go
1
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.9989792108535767, 0.12073808163404465, 0.0001627767487661913, 0.00018060464935842901, 0.307122141122818 ]
{ "id": 6, "code_window": [ " return false\n", " }\n", "\n", "}\n", "\n", "func GetNpm(v string) bool {\n", " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "func GetNpm(root string, v string) bool {\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 83 }
[![Tweet This!][1.1] Tweet This!][1] [1.1]: http://i.imgur.com/wWzX9uB.png (Tweet about NVM for Windows) [1]: https://twitter.com/intent/tweet?hashtags=nodejs&original_referer=http%3A%2F%2F127.0.0.1%3A91%2F&text=Check%20out%20NVM%20for%20Windows!&tw_p=tweetbutton&url=http%3A%2F%2Fgithub.com%2Fcoreybutler%2Fnvm-windows&via=goldglovecb # Node Version Manager (nvm) for Windows [![Gitter](https://badges.gitter.im/Join Chat.svg)](https://gitter.im/coreybutler/nvm-windows?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) Manage multiple installations of node.js on a Windows computer. **tl;dr** [nvm](https://github.com/creationix/nvm), but for Windows, with an installer. [Download Now](https://github.com/coreybutler/nvm/releases)! (No io.js support, _yet_... see Gitter for details) ![NVM for Windows](http://coreybutler.github.io/nvm-windows/images/installlatest.jpg) There are situations where the ability to switch between different versions of Node.js can be very useful. For example, if you want to test a module you're developing with the latest bleeding edge version without uninstalling the stable version of node, this utility can help. ![Switch between stable and unstable versions.](http://coreybutler.github.io/nvm-windows/images/use.jpg) ### Installation & Upgrades It comes with an installer (and uninstaller), because getting it should be easy. Please note, you need to uninstall any existing versions of node.js before installing NVM for Windows. [Download the latest installer from the releases](https://github.com/coreybutler/nvm/releases). ![NVM for Windows Installer](http://coreybutler.github.io/nvm-windows/images/installer.jpg) **To upgrade**, run the new installer. It will safely overwrite the files it needs to update without touching your node.js installations. Make sure you use the same installation and symlink folder. If you originally installed to the default locations, you just need to click "next" on each window until it finishes. ### Usage NVM for Windows is a command line tool. Simply type `nvm` in the console for help. The basic commands are: - `nvm arch [32|64]`: Show if node is running in 32 or 64 bit mode. Specify 32 or 64 to override the default architecture. - `nvm install <version> [arch]`: The version can be a node.js version or "latest" for the latest stable version. Optionally specify whether to install the 32 or 64 bit version (defaults to system arch). Set `[arch]` to "all" to install 32 AND 64 bit versions. - `nvm list [available]`: List the node.js installations. Type `available` at the end to show a list of versions available for download. - `nvm on`: Enable node.js version management. - `nvm off`: Disable node.js version management (does not uninstall anything). - `nvm proxy [url]`: Set a proxy to use for downloads. Leave `[url]` blank to see the current proxy. Set `[url]` to "none" to remove the proxy. - `nvm uninstall <version>`: Uninstall a specific version. - `nvm use <version> [arch]`: Switch to use the specified version. Optionally specify 32/64bit architecture. `nvm use <arch>` will continue using the selected version, but switch to 32/64 bit mode based on the value supplied to `<arch>`. - `nvm root <path>`: Set the directory where nvm should store different versions of node.js. If `<path>` is not set, the current root will be displayed. - `nvm version`: Displays the current running version of NVM for Windows. ### Gotcha! 
Please note that any global npm modules you may have installed are **not** shared between the various versions of node.js you have installed. Additionally, some npm modules may not be supported in the version of node you're using, so be aware of your environment as you work. --- ## Why another version manager? There are several version managers for node.js. Tools like [nvm](https://github.com/creationix/nvm) and [n](https://github.com/visionmedia/n) only run on Mac OSX and Linux. Windows users are left in the cold? No. [nvmw](https://github.com/hakobera/nvmw) and [nodist](https://github.com/marcelklehr/nodist) are both designed for Windows. So, why another version manager for Windows? The architecture of most node version managers for Windows relies on `.bat` files, which do some clever tricks to set or mimic environment variables. Some of them use node itself (once it's downloaded), which is admirable, but prone to problems. Right around node 0.10.30, the installation structure changed a little, causing some of these to just stop working with anything new. Additionally, some users struggle to install these modules since it requires a little more knowledge of node's installation structure. I believe if it were easier for people to switch between versions, people might take the time to test their code on both older and newer versions... which is just good practice. ## What's the big difference? First and foremost, this version of nvm has no dependency on node. It's written in [Go](http://golang.org/), which is a much more structured approach than hacking around a limited `.bat` file. It does not rely on having an existing node installation. Plus, should the need arise, Go offers potential for creating a Mac/Linux version on the same code base with a substantially easier migration path than converting a bunch of batch to shell logic. `bat > sh, it's crazy, right?` The control mechanism is also quite different. There are two general ways to support multiple node installations with hot switching capabilities. The first is to modify the system `PATH` any time you switch versions, or bypass it by using a `.bat` file to mimic the node executable and redirect accordingly. This always seemed a little hackish to me, and there are some quirks as a result of this implementation. The second option is to use a symlink. This concept requires putting the symlink in the system `PATH`, then updating its target to the node installation directory you want to use. This is a straightforward approach, and seems to be what people recommend... until they realize just how much of a pain symlinks are on Windows. This is why it hasn't happened before. In order to create/modify a symlink, you must be running as an admin, and you must get around Windows UAC (that annoying prompt). Luckily, this is a challenge I already solved with some helper scripts in [node-windows](http://github.com/coreybutler/node-windows). As a result, NVM for Windows maintains a single symlink that is put in the system `PATH` during installation only. Switching to different versions of node is a matter of switching the symlink target. This means the utility does **not** require you to run `nvm use x.x.x` every time you open a console window. When you _do_ run `nvm use x.x.x`, the active version of node is automatically updated across all open console windows. It also persists between system reboots, so you only need to use nvm when you want to make a change.
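To make the symlink mechanics concrete, here is a minimal Go sketch of the switching step described above. It is a simplified illustration, not NVM's actual implementation: the real tool routes these commands through an `elevate.cmd` helper to get past UAC, and the two paths in `main` are hypothetical examples.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// switchNodeVersion repoints the shared symlink at a specific node.js
// installation directory: remove the old link, then recreate it with
// "mklink /D". On Windows this normally requires an elevated prompt.
func switchNodeVersion(symlink, versionDir string) error {
	// "rmdir" on a directory symlink removes the link itself,
	// leaving the target directory untouched.
	if _, err := os.Stat(symlink); err == nil {
		if out, err := exec.Command("cmd", "/C", "rmdir", symlink).CombinedOutput(); err != nil {
			return fmt.Errorf("removing old symlink: %v: %s", err, out)
		}
	}
	// Recreate the link so it points at the requested version.
	if out, err := exec.Command("cmd", "/C", "mklink", "/D", symlink, versionDir).CombinedOutput(); err != nil {
		return fmt.Errorf("creating symlink: %v: %s", err, out)
	}
	return nil
}

func main() {
	// Both paths are placeholders for this example.
	if err := switchNodeVersion(`C:\Program Files\nodejs`, `C:\Users\me\AppData\Roaming\nvm\v0.10.26`); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("symlink updated")
}
```

Because every console window resolves `node` through the same link, updating the link's target switches the active version everywhere at once, which is exactly the behavior described above.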
NVM for Windows comes with an installer, courtesy of a byproduct of my work on [Fenix Web Server](http://fenixwebserver.com). Overall, this project brings together some ideas, a few battle-hardened pieces of other modules, and support for newer versions of node. I also wrote a simple [data feed](http://github.com/coreybutler/nodedistro) containing a list of node.js versions and their associated npm version. This is how NVM for Windows recognizes the "latest" stable version. It's free for anyone to use. ## Motivation I needed it, plain and simple. Additionally, it's apparent that [support for multiple versions](https://github.com/joyent/node/issues/8075) is not coming to node core, or even something they care about. It was also an excuse to play with Go. ## License MIT. ## Thanks Thanks to everyone who has submitted issues on and off Github, made suggestions, and generally helped make this a better project. Special thanks to [@vkbansal](https://github.com/vkbansal), who has actively provided feedback throughout the releases. ## Alternatives - [nvmw](https://github.com/hakobera/nvmw) - Windows Only - [nodist](https://github.com/marcelklehr/nodist) - Windows Only - [nvm](https://github.com/creationix/nvm) - Mac/Linux Only - [n](https://github.com/visionmedia/n) - Mac/Linux Only
README.md
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0004120416706427932, 0.00022138789063319564, 0.00016567013517487794, 0.00017773653962649405, 0.00007847494271118194 ]
{ "id": 6, "code_window": [ " return false\n", " }\n", "\n", "}\n", "\n", "func GetNpm(v string) bool {\n", " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "func GetNpm(root string, v string) bool {\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 83 }
# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof dist src/v* bin/*.exe !bin/buildtools/* bin/*.zip bin/nvm/*
.gitignore
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00028453205595724285, 0.00019605382112786174, 0.0001635264343349263, 0.00016807837528176606, 0.00005113549923407845 ]
{ "id": 6, "code_window": [ " return false\n", " }\n", "\n", "}\n", "\n", "func GetNpm(v string) bool {\n", " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "func GetNpm(root string, v string) bool {\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 83 }
package arch import ( //"regexp" "os" //"os/exec" "strings" //"fmt" "encoding/hex" ) func SearchBytesInFile( path string, match string, limit int) bool { // Transform the hex string into a byte array toMatch, err := hex.DecodeString(match) if err != nil { return false } // Open the file, bailing out on error file, err := os.Open(path) if err != nil { return false } defer file.Close() // Allocate a 1 byte buffer to perform the match bit := make([]byte, 1) j := 0 for i := 0; i < limit; i++ { file.Read(bit) if bit[0] != toMatch[j] { j = 0 } if bit[0] == toMatch[j] { j++ if j >= len(toMatch) { return true } } } return false } func Bit(path string) string { // "50450000" is the PE signature ("PE\0\0"); "6486" and "4C" are the // little-endian machine fields for x64 (0x8664) and i386 (0x14C). is64 := SearchBytesInFile(path, "504500006486", 400) is32 := SearchBytesInFile(path, "504500004C", 400) if is64 { return "64" } else if is32 { return "32" } return "?" } func Validate(str string) (string){ if str == "" { str = os.Getenv("PROCESSOR_ARCHITECTURE") } // strings.Contains (rather than ContainsAny) so values like "x86" are not misread as 64-bit if strings.Contains(str,"64") { return "64" } else { return "32" } }
src/nvm/arch/arch.go
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0018750964663922787, 0.0004325960762798786, 0.0001647097960812971, 0.00017681054305285215, 0.0005904066492803395 ]
{ "id": 7, "code_window": [ " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n", " fileName := os.TempDir()+\"\\\\\"+\"npm-v\"+v+\".zip\"\n", "\n", " fmt.Printf(\"Downloading npm version \"+v+\"... \")\n", " if Download(url,fileName) {\n", " fmt.Printf(\"Complete\\n\")\n", " return true\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " // temp directory to download the .zip file\n", " tempDir := root+\"\\\\temp\"\n", "\n", " // if the temp directory doesn't exist, create it\n", " if (!file.Exists(tempDir)) {\n", " fmt.Println(\"Creating \"+tempDir+\"\\n\")\n", " err := os.Mkdir(tempDir, os.ModePerm)\n", " if err != nil {\n", " fmt.Println(err)\n", " os.Exit(1)\n", " }\n", " }\n", " fileName := tempDir+\"\\\\\"+\"npm-v\"+v+\".zip\"\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 85 }
package web import( "fmt" "net/http" "net/url" "os" "io" "io/ioutil" "strings" "strconv" "../arch" ) var client = &http.Client{} func SetProxy(p string){ if p != "" && p != "none" { proxyUrl, _ := url.Parse(p) client = &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyUrl)}} } else { client = &http.Client{} } } func Download(url string, target string) bool { output, err := os.Create(target) if err != nil { fmt.Println("Error while creating", target, "-", err) return false } defer output.Close() response, err := client.Get(url) if err != nil { // Returning here also avoids dereferencing a nil response below fmt.Println("Error while downloading", url, "-", err) return false } defer response.Body.Close() _, err = io.Copy(output, response.Body) if err != nil { fmt.Println("Error while downloading", url, "-", err) return false } if response.Status[0:3] != "200" { fmt.Println("Download failed. Rolling Back.") err := os.Remove(target) if err != nil { fmt.Println("Rollback failed.",err) } return false } return true } func GetNodeJS(root string, v string, a string) bool { a = arch.Validate(a) url := "" if a == "32" { url = "http://nodejs.org/dist/v"+v+"/node.exe" } else { if !IsNode64bitAvailable(v) { fmt.Println("Node.js v"+v+" is only available in 32-bit.") return false } url = "http://nodejs.org/dist/v"+v+"/x64/node.exe" } fileName := root+"\\v"+v+"\\node"+a+".exe" fmt.Printf("Downloading node.js version "+v+" ("+a+"-bit)... ") if Download(url,fileName) { fmt.Printf("Complete\n") return true } else { return false } } func GetNpm(v string) bool { url := "https://github.com/npm/npm/archive/v"+v+".zip" fileName := os.TempDir()+"\\"+"npm-v"+v+".zip" fmt.Printf("Downloading npm version "+v+"... ") if Download(url,fileName) { fmt.Printf("Complete\n") return true } else { return false } } func GetRemoteTextFile(url string) string { response, httperr := client.Get(url) if httperr != nil { fmt.Println("\nCould not retrieve "+url+".\n\n") fmt.Printf("%s", httperr) os.Exit(1) } else { defer response.Body.Close() contents, readerr := ioutil.ReadAll(response.Body) if readerr != nil { fmt.Printf("%s", readerr) os.Exit(1) } return string(contents) } os.Exit(1) return "" } func IsNode64bitAvailable(v string) bool { if v == "latest" { return true } // Anything below version 0.8 doesn't have a 64 bit version vers := strings.Fields(strings.Replace(v,"."," ",-1)) main, _ := strconv.ParseInt(vers[0],0,0) minor, _ := strconv.ParseInt(vers[1],0,0) if main == 0 && minor < 8 { return false } // Check online to see if a 64 bit version exists res, err := client.Head("http://nodejs.org/dist/v"+v+"/x64/node.exe") if err != nil { return false } return res.StatusCode == 200 }
src/nvm/web/web.go
1
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.9984785914421082, 0.10524964332580566, 0.00015745157725177705, 0.0003519687452353537, 0.27429699897766113 ]
{ "id": 7, "code_window": [ " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n", " fileName := os.TempDir()+\"\\\\\"+\"npm-v\"+v+\".zip\"\n", "\n", " fmt.Printf(\"Downloading npm version \"+v+\"... \")\n", " if Download(url,fileName) {\n", " fmt.Printf(\"Complete\\n\")\n", " return true\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " // temp directory to download the .zip file\n", " tempDir := root+\"\\\\temp\"\n", "\n", " // if the temp directory doesn't exist, create it\n", " if (!file.Exists(tempDir)) {\n", " fmt.Println(\"Creating \"+tempDir+\"\\n\")\n", " err := os.Mkdir(tempDir, os.ModePerm)\n", " if err != nil {\n", " fmt.Println(err)\n", " os.Exit(1)\n", " }\n", " }\n", " fileName := tempDir+\"\\\\\"+\"npm-v\"+v+\".zip\"\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 85 }
@echo off SET INNOSETUP=%CD%\nvm.iss SET ORIG=%CD% SET GOPATH=%CD%\src SET GOBIN=%CD%\bin SET GOARCH=386 REM Get the version number from the setup file for /f "tokens=*" %%i in ('findstr /n . %INNOSETUP% ^| findstr ^4:#define') do set L=%%i set version=%L:~24,-1% REM Get the version number from the core executable for /f "tokens=*" %%i in ('findstr /n . %GOPATH%\nvm.go ^| findstr ^NvmVersion^| findstr ^21^') do set L=%%i set goversion=%L:~19,-1% IF NOT %version%==%goversion% GOTO VERSIONMISMATCH SET DIST=%CD%\dist\%version% REM Build the executable echo Building NVM for Windows rm %GOBIN%\nvm.exe cd %GOPATH% goxc -arch="386" -os="windows" -n="nvm" -d="%GOBIN%" -o="%GOBIN%\nvm{{.Ext}}" -tasks-=package cd %ORIG% rm %GOBIN%\src.exe rm %GOPATH%\src.exe rm %GOPATH%\nvm.exe REM Clean the dist directory rm -rf "%DIST%" mkdir "%DIST%" REM Create the "noinstall" zip echo Generating nvm-noinstall.zip for /d %%a in (%GOBIN%) do (buildtools\zip -j -9 -r "%DIST%\nvm-noinstall.zip" "%CD%\LICENSE" "%%a\*" -x "%GOBIN%\nodejs.ico") REM Create the installer echo Generating nvm-setup.zip buildtools\iscc %INNOSETUP% /o%DIST% buildtools\zip -j -9 -r "%DIST%\nvm-setup.zip" "%DIST%\nvm-setup.exe" REM rm "%DIST%\nvm-setup.exe" echo -------------------------- echo Release %version% available in %DIST% GOTO COMPLETE :VERSIONMISMATCH echo The version number in nvm.iss does not match the version in src\nvm.go echo - nvm.iss line #4: %version% echo - nvm.go line #21: %goversion% EXIT /B :COMPLETE @echo on
build.bat
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.00017089299217332155, 0.0001663264847593382, 0.00016080074419733137, 0.00016692008648533374, 0.0000032511600238649407 ]
{ "id": 7, "code_window": [ " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n", " fileName := os.TempDir()+\"\\\\\"+\"npm-v\"+v+\".zip\"\n", "\n", " fmt.Printf(\"Downloading npm version \"+v+\"... \")\n", " if Download(url,fileName) {\n", " fmt.Printf(\"Complete\\n\")\n", " return true\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " // temp directory to download the .zip file\n", " tempDir := root+\"\\\\temp\"\n", "\n", " // if the temp directory doesn't exist, create it\n", " if (!file.Exists(tempDir)) {\n", " fmt.Println(\"Creating \"+tempDir+\"\\n\")\n", " err := os.Mkdir(tempDir, os.ModePerm)\n", " if err != nil {\n", " fmt.Println(err)\n", " os.Exit(1)\n", " }\n", " }\n", " fileName := tempDir+\"\\\\\"+\"npm-v\"+v+\".zip\"\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 85 }
/** * Used under the MIT License. * Semver courtesy Benedikt Lang (https://github.com/blang) */ package semver import ( "errors" "fmt" "strconv" "strings" ) const ( numbers string = "0123456789" alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" alphanum = alphas + numbers dot = "." hyphen = "-" plus = "+" ) // Latest fully supported spec version var SPEC_VERSION = Version{ Major: 2, Minor: 0, Patch: 0, } type Version struct { Major uint64 Minor uint64 Patch uint64 Pre []*PRVersion Build []string //No Precendence } // Version to string func (v *Version) String() string { versionArray := []string{ strconv.FormatUint(v.Major, 10), dot, strconv.FormatUint(v.Minor, 10), dot, strconv.FormatUint(v.Patch, 10), } if len(v.Pre) > 0 { versionArray = append(versionArray, hyphen) for i, pre := range v.Pre { if i > 0 { versionArray = append(versionArray, dot) } versionArray = append(versionArray, pre.String()) } } if len(v.Build) > 0 { versionArray = append(versionArray, plus, strings.Join(v.Build, dot)) } return strings.Join(versionArray, "") } // Checks if v is greater than o. func (v *Version) GT(o *Version) bool { return (v.Compare(o) == 1) } // Checks if v is greater than or equal to o. func (v *Version) GTE(o *Version) bool { return (v.Compare(o) >= 0) } // Checks if v is less than o. func (v *Version) LT(o *Version) bool { return (v.Compare(o) == -1) } // Checks if v is less than or equal to o. func (v *Version) LTE(o *Version) bool { return (v.Compare(o) <= 0) } // Compares Versions v to o: // -1 == v is less than o // 0 == v is equal to o // 1 == v is greater than o func (v *Version) Compare(o *Version) int { if v.Major != o.Major { if v.Major > o.Major { return 1 } else { return -1 } } if v.Minor != o.Minor { if v.Minor > o.Minor { return 1 } else { return -1 } } if v.Patch != o.Patch { if v.Patch > o.Patch { return 1 } else { return -1 } } // Quick comparison if a version has no prerelease versions if len(v.Pre) == 0 && len(o.Pre) == 0 { return 0 } else if len(v.Pre) == 0 && len(o.Pre) > 0 { return 1 } else if len(v.Pre) > 0 && len(o.Pre) == 0 { return -1 } else { i := 0 for ; i < len(v.Pre) && i < len(o.Pre); i++ { if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { continue } else if comp == 1 { return 1 } else { return -1 } } // If all pr versions are the equal but one has further prversion, this one greater if i == len(v.Pre) && i == len(o.Pre) { return 0 } else if i == len(v.Pre) && i < len(o.Pre) { return -1 } else { return 1 } } } // Validates v and returns error in case func (v *Version) Validate() error { // Major, Minor, Patch already validated using uint64 if len(v.Pre) > 0 { for _, pre := range v.Pre { if !pre.IsNum { //Numeric prerelease versions already uint64 if len(pre.VersionStr) == 0 { return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) } if !containsOnly(pre.VersionStr, alphanum) { return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) } } } } if len(v.Build) > 0 { for _, build := range v.Build { if len(build) == 0 { return fmt.Errorf("Build meta data can not be empty %q", build) } if !containsOnly(build, alphanum) { return fmt.Errorf("Invalid character(s) found in build meta data %q", build) } } } return nil } // Alias for Parse, parses version string and returns a validated Version or error func New(s string) (*Version, error) { return Parse(s) } // Parses version string and returns a validated Version or error func Parse(s string) (*Version, error) { if len(s) == 0 { return nil, errors.New("Version string empty") } 
// Split into major.minor.(patch+pr+meta) parts := strings.SplitN(s, ".", 3) if len(parts) != 3 { return nil, errors.New("No Major.Minor.Patch elements found") } // Major if !containsOnly(parts[0], numbers) { return nil, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) } if hasLeadingZeroes(parts[0]) { return nil, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) } major, err := strconv.ParseUint(parts[0], 10, 64) if err != nil { return nil, err } // Minor if !containsOnly(parts[1], numbers) { return nil, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) } if hasLeadingZeroes(parts[1]) { return nil, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) } minor, err := strconv.ParseUint(parts[1], 10, 64) if err != nil { return nil, err } preIndex := strings.Index(parts[2], "-") buildIndex := strings.Index(parts[2], "+") // Determine last index of patch version (first of pre or build versions) var subVersionIndex int if preIndex != -1 && buildIndex == -1 { subVersionIndex = preIndex } else if preIndex == -1 && buildIndex != -1 { subVersionIndex = buildIndex } else if preIndex == -1 && buildIndex == -1 { subVersionIndex = len(parts[2]) } else { // if there is no actual prversion but a hyphen inside the build meta data if buildIndex < preIndex { subVersionIndex = buildIndex preIndex = -1 // Build meta data before preIndex found implicates there are no prerelease versions } else { subVersionIndex = preIndex } } if !containsOnly(parts[2][:subVersionIndex], numbers) { return nil, fmt.Errorf("Invalid character(s) found in patch number %q", parts[2][:subVersionIndex]) } if hasLeadingZeroes(parts[2][:subVersionIndex]) { return nil, fmt.Errorf("Patch number must not contain leading zeroes %q", parts[2][:subVersionIndex]) } patch, err := strconv.ParseUint(parts[2][:subVersionIndex], 10, 64) if err != nil { return nil, err } v := &Version{} v.Major = major v.Minor = minor v.Patch = patch // There are PreRelease versions if preIndex != -1 { var preRels string if buildIndex != -1 { preRels = parts[2][subVersionIndex+1 : buildIndex] } else { preRels = parts[2][subVersionIndex+1:] } prparts := strings.Split(preRels, ".") for _, prstr := range prparts { parsedPR, err := NewPRVersion(prstr) if err != nil { return nil, err } v.Pre = append(v.Pre, parsedPR) } } // There is build meta data if buildIndex != -1 { buildStr := parts[2][buildIndex+1:] buildParts := strings.Split(buildStr, ".") for _, str := range buildParts { if len(str) == 0 { return nil, errors.New("Build meta data is empty") } if !containsOnly(str, alphanum) { return nil, fmt.Errorf("Invalid character(s) found in build meta data %q", str) } v.Build = append(v.Build, str) } } return v, nil } // PreRelease Version type PRVersion struct { VersionStr string VersionNum uint64 IsNum bool } // Creates a new valid prerelease version func NewPRVersion(s string) (*PRVersion, error) { if len(s) == 0 { return nil, errors.New("Prerelease is empty") } v := &PRVersion{} if containsOnly(s, numbers) { if hasLeadingZeroes(s) { return nil, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) } num, err := strconv.ParseUint(s, 10, 64) // Might never be hit, but just in case if err != nil { return nil, err } v.VersionNum = num v.IsNum = true } else if containsOnly(s, alphanum) { v.VersionStr = s v.IsNum = false } else { return nil, fmt.Errorf("Invalid character(s) found in prerelease %q", s) } return v, nil } // Is pre release version numeric? 
func (v *PRVersion) IsNumeric() bool { return v.IsNum } // Compares PreRelease Versions v to o: // -1 == v is less than o // 0 == v is equal to o // 1 == v is greater than o func (v *PRVersion) Compare(o *PRVersion) int { if v.IsNum && !o.IsNum { return -1 } else if !v.IsNum && o.IsNum { return 1 } else if v.IsNum && o.IsNum { if v.VersionNum == o.VersionNum { return 0 } else if v.VersionNum > o.VersionNum { return 1 } else { return -1 } } else { // both are Alphas if v.VersionStr == o.VersionStr { return 0 } else if v.VersionStr > o.VersionStr { return 1 } else { return -1 } } } // PreRelease version to string func (v *PRVersion) String() string { if v.IsNum { return strconv.FormatUint(v.VersionNum, 10) } return v.VersionStr } func containsOnly(s string, set string) bool { return strings.IndexFunc(s, func(r rune) bool { return !strings.ContainsRune(set, r) }) == -1 } func hasLeadingZeroes(s string) bool { return len(s) > 1 && s[0] == '0' } // Creates a new valid build version func NewBuildVersion(s string) (string, error) { if len(s) == 0 { return "", errors.New("Buildversion is empty") } if !containsOnly(s, alphanum) { return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) } return s, nil }
src/nvm/semver/semver.go
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.019480085000395775, 0.0012718181824311614, 0.00016621350368950516, 0.0003008183848578483, 0.003179919207468629 ]
{ "id": 7, "code_window": [ " url := \"https://github.com/npm/npm/archive/v\"+v+\".zip\"\n", " fileName := os.TempDir()+\"\\\\\"+\"npm-v\"+v+\".zip\"\n", "\n", " fmt.Printf(\"Downloading npm version \"+v+\"... \")\n", " if Download(url,fileName) {\n", " fmt.Printf(\"Complete\\n\")\n", " return true\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " // temp directory to download the .zip file\n", " tempDir := root+\"\\\\temp\"\n", "\n", " // if the temp directory doesn't exist, create it\n", " if (!file.Exists(tempDir)) {\n", " fmt.Println(\"Creating \"+tempDir+\"\\n\")\n", " err := os.Mkdir(tempDir, os.ModePerm)\n", " if err != nil {\n", " fmt.Println(err)\n", " os.Exit(1)\n", " }\n", " }\n", " fileName := tempDir+\"\\\\\"+\"npm-v\"+v+\".zip\"\n" ], "file_path": "src/nvm/web/web.go", "type": "replace", "edit_start_line_idx": 85 }
package file import( "archive/zip" "bufio" "io" "os" "path/filepath" "strings" ) // Function courtesy http://stackoverflow.com/users/1129149/swtdrgn func Unzip(src, dest string) error { r, err := zip.OpenReader(src) if err != nil { return err } defer r.Close() for _, f := range r.File { rc, err := f.Open() if err != nil { return err } fpath := filepath.Join(dest, f.Name) if f.FileInfo().IsDir() { os.MkdirAll(fpath, f.Mode()) } else { var fdir string if lastIndex := strings.LastIndex(fpath,string(os.PathSeparator)); lastIndex > -1 { fdir = fpath[:lastIndex] } err = os.MkdirAll(fdir, f.Mode()) if err != nil { rc.Close() return err } // Close handles at the end of each iteration instead of deferring, // so a large archive does not exhaust file descriptors out, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) if err != nil { rc.Close() return err } _, err = io.Copy(out, rc) out.Close() if err != nil { rc.Close() return err } } rc.Close() } return nil } func ReadLines(path string) ([]string, error) { file, err := os.Open(path) if err != nil { return nil, err } defer file.Close() var lines []string scanner := bufio.NewScanner(file) for scanner.Scan() { lines = append(lines, scanner.Text()) } return lines, scanner.Err() } func Exists(filename string) bool { _, err := os.Stat(filename); return err == nil }
src/nvm/file/file.go
0
https://github.com/coreybutler/nvm-windows/commit/a1f6d4165705084c40f702abfca89b355df2daa3
[ 0.0003389422781765461, 0.00020360603230074048, 0.00016357227286789566, 0.00017329410184174776, 0.00005632686224998906 ]
{ "id": 0, "code_window": [ "\t\t\treturn DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out)\n", "\t\t}\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\ts, err := describer.Describe(info.Namespace, info.Name)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/describe.go", "type": "add", "edit_start_line_idx": 88 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "fmt" "io" "strings" "github.com/spf13/cobra" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" ) func NewCmdDescribe(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "describe (RESOURCE NAME | RESOURCE/NAME)", Short: "Show details of a specific resource", Long: `Show details of a specific resource. This command joins many API calls together to form a detailed description of a given resource.`, Example: `// Describe a node $ kubectl describe nodes kubernetes-minion-emt8.c.myproject.internal // Describe a pod $ kubectl describe pods/nginx`, Run: func(cmd *cobra.Command, args []string) { err := RunDescribe(f, out, cmd, args) cmdutil.CheckErr(err) }, ValidArgs: kubectl.DescribableResources(), } return cmd } func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error { cmdNamespace, err := f.DefaultNamespace() if err != nil { return err } mapper, typer := f.Object() r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). ResourceTypeOrNameArgs(false, args...). Flatten(). Do() err = r.Err() if err != nil { return err } mapping, err := r.ResourceMapping() if err != nil { return err } describer, err := f.Describer(mapping) if err != nil { return err } infos, err := r.Infos() if err != nil { if errors.IsNotFound(err) && len(args) == 2 { return DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out) } return err } info := infos[0] s, err := describer.Describe(info.Namespace, info.Name) if err != nil { return err } fmt.Fprintf(out, "%s\n", s) return nil } func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper, describer kubectl.Describer, f *cmdutil.Factory, namespace, rsrc, prefix string, out io.Writer) error { r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). NamespaceParam(namespace).DefaultNamespace(). ResourceTypeOrNameArgs(true, rsrc). SingleResourceType(). Flatten(). Do() infos, err := r.Infos() if err != nil { return err } for ix := range infos { info := infos[ix] if strings.HasPrefix(info.Name, prefix) { s, err := describer.Describe(info.Namespace, info.Name) if err != nil { return err } fmt.Fprintf(out, "%s\n", s) } } return nil }
pkg/kubectl/cmd/describe.go
1
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.997468113899231, 0.20753134787082672, 0.00016819121083244681, 0.0022015580907464027, 0.3827607333660126 ]
{ "id": 0, "code_window": [ "\t\t\treturn DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out)\n", "\t\t}\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\ts, err := describer.Describe(info.Namespace, info.Name)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/describe.go", "type": "add", "edit_start_line_idx": 88 }
# Collecting log files from within containers with Fluentd and sending them to the Google Cloud Logging service.

*Note that this only works for clusters running on GCE whose VMs have the cloud-logging.write scope. If your cluster is logging to Elasticsearch instead, see [this guide](/contrib/logging/fluentd-sidecar-es/).*

This directory contains the source files needed to make a Docker image that collects log files from arbitrary files within a container using [Fluentd](http://www.fluentd.org/) and sends them to GCP. The image is designed to be used as a sidecar container as part of a pod. It lives in the Google Container Registry under the name `gcr.io/google_containers/fluentd-sidecar-gcp`.

This shouldn't be necessary if your container writes its logs to stdout or stderr, since the Kubernetes cluster's default logging infrastructure will collect that automatically, but this is useful if your application logs to a specific file in its filesystem and can't easily be changed.

In order to make this work, you have to add a few things to your pod config (sketched below):

1. A second container, using the `gcr.io/google_containers/fluentd-sidecar-gcp:1.0` image, to send the logs to Google Cloud Logging.
2. A volume for the two containers to share. The emptyDir volume type is a good choice for this because we only want the volume to exist for the lifetime of the pod.
3. Mount paths for the volume in each container. In your primary container, this should be the path that the application's log files are written to. In the secondary container, this can be just about anything, so we put it under /mnt/log to keep it out of the way of the rest of the filesystem.
4. The `FILES_TO_COLLECT` environment variable in the sidecar container, telling it which files to collect logs from. These paths should always be in the mounted volume.

To try it out, make sure that your cluster was set up to log to Google Cloud Logging when it was created (i.e. you set `LOGGING_DESTINATION=gcp` or are running on Container Engine), then simply run

```
kubectl create -f logging-sidecar-pod.yaml
```

You should see the logs show up in the log viewer of the Google Developer Console shortly after creating the pod.

To clean up after yourself, simply run

```
kubectl delete -f logging-sidecar-pod.yaml
```
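The README ships a ready-made logging-sidecar-pod.yaml next to it; the sketch below is a hypothetical reconstruction of what such a pod spec might look like. Only the sidecar image and the FILES_TO_COLLECT variable come from the text above; the apiVersion, container names, and paths are illustrative assumptions.

```yaml
apiVersion: v1                     # assumption: API version appropriate to the era
kind: Pod
metadata:
  name: logging-sidecar-example    # assumed name; the shipped yaml may differ
spec:
  containers:
  - name: app                      # primary container that logs to a file
    image: example/app:latest      # assumption: any image writing /var/log/app/app.log
    volumeMounts:
    - name: log-storage
      mountPath: /var/log/app      # step 3: where the app writes its logs
  - name: sidecar-log-collector    # step 1: the fluentd sidecar
    image: gcr.io/google_containers/fluentd-sidecar-gcp:1.0
    env:
    - name: FILES_TO_COLLECT       # step 4: files to tail, inside the shared volume
      value: "/mnt/log/app.log"
    volumeMounts:
    - name: log-storage
      mountPath: /mnt/log          # step 3: kept out of the way under /mnt/log
  volumes:
  - name: log-storage              # step 2: emptyDir lives only as long as the pod
    emptyDir: {}
```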
contrib/logging/fluentd-sidecar-gcp/README.md
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.00016701292770449072, 0.00016452798445243388, 0.0001623710704734549, 0.00016419999883510172, 0.0000019091705780738266 ]
{ "id": 0, "code_window": [ "\t\t\treturn DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out)\n", "\t\t}\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\ts, err := describer.Describe(info.Namespace, info.Name)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/describe.go", "type": "add", "edit_start_line_idx": 88 }
// Copyright 2013 The go-github AUTHORS. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package github import ( "encoding/json" "fmt" "net/http" "reflect" "testing" ) func TestGistsService_ListComments(t *testing.T) { setup() defer teardown() mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") testFormValues(t, r, values{"page": "2"}) fmt.Fprint(w, `[{"id": 1}]`) }) opt := &ListOptions{Page: 2} comments, _, err := client.Gists.ListComments("1", opt) if err != nil { t.Errorf("Gists.Comments returned error: %v", err) } want := []GistComment{{ID: Int(1)}} if !reflect.DeepEqual(comments, want) { t.Errorf("Gists.ListComments returned %+v, want %+v", comments, want) } } func TestGistsService_ListComments_invalidID(t *testing.T) { _, _, err := client.Gists.ListComments("%", nil) testURLParseError(t, err) } func TestGistsService_GetComment(t *testing.T) { setup() defer teardown() mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") fmt.Fprint(w, `{"id": 1}`) }) comment, _, err := client.Gists.GetComment("1", 2) if err != nil { t.Errorf("Gists.GetComment returned error: %v", err) } want := &GistComment{ID: Int(1)} if !reflect.DeepEqual(comment, want) { t.Errorf("Gists.GetComment returned %+v, want %+v", comment, want) } } func TestGistsService_GetComment_invalidID(t *testing.T) { _, _, err := client.Gists.GetComment("%", 1) testURLParseError(t, err) } func TestGistsService_CreateComment(t *testing.T) { setup() defer teardown() input := &GistComment{ID: Int(1), Body: String("b")} mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) { v := new(GistComment) json.NewDecoder(r.Body).Decode(v) testMethod(t, r, "POST") if !reflect.DeepEqual(v, input) { t.Errorf("Request body = %+v, want %+v", v, input) } fmt.Fprint(w, `{"id":1}`) }) comment, _, err := client.Gists.CreateComment("1", input) if err != nil { t.Errorf("Gists.CreateComment returned error: %v", err) } want := &GistComment{ID: Int(1)} if !reflect.DeepEqual(comment, want) { t.Errorf("Gists.CreateComment returned %+v, want %+v", comment, want) } } func TestGistsService_CreateComment_invalidID(t *testing.T) { _, _, err := client.Gists.CreateComment("%", nil) testURLParseError(t, err) } func TestGistsService_EditComment(t *testing.T) { setup() defer teardown() input := &GistComment{ID: Int(1), Body: String("b")} mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) { v := new(GistComment) json.NewDecoder(r.Body).Decode(v) testMethod(t, r, "PATCH") if !reflect.DeepEqual(v, input) { t.Errorf("Request body = %+v, want %+v", v, input) } fmt.Fprint(w, `{"id":1}`) }) comment, _, err := client.Gists.EditComment("1", 2, input) if err != nil { t.Errorf("Gists.EditComment returned error: %v", err) } want := &GistComment{ID: Int(1)} if !reflect.DeepEqual(comment, want) { t.Errorf("Gists.EditComment returned %+v, want %+v", comment, want) } } func TestGistsService_EditComment_invalidID(t *testing.T) { _, _, err := client.Gists.EditComment("%", 1, nil) testURLParseError(t, err) } func TestGistsService_DeleteComment(t *testing.T) { setup() defer teardown() mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") }) _, err := client.Gists.DeleteComment("1", 2) if err != nil { t.Errorf("Gists.Delete returned error: %v", err) } } func 
TestGistsService_DeleteComment_invalidID(t *testing.T) { _, err := client.Gists.DeleteComment("%", 1) testURLParseError(t, err) }
Godeps/_workspace/src/github.com/google/go-github/github/gists_comments_test.go
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.0007498000049963593, 0.00021209819533396512, 0.00016264084842987359, 0.00017112698697019368, 0.00013967118866275996 ]
{ "id": 0, "code_window": [ "\t\t\treturn DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out)\n", "\t\t}\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\ts, err := describer.Describe(info.Namespace, info.Name)\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/describe.go", "type": "add", "edit_start_line_idx": 88 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package config import ( "sync" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/config" "github.com/golang/glog" ) // Operation is a type of operation of services or endpoints. type Operation int // These are the available operation types. const ( SET Operation = iota ADD REMOVE ) // ServiceUpdate describes an operation of services, sent on the channel. // You can add or remove single services by sending an array of size one and Op == ADD|REMOVE. // For setting the state of the system to a given state for this source configuration, set Services as desired and Op to SET, // which will reset the system state to that specified in this operation for this source channel. // To remove all services, set Services to empty array and Op to SET type ServiceUpdate struct { Services []api.Service Op Operation } // EndpointsUpdate describes an operation of endpoints, sent on the channel. // You can add or remove single endpoints by sending an array of size one and Op == ADD|REMOVE. // For setting the state of the system to a given state for this source configuration, set Endpoints as desired and Op to SET, // which will reset the system state to that specified in this operation for this source channel. // To remove all endpoints, set Endpoints to empty array and Op to SET type EndpointsUpdate struct { Endpoints []api.Endpoints Op Operation } // ServiceConfigHandler is an abstract interface of objects which receive update notifications for the set of services. type ServiceConfigHandler interface { // OnUpdate gets called when a configuration has been changed by one of the sources. // This is the union of all the configuration sources. OnUpdate(services []api.Service) } // EndpointsConfigHandler is an abstract interface of objects which receive update notifications for the set of endpoints. type EndpointsConfigHandler interface { // OnUpdate gets called when endpoints configuration is changed for a given // service on any of the configuration sources. An example is when a new // service comes up, or when containers come up or down for an existing service. OnUpdate(endpoints []api.Endpoints) } // EndpointsConfig tracks a set of endpoints configurations. // It accepts "set", "add" and "remove" operations of endpoints via channels, and invokes registered handlers on change. type EndpointsConfig struct { mux *config.Mux bcaster *config.Broadcaster store *endpointsStore } // NewEndpointsConfig creates a new EndpointsConfig. // It immediately runs the created EndpointsConfig. 
func NewEndpointsConfig() *EndpointsConfig { updates := make(chan struct{}) store := &endpointsStore{updates: updates, endpoints: make(map[string]map[types.NamespacedName]api.Endpoints)} mux := config.NewMux(store) bcaster := config.NewBroadcaster() go watchForUpdates(bcaster, store, updates) return &EndpointsConfig{mux, bcaster, store} } func (c *EndpointsConfig) RegisterHandler(handler EndpointsConfigHandler) { c.bcaster.Add(config.ListenerFunc(func(instance interface{}) { handler.OnUpdate(instance.([]api.Endpoints)) })) } func (c *EndpointsConfig) Channel(source string) chan EndpointsUpdate { ch := c.mux.Channel(source) endpointsCh := make(chan EndpointsUpdate) go func() { for update := range endpointsCh { ch <- update } close(ch) }() return endpointsCh } func (c *EndpointsConfig) Config() []api.Endpoints { return c.store.MergedState().([]api.Endpoints) } type endpointsStore struct { endpointLock sync.RWMutex endpoints map[string]map[types.NamespacedName]api.Endpoints updates chan<- struct{} } func (s *endpointsStore) Merge(source string, change interface{}) error { s.endpointLock.Lock() endpoints := s.endpoints[source] if endpoints == nil { endpoints = make(map[types.NamespacedName]api.Endpoints) } update := change.(EndpointsUpdate) switch update.Op { case ADD: glog.V(4).Infof("Adding new endpoint from source %s : %+v", source, update.Endpoints) for _, value := range update.Endpoints { name := types.NamespacedName{value.Namespace, value.Name} endpoints[name] = value } case REMOVE: glog.V(4).Infof("Removing an endpoint %+v", update) for _, value := range update.Endpoints { name := types.NamespacedName{value.Namespace, value.Name} delete(endpoints, name) } case SET: glog.V(4).Infof("Setting endpoints %+v", update) // Clear the old map entries by just creating a new map endpoints = make(map[types.NamespacedName]api.Endpoints) for _, value := range update.Endpoints { name := types.NamespacedName{value.Namespace, value.Name} endpoints[name] = value } default: glog.V(4).Infof("Received invalid update type: %v", update) } s.endpoints[source] = endpoints s.endpointLock.Unlock() if s.updates != nil { s.updates <- struct{}{} } return nil } func (s *endpointsStore) MergedState() interface{} { s.endpointLock.RLock() defer s.endpointLock.RUnlock() endpoints := make([]api.Endpoints, 0) for _, sourceEndpoints := range s.endpoints { for _, value := range sourceEndpoints { endpoints = append(endpoints, value) } } return endpoints } // ServiceConfig tracks a set of service configurations. // It accepts "set", "add" and "remove" operations of services via channels, and invokes registered handlers on change. type ServiceConfig struct { mux *config.Mux bcaster *config.Broadcaster store *serviceStore } // NewServiceConfig creates a new ServiceConfig. // It immediately runs the created ServiceConfig. 
func NewServiceConfig() *ServiceConfig { updates := make(chan struct{}) store := &serviceStore{updates: updates, services: make(map[string]map[types.NamespacedName]api.Service)} mux := config.NewMux(store) bcaster := config.NewBroadcaster() go watchForUpdates(bcaster, store, updates) return &ServiceConfig{mux, bcaster, store} } func (c *ServiceConfig) RegisterHandler(handler ServiceConfigHandler) { c.bcaster.Add(config.ListenerFunc(func(instance interface{}) { handler.OnUpdate(instance.([]api.Service)) })) } func (c *ServiceConfig) Channel(source string) chan ServiceUpdate { ch := c.mux.Channel(source) serviceCh := make(chan ServiceUpdate) go func() { for update := range serviceCh { ch <- update } close(ch) }() return serviceCh } func (c *ServiceConfig) Config() []api.Service { return c.store.MergedState().([]api.Service) } type serviceStore struct { serviceLock sync.RWMutex services map[string]map[types.NamespacedName]api.Service updates chan<- struct{} } func (s *serviceStore) Merge(source string, change interface{}) error { s.serviceLock.Lock() services := s.services[source] if services == nil { services = make(map[types.NamespacedName]api.Service) } update := change.(ServiceUpdate) switch update.Op { case ADD: glog.V(4).Infof("Adding new service from source %s : %+v", source, update.Services) for _, value := range update.Services { name := types.NamespacedName{value.Namespace, value.Name} services[name] = value } case REMOVE: glog.V(4).Infof("Removing a service %+v", update) for _, value := range update.Services { name := types.NamespacedName{value.Namespace, value.Name} delete(services, name) } case SET: glog.V(4).Infof("Setting services %+v", update) // Clear the old map entries by just creating a new map services = make(map[types.NamespacedName]api.Service) for _, value := range update.Services { name := types.NamespacedName{value.Namespace, value.Name} services[name] = value } default: glog.V(4).Infof("Received invalid update type: %v", update) } s.services[source] = services s.serviceLock.Unlock() if s.updates != nil { s.updates <- struct{}{} } return nil } func (s *serviceStore) MergedState() interface{} { s.serviceLock.RLock() defer s.serviceLock.RUnlock() services := make([]api.Service, 0) for _, sourceServices := range s.services { for _, value := range sourceServices { services = append(services, value) } } return services } // watchForUpdates invokes bcaster.Notify() with the latest version of an object // when changes occur. func watchForUpdates(bcaster *config.Broadcaster, accessor config.Accessor, updates <-chan struct{}) { for true { <-updates bcaster.Notify(accessor.MergedState()) } }
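The doc comments in the file above describe a mux/broadcaster pattern: each named source feeds updates through its own channel into Merge, and every registered handler receives the merged state across all sources. A minimal, hedged usage sketch of the service half of that API follows; the import paths come from the file itself and its location, while the handler, source name, and sample service are invented for illustration.

```go
package main

import (
	"fmt"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config"
)

// printHandler satisfies ServiceConfigHandler; OnUpdate receives the merged
// state across every source whenever any source changes.
type printHandler struct{}

func (printHandler) OnUpdate(services []api.Service) {
	fmt.Printf("now tracking %d services\n", len(services))
}

func main() {
	cfg := config.NewServiceConfig()
	cfg.RegisterHandler(printHandler{})

	// Each named source gets its own channel. SET replaces that source's
	// whole state; ADD and REMOVE adjust individual entries.
	ch := cfg.Channel("example-source")
	ch <- config.ServiceUpdate{
		Op:       config.SET,
		Services: []api.Service{{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "svc-a"}}},
	}

	// Notification is asynchronous (watchForUpdates runs in a goroutine), so
	// give it a moment before the program exits.
	time.Sleep(100 * time.Millisecond)
}
```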
pkg/proxy/config/config.go
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.9820951819419861, 0.12513834238052368, 0.00016461010091006756, 0.00017397600458934903, 0.30464228987693787 ]
{ "id": 1, "code_window": [ "\n", "func RunResize(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {\n", "\tcount := cmdutil.GetFlagInt(cmd, \"replicas\")\n", "\tif len(args) != 2 || count < 0 {\n", "\t\treturn cmdutil.UsageError(cmd, \"--replicas=COUNT RESOURCE ID\")\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif count < 0 {\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "replace", "edit_start_line_idx": 67 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "fmt" "io" "time" "github.com/spf13/cobra" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource" ) const ( resize_long = `Set a new size for a Replication Controller. Resize also allows users to specify one or more preconditions for the resize action. If --current-replicas or --resource-version is specified, it is validated before the resize is attempted, and it is guaranteed that the precondition holds true when the resize is sent to the server.` resize_example = `// Resize replication controller named 'foo' to 3. $ kubectl resize --replicas=3 replicationcontrollers foo // If the replication controller named foo's current size is 2, resize foo to 3. $ kubectl resize --current-replicas=2 --replicas=3 replicationcontrollers foo` retryFrequency = 100 * time.Millisecond retryTimeout = 10 * time.Second ) func NewCmdResize(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "resize [--resource-version=version] [--current-replicas=count] --replicas=COUNT RESOURCE ID", Short: "Set a new size for a Replication Controller.", Long: resize_long, Example: resize_example, Run: func(cmd *cobra.Command, args []string) { err := RunResize(f, out, cmd, args) cmdutil.CheckErr(err) }, } cmd.Flags().String("resource-version", "", "Precondition for resource version. Requires that the current resource version match this value in order to resize.") cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the replication controller match this value in order to resize.") cmd.Flags().Int("replicas", -1, "The new desired number of replicas. Required.") cmd.MarkFlagRequired("replicas") return cmd } func RunResize(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error { count := cmdutil.GetFlagInt(cmd, "replicas") if len(args) != 2 || count < 0 { return cmdutil.UsageError(cmd, "--replicas=COUNT RESOURCE ID") } cmdNamespace, err := f.DefaultNamespace() if err != nil { return err } mapper, typer := f.Object() r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). ResourceTypeOrNameArgs(false, args...). Flatten(). 
Do() err = r.Err() if err != nil { return err } mapping, err := r.ResourceMapping() if err != nil { return err } infos, err := r.Infos() if err != nil { return err } info := infos[0] resizer, err := f.Resizer(mapping) if err != nil { return err } resourceVersion := cmdutil.GetFlagString(cmd, "resource-version") currentSize := cmdutil.GetFlagInt(cmd, "current-replicas") precondition := &kubectl.ResizePrecondition{currentSize, resourceVersion} retry := &kubectl.RetryParams{Interval: retryFrequency, Timeout: retryTimeout} if err := resizer.Resize(info.Namespace, info.Name, uint(count), precondition, retry, nil); err != nil { return err } fmt.Fprint(out, "resized\n") return nil }
pkg/kubectl/cmd/resize.go
1
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.9992770552635193, 0.3245827555656433, 0.00016635525389574468, 0.003970164805650711, 0.4564109146595001 ]
{ "id": 1, "code_window": [ "\n", "func RunResize(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {\n", "\tcount := cmdutil.GetFlagInt(cmd, \"replicas\")\n", "\tif len(args) != 2 || count < 0 {\n", "\t\treturn cmdutil.UsageError(cmd, \"--replicas=COUNT RESOURCE ID\")\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif count < 0 {\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "replace", "edit_start_line_idx": 67 }
{ "name": "angular-material-progressLinear", "version": "0.7.0-rc3", "dependencies": { "angular-material-core": "0.7.0-rc3" } }
third_party/ui/bower_components/angular-material/modules/js/progressLinear/bower.json
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.00017289057723246515, 0.00017289057723246515, 0.00017289057723246515, 0.00017289057723246515, 0 ]
{ "id": 1, "code_window": [ "\n", "func RunResize(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {\n", "\tcount := cmdutil.GetFlagInt(cmd, \"replicas\")\n", "\tif len(args) != 2 || count < 0 {\n", "\t\treturn cmdutil.UsageError(cmd, \"--replicas=COUNT RESOURCE ID\")\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif count < 0 {\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "replace", "edit_start_line_idx": 67 }
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rest

import (
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
)

func makeValidService() api.Service {
	return api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:            "valid",
			Namespace:       "default",
			Labels:          map[string]string{},
			Annotations:     map[string]string{},
			ResourceVersion: "1",
		},
		Spec: api.ServiceSpec{
			Selector:        map[string]string{"key": "val"},
			SessionAffinity: "None",
			Ports:           []api.ServicePort{{Name: "p", Protocol: "TCP", Port: 8675}},
		},
	}
}

// TODO: This should be done on types that are not part of our API
func TestBeforeUpdate(t *testing.T) {
	testCases := []struct {
		name      string
		tweakSvc  func(oldSvc, newSvc *api.Service) // given basic valid services, each test case can customize them
		expectErr bool
	}{
		{
			name: "no change",
			tweakSvc: func(oldSvc, newSvc *api.Service) {
				// nothing
			},
			expectErr: false,
		},
		{
			name: "change port",
			tweakSvc: func(oldSvc, newSvc *api.Service) {
				newSvc.Spec.Ports[0].Port++
			},
			expectErr: false,
		},
		{
			name: "bad namespace",
			tweakSvc: func(oldSvc, newSvc *api.Service) {
				newSvc.Namespace = "#$%%invalid"
			},
			expectErr: true,
		},
		{
			name: "change name",
			tweakSvc: func(oldSvc, newSvc *api.Service) {
				newSvc.Name += "2"
			},
			expectErr: true,
		},
		{
			name: "change portal IP",
			tweakSvc: func(oldSvc, newSvc *api.Service) {
				oldSvc.Spec.PortalIP = "1.2.3.4"
				newSvc.Spec.PortalIP = "4.3.2.1"
			},
			expectErr: true,
		},
		{
			name: "change selector",
			tweakSvc: func(oldSvc, newSvc *api.Service) {
				newSvc.Spec.Selector = map[string]string{"newkey": "newvalue"}
			},
			expectErr: false,
		},
	}

	for _, tc := range testCases {
		oldSvc := makeValidService()
		newSvc := makeValidService()
		tc.tweakSvc(&oldSvc, &newSvc)
		ctx := api.NewDefaultContext()
		err := BeforeUpdate(Services, ctx, runtime.Object(&oldSvc), runtime.Object(&newSvc))
		if tc.expectErr && err == nil {
			t.Errorf("unexpected non-error for %q", tc.name)
		}
		if !tc.expectErr && err != nil {
			t.Errorf("unexpected error for %q: %v", tc.name, err)
		}
	}
}
pkg/api/rest/update_test.go
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.00017769666737876832, 0.00017262787150684744, 0.0001672457583481446, 0.00017214918625541031, 0.000002970939704027842 ]
{ "id": 1, "code_window": [ "\n", "func RunResize(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {\n", "\tcount := cmdutil.GetFlagInt(cmd, \"replicas\")\n", "\tif len(args) != 2 || count < 0 {\n", "\t\treturn cmdutil.UsageError(cmd, \"--replicas=COUNT RESOURCE ID\")\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif count < 0 {\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "replace", "edit_start_line_idx": 67 }
/*! * Angular Material Design * https://github.com/angular/material * @license MIT * v0.7.0-rc3 */md-sidenav.md-THEME_NAME-theme{background-color:'{{background-hue-3}}'}
third_party/ui/bower_components/angular-material/modules/js/sidenav/sidenav-default-theme.min.css
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.0001751571981003508, 0.0001751571981003508, 0.0001751571981003508, 0.0001751571981003508, 0 ]
{ "id": 2, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\tresizer, err := f.Resizer(mapping)\n", "\tif err != nil {\n", "\t\treturn err\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "add", "edit_start_line_idx": 96 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "fmt" "io" "strings" "github.com/spf13/cobra" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" ) func NewCmdDescribe(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "describe (RESOURCE NAME | RESOURCE/NAME)", Short: "Show details of a specific resource", Long: `Show details of a specific resource. This command joins many API calls together to form a detailed description of a given resource.`, Example: `// Describe a node $ kubectl describe nodes kubernetes-minion-emt8.c.myproject.internal // Describe a pod $ kubectl describe pods/nginx`, Run: func(cmd *cobra.Command, args []string) { err := RunDescribe(f, out, cmd, args) cmdutil.CheckErr(err) }, ValidArgs: kubectl.DescribableResources(), } return cmd } func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error { cmdNamespace, err := f.DefaultNamespace() if err != nil { return err } mapper, typer := f.Object() r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). ResourceTypeOrNameArgs(false, args...). Flatten(). Do() err = r.Err() if err != nil { return err } mapping, err := r.ResourceMapping() if err != nil { return err } describer, err := f.Describer(mapping) if err != nil { return err } infos, err := r.Infos() if err != nil { if errors.IsNotFound(err) && len(args) == 2 { return DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out) } return err } info := infos[0] s, err := describer.Describe(info.Namespace, info.Name) if err != nil { return err } fmt.Fprintf(out, "%s\n", s) return nil } func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper, describer kubectl.Describer, f *cmdutil.Factory, namespace, rsrc, prefix string, out io.Writer) error { r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). NamespaceParam(namespace).DefaultNamespace(). ResourceTypeOrNameArgs(true, rsrc). SingleResourceType(). Flatten(). Do() infos, err := r.Infos() if err != nil { return err } for ix := range infos { info := infos[ix] if strings.HasPrefix(info.Name, prefix) { s, err := describer.Describe(info.Namespace, info.Name) if err != nil { return err } fmt.Fprintf(out, "%s\n", s) } } return nil }
pkg/kubectl/cmd/describe.go
1
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.9529205560684204, 0.1929345279932022, 0.00016135476471390575, 0.00017776784079615027, 0.3572790324687958 ]
{ "id": 2, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\tresizer, err := f.Resizer(mapping)\n", "\tif err != nil {\n", "\t\treturn err\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "add", "edit_start_line_idx": 96 }
package etcd

import (
	"testing"
)

func TestCompareAndDelete(t *testing.T) {
	c := NewClient(nil)
	defer func() {
		c.Delete("foo", true)
	}()

	c.Set("foo", "bar", 5)

	// This should succeed with a correct prevValue
	resp, err := c.CompareAndDelete("foo", "bar", 0)
	if err != nil {
		t.Fatal(err)
	}
	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
		t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
	}

	resp, _ = c.Set("foo", "bar", 5)
	// This should fail because it gives an incorrect prevValue
	_, err = c.CompareAndDelete("foo", "xxx", 0)
	if err == nil {
		t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp)
	}

	// This should succeed because it gives a correct prevIndex
	resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
	if err != nil {
		t.Fatal(err)
	}
	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
		t.Fatalf("CompareAndDelete 3 prevNode failed: %#v", resp)
	}

	c.Set("foo", "bar", 5)
	// This should fail because it gives an incorrect prevIndex
	resp, err = c.CompareAndDelete("foo", "", 29817514)
	if err == nil {
		t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp)
	}
}
Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.0002960767596960068, 0.00021384764113463461, 0.00016441245679743588, 0.0001732432283461094, 0.00005477627564687282 ]
{ "id": 2, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\tresizer, err := f.Resizer(mapping)\n", "\tif err != nil {\n", "\t\treturn err\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "add", "edit_start_line_idx": 96 }
package types import ( "crypto/sha512" "encoding/json" "errors" "fmt" "reflect" "strings" ) const ( maxHashSize = (sha512.Size / 2) + len("sha512-") ) // Hash encodes a hash specified in a string of the form: // "<type>-<value>" // for example // "sha512-06c733b1838136838e6d2d3e8fa5aea4c7905e92[...]" // Valid types are currently: // * sha512 type Hash struct { typ string Val string } func NewHash(s string) (*Hash, error) { elems := strings.Split(s, "-") if len(elems) != 2 { return nil, errors.New("badly formatted hash string") } nh := Hash{ typ: elems[0], Val: elems[1], } if err := nh.assertValid(); err != nil { return nil, err } return &nh, nil } func (h Hash) String() string { return fmt.Sprintf("%s-%s", h.typ, h.Val) } func (h *Hash) Set(s string) error { nh, err := NewHash(s) if err == nil { *h = *nh } return err } func (h Hash) Empty() bool { return reflect.DeepEqual(h, Hash{}) } func (h Hash) assertValid() error { switch h.typ { case "sha512": case "": return fmt.Errorf("unexpected empty hash type") default: return fmt.Errorf("unrecognized hash type: %v", h.typ) } if h.Val == "" { return fmt.Errorf("unexpected empty hash value") } return nil } func (h *Hash) UnmarshalJSON(data []byte) error { var s string if err := json.Unmarshal(data, &s); err != nil { return err } nh, err := NewHash(s) if err != nil { return err } *h = *nh return nil } func (h Hash) MarshalJSON() ([]byte, error) { if err := h.assertValid(); err != nil { return nil, err } return json.Marshal(h.String()) } func NewHashSHA512(b []byte) *Hash { h := sha512.New() h.Write(b) nh, _ := NewHash(fmt.Sprintf("sha512-%x", h.Sum(nil))) return nh } func ShortHash(hash string) string { if len(hash) > maxHashSize { return hash[:maxHashSize] } return hash }
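The doc comment above pins down the "<type>-<value>" string form and the file defines the helpers NewHash, NewHashSHA512, and ShortHash. A small, hedged usage sketch follows; the import path is inferred from the file's vendored location, and the digest and input bytes are made up.

```go
package main

import (
	"fmt"

	"github.com/appc/spec/schema/types"
)

func main() {
	// Parse the "<type>-<value>" form described in the doc comment.
	h, err := types.NewHash("sha512-06c733b1838136838e6d2d3e8fa5aea4c7905e92")
	if err != nil {
		panic(err)
	}
	fmt.Println(h.String()) // round-trips to the same "sha512-..." string

	// Or hash raw bytes directly and shorten for display; ShortHash keeps at
	// most maxHashSize (39) characters: "sha512-" plus half the hex digest.
	sum := types.NewHashSHA512([]byte("example bytes"))
	fmt.Println(types.ShortHash(sum.String()))
}
```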
Godeps/_workspace/src/github.com/appc/spec/schema/types/hash.go
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.00020400698122102767, 0.00017276529979426414, 0.00016615963249932975, 0.00016851835243869573, 0.00001063041236193385 ]
{ "id": 2, "code_window": [ "\tif err != nil {\n", "\t\treturn err\n", "\t}\n", "\tinfo := infos[0]\n", "\n", "\tresizer, err := f.Resizer(mapping)\n", "\tif err != nil {\n", "\t\treturn err\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(infos) > 1 {\n", "\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n", "\t}\n" ], "file_path": "pkg/kubectl/cmd/resize.go", "type": "add", "edit_start_line_idx": 96 }
package extensions
Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go
0
https://github.com/kubernetes/kubernetes/commit/6424a2bc632fc5cee7a4b477201f74cd36fd824b
[ 0.00016986737318802625, 0.00016986737318802625, 0.00016986737318802625, 0.00016986737318802625, 0 ]
{ "id": 0, "code_window": [ " \"//pkg/parser/model\",\n", " \"//pkg/session\",\n", " \"//pkg/util\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/memory\",\n", " \"//pkg/util/metricsutil\",\n", " \"@com_github_gogo_protobuf//proto\",\n", " \"@com_github_pingcap_errors//:errors\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/gctuner\",\n" ], "file_path": "br/cmd/br/BUILD.bazel", "type": "add", "edit_start_line_idx": 37 }
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gctuner

import (
	"runtime"
	"runtime/debug"
	"testing"
	"time"

	"github.com/pingcap/failpoint"
	"github.com/pingcap/tidb/pkg/util/memory"
	"github.com/stretchr/testify/require"
)

type mockAllocator struct {
	m [][]byte
}

func (a *mockAllocator) alloc(bytes int) (handle int) {
	sli := make([]byte, bytes)
	a.m = append(a.m, sli)
	return len(a.m) - 1
}

func (a *mockAllocator) free(handle int) {
	a.m[handle] = nil
}

func (a *mockAllocator) freeAll() {
	a.m = nil
	runtime.GC()
}

func TestGlobalMemoryTuner(t *testing.T) {
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/util/gctuner/testMemoryLimitTuner", "return(true)"))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/util/gctuner/testMemoryLimitTuner"))
	}()
	// Close GOGCTuner
	gogcTuner := EnableGOGCTuner.Load()
	EnableGOGCTuner.Store(false)
	defer EnableGOGCTuner.Store(gogcTuner)

	memory.ServerMemoryLimit.Store(1 << 30)   // 1GB
	GlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB
	GlobalMemoryLimitTuner.UpdateMemoryLimit()
	require.True(t, GlobalMemoryLimitTuner.isValidValueSet.Load())
	defer func() {
		// If test.count > 1, wait until tuning has finished.
		require.Eventually(t, func() bool {
			//nolint: all_revive
			return GlobalMemoryLimitTuner.isValidValueSet.Load()
		}, 5*time.Second, 100*time.Millisecond)
		require.Eventually(t, func() bool {
			//nolint: all_revive
			return !GlobalMemoryLimitTuner.adjustPercentageInProgress.Load()
		}, 5*time.Second, 100*time.Millisecond)
		require.Eventually(t, func() bool {
			//nolint: all_revive
			return !GlobalMemoryLimitTuner.nextGCTriggeredByMemoryLimit.Load()
		}, 5*time.Second, 100*time.Millisecond)
	}()

	allocator := &mockAllocator{}
	defer allocator.freeAll()
	r := &runtime.MemStats{}
	getNowGCNum := func() uint32 {
		runtime.ReadMemStats(r)
		return r.NumGC
	}
	checkNextGCEqualMemoryLimit := func() {
		runtime.ReadMemStats(r)
		nextGC := r.NextGC
		memoryLimit := GlobalMemoryLimitTuner.calcMemoryLimit(GlobalMemoryLimitTuner.GetPercentage())
		// Refer to golang source code, nextGC = memoryLimit - nonHeapMemory - overageMemory - headroom
		require.True(t, nextGC < uint64(memoryLimit))
	}

	memory600mb := allocator.alloc(600 << 20)
	gcNum := getNowGCNum()
	memory210mb := allocator.alloc(210 << 20)
	require.Eventually(t, func() bool {
		return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNum < getNowGCNum()
	}, 5*time.Second, 100*time.Millisecond)
	// Test waiting for reset
	require.Eventually(t, func() bool {
		return GlobalMemoryLimitTuner.calcMemoryLimit(fallbackPercentage) == debug.SetMemoryLimit(-1)
	}, 5*time.Second, 100*time.Millisecond)
	gcNum = getNowGCNum()
	memory100mb := allocator.alloc(100 << 20)
	require.Eventually(t, func() bool {
		return gcNum == getNowGCNum()
	}, 5*time.Second, 100*time.Millisecond) // No GC

	allocator.free(memory210mb)
	allocator.free(memory100mb)
	runtime.GC()
	// Trigger GC in 80% again
	require.Eventually(t, func() bool {
		return GlobalMemoryLimitTuner.calcMemoryLimit(GlobalMemoryLimitTuner.GetPercentage()) == debug.SetMemoryLimit(-1)
	}, 5*time.Second, 100*time.Millisecond)
	time.Sleep(100 * time.Millisecond)
	gcNum = getNowGCNum()
	checkNextGCEqualMemoryLimit()
	memory210mb = allocator.alloc(210 << 20)
	require.Eventually(t, func() bool {
		return gcNum < getNowGCNum()
	}, 5*time.Second, 100*time.Millisecond)
	allocator.free(memory210mb)
	allocator.free(memory600mb)
}

func TestIssue48741(t *testing.T) {
	// Close GOGCTuner
	gogcTuner := EnableGOGCTuner.Load()
	EnableGOGCTuner.Store(false)
	defer EnableGOGCTuner.Store(gogcTuner)

	getMemoryLimitGCTotal := func() int64 {
		return memory.MemoryLimitGCTotal.Load()
	}

	waitingTunningFinishFn := func() {
		for GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() {
			time.Sleep(10 * time.Millisecond)
		}
	}

	allocator := &mockAllocator{}
	defer allocator.freeAll()

	checkIfMemoryLimitIsModified := func() {
		// Try to trigger GC by 1GB * 80% = 800MB (tidb_server_memory_limit * tidb_server_memory_limit_gc_trigger)
		gcNum := getMemoryLimitGCTotal()
		memory810mb := allocator.alloc(810 << 20)
		require.Eventually(t,
			// Wait for the GC triggered by memory810mb
			func() bool {
				return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNum < getMemoryLimitGCTotal()
			},
			500*time.Millisecond, 100*time.Millisecond)

		// Update memoryLimit, and sleep 500ms to let t.UpdateMemoryLimit() be called.
		memory.ServerMemoryLimit.Store(1500 << 20) // 1.5 GB
		time.Sleep(500 * time.Millisecond)
		// UpdateMemoryLimit succeeds during tuning.
		require.True(t, GlobalMemoryLimitTuner.adjustPercentageInProgress.Load())
		require.Equal(t, debug.SetMemoryLimit(-1), int64(1500<<20*80/100))

		waitingTunningFinishFn()
		// After the GC triggered by memory810mb.
		gcNumAfterMemory810mb := getMemoryLimitGCTotal()

		memory200mb := allocator.alloc(200 << 20)
		time.Sleep(2 * time.Second)
		// The heapInUse is less than 1.5GB * 80% = 1.2GB, so the gc will not be triggered.
		require.Equal(t, gcNumAfterMemory810mb, getMemoryLimitGCTotal())

		memory300mb := allocator.alloc(300 << 20)
		require.Eventually(t,
			// Wait for the GC triggered by memory300mb
			func() bool {
				return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNumAfterMemory810mb < getMemoryLimitGCTotal()
			},
			5*time.Second, 100*time.Millisecond)

		// Sleep 500ms to let t.UpdateMemoryLimit() be called.
		time.Sleep(500 * time.Millisecond)
		// The memory limit will be 1.5GB * 110% during tuning.
		require.Equal(t, debug.SetMemoryLimit(-1), int64(1500<<20*110/100))
		require.True(t, GlobalMemoryLimitTuner.adjustPercentageInProgress.Load())

		allocator.free(memory810mb)
		allocator.free(memory200mb)
		allocator.free(memory300mb)
	}

	checkIfMemoryLimitNotModified := func() {
		// Try to trigger GC by 1GB * 80% = 800MB (tidb_server_memory_limit * tidb_server_memory_limit_gc_trigger)
		gcNum := getMemoryLimitGCTotal()
		memory810mb := allocator.alloc(810 << 20)
		require.Eventually(t,
			// Wait for the GC triggered by memory810mb
			func() bool {
				return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNum < getMemoryLimitGCTotal()
			},
			500*time.Millisecond, 100*time.Millisecond)
		// During the process of adjusting the percentage, the memory limit will be set to 1GB * 110% = 1.1GB.
		require.Equal(t, debug.SetMemoryLimit(-1), int64(1<<30*110/100))

		gcNumAfterMemory810mb := getMemoryLimitGCTotal()
		// After the GC triggered by memory810mb.
		waitingTunningFinishFn()

		require.Eventually(t,
			// The GC will be triggered immediately after memoryLimit is set back to 1GB * 80% = 800MB.
func() bool { return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNumAfterMemory810mb < getMemoryLimitGCTotal() }, 2*time.Second, 100*time.Millisecond) allocator.free(memory810mb) } require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/util/gctuner/mockUpdateGlobalVarDuringAdjustPercentage", "return(true)")) defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/util/gctuner/mockUpdateGlobalVarDuringAdjustPercentage")) }() memory.ServerMemoryLimit.Store(1 << 30) // 1GB GlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB GlobalMemoryLimitTuner.UpdateMemoryLimit() require.Equal(t, debug.SetMemoryLimit(-1), int64(1<<30*80/100)) checkIfMemoryLimitNotModified() waitingTunningFinishFn() checkIfMemoryLimitIsModified() }
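The comments in the test above carry the trigger arithmetic (ServerMemoryLimit times the GC-trigger percentage, with a 110% fallback while adjusting). The snippet below just computes the two limits the test asserts, written exactly as the test writes them; it is plain arithmetic, not part of the gctuner API.

```go
package main

import "fmt"

func main() {
	serverLimit := int64(1 << 30) // 1GB, as stored into memory.ServerMemoryLimit
	fmt.Println(serverLimit * 80 / 100)  // 858993459: the tuned limit at the 80% trigger
	fmt.Println(serverLimit * 110 / 100) // 1181116006: the fallback limit while adjusting
}
```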
pkg/util/gctuner/memory_limit_tuner_test.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0012541345786303282, 0.0002269774122396484, 0.00016497702745255083, 0.00017054466297850013, 0.00021830375771969557 ]
{ "id": 0, "code_window": [ " \"//pkg/parser/model\",\n", " \"//pkg/session\",\n", " \"//pkg/util\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/memory\",\n", " \"//pkg/util/metricsutil\",\n", " \"@com_github_gogo_protobuf//proto\",\n", " \"@com_github_pingcap_errors//:errors\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/gctuner\",\n" ], "file_path": "br/cmd/br/BUILD.bazel", "type": "add", "edit_start_line_idx": 37 }
with recursive cte1 as (select 1 c1 union all select c1 + 1 c1 from cte1 where c1 < 5) select * from cte1; c1 1 2 3 4 5 with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < 10) select * from cte1 order by c1; c1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < 3 union all select c1 + 2 c1 from cte1 where c1 < 5) select * from cte1 order by c1; c1 1 2 2 3 3 3 4 4 5 5 5 6 6 drop table if exists t1; create table t1(a int); insert into t1 values(1); insert into t1 values(2); SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0) SELECT * FROM qn WHERE b=a); a 1 SELECT * FROM t1 dt WHERE EXISTS( WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0 or b = 1) SELECT * FROM qn WHERE b=a ); a 1 2 with recursive c(p) as (select 1), cte(a, b) as (select 1, 1 union select a+1, 1 from cte, c where a < 5) select * from cte order by 1, 2; a b 1 1 2 1 3 1 4 1 5 1 with recursive cte1(c1) as (select 1 union select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1; c1 1 2 3 with recursive cte1(c1) as (select 1 union all select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1; c1 1 2 3 drop table if exists t1; create table t1(c1 int, c2 int); insert into t1 values(1, 1), (1, 2), (2, 2); with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from t1) select * from cte1 order by c1; c1 1 2 3 drop table if exists t1; create table t1(c1 int); insert into t1 values(1), (1), (1), (2), (2), (2); with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 4) select * from cte1 order by c1; c1 1 2 3 4 set @@cte_max_recursion_depth = -1; with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 100) select * from cte1; Error 3636 (HY000): Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; c1 1 2 with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; c1 1 2 set @@cte_max_recursion_depth = 0; with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1; Error 3636 (HY000): Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1; Error 3636 (HY000): Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; c1 1 2 with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; c1 1 2 set @@cte_max_recursion_depth = 1; with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1; c1 1 with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1; c1 1 with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 2) select * from cte1; Error 3636 (HY000): Recursive query aborted after 2 iterations. 
Try increasing @@cte_max_recursion_depth to a larger value with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; c1 1 2 with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; c1 1 2 set @@cte_max_recursion_depth = default; with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 0) select * from cte1; c1 1 2 3 4 5 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 1) select * from cte1; c1 2 3 4 5 6 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 10) select * from cte1; c1 11 12 13 14 15 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 995) select * from cte1; c1 996 997 998 999 1000 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 6) select * from cte1; c1 7 8 9 10 11 set cte_max_recursion_depth=2; with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1; c1 2 with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1; Error 3636 (HY000): Recursive query aborted after 3 iterations. Try increasing @@cte_max_recursion_depth to a larger value set cte_max_recursion_depth=1000; with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 996) select * from cte1; c1 996 997 998 999 1000 with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 997) select * from cte1; Error 3636 (HY000): Recursive query aborted after 1001 iterations. Try increasing @@cte_max_recursion_depth to a larger value with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1; c1 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 10) select * from cte1; c1 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 order by dt1.c1, dt2.c1; c1 c1 2 2 2 3 3 2 3 3 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1 order by dt1.c1, dt1.c1; c1 c1 2 2 3 3 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where c1 in (select 2); c1 2 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 dt where c1 in (select c1 from cte1 where 1 = dt.c1 - 1); c1 2 with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where cte1.c1 = (select dt1.c1 from cte1 dt1 where dt1.c1 = cte1.c1); c1 2 3 drop table if exists t1; create table t1(c1 int); insert into t1 values(1), (2), (3); with recursive cte1(c1) as (select c1 from t1 limit 1 offset 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1; Error 1221 (HY000): Incorrect usage of UNION and LIMIT with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 1 offset 1) select * from cte1; c1 2 with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 0 offset 1) select * from cte1; c1 with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 2 offset 0) select * from cte1; c1 1 2 drop table if exists t1; create table t1(c1 int); insert into t1 values(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12), (13), (14), (15), (16), (17), (18), (19), (20), (21), (22), (23), (24), (25), (26), (27), (28), (29), (30), (31), (32), 
(33), (34), (35), (36), (37), (38), (39), (40), (41), (42), (43), (44), (45), (46), (47), (48), (49), (50), (51), (52), (53), (54), (55), (56), (57), (58), (59), (60), (61), (62), (63), (64), (65), (66), (67), (68), (69), (70), (71), (72), (73), (74), (75), (76), (77), (78), (79), (80), (81), (82), (83), (84), (85), (86), (87), (88), (89), (90), (91), (92), (93), (94), (95), (96), (97), (98), (99), (100), (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), (111), (112), (113), (114), (115), (116), (117), (118), (119), (120), (121), (122), (123), (124), (125), (126), (127), (128), (129), (130), (131), (132), (133), (134), (135), (136), (137), (138), (139), (140), (141), (142), (143), (144), (145), (146), (147), (148), (149), (150), (151), (152), (153), (154), (155), (156), (157), (158), (159), (160), (161), (162), (163), (164), (165), (166), (167), (168), (169), (170), (171), (172), (173), (174), (175), (176), (177), (178), (179), (180), (181), (182), (183), (184), (185), (186), (187), (188), (189), (190), (191), (192), (193), (194), (195), (196), (197), (198), (199), (200), (201), (202), (203), (204), (205), (206), (207), (208), (209), (210), (211), (212), (213), (214), (215), (216), (217), (218), (219), (220), (221), (222), (223), (224), (225), (226), (227), (228), (229), (230), (231), (232), (233), (234), (235), (236), (237), (238), (239), (240), (241), (242), (243), (244), (245), (246), (247), (248), (249), (250), (251), (252), (253), (254), (255), (256), (257), (258), (259), (260), (261), (262), (263), (264), (265), (266), (267), (268), (269), (270), (271), (272), (273), (274), (275), (276), (277), (278), (279), (280), (281), (282), (283), (284), (285), (286), (287), (288), (289), (290), (291), (292), (293), (294), (295), (296), (297), (298), (299);
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1) select * from cte1;
c1
0
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1 offset 100) select * from cte1;
c1
100
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 5 offset 100) select * from cte1;
c1
100
101
102
103
104
with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1;
c1
1
2
with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1;
c1 c1
1 1
2 2
with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1;
c1
with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1;
c1 c1
with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1;
c1
100
101
102
with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1;
c1 c1
100 100
101 101
102 102
set cte_max_recursion_depth = 0;
drop table if exists t1;
create table t1(c1 int);
insert into t1 values(0);
with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 0) select * from cte1;
c1
with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 1) select * from cte1;
Error 3636 (HY000): Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value
set cte_max_recursion_depth = 1000;
drop table if exists t1;
create table t1(c1 int);
insert into t1 values(1), (2), (3);
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 2) select * from cte1;
c1
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;
c1
3
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 2) select * from cte1;
c1
3
4
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 2) select * from cte1;
c1
3
4
5
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 2) select * from cte1;
c1
3
4
5
6
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 3) select * from cte1;
c1
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;
c1
4
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 3) select * from cte1;
c1
4
5
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 3) select * from cte1;
c1
4
5
6
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 3) select * from cte1;
c1
4
5
6
7
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 4) select * from cte1;
c1
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 4) select * from cte1;
c1
5
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 4) select * from cte1;
c1
5
6
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 4) select * from cte1;
c1
5
6
7
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;
c1
5
6
7
8
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 2) select * from cte1;
c1
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;
c1
3
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 2) select * from cte1;
c1
3
2
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 2) select * from cte1;
c1
3
2
3
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 2) select * from cte1;
c1
3
2
3
4
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 3) select * from cte1;
c1
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;
c1
2
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 3) select * from cte1;
c1
2
3
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 3) select * from cte1;
c1
2
3
4
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 3) select * from cte1;
c1
2
3
4
3
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 4) select * from cte1;
c1
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 4) select * from cte1;
c1
3
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 4) select * from cte1;
c1
3
4
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 4) select * from cte1;
c1
3
4
3
with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;
c1
3
4
3
4
set cte_max_recursion_depth = default;
drop table if exists executor__cte.t;
drop view if exists executor__cte.v;
create database if not exists executor__cte1;
create table executor__cte.t (a int);
create table executor__cte1.t (a int);
insert into executor__cte.t values (1);
insert into executor__cte1.t values (2);
create definer='root'@'localhost' view executor__cte.v as with tt as (select * from t) select * from tt;
select * from executor__cte.v;
a
1
use executor__cte1;
select * from executor__cte.v;
a
1
use executor__cte;
drop database executor__cte1;
set tidb_max_chunk_size=32;
drop table if exists t1;
create table t1(c1 int);
insert into t1 values(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12), (13), (14), (15), (16), (17), (18), (19), (20), (21), (22), (23), (24), (25), (26), (27), (28), (29), (30), (31), (32), (33), (34), (35), (36), (37), (38), (39), (40), (41), (42), (43), (44), (45), (46), (47), (48), (49), (50), (51), (52), (53), (54), (55), (56), (57), (58), (59), (60), (61), (62), (63), (64), (65), (66), (67), (68), (69), (70), (71), (72), (73), (74), (75), (76), (77), (78), (79), (80), (81), (82), (83), (84), (85), (86), (87), (88), (89), (90), (91), (92), (93), (94), (95), (96), (97), (98), (99), (100), (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), (111), (112), (113), (114), (115), (116), (117), (118), (119), (120), (121), (122), (123), (124), (125), (126), (127), (128), (129), (130), (131), (132), (133), (134), (135), (136), (137), (138), (139), (140), (141), (142), (143), (144), (145), (146), (147), (148), (149), (150), (151), (152), (153), (154), (155), (156), (157), (158), (159), (160), (161), (162), (163), (164), (165), (166), (167), (168), (169), (170), (171), (172), (173), (174), (175), (176), (177), (178), (179), (180), (181), (182), (183), (184), (185), (186), (187), (188), (189), (190), (191), (192), (193), (194), (195), (196), (197), (198), (199), (200), (201), (202), (203), (204), (205), (206), (207), (208), (209), (210), (211), (212), (213), (214), (215), (216), (217), (218), (219), (220), (221), (222), (223), (224), (225), (226), (227), (228), (229), (230), (231), (232), (233), (234), (235), (236), (237), (238), (239), (240), (241), (242), (243), (244), (245), (246), (247), (248), (249), (250), (251), (252), (253), (254), (255), (256), (257), (258), (259), (260), (261), (262), (263), (264), (265), (266), (267), (268), (269), (270), (271), (272), (273), (274), (275), (276), (277), (278), (279), (280), (281), (282), (283), (284), (285), (286), (287), (288), (289), (290), (291), (292), (293), (294), (295), (296), (297), (298), (299);
with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1 offset 100) select * from cte1;
c1
100
set tidb_max_chunk_size=default;
tests/integrationtest/r/executor/cte.result
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0002488129830453545, 0.00017308506357949227, 0.00016344895993825048, 0.00017195584950968623, 0.00001230913676408818 ]
{ "id": 0, "code_window": [ " \"//pkg/parser/model\",\n", " \"//pkg/session\",\n", " \"//pkg/util\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/memory\",\n", " \"//pkg/util/metricsutil\",\n", " \"@com_github_gogo_protobuf//proto\",\n", " \"@com_github_pingcap_errors//:errors\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/gctuner\",\n" ], "file_path": "br/cmd/br/BUILD.bazel", "type": "add", "edit_start_line_idx": 37 }
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
	"github.com/pingcap/tidb/pkg/types"
)

// PlanCacheMatchOpts store some property used to fetch plan from plan cache
// The structure set here is to avoid import cycle
type PlanCacheMatchOpts struct {
	// paramTypes stores all parameters' FieldType, some different parameters may share same plan
	ParamTypes []*types.FieldType
	// limitOffsetAndCount stores all the offset and key parameters extract from limit statement
	// only used for cache and pick plan with parameters in limit
	LimitOffsetAndCount []uint64
	// HasSubQuery indicate whether this query has sub query
	HasSubQuery bool
	// StatsVersionHash is the hash value of the statistics version
	StatsVersionHash uint64

	// Below are some variables that can affect the plan
	ForeignKeyChecks bool
}
pkg/util/plancache/util.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0002097638789564371, 0.00018149454263038933, 0.000169488150277175, 0.00017336307791993022, 0.00001646731107030064 ]
{ "id": 0, "code_window": [ " \"//pkg/parser/model\",\n", " \"//pkg/session\",\n", " \"//pkg/util\",\n", " \"//pkg/util/logutil\",\n", " \"//pkg/util/memory\",\n", " \"//pkg/util/metricsutil\",\n", " \"@com_github_gogo_protobuf//proto\",\n", " \"@com_github_pingcap_errors//:errors\",\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ " \"//pkg/util/gctuner\",\n" ], "file_path": "br/cmd/br/BUILD.bazel", "type": "add", "edit_start_line_idx": 37 }
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package globalconn

import (
	"errors"
	"fmt"
	"math"
	"strconv"

	"github.com/ngaut/sync2"
	"github.com/pingcap/tidb/pkg/util/logutil"
	"go.uber.org/zap"
)

// GCID is the Global Connection ID, providing UNIQUE connection IDs across the whole TiDB cluster.
// Used when GlobalKill feature is enable.
// See https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-01-global-kill.md
// 32 bits version:
//
//	 31    21 20              1    0
//	+--------+------------------+------+
//	|serverID|   local connID   |markup|
//	| (11b)  |      (20b)       |  =0  |
//	+--------+------------------+------+
//
// 64 bits version:
//
//	 63 62                 41 40                                   1   0
//	+--+---------------------+--------------------------------------+------+
//	|  |      serverID       |             local connID             |markup|
//	|=0|        (22b)        |                (40b)                 |  =1  |
//	+--+---------------------+--------------------------------------+------+
//
// NOTE:
//  1. `serverId` in 64 bits version can be less than 2^11. This will happen when the 32 bits local connID has been used up, while `serverID` stay unchanged.
//  2. The local connID of a 32 bits GCID can be the same with another 64 bits GCID. This will not violate the uniqueness of GCID.
type GCID struct {
	ServerID    uint64
	LocalConnID uint64
	Is64bits    bool
}

var (
	// ServerIDBits32 is the number of bits of serverID for 32bits global connection ID.
	ServerIDBits32 uint = 11
	// MaxServerID32 is maximum serverID for 32bits global connection ID.
	MaxServerID32 uint64 = 1<<ServerIDBits32 - 1
	// LocalConnIDBits32 is the number of bits of localConnID for 32bits global connection ID.
	LocalConnIDBits32 uint = 20
	// MaxLocalConnID32 is maximum localConnID for 32bits global connection ID.
	MaxLocalConnID32 uint64 = 1<<LocalConnIDBits32 - 1
)

const (
	// MaxServerID64 is maximum serverID for 64bits global connection ID.
	MaxServerID64 = 1<<22 - 1
	// LocalConnIDBits64 is the number of bits of localConnID for 64bits global connection ID.
	LocalConnIDBits64 = 40
	// MaxLocalConnID64 is maximum localConnID for 64bits global connection ID.
	MaxLocalConnID64 = 1<<LocalConnIDBits64 - 1

	// ReservedCount is the count of reserved connection IDs for internal processes.
	ReservedCount = 200
)

// ToConnID returns the 64bits connection ID
func (g *GCID) ToConnID() uint64 {
	var id uint64
	if g.Is64bits {
		if g.LocalConnID > MaxLocalConnID64 {
			panic(fmt.Sprintf("unexpected localConnID %d exceeds %d", g.LocalConnID, MaxLocalConnID64))
		}
		if g.ServerID > MaxServerID64 {
			panic(fmt.Sprintf("unexpected serverID %d exceeds %d", g.ServerID, MaxServerID64))
		}

		id |= 0x1
		id |= g.LocalConnID << 1 // 40 bits local connID.
		id |= g.ServerID << 41   // 22 bits serverID.
	} else {
		if g.LocalConnID > MaxLocalConnID32 {
			panic(fmt.Sprintf("unexpected localConnID %d exceeds %d", g.LocalConnID, MaxLocalConnID32))
		}
		if g.ServerID > MaxServerID32 {
			panic(fmt.Sprintf("unexpected serverID %d exceeds %d", g.ServerID, MaxServerID32))
		}

		id |= g.LocalConnID << 1 // 20 bits local connID.
		id |= g.ServerID << 21   // 11 bits serverID.
	}
	return id
}

// ParseConnID parses an uint64 connection ID to GlobalConnID.
//
// `isTruncated` indicates that older versions of the client truncated the 64-bit GlobalConnID to 32-bit.
func ParseConnID(id uint64) (g GCID, isTruncated bool, err error) {
	if id&0x80000000_00000000 > 0 {
		return GCID{}, false, errors.New("unexpected connectionID exceeds int64")
	}
	if id&0x1 > 0 { // 64bits
		if id&0xffffffff_00000000 == 0 {
			return GCID{}, true, nil
		}
		return GCID{
			Is64bits:    true,
			LocalConnID: (id >> 1) & MaxLocalConnID64,
			ServerID:    (id >> 41) & MaxServerID64,
		}, false, nil
	}

	// 32bits
	if id&0xffffffff_00000000 > 0 {
		return GCID{}, false, errors.New("unexpected connectionID exceeds uint32")
	}
	return GCID{
		Is64bits:    false,
		LocalConnID: (id >> 1) & MaxLocalConnID32,
		ServerID:    (id >> 21) & MaxServerID32,
	}, false, nil
}

///////////////////////////////// Class Diagram ///////////////////////////////////
//                                                                                //
//  +----------+      +-----------------+         +-----------------------+       //
//  |  Server  | ---> | ConnIDAllocator | <<--+-- | GlobalConnIDAllocator | --+   //
//  +----------+      +-----------------+     |   +-----------------------+   |   //
//                                            +-- | SimpleConnIDAllocator |   |   //
//                                                +----------+------------+   |   //
//                                                           |                |   //
//                                                           V                |   //
//                              +--------+        +----------------------+    |   //
//                              | IDPool | <<--+-- | AutoIncPool         | <--+   //
//                              +--------+     |  +----------------------+    |   //
//                                             +-- | LockFreeCircularPool | <--+  //
//                                                 +----------------------+       //
//                                                                                //
////////////////////////////////////////////////////////////////////////////////////

type serverIDGetterFn func() uint64

// Allocator allocates global connection IDs.
type Allocator interface {
	// NextID returns next connection ID.
	NextID() uint64
	// Release releases connection ID to allocator.
	Release(connectionID uint64)
	// GetReservedConnID returns reserved connection ID.
	GetReservedConnID(reservedNo uint64) uint64
}

var (
	_ Allocator = (*SimpleAllocator)(nil)
	_ Allocator = (*GlobalAllocator)(nil)
)

// SimpleAllocator is a simple connection id allocator used when GlobalKill feature is disable.
type SimpleAllocator struct {
	pool AutoIncPool
}

// NewSimpleAllocator creates a new SimpleAllocator.
func NewSimpleAllocator() *SimpleAllocator {
	a := &SimpleAllocator{}
	a.pool.Init(math.MaxUint64 - ReservedCount)
	return a
}

// NextID implements ConnIDAllocator interface.
func (a *SimpleAllocator) NextID() uint64 {
	id, _ := a.pool.Get()
	return id
}

// Release implements ConnIDAllocator interface.
func (a *SimpleAllocator) Release(id uint64) {
	a.pool.Put(id)
}

// GetReservedConnID implements ConnIDAllocator interface.
func (*SimpleAllocator) GetReservedConnID(reservedNo uint64) uint64 {
	if reservedNo >= ReservedCount {
		panic("invalid reservedNo exceed ReservedCount")
	}
	return math.MaxUint64 - reservedNo
}

// GlobalAllocator is global connection ID allocator.
type GlobalAllocator struct {
	is64bits sync2.AtomicInt32 // !0: true, 0: false

	serverIDGetter func() uint64

	local32 LockFreeCircularPool
	local64 AutoIncPool
}

// is64 indicates allocate 64bits global connection ID or not.
func (g *GlobalAllocator) is64() bool {
	return g.is64bits.Get() != 0
}

// upgradeTo64 upgrade allocator to 64bits.
func (g *GlobalAllocator) upgradeTo64() {
	g.is64bits.Set(1)
	logutil.BgLogger().Info("GlobalAllocator upgrade to 64 bits")
}

func (g *GlobalAllocator) downgradeTo32() {
	g.is64bits.Set(0)
	logutil.BgLogger().Info("GlobalAllocator downgrade to 32 bits")
}

// LocalConnIDAllocator64TryCount is the try count of 64bits local connID allocation.
const LocalConnIDAllocator64TryCount = 10

// NewGlobalAllocator creates a GlobalAllocator.
func NewGlobalAllocator(serverIDGetter serverIDGetterFn, enable32Bits bool) *GlobalAllocator {
	g := &GlobalAllocator{
		serverIDGetter: serverIDGetter,
	}
	g.local32.InitExt(1<<LocalConnIDBits32, math.MaxUint32)
	g.local64.InitExt((1<<LocalConnIDBits64)-ReservedCount, true, LocalConnIDAllocator64TryCount)

	var is64 int32
	if enable32Bits {
		is64 = 0
	} else {
		is64 = 1
	}
	g.is64bits.Set(is64)
	return g
}

// NextID returns next connection ID.
func (g *GlobalAllocator) NextID() uint64 {
	globalConnID := g.Allocate()
	return globalConnID.ToConnID()
}

// GetReservedConnID implements ConnIDAllocator interface.
func (g *GlobalAllocator) GetReservedConnID(reservedNo uint64) uint64 {
	if reservedNo >= ReservedCount {
		panic("invalid reservedNo exceed ReservedCount")
	}

	serverID := g.serverIDGetter()
	globalConnID := GCID{
		ServerID:    serverID,
		LocalConnID: (1 << LocalConnIDBits64) - 1 - reservedNo,
		Is64bits:    true,
	}
	return globalConnID.ToConnID()
}

// Allocate allocates a new global connection ID.
func (g *GlobalAllocator) Allocate() GCID {
	serverID := g.serverIDGetter()

	// 32bits.
	if !g.is64() && serverID <= MaxServerID32 {
		localConnID, ok := g.local32.Get()
		if ok {
			return GCID{
				ServerID:    serverID,
				LocalConnID: localConnID,
				Is64bits:    false,
			}
		}
		g.upgradeTo64() // go on to 64bits.
	}

	// 64bits.
	localConnID, ok := g.local64.Get()
	if !ok {
		// local connID with 40bits pool size is big enough and should not be exhausted, as `MaxServerConnections` is no more than math.MaxUint32.
		panic(fmt.Sprintf("Failed to allocate 64bits local connID after try %v times. Should never happen", LocalConnIDAllocator64TryCount))
	}
	return GCID{
		ServerID:    serverID,
		LocalConnID: localConnID,
		Is64bits:    true,
	}
}

// Release releases connectionID to pool.
func (g *GlobalAllocator) Release(connectionID uint64) {
	globalConnID, isTruncated, err := ParseConnID(connectionID)
	if err != nil || isTruncated {
		logutil.BgLogger().Error("failed to ParseGlobalConnID", zap.Error(err), zap.Uint64("connectionID", connectionID), zap.Bool("isTruncated", isTruncated))
		return
	}

	if globalConnID.Is64bits {
		g.local64.Put(globalConnID.LocalConnID)
	} else {
		if ok := g.local32.Put(globalConnID.LocalConnID); ok {
			if g.local32.Len() < g.local32.Cap()/2 {
				g.downgradeTo32()
			}
		} else {
			logutil.BgLogger().Error("failed to release 32bits connection ID", zap.Uint64("connectionID", connectionID), zap.Uint64("localConnID", globalConnID.LocalConnID))
		}
	}
}

var (
	ldflagIsGlobalKillTest  = "0"  // 1:Yes, otherwise:No.
	ldflagServerIDBits32    = "11" // Bits of ServerID32.
	ldflagLocalConnIDBits32 = "20" // Bits of LocalConnID32.
)

func initByLDFlagsForGlobalKill() {
	if ldflagIsGlobalKillTest == "1" {
		var (
			i   int
			err error
		)

		if i, err = strconv.Atoi(ldflagServerIDBits32); err != nil {
			panic("invalid ldflagServerIDBits32")
		}
		ServerIDBits32 = uint(i)
		MaxServerID32 = 1<<ServerIDBits32 - 1

		if i, err = strconv.Atoi(ldflagLocalConnIDBits32); err != nil {
			panic("invalid ldflagLocalConnIDBits32")
		}
		LocalConnIDBits32 = uint(i)
		MaxLocalConnID32 = 1<<LocalConnIDBits32 - 1

		logutil.BgLogger().Info("global_kill_test is enabled",
			zap.Uint("ServerIDBits32", ServerIDBits32),
			zap.Uint64("MaxServerID32", MaxServerID32),
			zap.Uint("LocalConnIDBits32", LocalConnIDBits32),
			zap.Uint64("MaxLocalConnID32", MaxLocalConnID32),
		)
	}
}

func init() {
	initByLDFlagsForGlobalKill()
}
pkg/util/globalconn/globalconn.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00040439359145238996, 0.0001837155141402036, 0.00016272648645099252, 0.00016905389202293009, 0.000044032844016328454 ]
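A minimal round-trip sketch for the GCID bit layout in the file above; it assumes only the exported globalconn API shown there (import path taken from the record's file_path), and the concrete IDs are made-up illustration values:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/util/globalconn"
)

func main() {
	// 64-bit layout: bit 0 is the markup (=1), bits 1..40 hold the local connID,
	// bits 41..62 hold the serverID, per the diagram in globalconn.go.
	g := globalconn.GCID{ServerID: 5, LocalConnID: 42, Is64bits: true}
	id := g.ToConnID()
	fmt.Printf("%#x\n", id) // 0xa0000000055 = 5<<41 | 42<<1 | 1

	// Parsing recovers the same fields; isTruncated stays false because the high bits are set.
	parsed, truncated, err := globalconn.ParseConnID(id)
	fmt.Println(parsed.ServerID, parsed.LocalConnID, parsed.Is64bits, truncated, err) // 5 42 true false <nil>
}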
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 15 }
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metautil

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/pingcap/errors"
	backuppb "github.com/pingcap/kvproto/pkg/brpb"
	berrors "github.com/pingcap/tidb/br/pkg/errors"
	"github.com/pingcap/tidb/br/pkg/storage"
	"github.com/pingcap/tidb/br/pkg/utils"
	"github.com/pingcap/tidb/pkg/parser/model"
	"github.com/pingcap/tidb/pkg/statistics/handle"
	statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types"
	statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util"
	"golang.org/x/sync/errgroup"
)

var maxStatsJsonTableSize = 32 * 1024 * 1024 // 32 MiB
var inlineSize = 8 * 1024                    // 8 KiB

func getStatsFileName(physicalID int64) string {
	return fmt.Sprintf("backupmeta.schema.stats.%09d", physicalID)
}

// A lightweight function wrapper to dump the statistic
type StatsWriter struct {
	storage storage.ExternalStorage
	cipher  *backuppb.CipherInfo

	// final stats file indexes
	statsFileIndexes []*backuppb.StatsFileIndex

	// temporary variables, clear after each flush
	totalSize int
	statsFile *backuppb.StatsFile
}

func newStatsWriter(
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
) *StatsWriter {
	return &StatsWriter{
		storage: storage,
		cipher:  cipher,

		statsFileIndexes: make([]*backuppb.StatsFileIndex, 0),

		totalSize: 0,
		statsFile: &backuppb.StatsFile{
			Blocks: make([]*backuppb.StatsBlock, 0, 8),
		},
	}
}

func (s *StatsWriter) clearTemporary() {
	// clear the temporary variables
	s.totalSize = 0
	s.statsFile = &backuppb.StatsFile{
		Blocks: make([]*backuppb.StatsBlock, 0, 8),
	}
}

func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {
	fileName := getStatsFileName(physicalID)
	content, err := proto.Marshal(s.statsFile)
	if err != nil {
		return errors.Trace(err)
	}

	if len(s.statsFileIndexes) == 0 && len(content) < inlineSize {
		s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{InlineData: content})
		return nil
	}

	checksum := sha256.Sum256(content)

	encryptedContent, iv, err := Encrypt(content, s.cipher)
	if err != nil {
		return errors.Trace(err)
	}

	if err := s.storage.WriteFile(ctx, fileName, encryptedContent); err != nil {
		return errors.Trace(err)
	}

	s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{
		Name:     fileName,
		Sha256:   checksum[:],
		SizeEnc:  uint64(len(encryptedContent)),
		SizeOri:  uint64(len(content)),
		CipherIv: iv,
	})

	s.clearTemporary()
	return nil
}

func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {
	if jsonTable == nil {
		return nil
	}

	statsBytes, err := json.Marshal(jsonTable)
	if err != nil {
		return errors.Trace(err)
	}

	s.totalSize += len(statsBytes)
	s.statsFile.Blocks = append(s.statsFile.Blocks, &backuppb.StatsBlock{
		PhysicalId: physicalID,
		JsonTable:  statsBytes,
	})

	// check whether need to flush
	if s.totalSize > maxStatsJsonTableSize {
		if err := s.writeStatsFileAndClear(ctx, physicalID); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

func (s *StatsWriter) BackupStatsDone(ctx context.Context) ([]*backuppb.StatsFileIndex, error) {
	if s.totalSize == 0 || len(s.statsFile.Blocks) == 0 {
		return s.statsFileIndexes, nil
	}

	if err := s.writeStatsFileAndClear(ctx, s.statsFile.Blocks[0].PhysicalId); err != nil {
		return nil, errors.Trace(err)
	}
	return s.statsFileIndexes, nil
}

func RestoreStats(
	ctx context.Context,
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
	statsHandler *handle.Handle,
	newTableInfo *model.TableInfo,
	statsFileIndexes []*backuppb.StatsFileIndex,
	rewriteIDMap map[int64]int64,
) error {
	eg, ectx := errgroup.WithContext(ctx)
	taskCh := make(chan *statstypes.PartitionStatisticLoadTask, 8)
	eg.Go(func() error {
		return downloadStats(ectx, storage, cipher, statsFileIndexes, rewriteIDMap, taskCh)
	})
	eg.Go(func() error {
		// NOTICE: skip updating cache after load stats from json
		return statsHandler.LoadStatsFromJSONConcurrently(ectx, newTableInfo, taskCh, 0)
	})
	return eg.Wait()
}

func downloadStats(
	ctx context.Context,
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
	statsFileIndexes []*backuppb.StatsFileIndex,
	rewriteIDMap map[int64]int64,
	taskCh chan<- *statstypes.PartitionStatisticLoadTask,
) error {
	defer close(taskCh)
	eg, ectx := errgroup.WithContext(ctx)
	downloadWorkerpool := utils.NewWorkerPool(4, "download stats for each partition")
	for _, statsFileIndex := range statsFileIndexes {
		if ectx.Err() != nil {
			break
		}
		statsFile := statsFileIndex
		downloadWorkerpool.ApplyOnErrorGroup(eg, func() error {
			var statsContent []byte
			if len(statsFile.InlineData) > 0 {
				statsContent = statsFile.InlineData
			} else {
				content, err := storage.ReadFile(ectx, statsFile.Name)
				if err != nil {
					return errors.Trace(err)
				}

				decryptContent, err := Decrypt(content, cipher, statsFile.CipherIv)
				if err != nil {
					return errors.Trace(err)
				}

				checksum := sha256.Sum256(decryptContent)
				if !bytes.Equal(statsFile.Sha256, checksum[:]) {
					return berrors.ErrInvalidMetaFile.GenWithStackByArgs(fmt.Sprintf(
						"checksum mismatch expect %x, got %x", statsFile.Sha256, checksum[:]))
				}

				statsContent = decryptContent
			}

			statsFileBlocks := &backuppb.StatsFile{}
			if err := proto.Unmarshal(statsContent, statsFileBlocks); err != nil {
				return errors.Trace(err)
			}

			for _, block := range statsFileBlocks.Blocks {
				physicalId, ok := rewriteIDMap[block.PhysicalId]
				if !ok {
					return berrors.ErrRestoreInvalidRewrite.GenWithStackByArgs(fmt.Sprintf(
						"not rewrite rule matched, old physical id: %d", block.PhysicalId))
				}

				jsonTable := &statsutil.JSONTable{}
				if err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {
					return errors.Trace(err)
				}

				select {
				case <-ectx.Done():
					return nil
				case taskCh <- &statstypes.PartitionStatisticLoadTask{
					PhysicalID: physicalId,
					JSONTable:  jsonTable,
				}:
				}
			}

			return nil
		})
	}

	return eg.Wait()
}
br/pkg/metautil/statsfile.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.005068090278655291, 0.00041456095641478896, 0.00016324289026670158, 0.0001707288174657151, 0.0009799766121432185 ]
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 15 }
INSERT INTO `dup_detect` VALUES (83, 'IsVNO', 2241037762547431879, 'j0xw', 73, 4, 355883464), (180, 'KfcXL', 6039907346589536910, '5', 80, 2, 113117135), (112, 'joYVZ', 929853189485966327, '7w4ony', 13, 2, 218141761), (140, 'Sqggb', 853898774711616258, '3s9', 7, 3, 388355973), (146, 'SEa6H', 8308907227481935941, 'ar3a', 66, 3, 280312226), (24, '246hW', 4324710001896112883, 'de8tl', 39, 3, 133366183), (152, '6zyes', 555751856947326226, 'h5e3', 85, 4, 57855866), (99, 'gno2D', 5911623359071280574, 'z', 68, 4, 414974327), (136, '0Ux7H', 4632018848723169085, 'jt8', 21, 1, 91618391), (30, '4OSnq', 6361165803295992855, 'td0ycq', 75, 4, 133065615);
INSERT INTO `dup_detect` VALUES (98, 'DuupT', 10285462810710690, 'r', 8, 3, 290637016), (153, 'pKQ7R', 3070424367330108076, 'j8j6', 31, 1, 456326651), (139, 'mYn9D', 8849622168641656671, 'cl7fq', 60, 2, 308170178), (197, 'nvg9k', 2049567136774876210, 'dv0', 25, 1, 224780646), (62, 'hitxp', 4387090517431638101, 'vq', 51, 4, 434264920), (30, 'a4fFB', 4998704806615568546, 'rfrnpp', 63, 1, 489370203), (177, 'rN0Rb', 5661738155720520659, '42', 85, 3, 496669056), (177, 'Ek4Qm', 2220809508242776412, 'y58y', 1, 4, 408963660), (40, '4wdZz', 6390410616431923107, 'rj', 32, 1, 39083348), (56, 'lkjEd', 2301375651887989672, '9j', 17, 2, 412716009);
INSERT INTO `dup_detect` VALUES (183, 'GgdHK', 8398462613837472754, 'uh', 63, 2, 264628017), (171, 'cHbCR', 5835140940781394208, 'xl', 28, 1, 197111684), (89, '5abrM', 3379035217232248053, 'kvg', 98, 4, 356252729), (155, 'MwKff', 1460315338363085441, 'mc', 10, 3, 464751128), (40, 'aHGNS', 738633024583152976, '6y', 78, 3, 256840448), (143, '0UKUc', 7029576482342060973, 'rdt', 27, 3, 385708670), (48, '54d5c', 5438251565653898867, 'bx', 74, 3, 230530972), (76, '8Gwsw', 461657777974232859, 'vkiu62', 7, 4, 158902373), (94, 'bFSNK', 4686923505238851008, 'r9', 59, 1, 488482291), (102, 'aiSTZ', 1332234922327691600, '8au', 10, 2, 234277558);
INSERT INTO `dup_detect` VALUES (145, 'CyxCU', 1684002032652602838, '6q', 52, 0, 367558093), (153, 'cLJWL', 90542643455198173, 'a9k', 29, 1, 230418685), (107, 'voYq2', 7809590085989764141, '3sz9', 61, 1, 195555952), (9, 'gtKZp', 6424617016328530470, 'dam', 36, 1, 28716540), (193, 'QaA1G', 9192687524024546519, 'o', 95, 3, 303889826), (58, 'Ha0TU', 2995937967861158585, 'zf66', 56, 3, 470431247), (89, 'PdNn9', 376156765147513236, '1wr9l', 18, 1, 491218092), (99, 'Q3Ilz', 6283536016294875361, '5hkj', 78, 0, 151302063), (96, 'oXxJS', 8877305793201813585, 'saas2c', 27, 1, 132958261), (138, 'Vw5he', 2461106025221525040, 'u02fb', 6, 4, 318301521);
INSERT INTO `dup_detect` VALUES (77, 'VRG0Z', 3617128947837732137, 'n4', 45, 4, 187957281), (41, 'rcqMo', 4569580803661574058, 'lk51go', 53, 4, 455586745), (31, 'hfi6p', 3695464848223876413, 'o7barl', 40, 3, 95210774), (20, 'n8ctF', 5731519319138790907, 'nklws', 85, 4, 166063348), (75, 'wPseS', 8724841512884214158, 'd', 1, 4, 412031306), (135, 'GMdzP', 3800039750845613952, '6hdtv', 4, 4, 18549943), (148, 'x17mF', 5314037124751327024, '9rgqx', 66, 1, 278086525), (103, 'Um5E7', 6578210549532015579, '5hwe2', 45, 1, 4296274), (52, 'b5yPQ', 6400082436489263585, '6r253', 38, 4, 430939716), (92, 'c8FQ8', 7197644280034294678, 'fyiux', 75, 2, 103976619);
INSERT INTO `dup_detect` VALUES (186, 'wfCor', 5955409004387754003, 'qxg', 12, 3, 367504782), (114, 'W49G7', 5088058312900327194, 'ccyji7', 7, 0, 394540544), (44, '6BvHj', 3720992033805039523, 'ptixof', 40, 0, 297541133), (143, 'FVvMl', 3261818725120867343, 'mdqk', 21, 4, 164359973), (4, '7KIRt', 8275036501204847929, 'zi6f', 22, 1, 161535563), (197, 'YOLYM', 1798601617332435844, '41ihf', 44, 0, 351558840), (126, 'qQSyt', 5540112381878245958, '0ax', 67, 0, 236426215), (134, 'XzrUE', 98106572312661729, 'nkbbi', 82, 1, 34678346), (183, 'CgQtx', 7760329983217705728, '2', 26, 3, 374828063), (153, 'gKIjc', 9045250952377791192, 't8hb', 35, 0, 365360832);
INSERT INTO `dup_detect` VALUES (61, 'zu4x7', 216347212423853969, 'wfcn8', 28, 4, 209753292), (54, 'XiJAr', 3171768713560251482, '1mt3vb', 24, 0, 55789261), (55, 'QkAtr', 4279527522377337050, 'ko6', 8, 4, 491418560), (99, 'fj9BB', 5695247238451845871, 'qe6aa', 35, 3, 194614406), (174, 'ubPhu', 8523822214992321879, 'ai4', 78, 3, 249793865), (196, '3wkkw', 7331258055858595528, 'vbbv', 70, 2, 241643547), (149, 'p2DX4', 8181182407667595658, 'rc', 56, 1, 364679748), (99, 'JI4DT', 6412112959158558175, 'munq6', 17, 0, 174384468), (143, 'FWFkd', 308142846562235349, 'r4cifn', 90, 3, 77648429), (157, '6EN69', 7953119855198919762, 'eim', 49, 3, 474801621);
INSERT INTO `dup_detect` VALUES (87, 'fqTEt', 8461930344035202898, '94', 51, 2, 427605230), (86, 'QaOBb', 6193933506961622580, '03mo', 41, 2, 193143289), (100, '5YA3k', 443905727622327969, '43ci', 92, 0, 56930439), (96, 't62JE', 7162126946372106331, 'gac', 12, 3, 396713526), (96, 'gvQrY', 5043208588998510317, 'fha', 7, 0, 265770925), (47, 'eDAva', 2268545993504485175, 'um', 21, 1, 161786034), (3, 'NYf1n', 7469229552772445510, 'kg', 31, 3, 449839120), (37, 'wijow', 843912082350435481, 'ts', 52, 3, 194846911), (78, 'HWRjS', 5063408084732038050, 'rs', 78, 1, 178064179), (36, 'AtPIN', 2382438166468982286, 'r', 52, 1, 351477279);
INSERT INTO `dup_detect` VALUES (6, 'P14Bs', 909588658481590114, 'ul', 84, 0, 133214167), (143, 'F9at3', 5825294979115281738, 'reoid', 94, 4, 366991306), (178, 'Z21jI', 2875848124617984698, '9dxjh', 82, 3, 396878919), (35, '9L3dn', 2312086918456086291, 'pv', 81, 3, 15935876), (136, 'bdusE', 4333785909102689023, 'yked', 44, 2, 242392043), (69, '2Wpox', 5692169190211631132, 'u4i', 52, 3, 51644386), (41, 'dAo0h', 7058319714032491853, '2wuj', 29, 4, 455435242), (152, '7susG', 3796743711910116264, '1hii39', 89, 2, 339803716), (43, '04vOU', 2850207776650641744, 'fp', 61, 1, 158327333), (121, 'XGCg9', 5604995708168410974, 'vqqy', 95, 0, 24597538);
INSERT INTO `dup_detect` VALUES (37, 'zERy6', 8794146820600298588, '8r', 30, 3, 449616535), (52, 'UtLSL', 7286554232001430853, 'lvsah', 5, 2, 38528958), (15, 'mch8F', 2349881589236359304, 'i8ra', 49, 4, 23850025), (47, '5k0d7', 623994919542800279, '4', 74, 0, 225180010), (150, 'itSx5', 7106952139469792731, 'k', 58, 1, 102542221), (2, 'Y10cK', 4687593162036194085, 'e74', 37, 3, 56285566), (150, 'jWxSt', 4950970749692005892, '21d15c', 98, 0, 293194243), (141, '4Wtmr', 9061058647512236690, 'yfg', 51, 1, 411690808), (70, 'A21nJ', 2624269271790371549, 'v0281', 72, 0, 176061556), (66, 'WA4Lz', 5647568340668202073, 'hy9da3', 67, 3, 179726484);
(987, 'nEoKu', 7836621565948506759, 'y6', 48, 0, 177543185),
br/tests/lightning_duplicate_detection_new/data/test.dup_detect.4.sql
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001728668576106429, 0.00016658932145219296, 0.00016378561849705875, 0.00016618266818113625, 0.0000024338091861864086 ]
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 15 }
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package utils

import (
	"testing"

	"github.com/pingcap/tidb/pkg/testkit/testsetup"
	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	opts := []goleak.Option{
		goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
		goleak.IgnoreTopFunction("github.com/bazelbuild/rules_go/go/tools/bzltestutil.RegisterTimeoutHandler.func1"),
		goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	}
	testsetup.SetupForCommonTest()
	goleak.VerifyTestMain(m, opts...)
}
br/pkg/utils/main_test.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00025393476244062185, 0.0002062211569864303, 0.00016765433247201145, 0.00020164776651654392, 0.000037630630686180666 ]
{ "id": 1, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 15 }
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package trxevents

import (
	"github.com/pingcap/kvproto/pkg/kvrpcpb"
)

// EventType represents the type of a transaction event.
type EventType = int

const (
	// EventTypeCopMeetLock stands for the CopMeetLock event type.
	EventTypeCopMeetLock = iota
)

// CopMeetLock represents an event that coprocessor reading encounters lock.
type CopMeetLock struct {
	LockInfo *kvrpcpb.LockInfo
}

// TransactionEvent represents a transaction event that may belong to any of the possible types.
type TransactionEvent struct {
	inner     any
	eventType EventType
}

// GetCopMeetLock tries to extract the inner CopMeetLock event from a TransactionEvent. Returns nil if it's not a
// CopMeetLock event.
func (e TransactionEvent) GetCopMeetLock() *CopMeetLock {
	if e.eventType == EventTypeCopMeetLock {
		return e.inner.(*CopMeetLock)
	}
	return nil
}

// WrapCopMeetLock wraps a CopMeetLock event into a TransactionEvent object.
func WrapCopMeetLock(copMeetLock *CopMeetLock) TransactionEvent {
	return TransactionEvent{
		eventType: EventTypeCopMeetLock,
		inner:     copMeetLock,
	}
}

// EventCallback is the callback type that handles `TransactionEvent`s.
type EventCallback = func(event TransactionEvent)
pkg/util/trxevents/trx_events.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00017655613191891462, 0.00016984622925519943, 0.00016529869753867388, 0.00016992379096336663, 0.000003725700707946089 ]
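A small usage sketch for the wrap/accessor pair in trx_events.go above; the kvrpcpb.LockInfo field values are made up for illustration:

package main

import (
	"fmt"

	"github.com/pingcap/kvproto/pkg/kvrpcpb"
	"github.com/pingcap/tidb/pkg/util/trxevents"
)

func main() {
	// Wrap a CopMeetLock into the generic TransactionEvent, then recover it through the typed accessor.
	event := trxevents.WrapCopMeetLock(&trxevents.CopMeetLock{
		LockInfo: &kvrpcpb.LockInfo{Key: []byte("k1"), LockVersion: 400},
	})
	// GetCopMeetLock returns nil for any other event type, so the check below is the intended pattern.
	if lock := event.GetCopMeetLock(); lock != nil {
		fmt.Printf("cop met lock on key %s, lock startTS %d\n", lock.LockInfo.Key, lock.LockInfo.LockVersion)
	}
}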
{ "id": 2, "code_window": [ "\n", "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunBackup(ctx, tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to backup\", zap.Error(err))\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 50 }
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.

package main

import (
	"fmt"

	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	berrors "github.com/pingcap/tidb/br/pkg/errors"
	"github.com/pingcap/tidb/br/pkg/gluetikv"
	"github.com/pingcap/tidb/br/pkg/summary"
	"github.com/pingcap/tidb/br/pkg/task"
	"github.com/pingcap/tidb/br/pkg/trace"
	"github.com/pingcap/tidb/br/pkg/utils"
	"github.com/pingcap/tidb/br/pkg/version/build"
	"github.com/pingcap/tidb/pkg/config"
	"github.com/pingcap/tidb/pkg/session"
	"github.com/pingcap/tidb/pkg/util/metricsutil"
	"github.com/spf13/cobra"
	"go.uber.org/zap"
	"sourcegraph.com/sourcegraph/appdash"
)

func runRestoreCommand(command *cobra.Command, cmdName string) error {
	cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}}
	if err := cfg.ParseFromFlags(command.Flags()); err != nil {
		command.SilenceUsage = false
		return errors.Trace(err)
	}

	if err := metricsutil.RegisterMetricsForBR(cfg.PD, cfg.KeyspaceName); err != nil {
		return errors.Trace(err)
	}

	if task.IsStreamRestore(cmdName) {
		if err := cfg.ParseStreamRestoreFlags(command.Flags()); err != nil {
			return errors.Trace(err)
		}
	}

	// have to skip grant table, in order to NotifyUpdatePrivilege in binary mode
	config.GetGlobalConfig().Security.SkipGrantTable = true

	ctx := GetDefaultContext()
	if cfg.EnableOpenTracing {
		var store *appdash.MemoryStore
		ctx, store = trace.TracerStartSpan(ctx)
		defer trace.TracerFinishSpan(ctx, store)
	}

	if cfg.FullBackupType == task.FullBackupTypeEBS {
		if cfg.Prepare {
			if err := task.RunRestoreEBSMeta(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg); err != nil {
				log.Error("failed to restore EBS meta", zap.Error(err))
				return errors.Trace(err)
			}
		} else {
			if err := task.RunResolveKvData(GetDefaultContext(), tidbGlue, cmdName, &cfg); err != nil {
				log.Error("failed to restore data", zap.Error(err))
				return errors.Trace(err)
			}
		}
		return nil
	}

	// No need to cache the coprocessor result
	config.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0

	if err := task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg); err != nil {
		log.Error("failed to restore", zap.Error(err))
		printWorkaroundOnFullRestoreError(command, err)
		return errors.Trace(err)
	}
	return nil
}

// print workaround when we met not fresh or incompatible cluster error on full cluster restore
func printWorkaroundOnFullRestoreError(command *cobra.Command, err error) {
	if !errors.ErrorEqual(err, berrors.ErrRestoreNotFreshCluster) &&
		!errors.ErrorEqual(err, berrors.ErrRestoreIncompatibleSys) {
		return
	}
	fmt.Println("#######################################################################")
	switch {
	case errors.ErrorEqual(err, berrors.ErrRestoreNotFreshCluster):
		fmt.Println("# the target cluster is not fresh, cannot restore.")
		fmt.Println("# you can drop existing databases and tables and start restore again")
	case errors.ErrorEqual(err, berrors.ErrRestoreIncompatibleSys):
		fmt.Println("# the target cluster is not compatible with the backup data,")
		fmt.Println("# you can use '--with-sys-table=false' to skip restoring system tables")
	}
	fmt.Println("#######################################################################")
}

func runRestoreRawCommand(command *cobra.Command, cmdName string) error {
	cfg := task.RestoreRawConfig{
		RawKvConfig: task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}},
	}
	if err := cfg.ParseFromFlags(command.Flags()); err != nil {
		command.SilenceUsage = false
		return errors.Trace(err)
	}

	ctx := GetDefaultContext()
	if cfg.EnableOpenTracing {
		var store *appdash.MemoryStore
		ctx, store = trace.TracerStartSpan(ctx)
		defer trace.TracerFinishSpan(ctx, store)
	}

	if err := task.RunRestoreRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg); err != nil {
		log.Error("failed to restore raw kv", zap.Error(err))
		return errors.Trace(err)
	}
	return nil
}

func runRestoreTxnCommand(command *cobra.Command, cmdName string) error {
	cfg := task.Config{LogProgress: HasLogFile()}
	if err := cfg.ParseFromFlags(command.Flags()); err != nil {
		command.SilenceUsage = false
		return errors.Trace(err)
	}

	ctx := GetDefaultContext()
	if cfg.EnableOpenTracing {
		var store *appdash.MemoryStore
		ctx, store = trace.TracerStartSpan(ctx)
		defer trace.TracerFinishSpan(ctx, store)
	}

	if err := task.RunRestoreTxn(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg); err != nil {
		log.Error("failed to restore txn kv", zap.Error(err))
		return errors.Trace(err)
	}
	return nil
}

// NewRestoreCommand returns a restore subcommand.
func NewRestoreCommand() *cobra.Command {
	command := &cobra.Command{
		Use:          "restore",
		Short:        "restore a TiDB/TiKV cluster",
		SilenceUsage: true,
		PersistentPreRunE: func(c *cobra.Command, args []string) error {
			if err := Init(c); err != nil {
				return errors.Trace(err)
			}
			build.LogInfo(build.BR)
			utils.LogEnvVariables()
			task.LogArguments(c)
			session.DisableStats4Test()

			summary.SetUnit(summary.RestoreUnit)
			return nil
		},
	}
	command.AddCommand(
		newFullRestoreCommand(),
		newDBRestoreCommand(),
		newTableRestoreCommand(),
		newRawRestoreCommand(),
		newTxnRestoreCommand(),
		newStreamRestoreCommand(),
	)
	task.DefineRestoreFlags(command.PersistentFlags())
	return command
}

func newFullRestoreCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "full",
		Short: "restore all tables",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			return runRestoreCommand(cmd, task.FullRestoreCmd)
		},
	}
	task.DefineFilterFlags(command, filterOutSysAndMemTables, false)
	task.DefineRestoreSnapshotFlags(command)
	return command
}

func newDBRestoreCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "db",
		Short: "restore tables in a database from the backup data",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			return runRestoreCommand(cmd, task.DBRestoreCmd)
		},
	}
	task.DefineDatabaseFlags(command)
	return command
}

func newTableRestoreCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "table",
		Short: "restore a table from the backup data",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			return runRestoreCommand(cmd, task.TableRestoreCmd)
		},
	}
	task.DefineTableFlags(command)
	return command
}

func newRawRestoreCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "raw",
		Short: "(experimental) restore a raw kv range to TiKV cluster",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			return runRestoreRawCommand(cmd, task.RawRestoreCmd)
		},
	}
	task.DefineRawRestoreFlags(command)
	return command
}

func newTxnRestoreCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "txn",
		Short: "(experimental) restore txn kv to TiKV cluster",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			return runRestoreTxnCommand(cmd, task.TxnRestoreCmd)
		},
	}
	task.DefineRawRestoreFlags(command)
	return command
}

func newStreamRestoreCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "point",
		Short: "restore data from log until specify commit timestamp",
		Args:  cobra.NoArgs,
		RunE: func(command *cobra.Command, _ []string) error {
			return runRestoreCommand(command, task.PointRestoreCmd)
		},
	}
	task.DefineFilterFlags(command, filterOutSysAndMemTables, true)
	task.DefineStreamRestoreFlags(command)
	return command
}
br/cmd/br/restore.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.9892191290855408, 0.10759430378675461, 0.00016882683848962188, 0.0005944567965343595, 0.2679959237575531 ]
{ "id": 2, "code_window": [ "\n", "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunBackup(ctx, tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to backup\", zap.Error(err))\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 50 }
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package unistore

import (
	"testing"

	"github.com/pingcap/tidb/pkg/testkit/testsetup"
	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	testsetup.SetupForCommonTest()
	opts := []goleak.Option{
		goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
		goleak.IgnoreTopFunction("github.com/bazelbuild/rules_go/go/tools/bzltestutil.RegisterTimeoutHandler.func1"),
		goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
		goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	}
	goleak.VerifyTestMain(m, opts...)
}
pkg/store/mockstore/unistore/main_test.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001769690861692652, 0.00017401920922566205, 0.00017109808686655015, 0.00017400484648533165, 0.0000021836960968357744 ]
{ "id": 2, "code_window": [ "\n", "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunBackup(ctx, tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to backup\", zap.Error(err))\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 50 }
1,a1,1.1
3,b3,3.3
5,c5,5.5
7,d7,7.7
9,e9,9.9
br/tests/lightning_distributed_import/data1/distributed_import.t.csv
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001673261431278661, 0.0001673261431278661, 0.0001673261431278661, 0.0001673261431278661, 0 ]
{ "id": 2, "code_window": [ "\n", "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunBackup(ctx, tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to backup\", zap.Error(err))\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\treturn nil\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/backup.go", "type": "add", "edit_start_line_idx": 50 }
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package importer

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	"github.com/pingcap/tidb/pkg/parser"
	"github.com/pingcap/tidb/pkg/parser/ast"
	"github.com/pingcap/tidb/pkg/parser/types"
	_ "github.com/pingcap/tidb/pkg/types/parser_driver" // for parser driver
	"github.com/pingcap/tidb/pkg/util/dbutil"
	"go.uber.org/zap"
)

type column struct {
	data    *datum
	tp      *types.FieldType
	table   *table
	name    string
	comment string
	min     string
	max     string
	set     []string

	idx  int
	step int64
}

func (col *column) String() string {
	if col == nil {
		return "<nil>"
	}

	return fmt.Sprintf("[column]idx: %d, name: %s, tp: %v, min: %s, max: %s, step: %d, set: %v\n",
		col.idx, col.name, col.tp, col.min, col.max, col.step, col.set)
}

func (col *column) parseRule(kvs []string) {
	if len(kvs) != 2 {
		return
	}

	key := strings.TrimSpace(kvs[0])
	value := strings.TrimSpace(kvs[1])
	if key == "range" {
		fields := strings.Split(value, ",")
		if len(fields) == 1 {
			col.min = strings.TrimSpace(fields[0])
		} else if len(fields) == 2 {
			col.min = strings.TrimSpace(fields[0])
			col.max = strings.TrimSpace(fields[1])
		}
	} else if key == "step" {
		var err error
		col.step, err = strconv.ParseInt(value, 10, 64)
		if err != nil {
			log.Fatal("parseRule", zap.Error(err))
		}
	} else if key == "set" {
		fields := strings.Split(value, ",")
		for _, field := range fields {
			col.set = append(col.set, strings.TrimSpace(field))
		}
	}
}

// parse the data rules.
// rules like `a int unique comment '[[range=1,10;step=1]]'`,
// then we will get value from 1,2...10
func (col *column) parseColumnComment() {
	comment := strings.TrimSpace(col.comment)
	start := strings.Index(comment, "[[")
	end := strings.Index(comment, "]]")
	var content string
	if start < end {
		content = comment[start+2 : end]
	}

	fields := strings.Split(content, ";")
	for _, field := range fields {
		field = strings.TrimSpace(field)
		kvs := strings.Split(field, "=")
		col.parseRule(kvs)
	}
}

func (col *column) parseColumn(cd *ast.ColumnDef) {
	col.name = cd.Name.Name.L
	col.tp = cd.Tp
	col.parseColumnOptions(cd.Options)
	col.parseColumnComment()
	col.table.columns = append(col.table.columns, col)
}

func (col *column) parseColumnOptions(ops []*ast.ColumnOption) {
	for _, op := range ops {
		switch op.Tp {
		case ast.ColumnOptionPrimaryKey, ast.ColumnOptionAutoIncrement, ast.ColumnOptionUniqKey:
			col.table.uniqIndices[col.name] = col
		case ast.ColumnOptionComment:
			col.comment = op.Expr.(ast.ValueExpr).GetDatumString()
		}
	}
}

type table struct {
	indices      map[string]*column
	uniqIndices  map[string]*column
	unsignedCols map[string]*column
	name         string
	columnList   string
	columns      []*column
}

func (t *table) printColumns() string {
	ret := ""
	for _, col := range t.columns {
		ret += fmt.Sprintf("%v", col)
	}

	return ret
}

func (t *table) String() string {
	if t == nil {
		return "<nil>"
	}

	ret := fmt.Sprintf("[table]name: %s\n", t.name)
	ret += "[table]columns:\n"
	ret += t.printColumns()

	ret += fmt.Sprintf("[table]column list: %s\n", t.columnList)

	ret += "[table]indices:\n"
	for k, v := range t.indices {
		ret += fmt.Sprintf("key->%s, value->%v", k, v)
	}

	ret += "[table]unique indices:\n"
	for k, v := range t.uniqIndices {
		ret += fmt.Sprintf("key->%s, value->%v", k, v)
	}

	return ret
}

func newTable() *table {
	return &table{
		indices:      make(map[string]*column),
		uniqIndices:  make(map[string]*column),
		unsignedCols: make(map[string]*column),
	}
}

func (*table) findCol(cols []*column, name string) *column {
	for _, col := range cols {
		if col.name == name {
			return col
		}
	}
	return nil
}

func (t *table) parseTableConstraint(cons *ast.Constraint) {
	switch cons.Tp {
	case ast.ConstraintPrimaryKey, ast.ConstraintKey, ast.ConstraintUniq,
		ast.ConstraintUniqKey, ast.ConstraintUniqIndex:
		for _, indexCol := range cons.Keys {
			name := indexCol.Column.Name.L
			t.uniqIndices[name] = t.findCol(t.columns, name)
		}
	case ast.ConstraintIndex:
		for _, indexCol := range cons.Keys {
			name := indexCol.Column.Name.L
			t.indices[name] = t.findCol(t.columns, name)
		}
	}
}

func (t *table) buildColumnList() {
	columns := make([]string, 0, len(t.columns))
	for _, column := range t.columns {
		columns = append(columns, dbutil.ColumnName(column.name))
	}

	t.columnList = strings.Join(columns, ",")
}

func parseTable(t *table, stmt *ast.CreateTableStmt) error {
	t.name = stmt.Table.Name.L
	t.columns = make([]*column, 0, len(stmt.Cols))

	for i, col := range stmt.Cols {
		column := &column{idx: i + 1, table: t, step: defaultStep, data: newDatum()}
		column.parseColumn(col)
	}

	for _, cons := range stmt.Constraints {
		t.parseTableConstraint(cons)
	}

	t.buildColumnList()

	return nil
}

func parseTableSQL(table *table, sql string) error {
	stmt, err := parser.New().ParseOneStmt(sql, "", "")
	if err != nil {
		return errors.Trace(err)
	}

	switch node := stmt.(type) {
	case *ast.CreateTableStmt:
		err = parseTable(table, node)
	default:
		err = errors.Errorf("invalid statement - %v", stmt.Text())
	}

	return errors.Trace(err)
}

func parseIndex(table *table, stmt *ast.CreateIndexStmt) error {
	if table.name != stmt.Table.Name.L {
		return errors.Errorf("mismatch table name for create index - %s : %s", table.name, stmt.Table.Name.L)
	}

	for _, indexCol := range stmt.IndexPartSpecifications {
		name := indexCol.Column.Name.L
		if stmt.KeyType == ast.IndexKeyTypeUnique {
			table.uniqIndices[name] = table.findCol(table.columns, name)
		} else {
			table.indices[name] = table.findCol(table.columns, name)
		}
	}

	return nil
}

func parseIndexSQL(table *table, sql string) error {
	if len(sql) == 0 {
		return nil
	}

	stmt, err := parser.New().ParseOneStmt(sql, "", "")
	if err != nil {
		return errors.Trace(err)
	}

	switch node := stmt.(type) {
	case *ast.CreateIndexStmt:
		err = parseIndex(table, node)
	default:
		err = errors.Errorf("invalid statement - %v", stmt.Text())
	}

	return errors.Trace(err)
}
pkg/util/importer/parser.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00019403694022912532, 0.00017298692546319216, 0.00016459973994642496, 0.0001713473175186664, 0.000006199951712915208 ]
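The `[[range=1,10;step=1]]` comment-rule syntax handled by parseColumnComment/parseRule above can be shown in isolation; since the importer helpers are unexported, this sketch re-implements just the bracket extraction (the function name extractRules is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// extractRules mirrors column.parseColumnComment: it takes the text between "[[" and "]]"
// and splits it into key=value rules separated by ';'.
func extractRules(comment string) map[string]string {
	start := strings.Index(comment, "[[")
	end := strings.Index(comment, "]]")
	rules := make(map[string]string)
	if start < 0 || end < 0 || start >= end {
		return rules
	}
	for _, field := range strings.Split(comment[start+2:end], ";") {
		if kv := strings.SplitN(field, "=", 2); len(kv) == 2 {
			rules[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
		}
	}
	return rules
}

func main() {
	// For `a int unique comment '[[range=1,10;step=1]]'` the generator would draw values 1,2,...,10.
	fmt.Println(extractRules("[[range=1,10;step=1]]")) // map[range:1,10 step:1]
}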
{ "id": 3, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/trace\"\n", "\t\"github.com/pingcap/tidb/br/pkg/utils\"\n", "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n", "\t\"sourcegraph.com/sourcegraph/appdash\"\n", ")\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 18 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gctuner import ( "math" "runtime/debug" "time" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/memory" atomicutil "go.uber.org/atomic" ) // GlobalMemoryLimitTuner only allow one memory limit tuner in one process var GlobalMemoryLimitTuner = &memoryLimitTuner{} // Go runtime trigger GC when hit memory limit which managed via runtime/debug.SetMemoryLimit. // So we can change memory limit dynamically to avoid frequent GC when memory usage is greater than the limit. type memoryLimitTuner struct { finalizer *finalizer isValidValueSet atomicutil.Bool percentage atomicutil.Float64 adjustPercentageInProgress atomicutil.Bool serverMemLimitBeforeAdjust atomicutil.Uint64 percentageBeforeAdjust atomicutil.Float64 nextGCTriggeredByMemoryLimit atomicutil.Bool } // fallbackPercentage indicates the fallback memory limit percentage when turning. const fallbackPercentage float64 = 1.1 var memoryGoroutineCntInTest = *atomicutil.NewInt64(0) // WaitMemoryLimitTunerExitInTest is used to wait memory limit tuner exit in test. func WaitMemoryLimitTunerExitInTest() { if intest.InTest { for memoryGoroutineCntInTest.Load() > 0 { time.Sleep(100 * time.Millisecond) } } } // tuning check the memory nextGC and judge whether this GC is trigger by memory limit. // Go runtime ensure that it will be called serially. func (t *memoryLimitTuner) tuning() { if !t.isValidValueSet.Load() { return } r := memory.ForceReadMemStats() gogc := util.GetGOGC() ratio := float64(100+gogc) / 100 // This `if` checks whether the **last** GC was triggered by MemoryLimit as far as possible. // If the **last** GC was triggered by MemoryLimit, we'll set MemoryLimit to MAXVALUE to return control back to GOGC // to avoid frequent GC when memory usage fluctuates above and below MemoryLimit. // The logic we judge whether the **last** GC was triggered by MemoryLimit is as follows: // suppose `NextGC` = `HeapInUse * (100 + GOGC) / 100)`, // - If NextGC < MemoryLimit, the **next** GC will **not** be triggered by MemoryLimit thus we do not care about // why the **last** GC is triggered. And MemoryLimit will not be reset this time. // - Only if NextGC >= MemoryLimit , the **next** GC will be triggered by MemoryLimit. Thus, we need to reset // MemoryLimit after the **next** GC happens if needed. if float64(r.HeapInuse)*ratio > float64(debug.SetMemoryLimit(-1)) { if t.nextGCTriggeredByMemoryLimit.Load() && t.adjustPercentageInProgress.CompareAndSwap(false, true) { // It's ok to update `adjustPercentageInProgress`, `serverMemLimitBeforeAdjust` and `percentageBeforeAdjust` not in a transaction. // The update of memory limit is eventually consistent. 
t.serverMemLimitBeforeAdjust.Store(memory.ServerMemoryLimit.Load()) t.percentageBeforeAdjust.Store(t.GetPercentage()) go func() { if intest.InTest { memoryGoroutineCntInTest.Inc() defer memoryGoroutineCntInTest.Dec() } memory.MemoryLimitGCLast.Store(time.Now()) memory.MemoryLimitGCTotal.Add(1) debug.SetMemoryLimit(t.calcMemoryLimit(fallbackPercentage)) resetInterval := 1 * time.Minute // Wait 1 minute and set back, to avoid frequent GC if intest.InTest { resetInterval = 3 * time.Second } failpoint.Inject("mockUpdateGlobalVarDuringAdjustPercentage", func(val failpoint.Value) { if val, ok := val.(bool); val && ok { time.Sleep(300 * time.Millisecond) t.UpdateMemoryLimit() } }) failpoint.Inject("testMemoryLimitTuner", func(val failpoint.Value) { if val, ok := val.(bool); val && ok { resetInterval = 1 * time.Second } }) time.Sleep(resetInterval) debug.SetMemoryLimit(t.calcMemoryLimit(t.GetPercentage())) for !t.adjustPercentageInProgress.CompareAndSwap(true, false) { continue } }() memory.TriggerMemoryLimitGC.Store(true) } t.nextGCTriggeredByMemoryLimit.Store(true) } else { t.nextGCTriggeredByMemoryLimit.Store(false) memory.TriggerMemoryLimitGC.Store(false) } } // Start starts the memory limit tuner. func (t *memoryLimitTuner) Start() { t.finalizer = newFinalizer(t.tuning) // Start tuning } // Stop stops the memory limit tuner. func (t *memoryLimitTuner) Stop() { t.finalizer.stop() } // SetPercentage sets the percentage for the memory limit tuner. func (t *memoryLimitTuner) SetPercentage(percentage float64) { t.percentage.Store(percentage) } // GetPercentage gets the percentage from the memory limit tuner. func (t *memoryLimitTuner) GetPercentage() float64 { return t.percentage.Load() } // UpdateMemoryLimit updates the memory limit. // This function should be called when `tidb_server_memory_limit` or `tidb_server_memory_limit_gc_trigger` is modified. func (t *memoryLimitTuner) UpdateMemoryLimit() { if t.adjustPercentageInProgress.Load() { if t.serverMemLimitBeforeAdjust.Load() == memory.ServerMemoryLimit.Load() && t.percentageBeforeAdjust.Load() == t.GetPercentage() { return } } var memoryLimit = t.calcMemoryLimit(t.GetPercentage()) if memoryLimit == math.MaxInt64 { t.isValidValueSet.Store(false) memoryLimit = initGOMemoryLimitValue } else { t.isValidValueSet.Store(true) } debug.SetMemoryLimit(memoryLimit) } func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 { memoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger` if memoryLimit == 0 { memoryLimit = math.MaxInt64 } return memoryLimit } var initGOMemoryLimitValue int64 func init() { initGOMemoryLimitValue = debug.SetMemoryLimit(-1) GlobalMemoryLimitTuner.Start() }
pkg/util/gctuner/memory_limit_tuner.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00394661957398057, 0.000379571138182655, 0.0001634587679291144, 0.00017062490223906934, 0.0008651416283100843 ]
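The tuning logic in the file above turns on a single rule: the next GC's heap goal is roughly HeapInuse * (100 + GOGC) / 100, and the tuner only intervenes when that goal crosses the soft limit returned by debug.SetMemoryLimit(-1). A minimal, self-contained sketch of that rule follows; estimateNextGC and willHitMemoryLimit are illustrative names, not part of the gctuner package:

package main

import (
	"fmt"
	"runtime/debug"
)

// estimateNextGC approximates the heap goal of the next GC cycle:
// NextGC ~= HeapInuse * (100 + GOGC) / 100.
func estimateNextGC(heapInuse uint64, gogc int) float64 {
	return float64(heapInuse) * float64(100+gogc) / 100
}

// willHitMemoryLimit reports whether the next GC would be triggered by the
// soft memory limit rather than by GOGC.
func willHitMemoryLimit(heapInuse uint64, gogc int) bool {
	limit := debug.SetMemoryLimit(-1) // a negative input reads the limit without changing it
	return estimateNextGC(heapInuse, gogc) > float64(limit)
}

func main() {
	// With a 1 GiB limit, GOGC=100 and 600 MiB of heap in use, the estimated
	// goal (~1.17 GiB) exceeds the limit, so the limit would trigger the GC.
	debug.SetMemoryLimit(1 << 30)
	fmt.Println(willHitMemoryLimit(600<<20, 100)) // true
}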
{ "id": 3, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/trace\"\n", "\t\"github.com/pingcap/tidb/br/pkg/utils\"\n", "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n", "\t\"sourcegraph.com/sourcegraph/appdash\"\n", ")\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 18 }
// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logutil import ( "fmt" "time" "github.com/pingcap/errors" "github.com/pingcap/log" "go.uber.org/zap" "go.uber.org/zap/buffer" "go.uber.org/zap/zapcore" ) var _pool = buffer.NewPool() func newSlowQueryLogger(cfg *LogConfig) (*zap.Logger, *log.ZapProperties, error) { // create the slow query logger sqLogger, prop, err := log.InitLogger(newSlowQueryLogConfig(cfg)) if err != nil { return nil, nil, errors.Trace(err) } // replace 2018-12-19-unified-log-format text encoder with slow log encoder newCore := log.NewTextCore(&slowLogEncoder{}, prop.Syncer, prop.Level) sqLogger = sqLogger.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { return newCore })) prop.Core = newCore return sqLogger, prop, nil } func newSlowQueryLogConfig(cfg *LogConfig) *log.Config { // copy the global log config to slow log config // if the filename of slow log config is empty, slow log will behave the same as global log. sqConfig := cfg.Config // level of the global log config doesn't affect the slow query logger which determines whether to // log by execution duration. sqConfig.Level = LogConfig{}.Level if len(cfg.SlowQueryFile) != 0 { sqConfig.File = cfg.File sqConfig.File.Filename = cfg.SlowQueryFile } return &sqConfig } type slowLogEncoder struct{} func (*slowLogEncoder) EncodeEntry(entry zapcore.Entry, _ []zapcore.Field) (*buffer.Buffer, error) { b := _pool.Get() fmt.Fprintf(b, "# Time: %s\n", entry.Time.Format(SlowLogTimeFormat)) fmt.Fprintf(b, "%s\n", entry.Message) return b, nil } func (e *slowLogEncoder) Clone() zapcore.Encoder { return e } func (*slowLogEncoder) AddArray(string, zapcore.ArrayMarshaler) error { return nil } func (*slowLogEncoder) AddObject(string, zapcore.ObjectMarshaler) error { return nil } func (*slowLogEncoder) AddBinary(string, []byte) {} func (*slowLogEncoder) AddByteString(string, []byte) {} func (*slowLogEncoder) AddBool(string, bool) {} func (*slowLogEncoder) AddComplex128(string, complex128) {} func (*slowLogEncoder) AddComplex64(string, complex64) {} func (*slowLogEncoder) AddDuration(string, time.Duration) {} func (*slowLogEncoder) AddFloat64(string, float64) {} func (*slowLogEncoder) AddFloat32(string, float32) {} func (*slowLogEncoder) AddInt(string, int) {} func (*slowLogEncoder) AddInt64(string, int64) {} func (*slowLogEncoder) AddInt32(string, int32) {} func (*slowLogEncoder) AddInt16(string, int16) {} func (*slowLogEncoder) AddInt8(string, int8) {} func (*slowLogEncoder) AddString(string, string) {} func (*slowLogEncoder) AddTime(string, time.Time) {} func (*slowLogEncoder) AddUint(string, uint) {} func (*slowLogEncoder) AddUint64(string, uint64) {} func (*slowLogEncoder) AddUint32(string, uint32) {} func (*slowLogEncoder) AddUint16(string, uint16) {} func (*slowLogEncoder) AddUint8(string, uint8) {} func (*slowLogEncoder) AddUintptr(string, uintptr) {} func (*slowLogEncoder) AddReflected(string, any) error { return nil } func (*slowLogEncoder) OpenNamespace(string) {}
pkg/util/logutil/slow_query_logger.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.003381245071068406, 0.0005664900527335703, 0.0001654882653383538, 0.00017429243598598987, 0.0009533845004625618 ]
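The encoder in the record above drops zap's structured fields and reduces every entry to a two-line slow-log record: a "# Time:" header followed by the raw message. A small sketch of the resulting format; the layout constant below is an assumed stand-in, since the value of logutil.SlowLogTimeFormat is not shown in this record:

package main

import (
	"fmt"
	"time"
)

// slowLogTimeFormat stands in for logutil.SlowLogTimeFormat (assumed layout).
const slowLogTimeFormat = "2006-01-02T15:04:05.999999999-07:00"

func main() {
	entryTime := time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)
	msg := "# Query_time: 1.5\nselect * from t;"
	// Mirrors slowLogEncoder.EncodeEntry: a header line, then the message.
	fmt.Printf("# Time: %s\n%s\n", entryTime.Format(slowLogTimeFormat), msg)
}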
{ "id": 3, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/trace\"\n", "\t\"github.com/pingcap/tidb/br/pkg/utils\"\n", "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n", "\t\"sourcegraph.com/sourcegraph/appdash\"\n", ")\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 18 }
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package expression import ( "math/rand" "testing" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/mock" ) func genCastIntAsInt(ctx BuildContext) (*builtinCastIntAsIntSig, *chunk.Chunk, *chunk.Column) { col := &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0} baseFunc, err := newBaseBuiltinFunc(ctx, "", []Expression{col}, types.NewFieldType(mysql.TypeLonglong)) if err != nil { panic(err) } baseCast := newBaseBuiltinCastFunc(baseFunc, false) cast := &builtinCastIntAsIntSig{baseCast} input := chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, 1024) for i := 0; i < 1024; i++ { input.AppendInt64(0, rand.Int63n(10000)-5000) } result := chunk.NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024) return cast, input, result } func BenchmarkCastIntAsIntRow(b *testing.B) { ctx := mock.NewContext() cast, input, _ := genCastIntAsInt(ctx) it := chunk.NewIterator4Chunk(input) b.ResetTimer() for i := 0; i < b.N; i++ { for row := it.Begin(); row != it.End(); row = it.Next() { if _, _, err := cast.evalInt(ctx, row); err != nil { b.Fatal(err) } } } } func BenchmarkCastIntAsIntVec(b *testing.B) { ctx := mock.NewContext() cast, input, result := genCastIntAsInt(ctx) b.ResetTimer() for i := 0; i < b.N; i++ { if err := cast.vecEvalInt(ctx, input, result); err != nil { b.Fatal(err) } } }
pkg/expression/builtin_cast_bench_test.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0016553954919800162, 0.00038367180968634784, 0.00016691374185029417, 0.0001712760131340474, 0.0005191859672777355 ]
{ "id": 3, "code_window": [ "\t\"github.com/pingcap/tidb/br/pkg/trace\"\n", "\t\"github.com/pingcap/tidb/br/pkg/utils\"\n", "\t\"github.com/pingcap/tidb/br/pkg/version/build\"\n", "\t\"github.com/pingcap/tidb/pkg/config\"\n", "\t\"github.com/pingcap/tidb/pkg/session\"\n", "\t\"github.com/pingcap/tidb/pkg/util/metricsutil\"\n", "\t\"github.com/spf13/cobra\"\n", "\t\"go.uber.org/zap\"\n", "\t\"sourcegraph.com/sourcegraph/appdash\"\n", ")\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"github.com/pingcap/tidb/pkg/util/gctuner\"\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 18 }
CREATE TABLE `no_pk` ( `id` int(11) NOT NULL, v int )ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
br/tests/lightning_csv/data/no_auto_incr_id.no_pk-schema.sql
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00019646943837869912, 0.00019646943837869912, 0.00019646943837869912, 0.00019646943837869912, 0 ]
{ "id": 4, "code_window": [ "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to restore\", zap.Error(err))\n", "\t\tprintWorkaroundOnFullRestoreError(command, err)\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 69 }
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. package main import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/gluetikv" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/task" "github.com/pingcap/tidb/br/pkg/trace" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/version/build" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/util/metricsutil" "github.com/spf13/cobra" "go.uber.org/zap" "sourcegraph.com/sourcegraph/appdash" ) func runBackupCommand(command *cobra.Command, cmdName string) error { cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { command.SilenceUsage = false return errors.Trace(err) } if err := metricsutil.RegisterMetricsForBR(cfg.PD, cfg.KeyspaceName); err != nil { return errors.Trace(err) } ctx := GetDefaultContext() if cfg.EnableOpenTracing { var store *appdash.MemoryStore ctx, store = trace.TracerStartSpan(ctx) defer trace.TracerFinishSpan(ctx, store) } if cfg.FullBackupType == task.FullBackupTypeEBS { if err := task.RunBackupEBS(ctx, tidbGlue, &cfg); err != nil { log.Error("failed to backup", zap.Error(err)) return errors.Trace(err) } return nil } // No need to cache the coprocessor result config.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0 if err := task.RunBackup(ctx, tidbGlue, cmdName, &cfg); err != nil { log.Error("failed to backup", zap.Error(err)) return errors.Trace(err) } return nil } func runBackupRawCommand(command *cobra.Command, cmdName string) error { cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseBackupConfigFromFlags(command.Flags()); err != nil { command.SilenceUsage = false return errors.Trace(err) } ctx := GetDefaultContext() if cfg.EnableOpenTracing { var store *appdash.MemoryStore ctx, store = trace.TracerStartSpan(ctx) defer trace.TracerFinishSpan(ctx, store) } if err := task.RunBackupRaw(ctx, gluetikv.Glue{}, cmdName, &cfg); err != nil { log.Error("failed to backup raw kv", zap.Error(err)) return errors.Trace(err) } return nil } func runBackupTxnCommand(command *cobra.Command, cmdName string) error { cfg := task.TxnKvConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseBackupConfigFromFlags(command.Flags()); err != nil { command.SilenceUsage = false return errors.Trace(err) } ctx := GetDefaultContext() if cfg.EnableOpenTracing { var store *appdash.MemoryStore ctx, store = trace.TracerStartSpan(ctx) defer trace.TracerFinishSpan(ctx, store) } if err := task.RunBackupTxn(ctx, gluetikv.Glue{}, cmdName, &cfg); err != nil { log.Error("failed to backup txn kv", zap.Error(err)) return errors.Trace(err) } return nil } // NewBackupCommand returns a full backup subcommand. func NewBackupCommand() *cobra.Command { command := &cobra.Command{ Use: "backup", Short: "backup a TiDB/TiKV cluster", SilenceUsage: true, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return errors.Trace(err) } build.LogInfo(build.BR) utils.LogEnvVariables() task.LogArguments(c) // Do not run stat worker in BR. session.DisableStats4Test() // Do not run ddl worker in BR. 
config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false) summary.SetUnit(summary.BackupUnit) return nil }, } command.AddCommand( newFullBackupCommand(), newDBBackupCommand(), newTableBackupCommand(), newRawBackupCommand(), newTxnBackupCommand(), ) task.DefineBackupFlags(command.PersistentFlags()) return command } // newFullBackupCommand returns a full backup subcommand. func newFullBackupCommand() *cobra.Command { command := &cobra.Command{ Use: "full", Short: "backup all database", // prevents incorrect usage like `--checksum false` instead of `--checksum=false`. // the former, according to pflag parsing rules, means `--checksum=true false`. Args: cobra.NoArgs, RunE: func(command *cobra.Command, _ []string) error { // empty db/table means full backup. return runBackupCommand(command, task.FullBackupCmd) }, } task.DefineFilterFlags(command, acceptAllTables, false) task.DefineBackupEBSFlags(command.PersistentFlags()) return command } // newDBBackupCommand returns a db backup subcommand. func newDBBackupCommand() *cobra.Command { command := &cobra.Command{ Use: "db", Short: "backup a database", Args: cobra.NoArgs, RunE: func(command *cobra.Command, _ []string) error { return runBackupCommand(command, task.DBBackupCmd) }, } task.DefineDatabaseFlags(command) return command } // newTableBackupCommand returns a table backup subcommand. func newTableBackupCommand() *cobra.Command { command := &cobra.Command{ Use: "table", Short: "backup a table", Args: cobra.NoArgs, RunE: func(command *cobra.Command, _ []string) error { return runBackupCommand(command, task.TableBackupCmd) }, } task.DefineTableFlags(command) return command } // newRawBackupCommand returns a raw kv range backup subcommand. func newRawBackupCommand() *cobra.Command { // TODO: remove experimental tag if it's stable command := &cobra.Command{ Use: "raw", Short: "(experimental) backup a raw kv range from TiKV cluster", Args: cobra.NoArgs, RunE: func(command *cobra.Command, _ []string) error { return runBackupRawCommand(command, task.RawBackupCmd) }, } task.DefineRawBackupFlags(command) return command } // newTxnBackupCommand returns a txn kv range backup subcommand. func newTxnBackupCommand() *cobra.Command { command := &cobra.Command{ Use: "txn", Short: "(experimental) backup a txn kv range from TiKV cluster", Args: cobra.NoArgs, RunE: func(command *cobra.Command, _ []string) error { return runBackupTxnCommand(command, task.TxnBackupCmd) }, } task.DefineTxnBackupFlags(command) return command }
br/cmd/br/backup.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.9790729284286499, 0.24031184613704681, 0.00016631274775136262, 0.002208525314927101, 0.3895549476146698 ]
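The id=4 window in this record targets br/cmd/br/restore.go: during a restore, the server memory value feeding the tuner comes from the TiDB node rather than the BR process, so letting BR's own tuner act on it would be wrong. A condensed sketch of the pattern the after_edit introduces, where restoreLikeTask is an illustrative wrapper, not a function in br/cmd/br:

package main

import "github.com/pingcap/tidb/pkg/util/gctuner"

// restoreLikeTask pauses memory-limit adjustment for the duration of a task,
// mirroring the Disable/Enable pair the hunk adds just before task.RunRestore.
func restoreLikeTask(run func() error) error {
	gctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()
	defer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()
	return run()
}

func main() {
	_ = restoreLikeTask(func() error { return nil })
}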
{ "id": 4, "code_window": [ "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to restore\", zap.Error(err))\n", "\t\tprintWorkaroundOnFullRestoreError(command, err)\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 69 }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package expression import ( "testing" "time" "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/mock" "github.com/pingcap/tipb/go-tipb" "github.com/stretchr/testify/require" ) func TestPBToExpr(t *testing.T) { ctx := mock.NewContext() fieldTps := make([]*types.FieldType, 1) ds := []types.Datum{types.NewIntDatum(1), types.NewUintDatum(1), types.NewFloat64Datum(1), types.NewDecimalDatum(newMyDecimal(t, "1")), types.NewDurationDatum(newDuration(time.Second))} for _, d := range ds { expr := datumExpr(t, d) expr.Val = expr.Val[:len(expr.Val)/2] _, err := PBToExpr(ctx, expr, fieldTps) require.Error(t, err) } expr := &tipb.Expr{ Tp: tipb.ExprType_ScalarFunc, Children: []*tipb.Expr{ { Tp: tipb.ExprType_ValueList, }, }, } _, err := PBToExpr(ctx, expr, fieldTps) require.NoError(t, err) val := make([]byte, 0, 32) val = codec.EncodeInt(val, 1) expr = &tipb.Expr{ Tp: tipb.ExprType_ScalarFunc, Children: []*tipb.Expr{ { Tp: tipb.ExprType_ValueList, Val: val[:len(val)/2], }, }, } _, err = PBToExpr(ctx, expr, fieldTps) require.Error(t, err) expr = &tipb.Expr{ Tp: tipb.ExprType_ScalarFunc, Children: []*tipb.Expr{ { Tp: tipb.ExprType_ValueList, Val: val, }, }, Sig: tipb.ScalarFuncSig_AbsInt, FieldType: ToPBFieldType(newIntFieldType()), } _, err = PBToExpr(ctx, expr, fieldTps) require.Error(t, err) } // TestEval test expr.Eval(). func TestEval(t *testing.T) { row := chunk.MutRowFromDatums([]types.Datum{types.NewDatum(100)}).ToRow() fieldTps := make([]*types.FieldType, 1) fieldTps[0] = types.NewFieldType(mysql.TypeLonglong) tests := []struct { expr *tipb.Expr result types.Datum }{ // Datums. { datumExpr(t, types.NewFloat32Datum(1.1)), types.NewFloat32Datum(1.1), }, { datumExpr(t, types.NewFloat64Datum(1.1)), types.NewFloat64Datum(1.1), }, { datumExpr(t, types.NewIntDatum(1)), types.NewIntDatum(1), }, { datumExpr(t, types.NewUintDatum(1)), types.NewUintDatum(1), }, { datumExpr(t, types.NewBytesDatum([]byte("abc"))), types.NewBytesDatum([]byte("abc")), }, { datumExpr(t, types.NewStringDatum("abc")), types.NewStringDatum("abc"), }, { datumExpr(t, types.Datum{}), types.Datum{}, }, { datumExpr(t, types.NewDurationDatum(types.Duration{Duration: time.Hour})), types.NewDurationDatum(types.Duration{Duration: time.Hour}), }, { datumExpr(t, types.NewDecimalDatum(types.NewDecFromFloatForTest(1.1))), types.NewDecimalDatum(types.NewDecFromFloatForTest(1.1)), }, // Columns. { columnExpr(0), types.NewIntDatum(100), }, // Scalar Functions. 
{ scalarFunctionExpr(tipb.ScalarFuncSig_JsonDepthSig, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, `true`), ), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_JsonDepthSig, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, `[10, {"a": 20}]`), ), types.NewIntDatum(3), }, { scalarFunctionExpr(tipb.ScalarFuncSig_JsonStorageSizeSig, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, `[{"a":{"a":1},"b":2}]`), ), types.NewIntDatum(82), }, { scalarFunctionExpr(tipb.ScalarFuncSig_JsonSearchSig, toPBFieldType(newJSONFieldType()), jsonDatumExpr(t, `["abc", [{"k": "10"}, "def"], {"x":"abc"}, {"y":"bcd"}]`), datumExpr(t, types.NewBytesDatum([]byte(`all`))), datumExpr(t, types.NewBytesDatum([]byte(`10`))), datumExpr(t, types.NewBytesDatum([]byte(`\`))), datumExpr(t, types.NewBytesDatum([]byte(`$**.k`))), ), newJSONDatum(t, `"$[1][0].k"`), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("2333"))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewIntDatum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewStringDatum("2333"))), types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewFloat64Datum(2333), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewStringDatum("2333"))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewStringDatum("2333"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2333")))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(2333))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewFloat64Datum(2333))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { 
scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewStringDatum("2333"))), types.NewDecimalDatum(newMyDecimal(t, "2333")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(2)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(2)), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LTReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LTDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2))), datumExpr(t, 
types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GTDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("1")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("1")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("2")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GTJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[2]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_GEJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[2]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LTJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[2]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LEJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[2]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_EQJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NEJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[2]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_NullEQJson, toPBFieldType(newIntFieldType()), jsonDatumExpr(t, "[1]"), jsonDatumExpr(t, "[1]")), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DecimalIsNull, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DurationIsNull, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_RealIsNull, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil))), types.NewIntDatum(1), }, { 
scalarFunctionExpr(tipb.ScalarFuncSig_LeftShift, ToPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(1)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(-1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsUInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewUintDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(-1.23))), types.NewFloat64Datum(1.23), }, { scalarFunctionExpr(tipb.ScalarFuncSig_AbsDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "-1.23")))), types.NewDecimalDatum(newMyDecimal(t, "1.23")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LogicalAnd, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LogicalOr, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_LogicalXor, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitAndSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitOrSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitXorSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_BitNegSig, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(0))), types.NewIntDatum(-1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_InReal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_InDecimal, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_InString, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewStringDatum("1")), datumExpr(t, types.NewStringDatum("1"))), types.NewIntDatum(1), }, // { // scalarFunctionExpr(tipb.ScalarFuncSig_InTime, // toPBFieldType(newIntFieldType()), datumExpr(t, types.NewTimeDatum(types.ZeroDate)), datumExpr(t, types.NewTimeDatum(types.ZeroDate))), // types.NewIntDatum(1), // }, { scalarFunctionExpr(tipb.ScalarFuncSig_InDuration, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second))), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(2), }, { 
scalarFunctionExpr(tipb.ScalarFuncSig_IfNullReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "2")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewStringDatum("1"))), types.NewStringDatum("1"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewStringDatum("2"))), types.NewStringDatum("2"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfNullDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewDatum(nil)), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewDurationDatum(newDuration(time.Second)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_IfDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*2)))), types.NewDurationDatum(newDuration(time.Second * 2)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDurationAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second*1)))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewStringDatum("1"))), types.NewDurationDatum(newDuration(time.Second * 1)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastTimeAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewTimeDatum(newDateTime(t, "2000-01-01")))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastIntAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewIntDatum(20000101))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastRealAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewFloat64Datum(20000101))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CastDecimalAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "20000101")))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { 
scalarFunctionExpr(tipb.ScalarFuncSig_CastStringAsTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewStringDatum("20000101"))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_PlusInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(3), }, { scalarFunctionExpr(tipb.ScalarFuncSig_PlusDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "3")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_PlusReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(3), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MinusInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(-1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MinusDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "-1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MinusReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(-1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MultiplyInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1)), datumExpr(t, types.NewIntDatum(2))), types.NewIntDatum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MultiplyDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1"))), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "2")))), types.NewDecimalDatum(newMyDecimal(t, "2")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_MultiplyReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1)), datumExpr(t, types.NewFloat64Datum(2))), types.NewFloat64Datum(2), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilIntToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilIntToDec, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilDecToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CeilReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorIntToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorIntToDec, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewIntDatum(1))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorDecToInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_FloorReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceInt, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewIntDatum(1))), 
types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceReal, toPBFieldType(newRealFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewFloat64Datum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceDecimal, toPBFieldType(newDecimalFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewDecimalDatum(newMyDecimal(t, "1")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceString, toPBFieldType(newStringFieldType()), datumExpr(t, types.NewStringDatum("1"))), types.NewStringDatum("1"), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceDuration, toPBFieldType(newDurFieldType()), datumExpr(t, types.NewDurationDatum(newDuration(time.Second)))), types.NewDurationDatum(newDuration(time.Second)), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CoalesceTime, toPBFieldType(newDateFieldType()), datumExpr(t, types.NewTimeDatum(newDateTime(t, "2000-01-01")))), types.NewTimeDatum(newDateTime(t, "2000-01-01")), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenInt, toPBFieldType(newIntFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenReal, toPBFieldType(newRealFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenDecimal, toPBFieldType(newDecimalFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenDuration, toPBFieldType(newDurFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenTime, toPBFieldType(newDateFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_CaseWhenJson, toPBFieldType(newJSONFieldType())), types.NewDatum(nil), }, { scalarFunctionExpr(tipb.ScalarFuncSig_RealIsFalse, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(0), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DecimalIsFalse, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(0), }, { scalarFunctionExpr(tipb.ScalarFuncSig_RealIsTrue, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewFloat64Datum(1))), types.NewIntDatum(1), }, { scalarFunctionExpr(tipb.ScalarFuncSig_DecimalIsTrue, toPBFieldType(newIntFieldType()), datumExpr(t, types.NewDecimalDatum(newMyDecimal(t, "1")))), types.NewIntDatum(1), }, } ctx := mock.NewContext() for _, tt := range tests { expr, err := PBToExpr(ctx, tt.expr, fieldTps) require.NoError(t, err) result, err := expr.Eval(ctx, row) require.NoError(t, err) require.Equal(t, tt.result.Kind(), result.Kind()) cmp, err := result.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &tt.result, collate.GetCollator(fieldTps[0].GetCollate())) require.NoError(t, err) require.Equal(t, 0, cmp) } } func TestPBToExprWithNewCollation(t *testing.T) { collate.SetNewCollationEnabledForTest(false) ctx := mock.NewContext() fieldTps := make([]*types.FieldType, 1) cases := []struct { name string expName string id int32 pbID int32 }{ {"utf8_general_ci", "utf8_general_ci", 33, 33}, {"UTF8MB4_BIN", "utf8mb4_bin", 46, 46}, {"utf8mb4_bin", "utf8mb4_bin", 46, 46}, {"utf8mb4_general_ci", "utf8mb4_general_ci", 45, 45}, {"", "utf8mb4_bin", 46, 46}, {"some_error_collation", "utf8mb4_bin", 46, 46}, {"utf8_unicode_ci", "utf8_unicode_ci", 192, 192}, {"utf8mb4_unicode_ci", "utf8mb4_unicode_ci", 224, 224}, {"utf8mb4_zh_pinyin_tidb_as_cs", "utf8mb4_zh_pinyin_tidb_as_cs", 2048, 2048}, } for _, cs := range cases { ft := types.NewFieldType(mysql.TypeString) ft.SetCollate(cs.name) expr := new(tipb.Expr) expr.Tp = tipb.ExprType_String 
expr.FieldType = toPBFieldType(ft) require.Equal(t, cs.pbID, expr.FieldType.Collate) e, err := PBToExpr(ctx, expr, fieldTps) require.NoError(t, err) cons, ok := e.(*Constant) require.True(t, ok) require.Equal(t, cs.expName, cons.Value.Collation()) } collate.SetNewCollationEnabledForTest(true) for _, cs := range cases { ft := types.NewFieldType(mysql.TypeString) ft.SetCollate(cs.name) expr := new(tipb.Expr) expr.Tp = tipb.ExprType_String expr.FieldType = toPBFieldType(ft) require.Equal(t, -cs.pbID, expr.FieldType.Collate) e, err := PBToExpr(ctx, expr, fieldTps) require.NoError(t, err) cons, ok := e.(*Constant) require.True(t, ok) require.Equal(t, cs.expName, cons.Value.Collation()) } } // Test convert various scalar functions. func TestPBToScalarFuncExpr(t *testing.T) { ctx := mock.NewContext() fieldTps := make([]*types.FieldType, 1) exprs := []*tipb.Expr{ { Tp: tipb.ExprType_ScalarFunc, Sig: tipb.ScalarFuncSig_RegexpSig, FieldType: ToPBFieldType(newStringFieldType()), }, { Tp: tipb.ExprType_ScalarFunc, Sig: tipb.ScalarFuncSig_RegexpUTF8Sig, FieldType: ToPBFieldType(newStringFieldType()), }, } for _, expr := range exprs { _, err := PBToExpr(ctx, expr, fieldTps) require.NoError(t, err) } } func datumExpr(t *testing.T, d types.Datum) *tipb.Expr { expr := new(tipb.Expr) switch d.Kind() { case types.KindInt64: expr.Tp = tipb.ExprType_Int64 expr.FieldType = toPBFieldType(types.NewFieldType(mysql.TypeLonglong)) expr.Val = codec.EncodeInt(nil, d.GetInt64()) case types.KindUint64: expr.Tp = tipb.ExprType_Uint64 expr.FieldType = toPBFieldType(types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetFlag(mysql.UnsignedFlag).BuildP()) expr.Val = codec.EncodeUint(nil, d.GetUint64()) case types.KindString: expr.Tp = tipb.ExprType_String expr.FieldType = toPBFieldType(types.NewFieldType(mysql.TypeString)) expr.Val = d.GetBytes() case types.KindBytes: expr.Tp = tipb.ExprType_Bytes expr.Val = d.GetBytes() case types.KindFloat32: expr.Tp = tipb.ExprType_Float32 expr.Val = codec.EncodeFloat(nil, d.GetFloat64()) case types.KindFloat64: expr.Tp = tipb.ExprType_Float64 expr.Val = codec.EncodeFloat(nil, d.GetFloat64()) case types.KindMysqlDuration: expr.Tp = tipb.ExprType_MysqlDuration expr.Val = codec.EncodeInt(nil, int64(d.GetMysqlDuration().Duration)) case types.KindMysqlDecimal: expr.Tp = tipb.ExprType_MysqlDecimal expr.FieldType = toPBFieldType(types.NewFieldType(mysql.TypeNewDecimal)) var err error expr.Val, err = codec.EncodeDecimal(nil, d.GetMysqlDecimal(), d.Length(), d.Frac()) require.NoError(t, err) case types.KindMysqlJSON: expr.Tp = tipb.ExprType_MysqlJson var err error expr.Val = make([]byte, 0, 1024) expr.Val, err = codec.EncodeValue(time.UTC, expr.Val, d) require.NoError(t, err) case types.KindMysqlTime: expr.Tp = tipb.ExprType_MysqlTime var err error expr.Val, err = codec.EncodeMySQLTime(nil, d.GetMysqlTime(), mysql.TypeUnspecified, nil) require.NoError(t, err) expr.FieldType = ToPBFieldType(newDateFieldType()) default: expr.Tp = tipb.ExprType_Null } return expr } func newJSONDatum(t *testing.T, s string) (d types.Datum) { j, err := types.ParseBinaryJSONFromString(s) require.NoError(t, err) d.SetMysqlJSON(j) return d } func jsonDatumExpr(t *testing.T, s string) *tipb.Expr { return datumExpr(t, newJSONDatum(t, s)) } func columnExpr(columnID int64) *tipb.Expr { expr := new(tipb.Expr) expr.Tp = tipb.ExprType_ColumnRef expr.Val = codec.EncodeInt(nil, columnID) return expr } // toPBFieldType converts *types.FieldType to *tipb.FieldType. 
func toPBFieldType(ft *types.FieldType) *tipb.FieldType { return &tipb.FieldType{ Tp: int32(ft.GetType()), Flag: uint32(ft.GetFlag()), Flen: int32(ft.GetFlen()), Decimal: int32(ft.GetDecimal()), Charset: ft.GetCharset(), Collate: collate.CollationToProto(ft.GetCollate()), Elems: ft.GetElems(), } } func newMyDecimal(t *testing.T, s string) *types.MyDecimal { d := new(types.MyDecimal) require.Nil(t, d.FromString([]byte(s))) return d } func newDuration(dur time.Duration) types.Duration { return types.Duration{ Duration: dur, Fsp: types.DefaultFsp, } } func newDateTime(t *testing.T, s string) types.Time { tt, err := types.ParseDate(types.DefaultStmtNoWarningContext, s) require.NoError(t, err) return tt } func newDateFieldType() *types.FieldType { return types.NewFieldType(mysql.TypeDate) } func newIntFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetFlag(mysql.BinaryFlag).SetFlen(mysql.MaxIntWidth).BuildP() } func newDurFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeDuration).SetDecimal(types.DefaultFsp).BuildP() } func newStringFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeVarString).SetFlen(types.UnspecifiedLength).BuildP() } func newRealFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeFloat).SetFlen(types.UnspecifiedLength).BuildP() } func newDecimalFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeNewDecimal).SetFlen(types.UnspecifiedLength).BuildP() } func newJSONFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeJSON).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP() } func newFloatFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeFloat).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP() } func newBinaryLiteralFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeBit).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP() } func newBlobFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeBlob).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP() } func newEnumFieldType() *types.FieldType { return types.NewFieldTypeBuilder().SetType(mysql.TypeEnum).SetFlen(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP() } func scalarFunctionExpr(sigCode tipb.ScalarFuncSig, retType *tipb.FieldType, args ...*tipb.Expr) *tipb.Expr { return &tipb.Expr{ Tp: tipb.ExprType_ScalarFunc, Sig: sigCode, Children: args, FieldType: retType, } }
pkg/expression/distsql_builtin_test.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0003953823761548847, 0.00017531565390527248, 0.00015968337538652122, 0.00017427807324565947, 0.000022122298105387017 ]
{ "id": 4, "code_window": [ "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to restore\", zap.Error(err))\n", "\t\tprintWorkaroundOnFullRestoreError(command, err)\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 69 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "linux", srcs = [ "sys_linux.go", "sys_other.go", "sys_windows.go", ], importpath = "github.com/pingcap/tidb/pkg/util/sys/linux", visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:aix": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:android": [ "@org_golang_x_exp//constraints", "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:darwin": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:dragonfly": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:freebsd": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:illumos": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:ios": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:js": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:linux": [ "@org_golang_x_exp//constraints", "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:netbsd": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:openbsd": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:plan9": [ "@org_golang_x_sys//unix", ], "@io_bazel_rules_go//go/platform:solaris": [ "@org_golang_x_sys//unix", ], "//conditions:default": [], }), ) go_test( name = "linux_test", timeout = "short", srcs = [ "main_test.go", "sys_test.go", ], flaky = True, deps = [ ":linux", "//pkg/testkit/testsetup", "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", ], )
pkg/util/sys/linux/BUILD.bazel
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001757124118739739, 0.00017366274551022798, 0.00017083383863791823, 0.00017400438082404435, 0.000001503191128904291 ]
{ "id": 4, "code_window": [ "\t// No need to cache the coproceesor result\n", "\tconfig.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0\n", "\n", "\tif err := task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg); err != nil {\n", "\t\tlog.Error(\"failed to restore\", zap.Error(err))\n", "\t\tprintWorkaroundOnFullRestoreError(command, err)\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Disable the memory limit tuner. That's because the server memory is get from TiDB node instead of BR node.\n", "\tgctuner.GlobalMemoryLimitTuner.DisableAdjustMemoryLimit()\n", "\tdefer gctuner.GlobalMemoryLimitTuner.EnableAdjustMemoryLimit()\n", "\n" ], "file_path": "br/cmd/br/restore.go", "type": "add", "edit_start_line_idx": 69 }
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cgroup import ( "os" "path/filepath" "regexp" "strings" "testing" "github.com/stretchr/testify/require" ) func isError(err error, re string) bool { if err == nil && re == "" { return true } if err == nil || re == "" { return false } matched, merr := regexp.MatchString(re, err.Error()) if merr != nil { return false } return matched } func TestCgroupsGetMemoryUsage(t *testing.T) { for _, tc := range []struct { name string paths map[string]string errMsg string value uint64 warn string }{ { errMsg: "failed to read memory cgroup from cgroups file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithoutMemoryController, "/proc/self/mountinfo": v1MountsWithoutMemController, }, warn: "no cgroup memory controller detected", value: 0, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, }, errMsg: "failed to read mounts info from file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, "/proc/self/mountinfo": v1MountsWithoutMemController, }, errMsg: "failed to detect cgroup root mount and version", value: 0, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, "/proc/self/mountinfo": v1MountsWithMemController, "/sys/fs/cgroup/memory/memory.usage_in_bytes": v1MemoryUsageInBytes, }, value: 276328448, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryControllerNS, "/proc/self/mountinfo": v1MountsWithMemControllerNS, "/sys/fs/cgroup/memory/cgroup_test/memory.usage_in_bytes": v1MemoryUsageInBytes, }, value: 276328448, }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, }, errMsg: "can't read memory.current from cgroup v2", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.current": "unparsable\n", }, errMsg: "failed to parse value in memory.current from cgroup v2", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.current": "276328448", }, value: 276328448, }, } { dir := createFiles(t, tc.paths) limit, err := getCgroupMemUsage(dir) require.True(t, isError(err, tc.errMsg), "%v %v", err, tc.errMsg) require.Equal(t, tc.value, limit) } } func TestCgroupsGetMemoryInactiveFileUsage(t *testing.T) { for _, tc := range []struct { name string paths map[string]string errMsg string value uint64 warn string }{ { errMsg: "failed to read memory cgroup from cgroups file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithoutMemoryController, "/proc/self/mountinfo": v1MountsWithoutMemController, }, warn: "no cgroup memory controller detected", value: 0, }, { paths: map[string]string{ 
"/proc/self/cgroup": v1CgroupWithMemoryController, }, errMsg: "failed to read mounts info from file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, "/proc/self/mountinfo": v1MountsWithoutMemController, }, errMsg: "failed to detect cgroup root mount and version", value: 0, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, "/proc/self/mountinfo": v1MountsWithMemController, "/sys/fs/cgroup/memory/memory.stat": v1MemoryStat, }, value: 1363746816, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryControllerNS, "/proc/self/mountinfo": v1MountsWithMemControllerNS, "/sys/fs/cgroup/memory/cgroup_test/memory.stat": v1MemoryStat, }, value: 1363746816, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithEccentricMemoryController, "/proc/self/mountinfo": v1MountsWithEccentricMemController, "/sys/fs/cgroup/memory/memory.stat": v1MemoryStat, }, value: 1363746816, }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, }, errMsg: "can't read file memory.stat from cgroup v2", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.stat": "inactive_file unparsable\n", }, errMsg: "can't read \"inactive_file\" memory stat from cgroup v2 in memory.stat", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.stat": v2MemoryStat, }, value: 1363746816, }, } { dir := createFiles(t, tc.paths) limit, err := getCgroupMemInactiveFileUsage(dir) require.True(t, isError(err, tc.errMsg), "%v %v", err, tc.errMsg) require.Equal(t, tc.value, limit) } } func TestCgroupsGetMemoryLimit(t *testing.T) { for _, tc := range []struct { name string paths map[string]string errMsg string limit uint64 warn string }{ { errMsg: "failed to read memory cgroup from cgroups file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithoutMemoryController, "/proc/self/mountinfo": v1MountsWithoutMemController, }, limit: 0, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, }, errMsg: "failed to read mounts info from file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, "/proc/self/mountinfo": v1MountsWithoutMemController, }, errMsg: "failed to detect cgroup root mount and version", limit: 0, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryController, "/proc/self/mountinfo": v1MountsWithMemController, "/sys/fs/cgroup/memory/memory.stat": v1MemoryStat, }, limit: 2936016896, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithMemoryControllerNS, "/proc/self/mountinfo": v1MountsWithMemControllerNS, "/sys/fs/cgroup/memory/cgroup_test/memory.stat": v1MemoryStat, }, limit: 2936016896, }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, }, errMsg: "can't read memory.max from cgroup v2", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.max": "unparsable\n", }, errMsg: "failed to parse value in 
memory.max from cgroup v2", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.max": "1073741824\n", }, limit: 1073741824, }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.max": "max\n", }, limit: 9223372036854775807, }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithEccentricMemoryController, "/proc/self/mountinfo": v1MountsWithEccentricMemController, "/sys/fs/cgroup/memory/memory.stat": v1MemoryStat, }, limit: 2936016896, }, } { dir := createFiles(t, tc.paths) limit, err := getCgroupMemLimit(dir) require.True(t, isError(err, tc.errMsg), "%v %v", err, tc.errMsg) require.Equal(t, tc.limit, limit) } } const ( v1CgroupWithEccentricMemoryController = ` 13:devices:/system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 12:freezer:/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 11:cpu,cpuacct:/system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 10:perf_event:/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 9:rdma:/ 8:pids:/system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 7:blkio:/system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 6:hugetlb:/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 5:memory:/system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 4:cpuset:/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 3:files:/ 2:net_cls,net_prio:/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 1:name=systemd:/system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 0::/ ` v1MountsWithEccentricMemController = ` 1421 1021 0:133 / / rw,relatime master:412 - overlay overlay 
rw,lowerdir=/apps/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1285288/fs:/apps/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1285287/fs:/apps/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1285286/fs:/apps/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1285285/fs:/apps/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1283928/fs,upperdir=/apps/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1287880/fs,workdir=/apps/data/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1287880/work 1442 1421 0:136 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 1443 1421 0:137 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 1444 1443 0:138 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 2303 1443 0:119 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 2304 1421 0:129 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro 2305 2304 0:139 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 2306 2305 0:25 /system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:5 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 2307 2305 0:28 /kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:6 - cgroup cgroup rw,net_cls,net_prio 2308 2305 0:29 / /sys/fs/cgroup/files ro,nosuid,nodev,noexec,relatime master:7 - cgroup cgroup rw,files 2309 2305 0:30 /kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:8 - cgroup cgroup rw,cpuset 2310 2305 0:31 /system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,memory 2311 2305 0:32 /kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,hugetlb 2312 2305 0:33 /system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,blkio 2313 2305 0:34 /system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,pids 2314 2305 0:35 / /sys/fs/cgroup/rdma ro,nosuid,nodev,noexec,relatime master:13 - cgroup cgroup rw,rdma 2315 2305 0:36 /kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,perf_event 2316 2305 0:37 
/system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,cpu,cpuacct 2317 2305 0:38 /kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,freezer 2318 2305 0:39 /system.slice/containerd.service/kubepods-burstable-pod94598a35_ad1e_4a00_91b1_1db37e8f52f6.slice:cri-containerd:0ac322a00cf64a4d58144a1974b993d91537f3ceec12928b10d881af6be8bbb2 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,devices 2319 1421 0:101 / /etc/podinfo ro,relatime - tmpfs tmpfs rw 2320 1421 253:3 /data/containerd/io.containerd.grpc.v1.cri/sandboxes/22c18c845c47667097eb8973fd0ec05256be685cd1b1a8b0fe7c748a04401cdb/hostname /etc/hostname rw,relatime - xfs /dev/mapper/vg1-lvm1k8s rw,attr2,inode64,sunit=512,swidth=512,noquota 2321 1421 253:3 /data/kubelet/pods/94598a35-ad1e-4a00-91b1-1db37e8f52f6/volumes/kubernetes.io~configmap/config /etc/tikv ro,relatime - xfs /dev/mapper/vg1-lvm1k8s rw,attr2,inode64,sunit=512,swidth=512,noquota 2322 1443 0:104 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k 2323 1421 253:3 /data/kubelet/pods/94598a35-ad1e-4a00-91b1-1db37e8f52f6/etc-hosts /etc/hosts rw,relatime - xfs /dev/mapper/vg1-lvm1k8s rw,attr2,inode64,sunit=512,swidth=512,noquota 2324 1443 253:3 /data/kubelet/pods/94598a35-ad1e-4a00-91b1-1db37e8f52f6/containers/tikv/0981845c /dev/termination-log rw,relatime - xfs /dev/mapper/vg1-lvm1k8s rw,attr2,inode64,sunit=512,swidth=512,noquota 2325 1421 253:3 /data/containerd/io.containerd.grpc.v1.cri/sandboxes/22c18c845c47667097eb8973fd0ec05256be685cd1b1a8b0fe7c748a04401cdb/resolv.conf /etc/resolv.conf rw,relatime - xfs /dev/mapper/vg1-lvm1k8s rw,attr2,inode64,sunit=512,swidth=512,noquota 2326 1421 253:2 /pv03 /var/lib/tikv rw,relatime - xfs /dev/mapper/vg2-lvm2k8s rw,attr2,inode64,sunit=512,swidth=512,noquota 2327 1421 253:3 /data/kubelet/pods/94598a35-ad1e-4a00-91b1-1db37e8f52f6/volumes/kubernetes.io~configmap/startup-script /usr/local/bin ro,relatime - xfs /dev/mapper/vg1-lvm1k8s rw,attr2,inode64,sunit=512,swidth=512,noquota 2328 1421 0:102 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw 1022 1442 0:136 /bus /proc/bus ro,nosuid,nodev,noexec,relatime - proc proc rw 1034 1442 0:136 /fs /proc/fs ro,nosuid,nodev,noexec,relatime - proc proc rw 1035 1442 0:136 /irq /proc/irq ro,nosuid,nodev,noexec,relatime - proc proc rw 1036 1442 0:136 /sys /proc/sys ro,nosuid,nodev,noexec,relatime - proc proc rw 1037 1442 0:136 /sysrq-trigger /proc/sysrq-trigger ro,nosuid,nodev,noexec,relatime - proc proc rw 1038 1442 0:161 / /proc/acpi ro,relatime - tmpfs tmpfs ro 1039 1442 0:137 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 1040 1442 0:137 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 1041 1442 0:137 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 1042 1442 0:137 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 1043 1442 0:162 / /proc/scsi ro,relatime - tmpfs tmpfs ro 1044 2304 0:163 / /sys/firmware ro,relatime - tmpfs tmpfs ro ` ) func TestCgroupsGetCPU(t *testing.T) { for i := 0; i < 2; i++ { if i == 1 { // The field in /proc/self/cgroup and /proc/self/meminfo may 
appear as "cpuacct,cpu" or "rw,cpuacct,cpu" // while the input controller is "cpu,cpuacct" v1CgroupWithCPUController = strings.ReplaceAll(v1CgroupWithCPUController, "cpu,cpuacct", "cpuacct,cpu") v1CgroupWithCPUControllerNS = strings.ReplaceAll(v1CgroupWithCPUControllerNS, "cpu,cpuacct", "cpuacct,cpu") v1CgroupWithCPUControllerNSMountRel = strings.ReplaceAll(v1CgroupWithCPUControllerNSMountRel, "cpu,cpuacct", "cpuacct,cpu") v1CgroupWithCPUControllerNSMountRelRemount = strings.ReplaceAll(v1CgroupWithCPUControllerNSMountRelRemount, "cpu,cpuacct", "cpuacct,cpu") v1CgroupWithCPUControllerNS2 = strings.ReplaceAll(v1CgroupWithCPUControllerNS2, "cpu,cpuacct", "cpuacct,cpu") v1MountsWithCPUController = strings.ReplaceAll(v1MountsWithCPUController, "rw,cpu,cpuacct", "rw,cpuacct,cpu") v1MountsWithCPUControllerNS = strings.ReplaceAll(v1MountsWithCPUControllerNS, "rw,cpu,cpuacct", "rw,cpuacct,cpu") v1MountsWithCPUControllerNSMountRel = strings.ReplaceAll(v1MountsWithCPUControllerNSMountRel, "rw,cpu,cpuacct", "rw,cpuacct,cpu") v1MountsWithCPUControllerNSMountRelRemount = strings.ReplaceAll(v1MountsWithCPUControllerNSMountRelRemount, "rw,cpu,cpuacct", "rw,cpuacct,cpu") v1MountsWithCPUControllerNS2 = strings.ReplaceAll(v1MountsWithCPUControllerNS2, "rw,cpu,cpuacct", "rw,cpuacct,cpu") } testCgroupsGetCPU(t) } } func testCgroupsGetCPU(t *testing.T) { for _, tc := range []struct { name string paths map[string]string errMsg string period int64 quota int64 user uint64 system uint64 }{ { errMsg: "failed to read cpu,cpuacct cgroup from cgroups file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithoutCPUController, "/proc/self/mountinfo": v1MountsWithoutCPUController, }, errMsg: "no cpu controller detected", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUController, }, errMsg: "failed to read mounts info from file:", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUController, "/proc/self/mountinfo": v1MountsWithoutCPUController, }, errMsg: "failed to detect cgroup root mount and version", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUController, "/proc/self/mountinfo": v1MountsWithCPUController, "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us": "12345", "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us": "67890", "/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage_sys": "123", "/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage_user": "456", }, quota: int64(12345), period: int64(67890), system: uint64(123), user: uint64(456), }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUControllerNS, "/proc/self/mountinfo": v1MountsWithCPUControllerNS, "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_quota_us": "12345", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_period_us": "67890", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_sys": "123", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_user": "456", }, quota: int64(12345), period: int64(67890), system: uint64(123), user: uint64(456), }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUControllerNSMountRel, "/proc/self/mountinfo": v1MountsWithCPUControllerNSMountRel, }, errMsg: "failed to detect cgroup root mount and version", }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUControllerNSMountRelRemount, "/proc/self/mountinfo": v1MountsWithCPUControllerNSMountRelRemount, "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_quota_us": "12345", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_period_us": "67890", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_sys": "123", 
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_user": "456", }, quota: int64(12345), period: int64(67890), system: uint64(123), user: uint64(456), }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUControllerNS2, "/proc/self/mountinfo": v1MountsWithCPUControllerNS2, "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_quota_us": "12345", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_period_us": "67890", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_sys": "123", "/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_user": "456", }, quota: int64(12345), period: int64(67890), system: uint64(123), user: uint64(456), }, { paths: map[string]string{ "/proc/self/cgroup": v1CgroupWithCPUController, "/proc/self/mountinfo": v1MountsWithCPUController, "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us": "-1", "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us": "67890", }, quota: int64(-1), period: int64(67890), errMsg: "error when reading cpu system time from cgroup v1", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, }, errMsg: "error when read cpu quota from cgroup v2", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "foo bar\n", }, errMsg: "error when reading cpu quota from cgroup v2 at", }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "100 1000\n", "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.stat": "user_usec 100\nsystem_usec 200", }, quota: int64(100), period: int64(1000), user: uint64(100), system: uint64(200), }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "max 1000\n", "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.stat": "user_usec 100\nsystem_usec 200", }, quota: int64(-1), period: int64(1000), user: uint64(100), system: uint64(200), }, { paths: map[string]string{ "/proc/self/cgroup": v2CgroupWithMemoryController, "/proc/self/mountinfo": v2Mounts, "/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "100 1000\n", }, quota: int64(100), period: int64(1000), errMsg: "can't read cpu usage from cgroup v2", }, { paths: map[string]string{ "/proc/self/cgroup": MixCgroup, "/proc/self/mountinfo": MixMounts, "/sys/fs/cgroup/cpu,cpuacct/user.slice/cpu.cfs_quota_us": "12345", "/sys/fs/cgroup/cpu,cpuacct/user.slice/cpu.cfs_period_us": "67890", "/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.usage_sys": "123", "/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.usage_user": "456", }, quota: int64(12345), period: int64(67890), system: uint64(123), user: uint64(456), }, } { dir := createFiles(t, tc.paths) cpuusage, err := getCgroupCPU(dir) require.True(t, isError(err, tc.errMsg), "%v %v", err, tc.errMsg) require.Equal(t, tc.quota, cpuusage.Quota) require.Equal(t, tc.period, cpuusage.Period) require.Equal(t, tc.system, cpuusage.Stime) require.Equal(t, tc.user, cpuusage.Utime) } } func createFiles(t 
*testing.T, paths map[string]string) (dir string) { dir = t.TempDir() for path, data := range paths { path = filepath.Join(dir, path) require.NoError(t, os.MkdirAll(filepath.Dir(path), 0755)) require.NoError(t, os.WriteFile(path, []byte(data), 0755)) } return dir } var ( v1CgroupWithMemoryController = `11:blkio:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 10:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 9:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 8:cpu,cpuacct:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 7:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 6:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 5:memory:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 ` v1CgroupWithoutMemoryController = `10:blkio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 9:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 8:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 7:cpu,cpuacct:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 6:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 5:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 ` v1CgroupWithCPUController = `11:blkio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 10:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 
9:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 8:cpu,cpuacct:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 7:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 6:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 5:memory:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 ` v1CgroupWithoutCPUController = `10:blkio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 9:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 8:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 7:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 6:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 5:memory:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 ` v2CgroupWithMemoryController = `0::/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope ` v1MountsWithMemController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work 626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts 
rw,gid=5,mode=620,ptmxmode=666 702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro 703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer 726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb 727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio 733 703 0:28 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime master:13 - cgroup cgroup rw,memory 734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset 735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids 736 703 0:31 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,cpu,cpuacct 737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event 740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices 742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio 744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered 814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw 815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf 
/etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k 819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw 368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw 375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw 376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw 381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw 397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw 213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro 216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro ` v1MountsWithoutMemController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work 626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro 703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer 726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb 727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio 734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset 
ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset 735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids 736 703 0:31 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,cpu,cpuacct 737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event 740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices 742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio 744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered 814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw 815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k 819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw 368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw 375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw 376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw 381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw 397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw 213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro 216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro ` v1MountsWithCPUController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay 
rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work 626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro 703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer 726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb 727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio 733 703 0:28 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime master:13 - cgroup cgroup rw,memory 734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset 735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids 736 703 0:31 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,cpu,cpuacct 737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event 740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices 742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio 744 687 0:78 / /dev/mqueue 
rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered 814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw 815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k 819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw 368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw 375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw 376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw 381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw 397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw 213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro 216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro ` v1MountsWithoutCPUController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work 626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro 703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/freezer 
ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer 726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb 727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio 734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset 735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids 737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event 740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices 742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio 744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered 814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw 815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota 818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k 819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw 368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw 375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw 376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw 381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw 397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw 213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro 216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 218 626 0:75 /null 
/proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro ` v2Mounts = `371 344 0:35 / / rw,relatime - overlay overlay rw,context="system_u:object_r:container_file_t:s0:c200,c321",lowerdir=/var/lib/containers/storage/overlay/l/SPNDOAU3AZNJMNKU3F5THCA36R,upperdir=/var/lib/containers/storage/overlay/7dcd88f815bded7b833fb5dc0f25de897250bcfa828624c0d78393689d0bc312/diff,workdir=/var/lib/containers/storage/overlay/7dcd88f815bded7b833fb5dc0f25de897250bcfa828624c0d78393689d0bc312/work 372 371 0:37 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 373 371 0:38 / /dev rw,nosuid - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=65536k,mode=755 374 371 0:39 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs rw,seclabel 375 373 0:40 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,context="system_u:object_r:container_file_t:s0:c200,c321",gid=5,mode=620,ptmxmode=666 376 373 0:36 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw,seclabel 377 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/hostname /etc/hostname rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755 378 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/.containerenv /run/.containerenv rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755 379 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/run/secrets /run/secrets rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755 380 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/resolv.conf /etc/resolv.conf rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755 381 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/hosts /etc/hosts rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755 382 373 0:33 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=64000k 383 374 0:25 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - cgroup2 cgroup2 rw,seclabel 384 372 0:41 / /proc/acpi ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k 385 372 0:6 /null /proc/kcore rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755 386 372 0:6 /null /proc/keys rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755 387 372 0:6 /null /proc/timer_list rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755 388 372 0:6 /null /proc/sched_debug rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755 389 372 0:42 / /proc/scsi ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k 390 374 0:43 / /sys/firmware ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k 391 374 0:44 / /sys/fs/selinux ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k 392 372 0:37 /bus /proc/bus ro,relatime - proc proc rw 393 372 0:37 /fs /proc/fs ro,relatime - proc proc rw 394 372 0:37 
/irq /proc/irq ro,relatime - proc proc rw 395 372 0:37 /sys /proc/sys ro,relatime - proc proc rw 396 372 0:37 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw 345 373 0:40 /0 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,context="system_u:object_r:container_file_t:s0:c200,c321",gid=5,mode=620,ptmxmode=666 ` v1MemoryStat = `cache 784113664 rss 1703952384 rss_huge 27262976 shmem 0 mapped_file 14520320 dirty 4096 writeback 0 swap 0 pgpgin 35979039 pgpgout 35447229 pgfault 24002539 pgmajfault 3871 inactive_anon 0 active_anon 815435776 inactive_file 1363746816 active_file 308867072 unevictable 0 hierarchical_memory_limit 2936016896 hierarchical_memsw_limit 9223372036854771712 total_cache 784113664 total_rss 1703952384 total_rss_huge 27262976 total_shmem 0 total_mapped_file 14520320 total_dirty 4096 total_writeback 0 total_swap 0 total_pgpgin 35979039 total_pgpgout 35447229 total_pgfault 24002539 total_pgmajfault 3871 total_inactive_anon 0 total_active_anon 815435776 total_inactive_file 1363746816 total_active_file 308867072 total_unevictable 0 ` v2MemoryStat = `anon 784113664 file 1703952384 kernel_stack 27262976 pagetables 0 percpu 14520320 sock 4096 shmem 0 file_mapped 0 file_dirty 35979039 file_writeback 35447229 swapcached 24002539 anon_thp 3871 file_thp 0 shmem_thp 815435776 inactive_anon 1363746816 active_anon 308867072 inactive_file 1363746816 active_file 2936016896 unevictable 9223372036854771712 slab_reclaimable 784113664 slab_unreclaimable 1703952384 slab 27262976 workingset_refault_anon 0 workingset_refault_file 14520320 workingset_activate_anon 4096 workingset_activate_file 0 workingset_restore_anon 0 workingset_restore_file 35979039 workingset_nodereclaim 35447229 pgfault 24002539 pgmajfault 3871 pgrefill 0 pgscan 815435776 pgsteal 1363746816 pgactivate 308867072 pgdeactivate 0 pglazyfree 0 pglazyfreed 0 thp_fault_alloc 0 thp_collapse_alloc 0 ` v1MemoryUsageInBytes = "276328448" // Both /proc/<pid>/mountinfo and /proc/<pid>/cgroup will show the mount and the cgroup relative to the cgroup NS root // This tests the case where the memory controller mount and the cgroup are not exactly the same (as is with k8s pods). v1CgroupWithMemoryControllerNS = "12:memory:/cgroup_test" v1MountsWithMemControllerNS = "50 35 0:44 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:25 - cgroup cgroup rw,memory" // Example where the paths in /proc/self/mountinfo and /proc/self/cgroup are not the same for the cpu controller // // sudo cgcreate -t $USER:$USER -a $USER:$USER -g cpu:crdb_test // echo 100000 > /sys/fs/cgroup/cpu/crdb_test/cpu.cfs_period_us // echo 33300 > /sys/fs/cgroup/cpu/crdb_test/cpu.cfs_quota_us // cgexec -g cpu:crdb_test ./cockroach ... v1CgroupWithCPUControllerNS = "5:cpu,cpuacct:/crdb_test" v1MountsWithCPUControllerNS = "43 35 0:37 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpu,cpuacct" // Same as above but with unshare -C // Can't determine the location of the mount v1CgroupWithCPUControllerNSMountRel = "5:cpu,cpuacct:/" v1MountsWithCPUControllerNSMountRel = "43 35 0:37 /.. /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpu,cpuacct" // Same as above but with mounting the cgroup fs one more time in the NS // sudo mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct/crdb_test v1CgroupWithCPUControllerNSMountRelRemount = "5:cpu,cpuacct:/" v1MountsWithCPUControllerNSMountRelRemount = ` 43 35 0:37 /.. 
/sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpu,cpuacct 161 43 0:37 / /sys/fs/cgroup/cpu,cpuacct/crdb_test rw,relatime shared:95 - cgroup none rw,cpu,cpuacct ` // Same as above but exiting the NS w/o unmounting v1CgroupWithCPUControllerNS2 = "5:cpu,cpuacct:/crdb_test" v1MountsWithCPUControllerNS2 = "161 43 0:37 /crdb_test /sys/fs/cgroup/cpu,cpuacct/crdb_test rw,relatime shared:95 - cgroup none rw,cpu,cpuacct" MixCgroup = `12:hugetlb:/ 11:memory:/user.slice/user-1006.slice/session-17838.scope 10:pids:/user.slice/user-1006.slice/session-17838.scope 9:devices:/user.slice 8:perf_event:/ 7:cpu,cpuacct:/user.slice 6:blkio:/user.slice 5:cpuset:/ 4:net_cls,net_prio:/ 3:freezer:/ 2:rdma:/ 1:name=systemd:/user.slice/user-1006.slice/session-17838.scope 0::/user.slice/user-1006.slice/session-17838.scope ` MixMounts = ` 25 30 0:23 / /sys rw,relatime shared:7 - sysfs sysfs rw 26 30 0:5 / /proc rw,relatime shared:14 - proc proc rw 27 30 0:6 / /dev rw,nosuid,noexec,relatime shared:2 - devtmpfs udev rw,size=197385544k,nr_inodes=49346386,mode=755 28 27 0:24 / /dev/pts rw,relatime shared:3 - devpts devpts rw,gid=5,mode=620,ptmxmode=000 29 30 0:25 / /run rw,nosuid,nodev,noexec,relatime shared:5 - tmpfs tmpfs rw,size=39486148k,mode=755 30 1 8:3 / / rw,relatime shared:1 - ext4 /dev/sda3 rw,stripe=16 31 25 0:7 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:8 - securityfs securityfs rw 32 27 0:26 / /dev/shm rw shared:4 - tmpfs tmpfs rw 33 29 0:27 / /run/lock rw,nosuid,nodev,noexec,relatime shared:6 - tmpfs tmpfs rw,size=5120k 34 25 0:28 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:9 - tmpfs tmpfs ro,mode=755 35 34 0:29 / /sys/fs/cgroup/unified rw,nosuid,nodev,noexec,relatime shared:10 - cgroup2 cgroup2 rw 36 34 0:30 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,xattr,name=systemd 37 25 0:31 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:12 - pstore pstore rw 39 34 0:33 / /sys/fs/cgroup/rdma rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,rdma 40 34 0:34 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,freezer 41 34 0:35 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,net_cls,net_prio 42 34 0:36 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpuset 43 34 0:37 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,blkio 44 34 0:38 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:20 - cgroup cgroup rw,cpu,cpuacct 45 34 0:39 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:21 - cgroup cgroup rw,perf_event 46 34 0:40 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:22 - cgroup cgroup rw,devices 47 34 0:41 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:23 - cgroup cgroup rw,pids 48 34 0:42 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:24 - cgroup cgroup rw,memory 49 34 0:43 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:25 - cgroup cgroup rw,hugetlb 50 26 0:44 / /proc/sys/fs/binfmt_misc rw,relatime shared:26 - autofs systemd-1 rw,fd=28,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=91621 51 27 0:45 / /dev/hugepages rw,relatime shared:27 - hugetlbfs hugetlbfs rw,pagesize=2M 52 27 0:21 / /dev/mqueue rw,nosuid,nodev,noexec,relatime shared:28 - mqueue mqueue rw 53 25 0:8 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime shared:29 - debugfs 
debugfs rw 54 25 0:12 / /sys/kernel/tracing rw,nosuid,nodev,noexec,relatime shared:30 - tracefs tracefs rw 55 25 0:46 / /sys/fs/fuse/connections rw,nosuid,nodev,noexec,relatime shared:31 - fusectl fusectl rw 56 25 0:22 / /sys/kernel/config rw,nosuid,nodev,noexec,relatime shared:32 - configfs configfs rw 142 30 8:2 / /boot rw,relatime shared:79 - ext4 /dev/sda2 rw,stripe=16,data=ordered 145 30 259:1 / /data/nvme0n1 rw,relatime shared:81 - ext4 /dev/nvme0n1 rw 605 29 0:25 /snapd/ns /run/snapd/ns rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,size=39486148k,mode=755 624 29 0:49 / /run/user/0 rw,nosuid,nodev,relatime shared:341 - tmpfs tmpfs rw,size=39486144k,mode=700 642 30 259:3 / /mnt/c42ca499-9a7c-4d19-ae60-e8a46a6956ba rw,relatime shared:348 - ext4 /dev/nvme2n1 rw 798 30 259:2 / /mnt/a688878a-492b-4536-a03c-f50ce8a1f014 rw,relatime shared:386 - ext4 /dev/nvme3n1 rw 887 30 259:0 / /mnt/f97f162d-be90-4bfa-bae5-2698f5ce634d rw,relatime shared:424 - ext4 /dev/nvme1n1 rw 976 29 0:53 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/2191a8cf52bd8313c6abef93260d6964f6b7240117f7cc0723c9647caa78bb45/shm rw,nosuid,nodev,noexec,relatime shared:462 - tmpfs shm rw,size=65536k 993 29 0:54 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/2191a8cf52bd8313c6abef93260d6964f6b7240117f7cc0723c9647caa78bb45/rootfs rw,relatime shared:469 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/8260/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/8260/work,xino=off 1113 145 0:80 / /data/nvme0n1/kubelet/pods/428eee2f-da5c-44de-aae1-951b3746e3d8/volumes/kubernetes.io~secret/clustermesh-secrets rw,relatime shared:497 - tmpfs tmpfs rw 1185 145 0:89 / /data/nvme0n1/kubelet/pods/07d67bd3-f23b-4d2c-84ae-03a2df2f42a6/volumes/kubernetes.io~projected/kube-api-access-6c2d6 rw,relatime shared:518 - tmpfs tmpfs rw 1223 29 0:90 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/8231ae5d138d3e9da5a996bbc833175580fe7c201dfe66398df2cc0285e1bdfe/shm rw,nosuid,nodev,noexec,relatime shared:525 - tmpfs shm rw,size=65536k 1257 29 0:92 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/8231ae5d138d3e9da5a996bbc833175580fe7c201dfe66398df2cc0285e1bdfe/rootfs rw,relatime shared:539 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/8264/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/8264/work,xino=off 1521 145 0:112 / /data/nvme0n1/kubelet/pods/428eee2f-da5c-44de-aae1-951b3746e3d8/volumes/kubernetes.io~projected/kube-api-access-6kcqx rw,relatime shared:567 - tmpfs tmpfs rw 1519 29 0:121 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/11cbc306aeead0190cb68c6329bafaf979353cd193e8ecb47e61b693cfd2b0f5/shm rw,nosuid,nodev,noexec,relatime shared:574 - tmpfs shm rw,size=65536k 1556 29 0:122 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/11cbc306aeead0190cb68c6329bafaf979353cd193e8ecb47e61b693cfd2b0f5/rootfs rw,relatime shared:581 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/8268/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/8268/work,xino=off 5312 50 0:803 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime shared:1817 - 
binfmt_misc binfmt_misc rw 2312 30 7:2 / /snap/lxd/23991 ro,nodev,relatime shared:742 - squashfs /dev/loop2 ro 458 605 0:4 mnt:[4026537320] /run/snapd/ns/lxd.mnt rw - nsfs nsfs rw 755 30 7:4 / /snap/lxd/24061 ro,nodev,relatime shared:511 - squashfs /dev/loop4 ro 1027 29 0:63 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/a802a3a6b73af45d20aa3d7a07b6c280b618b91320b4378812a2552e413935c2/rootfs rw,relatime shared:476 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/13/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/12/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/11/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/10/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/6/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/4/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26137/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26137/work,xino=off 4698 29 0:110 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/8b6bac7cc0fd04bc2d1baf50f8f2fee0f81f36a13bf06f4b4ff195586a87588f/rootfs rw,relatime shared:2321 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/15/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/9/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26141/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26141/work,xino=off 10124 29 0:176 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/ed885e6b247e68336830e4120db20d7f43c7a996a82fa86a196e48743a52eff4/rootfs rw,relatime shared:2417 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/28/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/27/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/25/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/24/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/23/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/22/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26143/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26143/work,xino=off 3432 30 7:3 / /snap/snapd/18357 ro,nodev,relatime shared:1001 - squashfs /dev/loop3 ro 2768 30 7:11 / /snap/core/14784 ro,nodev,relatime shared:938 - squashfs /dev/loop11 ro 2265 30 7:12 / /snap/k9s/151 ro,nodev,relatime shared:725 - squashfs /dev/loop12 ro 2297 30 7:13 / /snap/core20/1828 ro,nodev,relatime shared:765 - squashfs /dev/loop13 ro 1411 30 7:14 / /snap/go/10073 ro,nodev,relatime shared:550 - squashfs /dev/loop14 ro 115 145 0:103 / /data/nvme0n1/kubelet/pods/bb3ece51-01a1-4d9d-a48a-43093c72a3a2/volumes/kubernetes.io~projected/kube-api-access-5nvm9 rw,relatime shared:442 - tmpfs tmpfs rw 1454 29 0:108 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/282a3e51b05ed2b168285995fce71e9d882db9d4cb33e54a367791fe92fd8cd2/shm rw,nosuid,nodev,noexec,relatime shared:740 - tmpfs shm rw,size=65536k 1516 29 0:109 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/282a3e51b05ed2b168285995fce71e9d882db9d4cb33e54a367791fe92fd8cd2/rootfs rw,relatime shared:788 - overlay overlay 
rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26610/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26610/work,xino=off 2409 29 0:209 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/29d6387bdeed4df9882df5f73645c071317999c6f913a739d42390507485e8c5/rootfs rw,relatime shared:869 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/9/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26611/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/26611/work,xino=off 2270 30 7:15 / /snap/core18/2714 ro,nodev,relatime shared:1068 - squashfs /dev/loop15 ro 137 30 0:71 / /var/lib/docker/overlay2/41ea64be1d943b66e7cce1d07ca48f1c6359dd7e983ffc4100b122289d4fc457/merged rw,relatime shared:49 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/MZDFUOLZV7OH5FLPS5ZWNO6JHC:/var/lib/docker/overlay2/l/SFPFSYXYX4E3ST5E7Y5BTG3RAG:/var/lib/docker/overlay2/l/NVHRNLY3A7KDIXEPAEZZUAKKAF:/var/lib/docker/overlay2/l/3NWZQJULTIAEMU33EBOV3DO4KK:/var/lib/docker/overlay2/l/24BFJIPXS2PJ4XI7A4OB4FCK3N,upperdir=/var/lib/docker/overlay2/41ea64be1d943b66e7cce1d07ca48f1c6359dd7e983ffc4100b122289d4fc457/diff,workdir=/var/lib/docker/overlay2/41ea64be1d943b66e7cce1d07ca48f1c6359dd7e983ffc4100b122289d4fc457/work,xino=off 138 30 0:72 / /var/lib/docker/overlay2/6ba83a6794d649bd38fb9698e067ce8fb22e6a976af0cd9d003b86847b54dde7/merged rw,relatime shared:73 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DAWHHXAM2AYTI37UJOH6W453EZ:/var/lib/docker/overlay2/l/SFPFSYXYX4E3ST5E7Y5BTG3RAG:/var/lib/docker/overlay2/l/NVHRNLY3A7KDIXEPAEZZUAKKAF:/var/lib/docker/overlay2/l/3NWZQJULTIAEMU33EBOV3DO4KK:/var/lib/docker/overlay2/l/24BFJIPXS2PJ4XI7A4OB4FCK3N,upperdir=/var/lib/docker/overlay2/6ba83a6794d649bd38fb9698e067ce8fb22e6a976af0cd9d003b86847b54dde7/diff,workdir=/var/lib/docker/overlay2/6ba83a6794d649bd38fb9698e067ce8fb22e6a976af0cd9d003b86847b54dde7/work,xino=off 4039 29 0:4 net:[4026537384] /run/docker/netns/c8477f57c25f rw shared:123 - nsfs nsfs rw 4059 29 0:4 net:[4026537817] /run/docker/netns/64d7952bb68f rw shared:833 - nsfs nsfs rw 665 30 259:3 /vol1 /mnt/disks/c42ca499-9a7c-4d19-ae60-e8a46a6956ba_vol1 rw,relatime shared:348 - ext4 /dev/nvme2n1 rw 750 30 259:2 /vol1 /mnt/disks/a688878a-492b-4536-a03c-f50ce8a1f014_vol1 rw,relatime shared:386 - ext4 /dev/nvme3n1 rw 779 30 259:0 /vol1 /mnt/disks/f97f162d-be90-4bfa-bae5-2698f5ce634d_vol1 rw,relatime shared:424 - ext4 /dev/nvme1n1 rw 38 25 0:256 / /sys/fs/bpf rw,relatime shared:13 - bpf none rw 3174 30 7:16 / /snap/core/14946 ro,nodev,relatime shared:846 - squashfs /dev/loop16 ro 965 30 7:17 / /snap/core20/1852 ro,nodev,relatime shared:436 - squashfs /dev/loop17 ro 3663 30 7:5 / /snap/snapd/18596 ro,nodev,relatime shared:1170 - squashfs /dev/loop5 ro 2275 30 7:8 / /snap/core22/583 ro,nodev,relatime shared:449 - squashfs /dev/loop8 ro 4856 30 7:9 / /snap/core18/2721 ro,nodev,relatime shared:1229 - squashfs /dev/loop9 ro 1225 29 0:487 / /run/user/1003 rw,nosuid,nodev,relatime shared:987 - tmpfs tmpfs rw,size=39486144k,mode=700,uid=1003,gid=1003 311 605 0:4 mnt:[4026537731] /run/snapd/ns/k9s.mnt rw - nsfs nsfs rw 80 29 0:32 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/c6189676909828b8d0cbce711e115fa3037e30122a4e359bfca0fc4f628d91da/shm 
rw,nosuid,nodev,noexec,relatime shared:55 - tmpfs shm rw,size=65536k 100 29 0:50 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/c6189676909828b8d0cbce711e115fa3037e30122a4e359bfca0fc4f628d91da/rootfs rw,relatime shared:62 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30973/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30973/work,xino=off 497 29 0:91 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/19a62c19cdd472887f8b1510143d8a79151f70f24bef25a649b8c063a7fe0dff/rootfs rw,relatime shared:75 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30976/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30975/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30974/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30977/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30977/work,xino=off 643 145 0:113 / /data/nvme0n1/kubelet/pods/8c096a91-03cb-4948-b0b1-10bf212928e8/volumes/kubernetes.io~secret/kubevirt-virt-handler-certs rw,relatime shared:171 - tmpfs tmpfs rw 696 145 0:115 / /data/nvme0n1/kubelet/pods/8c096a91-03cb-4948-b0b1-10bf212928e8/volumes/kubernetes.io~secret/kubevirt-virt-handler-server-certs rw,relatime shared:223 - tmpfs tmpfs rw 699 145 0:116 / /data/nvme0n1/kubelet/pods/8c096a91-03cb-4948-b0b1-10bf212928e8/volumes/kubernetes.io~downward-api/podinfo rw,relatime shared:272 - tmpfs tmpfs rw 810 145 0:119 / /data/nvme0n1/kubelet/pods/8c096a91-03cb-4948-b0b1-10bf212928e8/volumes/kubernetes.io~projected/kube-api-access-b4ctw rw,relatime shared:323 - tmpfs tmpfs rw 848 29 0:4 net:[4026537303] /run/netns/cni-86b41b5e-f846-34a4-3d24-4bc6f7a2b70d rw shared:360 - nsfs nsfs rw 1191 29 0:133 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/e5326f2fb12efbadde2acc5095415191214862b730fb4abbe4acc4f0a982dfc0/shm rw,nosuid,nodev,noexec,relatime shared:382 - tmpfs shm rw,size=65536k 1226 29 0:134 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/e5326f2fb12efbadde2acc5095415191214862b730fb4abbe4acc4f0a982dfc0/rootfs rw,relatime shared:396 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30978/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30978/work,xino=off 3252 29 0:350 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/0525c449b405367e89061da50eb3cd480692aa0c3f47e98944fd61a6d9d686e1/rootfs rw,relatime shared:1057 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30998/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30989/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30999/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30999/work,xino=off 4090 145 0:422 / /data/nvme0n1/kubelet/pods/c7676e13-5326-4a20-9823-1709d2ec124f/volumes/kubernetes.io~secret/chaos-daemon-cert rw,relatime shared:1120 - tmpfs tmpfs rw 4149 145 0:423 / /data/nvme0n1/kubelet/pods/c7676e13-5326-4a20-9823-1709d2ec124f/volumes/kubernetes.io~projected/kube-api-access-jvl6g rw,relatime shared:1140 - tmpfs tmpfs rw 4166 29 0:4 net:[4026538463] /run/netns/cni-f4356533-4c6e-f374-942a-f8fb8aaef128 
rw shared:1147 - nsfs nsfs rw 4218 29 0:424 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/38bf769b3260aac70efc295dbeeba2d9ae1e0de3df2347f1926e2344336ed61a/shm rw,nosuid,nodev,noexec,relatime shared:1154 - tmpfs shm rw,size=65536k 4238 29 0:425 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/38bf769b3260aac70efc295dbeeba2d9ae1e0de3df2347f1926e2344336ed61a/rootfs rw,relatime shared:1161 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31135/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31135/work,xino=off 3764 29 0:414 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/debe913e95afafbe487c2ff40032ef88fedab297dc35f01873a17eb4d3911137/rootfs rw,relatime shared:1099 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31149/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31148/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31147/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31146/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31145/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31144/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31143/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31142/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31141/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31140/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31139/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31138/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31137/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31136/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31150/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31150/work,xino=off 4325 145 0:447 / /data/nvme0n1/kubelet/pods/eaa8a805-56af-4054-a3d1-87d89b4427f7/volumes/kubernetes.io~projected/kube-api-access-kdkpp rw,relatime shared:1189 - tmpfs tmpfs rw 4354 29 0:4 net:[4026538607] /run/netns/cni-3767b6e7-dbc4-0ccd-d26c-b7a20f4db13a rw shared:1196 - nsfs nsfs rw 4374 29 0:448 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/ea5708a1a0574c874fa64e782929b1ce52a68561d6d4c57200dcaeae42a412a0/shm rw,nosuid,nodev,noexec,relatime shared:1203 - tmpfs shm rw,size=65536k 4402 29 0:449 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/ea5708a1a0574c874fa64e782929b1ce52a68561d6d4c57200dcaeae42a412a0/rootfs rw,relatime shared:1210 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31154/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31154/work,xino=off 4472 29 0:460 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/3f73b6ed13e1e285c13f20c568e353945cc9705dd7b027985c281a0f03c514b4/rootfs rw,relatime shared:1217 - overlay overlay 
rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31158/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31157/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31156/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31155/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31159/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31159/work,xino=off 4655 29 0:465 / /run/user/1006 rw,nosuid,nodev,relatime shared:1117 - tmpfs tmpfs rw,size=39486144k,mode=700,uid=1006,gid=1006 5412 30 7:6 / /snap/core22/607 ro,nodev,relatime shared:1364 - squashfs /dev/loop6 ro 4865 30 7:1 / /snap/hello-world/29 ro,nodev,relatime shared:1292 - squashfs /dev/loop1 ro 4982 30 7:18 / /snap/go/10135 ro,nodev,relatime shared:1299 - squashfs /dev/loop18 ro 1136 145 0:149 / /data/nvme0n1/kubelet/pods/61b41e1e-6caf-4f4c-aa97-b8ba1b38168e/volumes/kubernetes.io~projected/kube-api-access-r9chm rw,relatime shared:404 - tmpfs tmpfs rw 1357 29 0:4 net:[4026537584] /run/netns/cni-b6788c05-b9f7-d2a8-1cca-729821632d90 rw shared:451 - nsfs nsfs rw 1497 29 0:167 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/af946ebe99e17756f7b3d7a97fcb290dcae7cc6b9f816f2d6942f15afba7c28b/shm rw,nosuid,nodev,noexec,relatime shared:477 - tmpfs shm rw,size=65536k 1657 29 0:168 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/af946ebe99e17756f7b3d7a97fcb290dcae7cc6b9f816f2d6942f15afba7c28b/rootfs rw,relatime shared:510 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33975/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33975/work,xino=off 1679 145 0:180 / /data/nvme0n1/kubelet/pods/7830474c-474d-4ea7-b568-bc70f0c804b2/volumes/kubernetes.io~projected/kube-api-access-lsctl rw,relatime shared:547 - tmpfs tmpfs rw 1740 29 0:4 net:[4026537658] /run/netns/cni-85144543-ac5f-d5cb-e113-613e2360d33e rw shared:559 - nsfs nsfs rw 1765 29 0:181 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/ecd66ebdf9e85e447076ab5f010a9a721102b34878b61a090537ffe4735d5cc8/shm rw,nosuid,nodev,noexec,relatime shared:596 - tmpfs shm rw,size=65536k 1785 29 0:182 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/ecd66ebdf9e85e447076ab5f010a9a721102b34878b61a090537ffe4735d5cc8/rootfs rw,relatime shared:604 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33979/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33979/work,xino=off 1827 29 0:192 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/c78d2dc899501734043224ccae3b1844ca17bde418fa1a2e6b9230b3b8bb100c/rootfs rw,relatime shared:613 - overlay overlay 
rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33982/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33981/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33980/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33978/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33977/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33976/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33983/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33983/work,xino=off 1860 145 259:2 /vol1 /data/nvme0n1/kubelet/pods/a954a19c-b1eb-49f4-903a-edc75070cb01/volumes/kubernetes.io~local-volume/local-pv-2ed50413 rw,relatime shared:386 - ext4 /dev/nvme3n1 rw 2017 145 0:194 / /data/nvme0n1/kubelet/pods/a954a19c-b1eb-49f4-903a-edc75070cb01/volumes/kubernetes.io~projected/kube-api-access-pfpxr rw,relatime shared:628 - tmpfs tmpfs rw 1897 29 0:201 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/2d3a2171ef379ec2abb999beaf83e6a864352c1bfd12a3b7e02839842df996ee/rootfs rw,relatime shared:636 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33296/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33295/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33294/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31119/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33984/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33984/work,xino=off 1916 29 0:4 net:[4026537735] /run/netns/cni-10de9502-de86-307b-30b6-4038e734172f rw shared:644 - nsfs nsfs rw 1963 29 0:212 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/8883ff252cae98cf882bd79b528a009c36f3618a6e1e2f0de5e8df85c90dca7f/shm rw,nosuid,nodev,noexec,relatime shared:652 - tmpfs shm rw,size=65536k 1983 29 0:213 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/8883ff252cae98cf882bd79b528a009c36f3618a6e1e2f0de5e8df85c90dca7f/rootfs rw,relatime shared:660 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33985/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33985/work,xino=off 2001 145 0:226 / /data/nvme0n1/kubelet/pods/8faa8aef-6440-4932-8281-5350b429bbf5/volumes/kubernetes.io~downward-api/annotations rw,relatime shared:668 - tmpfs tmpfs rw 2087 145 0:227 / /data/nvme0n1/kubelet/pods/8faa8aef-6440-4932-8281-5350b429bbf5/volumes/kubernetes.io~projected/kube-api-access-6c682 rw,relatime shared:677 - tmpfs tmpfs rw 2116 29 0:4 net:[4026537808] /run/netns/cni-6c186bcd-915d-ae9f-6240-f005d7558f3c rw shared:685 - nsfs nsfs rw 2164 29 0:228 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/98d1dc3da1f767caa508aeb7a13ba91eb29f93e6afcc6d9374c477956a587c9c/shm rw,nosuid,nodev,noexec,relatime shared:694 - tmpfs shm rw,size=65536k 2184 29 0:229 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/98d1dc3da1f767caa508aeb7a13ba91eb29f93e6afcc6d9374c477956a587c9c/rootfs rw,relatime shared:703 - overlay overlay 
rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33991/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33991/work,xino=off 2205 145 0:240 / /data/nvme0n1/kubelet/pods/661d95b2-0ff0-4ae4-b154-2a8cbc436538/volumes/kubernetes.io~secret/tls-assets rw,relatime shared:710 - tmpfs tmpfs rw 2259 145 0:241 / /data/nvme0n1/kubelet/pods/661d95b2-0ff0-4ae4-b154-2a8cbc436538/volumes/kubernetes.io~projected/kube-api-access-c494f rw,relatime shared:717 - tmpfs tmpfs rw 2280 29 0:4 net:[4026537950] /run/netns/cni-0a5d3c52-1cc9-1d7b-2189-30d737f65e6c rw shared:724 - nsfs nsfs rw 2358 29 0:242 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/d39f5fd65ac2df941fba6dd4e7cffb25a612111082711e6afb44aa16171c86be/shm rw,nosuid,nodev,noexec,relatime shared:755 - tmpfs shm rw,size=65536k 2395 29 0:243 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/d39f5fd65ac2df941fba6dd4e7cffb25a612111082711e6afb44aa16171c86be/rootfs rw,relatime shared:766 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33992/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33992/work,xino=off 2442 29 0:265 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/9a265bdcf7c04c2d56cd82b5126f17a18dd34d557ffea27ba74f1006dcffd510/rootfs rw,relatime shared:775 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33997/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33996/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33995/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33994/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33993/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33990/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33998/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33998/work,xino=off 2476 29 0:266 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/f5bb1b65d7857de4c14b191dec87bb583ea42d0f7336ed9c622989446d52a7e3/rootfs rw,relatime shared:789 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/32889/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/32888/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31105/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33999/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33999/work,xino=off 2538 29 0:283 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/9491e655ded1c73a4d985437535e88469725b90ad8e26d64cd854eead7ff5aa3/rootfs rw,relatime shared:796 - overlay overlay 
rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31241/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31240/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31239/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31238/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31237/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31236/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31235/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31234/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31233/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31232/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30975/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/30974/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34006/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34006/work,xino=off 2576 29 0:291 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/cf0caadef274fa509bb964c214abc9329509c2372f39107d334a36312fd0a7e9/rootfs rw,relatime shared:804 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31245/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31244/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34007/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34007/work,xino=off 2634 29 0:299 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/e54b94b04ed316c75dea0c75e980aee377dc9d44f025017021c14dd147346c80/rootfs rw,relatime shared:811 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31258/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31257/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31256/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31255/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31254/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31253/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31252/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31251/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34008/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34008/work,xino=off 2655 145 0:307 / /data/nvme0n1/kubelet/pods/54dd0c3f-051c-48f7-840c-1842661c69a3/volumes/kubernetes.io~downward-api/annotations rw,relatime shared:820 - tmpfs tmpfs rw 2725 145 259:3 /vol1 /data/nvme0n1/kubelet/pods/54dd0c3f-051c-48f7-840c-1842661c69a3/volumes/kubernetes.io~local-volume/local-pv-2928e757 rw,relatime shared:348 - ext4 /dev/nvme2n1 rw 2747 145 0:308 / /data/nvme0n1/kubelet/pods/54dd0c3f-051c-48f7-840c-1842661c69a3/volumes/kubernetes.io~projected/kube-api-access-clj59 rw,relatime shared:841 - tmpfs tmpfs rw 2764 29 0:4 net:[4026538031] /run/netns/cni-c14eeef7-4134-bb74-f1cc-007ecd2a97b5 rw shared:856 - nsfs nsfs rw 2876 29 0:309 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/4f0074f05034c726a79d9c779d6f1d0d1f209a6ece89524d25b2a0b416d87ee9/shm rw,nosuid,nodev,noexec,relatime shared:871 - tmpfs shm rw,size=65536k 2996 29 0:310 / 
/run/containerd/io.containerd.runtime.v2.task/k8s.io/4f0074f05034c726a79d9c779d6f1d0d1f209a6ece89524d25b2a0b416d87ee9/rootfs rw,relatime shared:878 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34011/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34011/work,xino=off 3036 29 0:320 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/d9bb40166d27212a2449a8cf85810a108c11aff3f9bde7d51b939ecd9acb4cf3/rootfs rw,relatime shared:885 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33296/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33295/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33294/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31119/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34012/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34012/work,xino=off 3088 145 0:329 / /data/nvme0n1/kubelet/pods/e2df7121-3621-4707-b7e9-6b99c3ce58b8/volumes/kubernetes.io~downward-api/annotations rw,relatime shared:893 - tmpfs tmpfs rw 3120 145 259:0 /vol1 /data/nvme0n1/kubelet/pods/e2df7121-3621-4707-b7e9-6b99c3ce58b8/volumes/kubernetes.io~local-volume/local-pv-b1fc6e56 rw,relatime shared:424 - ext4 /dev/nvme1n1 rw 3141 145 0:330 / /data/nvme0n1/kubelet/pods/e2df7121-3621-4707-b7e9-6b99c3ce58b8/volumes/kubernetes.io~projected/kube-api-access-jpzvq rw,relatime shared:906 - tmpfs tmpfs rw 3164 29 0:4 net:[4026538106] /run/netns/cni-bebf9810-8b52-9b5d-42cf-8760067f6b0b rw shared:915 - nsfs nsfs rw 3196 29 0:331 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/d585588652781fc3d764c036575ca7680416cdd1c7725ec9819897ab9066ede9/shm rw,nosuid,nodev,noexec,relatime shared:922 - tmpfs shm rw,size=65536k 3231 29 0:332 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/d585588652781fc3d764c036575ca7680416cdd1c7725ec9819897ab9066ede9/rootfs rw,relatime shared:930 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34015/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34015/work,xino=off 3303 29 0:342 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/a503134d5c6b832363be3926d09d6f641dfae06dd2108af08d80449368c4b04e/rootfs rw,relatime shared:937 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33296/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33295/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/33294/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31119/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34016/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34016/work,xino=off 1154 145 0:146 / /data/nvme0n1/kubelet/pods/4d283eb8-d2d7-490c-bb71-2be40dab9450/volumes/kubernetes.io~projected/kube-api-access-t8zfb rw,relatime shared:403 - tmpfs tmpfs rw 1299 29 0:4 net:[4026537510] /run/netns/cni-2f2e413a-7585-1a30-7cb1-ac92569fed15 rw shared:412 - nsfs nsfs rw 1329 29 0:147 / /run/containerd/io.containerd.grpc.v1.cri/sandboxes/3e4cd92bbd312e86fb59ebb9ce183220e0917434d86afb245c1715ffc5460bf8/shm 
rw,nosuid,nodev,noexec,relatime shared:421 - tmpfs shm rw,size=65536k 1355 29 0:148 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/3e4cd92bbd312e86fb59ebb9ce183220e0917434d86afb245c1715ffc5460bf8/rootfs rw,relatime shared:437 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34017/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34017/work,xino=off 1457 145 259:1 /kubelet/pods/4d283eb8-d2d7-490c-bb71-2be40dab9450/volumes/kubernetes.io~configmap/config/..2023_04_11_00_48_09.037538042/fluent-bit.conf /data/nvme0n1/kubelet/pods/4d283eb8-d2d7-490c-bb71-2be40dab9450/volume-subpaths/config/fluent-bit/0 rw,relatime shared:81 - ext4 /dev/nvme0n1 rw 1573 145 259:1 /kubelet/pods/4d283eb8-d2d7-490c-bb71-2be40dab9450/volumes/kubernetes.io~configmap/config/..2023_04_11_00_48_09.037538042/custom_parsers.conf /data/nvme0n1/kubelet/pods/4d283eb8-d2d7-490c-bb71-2be40dab9450/volume-subpaths/config/fluent-bit/1 rw,relatime shared:81 - ext4 /dev/nvme0n1 rw 1628 29 0:159 / /run/containerd/io.containerd.runtime.v2.task/k8s.io/5eec42b4a282163996409e4c7dbad906cb9649ef63f0e3cf16613c41e8c81909/rootfs rw,relatime shared:565 - overlay overlay rw,lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31166/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31165/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31164/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31163/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31162/fs:/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/31161/fs,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34018/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/34018/work,xino=off ` )
pkg/util/cgroup/cgroup_mock_test.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0021506811026483774, 0.0002821456582751125, 0.00015801121480762959, 0.00017331559502054006, 0.0003448675852268934 ]
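The fixture strings above feed cgroup-detection tests: the code under test scans /proc/[pid]/mountinfo records like these to locate where a controller is mounted. Below is a minimal sketch of that kind of parsing, assuming a hypothetical parseMountinfoLine helper; it is not the package's actual implementation.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseMountinfoLine splits one /proc/[pid]/mountinfo record into the fields
// relevant for cgroup detection. Fields before the " - " separator are the
// mount ID, parent ID, major:minor, root, mount point, options, and optional
// tags; after the separator come the fstype, source, and super options
// (which name the cgroup controllers).
func parseMountinfoLine(line string) (root, mountPoint, fstype, superOpts string, ok bool) {
	sep := strings.Index(line, " - ")
	if sep < 0 {
		return "", "", "", "", false
	}
	pre := strings.Fields(line[:sep])
	post := strings.Fields(line[sep+3:])
	if len(pre) < 5 || len(post) < 3 {
		return "", "", "", "", false
	}
	return pre[3], pre[4], post[0], post[2], true
}

func main() {
	sample := `43 35 0:37 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpu,cpuacct`
	sc := bufio.NewScanner(strings.NewReader(sample))
	for sc.Scan() {
		if root, mp, fstype, opts, ok := parseMountinfoLine(sc.Text()); ok {
			if fstype == "cgroup" && strings.Contains(opts, "cpu") {
				fmt.Printf("cpu controller mounted at %s (root %s)\n", mp, root)
			}
		}
	}
}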
{ "id": 5, "code_window": [ "\t\t},\n", "\t}\n", "}\n", "\n", "func (s *StatsWriter) clearTemporary() {\n", "\t// clear the temporary variables\n", "\ts.totalSize = 0\n", "\ts.statsFile = &backuppb.StatsFile{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// flush temporary and clear []byte to make it garbage collected as soon as possible\n", "func (s *StatsWriter) flushTemporary() ([]byte, error) {\n", "\tdefer s.clearTemporary()\n", "\treturn proto.Marshal(s.statsFile)\n", "}\n", "\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "add", "edit_start_line_idx": 73 }
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metautil

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/pingcap/errors"
	backuppb "github.com/pingcap/kvproto/pkg/brpb"
	berrors "github.com/pingcap/tidb/br/pkg/errors"
	"github.com/pingcap/tidb/br/pkg/storage"
	"github.com/pingcap/tidb/br/pkg/utils"
	"github.com/pingcap/tidb/pkg/parser/model"
	"github.com/pingcap/tidb/pkg/statistics/handle"
	statstypes "github.com/pingcap/tidb/pkg/statistics/handle/types"
	statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util"
	"golang.org/x/sync/errgroup"
)

var maxStatsJsonTableSize = 32 * 1024 * 1024 // 32 MiB
var inlineSize = 8 * 1024                    // 8 KiB

func getStatsFileName(physicalID int64) string {
	return fmt.Sprintf("backupmeta.schema.stats.%09d", physicalID)
}

// StatsWriter is a lightweight wrapper used to dump table statistics.
type StatsWriter struct {
	storage storage.ExternalStorage
	cipher  *backuppb.CipherInfo

	// final stats file indexes
	statsFileIndexes []*backuppb.StatsFileIndex

	// temporary variables, cleared after each flush
	totalSize int
	statsFile *backuppb.StatsFile
}

func newStatsWriter(
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
) *StatsWriter {
	return &StatsWriter{
		storage: storage,
		cipher:  cipher,

		statsFileIndexes: make([]*backuppb.StatsFileIndex, 0),

		totalSize: 0,
		statsFile: &backuppb.StatsFile{
			Blocks: make([]*backuppb.StatsBlock, 0, 8),
		},
	}
}

func (s *StatsWriter) clearTemporary() {
	// clear the temporary variables
	s.totalSize = 0
	s.statsFile = &backuppb.StatsFile{
		Blocks: make([]*backuppb.StatsBlock, 0, 8),
	}
}

func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {
	fileName := getStatsFileName(physicalID)
	content, err := proto.Marshal(s.statsFile)
	if err != nil {
		return errors.Trace(err)
	}

	if len(s.statsFileIndexes) == 0 && len(content) < inlineSize {
		s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{InlineData: content})
		return nil
	}

	checksum := sha256.Sum256(content)

	encryptedContent, iv, err := Encrypt(content, s.cipher)
	if err != nil {
		return errors.Trace(err)
	}

	if err := s.storage.WriteFile(ctx, fileName, encryptedContent); err != nil {
		return errors.Trace(err)
	}

	s.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{
		Name:     fileName,
		Sha256:   checksum[:],
		SizeEnc:  uint64(len(encryptedContent)),
		SizeOri:  uint64(len(content)),
		CipherIv: iv,
	})

	s.clearTemporary()
	return nil
}

func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {
	if jsonTable == nil {
		return nil
	}

	statsBytes, err := json.Marshal(jsonTable)
	if err != nil {
		return errors.Trace(err)
	}

	s.totalSize += len(statsBytes)
	s.statsFile.Blocks = append(s.statsFile.Blocks, &backuppb.StatsBlock{
		PhysicalId: physicalID,
		JsonTable:  statsBytes,
	})

	// check whether we need to flush
	if s.totalSize > maxStatsJsonTableSize {
		if err := s.writeStatsFileAndClear(ctx, physicalID); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

func (s *StatsWriter) BackupStatsDone(ctx context.Context) ([]*backuppb.StatsFileIndex, error) {
	if s.totalSize == 0 || len(s.statsFile.Blocks) == 0 {
		return s.statsFileIndexes, nil
	}

	if err := s.writeStatsFileAndClear(ctx, s.statsFile.Blocks[0].PhysicalId); err != nil {
		return nil, errors.Trace(err)
	}
	return s.statsFileIndexes, nil
}

func RestoreStats(
	ctx context.Context,
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
	statsHandler *handle.Handle,
	newTableInfo *model.TableInfo,
	statsFileIndexes []*backuppb.StatsFileIndex,
	rewriteIDMap map[int64]int64,
) error {
	eg, ectx := errgroup.WithContext(ctx)
	taskCh := make(chan *statstypes.PartitionStatisticLoadTask, 8)
	eg.Go(func() error {
		return downloadStats(ectx, storage, cipher, statsFileIndexes, rewriteIDMap, taskCh)
	})
	eg.Go(func() error {
		// NOTICE: skip updating the cache after loading stats from JSON
		return statsHandler.LoadStatsFromJSONConcurrently(ectx, newTableInfo, taskCh, 0)
	})
	return eg.Wait()
}

func downloadStats(
	ctx context.Context,
	storage storage.ExternalStorage,
	cipher *backuppb.CipherInfo,
	statsFileIndexes []*backuppb.StatsFileIndex,
	rewriteIDMap map[int64]int64,
	taskCh chan<- *statstypes.PartitionStatisticLoadTask,
) error {
	defer close(taskCh)
	eg, ectx := errgroup.WithContext(ctx)
	downloadWorkerpool := utils.NewWorkerPool(4, "download stats for each partition")
	for _, statsFileIndex := range statsFileIndexes {
		if ectx.Err() != nil {
			break
		}
		statsFile := statsFileIndex
		downloadWorkerpool.ApplyOnErrorGroup(eg, func() error {
			var statsContent []byte
			if len(statsFile.InlineData) > 0 {
				statsContent = statsFile.InlineData
			} else {
				content, err := storage.ReadFile(ectx, statsFile.Name)
				if err != nil {
					return errors.Trace(err)
				}

				decryptContent, err := Decrypt(content, cipher, statsFile.CipherIv)
				if err != nil {
					return errors.Trace(err)
				}

				checksum := sha256.Sum256(decryptContent)
				if !bytes.Equal(statsFile.Sha256, checksum[:]) {
					return berrors.ErrInvalidMetaFile.GenWithStackByArgs(fmt.Sprintf(
						"checksum mismatch expect %x, got %x", statsFile.Sha256, checksum[:]))
				}

				statsContent = decryptContent
			}

			statsFileBlocks := &backuppb.StatsFile{}
			if err := proto.Unmarshal(statsContent, statsFileBlocks); err != nil {
				return errors.Trace(err)
			}

			for _, block := range statsFileBlocks.Blocks {
				physicalId, ok := rewriteIDMap[block.PhysicalId]
				if !ok {
					return berrors.ErrRestoreInvalidRewrite.GenWithStackByArgs(fmt.Sprintf(
						"no rewrite rule matched, old physical id: %d", block.PhysicalId))
				}

				jsonTable := &statsutil.JSONTable{}
				if err := json.Unmarshal(block.JsonTable, jsonTable); err != nil {
					return errors.Trace(err)
				}

				select {
				case <-ectx.Done():
					return nil
				case taskCh <- &statstypes.PartitionStatisticLoadTask{
					PhysicalID: physicalId,
					JSONTable:  jsonTable,
				}:
				}
			}
			return nil
		})
	}
	return eg.Wait()
}
br/pkg/metautil/statsfile.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.9988941550254822, 0.17087221145629883, 0.0001673046062933281, 0.003268301719799638, 0.3447405993938446 ]
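The hunk repeated with each candidate file replaces the inline proto.Marshal call with a flushTemporary helper that marshals the buffered blocks and clears them in one step, so the accumulated JSON byte slices can be garbage collected right after each flush. Below is a minimal sketch of the same size-threshold flush-and-clear pattern, using a hypothetical chunkWriter rather than BR's real types.

package main

import "fmt"

// chunkWriter buffers records and flushes once the accumulated size crosses
// a threshold, mirroring StatsWriter's totalSize/maxStatsJsonTableSize logic.
// Resetting the slice after each flush drops the references so the buffered
// byte slices become collectible promptly.
type chunkWriter struct {
	limit   int
	total   int
	pending [][]byte
	flushed int
}

func (w *chunkWriter) add(rec []byte) {
	w.pending = append(w.pending, rec)
	w.total += len(rec)
	if w.total > w.limit {
		w.flush()
	}
}

func (w *chunkWriter) flush() {
	if len(w.pending) == 0 {
		return
	}
	// In BR this is where the blocks are marshaled, encrypted, and written
	// to external storage; here we only count the flushes.
	w.flushed++
	w.pending = nil // release the buffered records for GC
	w.total = 0
}

func main() {
	w := &chunkWriter{limit: 10}
	for i := 0; i < 5; i++ {
		// 10 bytes per record; every second add pushes total past the limit.
		w.add([]byte("0123456789"))
	}
	w.flush() // final flush, analogous to BackupStatsDone
	fmt.Println("flush count:", w.flushed)
}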
{ "id": 5, "code_window": [ "\t\t},\n", "\t}\n", "}\n", "\n", "func (s *StatsWriter) clearTemporary() {\n", "\t// clear the temporary variables\n", "\ts.totalSize = 0\n", "\ts.statsFile = &backuppb.StatsFile{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// flush temporary and clear []byte to make it garbage collected as soon as possible\n", "func (s *StatsWriter) flushTemporary() ([]byte, error) {\n", "\tdefer s.clearTemporary()\n", "\treturn proto.Marshal(s.statsFile)\n", "}\n", "\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "add", "edit_start_line_idx": 73 }
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package column

import (
	"github.com/pingcap/tidb/pkg/parser/ast"
	"github.com/pingcap/tidb/pkg/parser/charset"
	"github.com/pingcap/tidb/pkg/parser/mysql"
	"github.com/pingcap/tidb/pkg/types"
)

// ConvertColumnInfo converts `*ast.ResultField` to `*Info`
func ConvertColumnInfo(fld *ast.ResultField) (ci *Info) {
	ci = &Info{
		Name:         fld.ColumnAsName.O,
		OrgName:      fld.Column.Name.O,
		Table:        fld.TableAsName.O,
		Schema:       fld.DBName.O,
		Flag:         uint16(fld.Column.GetFlag()),
		Charset:      uint16(mysql.CharsetNameToID(fld.Column.GetCharset())),
		Type:         fld.Column.GetType(),
		DefaultValue: fld.Column.GetDefaultValue(),
	}
	if fld.EmptyOrgName {
		ci.OrgName = ""
	}
	if fld.Table != nil {
		ci.OrgTable = fld.Table.Name.O
	}
	if fld.Column.GetFlen() != types.UnspecifiedLength {
		ci.ColumnLength = uint32(fld.Column.GetFlen())
	}
	if fld.Column.GetType() == mysql.TypeNewDecimal {
		// Consider the negative sign.
		ci.ColumnLength++
		if fld.Column.GetDecimal() > types.DefaultFsp {
			// Consider the decimal point.
			ci.ColumnLength++
		}
	} else if types.IsString(fld.Column.GetType()) ||
		fld.Column.GetType() == mysql.TypeEnum || fld.Column.GetType() == mysql.TypeSet { // issue #18870
		// Fix issue #4540.
		// The flen is a hint, not a precise value, so most clients will not use it.
		// But we found that rare MySQL clients, like Navicat for MySQL (versions before 12),
		// truncate the `show create table` result. To fix this case, we must use a flen large
		// enough to prevent the truncation; MySQL multiplies the byte length by a multiplier
		// based on the character set. For example:
		// * latin, the multiplier is 1
		// * gb2312, the multiplier is 2
		// * utf-8, the multiplier is 3
		// * utf8mb4, the multiplier is 4
		// We used to check non-string types to avoid the truncation problem in some MySQL
		// clients such as Navicat. Now we only allow string types to enter this branch.
		charsetDesc, err := charset.GetCharsetInfo(fld.Column.GetCharset())
		if err != nil {
			ci.ColumnLength *= 4
		} else {
			ci.ColumnLength *= uint32(charsetDesc.Maxlen)
		}
	}

	if fld.Column.GetDecimal() == types.UnspecifiedLength {
		if fld.Column.GetType() == mysql.TypeDuration {
			ci.Decimal = uint8(types.DefaultFsp)
		} else {
			ci.Decimal = mysql.NotFixedDec
		}
	} else {
		ci.Decimal = uint8(fld.Column.GetDecimal())
	}

	// Keep things compatible for old clients.
	// Refer to mysql-server/sql/protocol.cc send_result_set_metadata()
	if ci.Type == mysql.TypeVarchar {
		ci.Type = mysql.TypeVarString
	}
	return
}
pkg/server/internal/column/convert.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00018866411119233817, 0.00017213226237799972, 0.00016675530059728771, 0.00017034131451509893, 0.000006156424205983058 ]
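ConvertColumnInfo widens the reported column length for string types by the charset's maximum bytes per character, falling back to 4 when the charset is unknown. Below is a small sketch of that arithmetic, with a hypothetical maxBytesPerChar table standing in for charset.GetCharsetInfo.

package main

import "fmt"

// maxBytesPerChar mirrors the multipliers listed in the comment above: the
// flen reported to clients is the character length times the maximum bytes
// per character of the column's charset.
var maxBytesPerChar = map[string]uint32{
	"latin1":  1,
	"gb2312":  2,
	"utf8":    3,
	"utf8mb4": 4,
}

func columnLength(flenChars uint32, cs string) uint32 {
	if m, ok := maxBytesPerChar[cs]; ok {
		return flenChars * m
	}
	return flenChars * 4 // unknown charset: fall back to the widest multiplier
}

func main() {
	for _, cs := range []string{"latin1", "utf8mb4", "unknown"} {
		fmt.Printf("VARCHAR(255) under %s -> ColumnLength %d\n", cs, columnLength(255, cs))
	}
}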
{ "id": 5, "code_window": [ "\t\t},\n", "\t}\n", "}\n", "\n", "func (s *StatsWriter) clearTemporary() {\n", "\t// clear the temporary variables\n", "\ts.totalSize = 0\n", "\ts.statsFile = &backuppb.StatsFile{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// flush temporary and clear []byte to make it garbage collected as soon as possible\n", "func (s *StatsWriter) flushTemporary() ([]byte, error) {\n", "\tdefer s.clearTemporary()\n", "\treturn proto.Marshal(s.statsFile)\n", "}\n", "\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "add", "edit_start_line_idx": 73 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "etcd", srcs = ["etcd.go"], importpath = "github.com/pingcap/tidb/pkg/util/etcd", visibility = ["//visibility:public"], deps = [ "@com_github_pingcap_errors//:errors", "@io_etcd_go_etcd_client_v3//:client", "@io_etcd_go_etcd_client_v3//namespace", ], ) go_test( name = "etcd_test", timeout = "short", srcs = ["etcd_test.go"], embed = [":etcd"], flaky = True, deps = [ "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", "@io_etcd_go_etcd_client_v3//:client", "@io_etcd_go_etcd_tests_v3//integration", ], )
pkg/util/etcd/BUILD.bazel
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00017068990564439446, 0.0001698704290902242, 0.00016921701899264008, 0.00016970436263363808, 6.126621201474336e-7 ]
{ "id": 5, "code_window": [ "\t\t},\n", "\t}\n", "}\n", "\n", "func (s *StatsWriter) clearTemporary() {\n", "\t// clear the temporary variables\n", "\ts.totalSize = 0\n", "\ts.statsFile = &backuppb.StatsFile{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// flush temporary and clear []byte to make it garbage collected as soon as possible\n", "func (s *StatsWriter) flushTemporary() ([]byte, error) {\n", "\tdefer s.clearTemporary()\n", "\treturn proto.Marshal(s.statsFile)\n", "}\n", "\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "add", "edit_start_line_idx": 73 }
recordcount=1000
operationcount=0
workload=core

readallfields=true

readproportion=0
updateproportion=0
scanproportion=0
insertproportion=0

requestdistribution=uniform
br/tests/br_split_region_fail/workload
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00017070243484340608, 0.00016951837460510433, 0.0001683343289187178, 0.00016951837460510433, 0.0000011840529623441398 ]
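This go-ycsb workload sets operationcount and every operation proportion to 0, so a run only loads recordcount rows, which is all the BR split-region test needs as backup input. Below is a tiny sketch of reading such key=value properties, assuming a hypothetical parseWorkload helper rather than go-ycsb's own loader.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseWorkload reads YCSB-style key=value properties, skipping blank lines
// and comments, into a simple map.
func parseWorkload(s string) map[string]string {
	props := make(map[string]string)
	sc := bufio.NewScanner(strings.NewReader(s))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if k, v, ok := strings.Cut(line, "="); ok {
			props[k] = v
		}
	}
	return props
}

func main() {
	props := parseWorkload("recordcount=1000\noperationcount=0\nworkload=core")
	fmt.Println("records:", props["recordcount"], "operations:", props["operationcount"])
}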
{ "id": 6, "code_window": [ "\n", "func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {\n", "\tfileName := getStatsFileName(physicalID)\n", "\tcontent, err := proto.Marshal(s.statsFile)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontent, err := s.flushTemporary()\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 83 }
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gctuner

import (
	"runtime"
	"runtime/debug"
	"testing"
	"time"

	"github.com/pingcap/failpoint"
	"github.com/pingcap/tidb/pkg/util/memory"
	"github.com/stretchr/testify/require"
)

type mockAllocator struct {
	m [][]byte
}

func (a *mockAllocator) alloc(bytes int) (handle int) {
	sli := make([]byte, bytes)
	a.m = append(a.m, sli)
	return len(a.m) - 1
}

func (a *mockAllocator) free(handle int) {
	a.m[handle] = nil
}

func (a *mockAllocator) freeAll() {
	a.m = nil
	runtime.GC()
}

func TestGlobalMemoryTuner(t *testing.T) {
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/util/gctuner/testMemoryLimitTuner", "return(true)"))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/util/gctuner/testMemoryLimitTuner"))
	}()
	// Close GOGCTuner
	gogcTuner := EnableGOGCTuner.Load()
	EnableGOGCTuner.Store(false)
	defer EnableGOGCTuner.Store(gogcTuner)

	memory.ServerMemoryLimit.Store(1 << 30)   // 1GB
	GlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB
	GlobalMemoryLimitTuner.UpdateMemoryLimit()
	require.True(t, GlobalMemoryLimitTuner.isValidValueSet.Load())
	defer func() {
		// If test.count > 1, wait until tuning is finished.
		require.Eventually(t, func() bool {
			//nolint: all_revive
			return GlobalMemoryLimitTuner.isValidValueSet.Load()
		}, 5*time.Second, 100*time.Millisecond)
		require.Eventually(t, func() bool {
			//nolint: all_revive
			return !GlobalMemoryLimitTuner.adjustPercentageInProgress.Load()
		}, 5*time.Second, 100*time.Millisecond)
		require.Eventually(t, func() bool {
			//nolint: all_revive
			return !GlobalMemoryLimitTuner.nextGCTriggeredByMemoryLimit.Load()
		}, 5*time.Second, 100*time.Millisecond)
	}()

	allocator := &mockAllocator{}
	defer allocator.freeAll()
	r := &runtime.MemStats{}
	getNowGCNum := func() uint32 {
		runtime.ReadMemStats(r)
		return r.NumGC
	}
	checkNextGCEqualMemoryLimit := func() {
		runtime.ReadMemStats(r)
		nextGC := r.NextGC
		memoryLimit := GlobalMemoryLimitTuner.calcMemoryLimit(GlobalMemoryLimitTuner.GetPercentage())
		// Refer to golang source code, nextGC = memoryLimit - nonHeapMemory - overageMemory - headroom
		require.True(t, nextGC < uint64(memoryLimit))
	}

	memory600mb := allocator.alloc(600 << 20)
	gcNum := getNowGCNum()
	memory210mb := allocator.alloc(210 << 20)
	require.Eventually(t, func() bool {
		return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNum < getNowGCNum()
	}, 5*time.Second, 100*time.Millisecond)
	// Test waiting for reset
	require.Eventually(t, func() bool {
		return GlobalMemoryLimitTuner.calcMemoryLimit(fallbackPercentage) == debug.SetMemoryLimit(-1)
	}, 5*time.Second, 100*time.Millisecond)
	gcNum = getNowGCNum()
	memory100mb := allocator.alloc(100 << 20)
	require.Eventually(t, func() bool {
		return gcNum == getNowGCNum()
	}, 5*time.Second, 100*time.Millisecond) // No GC

	allocator.free(memory210mb)
	allocator.free(memory100mb)
	runtime.GC()
	// Trigger GC in 80% again
	require.Eventually(t, func() bool {
		return GlobalMemoryLimitTuner.calcMemoryLimit(GlobalMemoryLimitTuner.GetPercentage()) == debug.SetMemoryLimit(-1)
	}, 5*time.Second, 100*time.Millisecond)
	time.Sleep(100 * time.Millisecond)
	gcNum = getNowGCNum()
	checkNextGCEqualMemoryLimit()
	memory210mb = allocator.alloc(210 << 20)
	require.Eventually(t, func() bool {
		return gcNum < getNowGCNum()
	}, 5*time.Second, 100*time.Millisecond)
	allocator.free(memory210mb)
	allocator.free(memory600mb)
}

func TestIssue48741(t *testing.T) {
	// Close GOGCTuner
	gogcTuner := EnableGOGCTuner.Load()
	EnableGOGCTuner.Store(false)
	defer EnableGOGCTuner.Store(gogcTuner)

	getMemoryLimitGCTotal := func() int64 {
		return memory.MemoryLimitGCTotal.Load()
	}

	waitingTunningFinishFn := func() {
		for GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() {
			time.Sleep(10 * time.Millisecond)
		}
	}

	allocator := &mockAllocator{}
	defer allocator.freeAll()

	checkIfMemoryLimitIsModified := func() {
		// Try to trigger GC by 1GB * 80% = 800MB (tidb_server_memory_limit * tidb_server_memory_limit_gc_trigger)
		gcNum := getMemoryLimitGCTotal()
		memory810mb := allocator.alloc(810 << 20)
		require.Eventually(t,
			// Wait for the GC triggered by memory810mb
			func() bool {
				return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNum < getMemoryLimitGCTotal()
			},
			500*time.Millisecond, 100*time.Millisecond)

		// update memoryLimit and sleep 500ms to let t.UpdateMemoryLimit() be called.
		memory.ServerMemoryLimit.Store(1500 << 20) // 1.5 GB
		time.Sleep(500 * time.Millisecond)
		// UpdateMemoryLimit succeeds during tuning.
		require.True(t, GlobalMemoryLimitTuner.adjustPercentageInProgress.Load())
		require.Equal(t, debug.SetMemoryLimit(-1), int64(1500<<20*80/100))

		waitingTunningFinishFn()
		// After the GC triggered by memory810mb.
		gcNumAfterMemory810mb := getMemoryLimitGCTotal()

		memory200mb := allocator.alloc(200 << 20)
		time.Sleep(2 * time.Second)
		// The heapInUse is less than 1.5GB * 80% = 1.2GB, so the GC will not be triggered.
		require.Equal(t, gcNumAfterMemory810mb, getMemoryLimitGCTotal())

		memory300mb := allocator.alloc(300 << 20)
		require.Eventually(t,
			// Wait for the GC triggered by memory300mb
			func() bool {
				return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNumAfterMemory810mb < getMemoryLimitGCTotal()
			},
			5*time.Second, 100*time.Millisecond)

		// Sleep 500ms to let t.UpdateMemoryLimit() be called.
		time.Sleep(500 * time.Millisecond)
		// The memory limit will be 1.5GB * 110% during tuning.
		require.Equal(t, debug.SetMemoryLimit(-1), int64(1500<<20*110/100))
		require.True(t, GlobalMemoryLimitTuner.adjustPercentageInProgress.Load())

		allocator.free(memory810mb)
		allocator.free(memory200mb)
		allocator.free(memory300mb)
	}

	checkIfMemoryLimitNotModified := func() {
		// Try to trigger GC by 1GB * 80% = 800MB (tidb_server_memory_limit * tidb_server_memory_limit_gc_trigger)
		gcNum := getMemoryLimitGCTotal()
		memory810mb := allocator.alloc(810 << 20)
		require.Eventually(t,
			// Wait for the GC triggered by memory810mb
			func() bool {
				return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNum < getMemoryLimitGCTotal()
			},
			500*time.Millisecond, 100*time.Millisecond)
		// During the process of adjusting the percentage, the memory limit will be set to 1GB * 110% = 1.1GB.
		require.Equal(t, debug.SetMemoryLimit(-1), int64(1<<30*110/100))

		gcNumAfterMemory810mb := getMemoryLimitGCTotal()
		// After the GC triggered by memory810mb.
		waitingTunningFinishFn()

		require.Eventually(t,
			// The GC will be triggered immediately after memoryLimit is set back to 1GB * 80% = 800MB.
			func() bool {
				return GlobalMemoryLimitTuner.adjustPercentageInProgress.Load() && gcNumAfterMemory810mb < getMemoryLimitGCTotal()
			},
			2*time.Second, 100*time.Millisecond)

		allocator.free(memory810mb)
	}

	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/util/gctuner/mockUpdateGlobalVarDuringAdjustPercentage", "return(true)"))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/util/gctuner/mockUpdateGlobalVarDuringAdjustPercentage"))
	}()

	memory.ServerMemoryLimit.Store(1 << 30)   // 1GB
	GlobalMemoryLimitTuner.SetPercentage(0.8) // 1GB * 80% = 800MB
	GlobalMemoryLimitTuner.UpdateMemoryLimit()
	require.Equal(t, debug.SetMemoryLimit(-1), int64(1<<30*80/100))

	checkIfMemoryLimitNotModified()
	waitingTunningFinishFn()
	checkIfMemoryLimitIsModified()
}
pkg/util/gctuner/memory_limit_tuner_test.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001765127235557884, 0.00017073366325348616, 0.0001624917349545285, 0.00017154059605672956, 0.000003687705429911148 ]
{ "id": 6, "code_window": [ "\n", "func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {\n", "\tfileName := getStatsFileName(physicalID)\n", "\tcontent, err := proto.Marshal(s.statsFile)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontent, err := s.flushTemporary()\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 83 }
insert into b values (13);
br/tests/lightning_checkpoint_engines/data/cpeng.b.2.sql
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.000172543921507895, 0.000172543921507895, 0.000172543921507895, 0.000172543921507895, 0 ]
{ "id": 6, "code_window": [ "\n", "func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {\n", "\tfileName := getStatsFileName(physicalID)\n", "\tcontent, err := proto.Marshal(s.statsFile)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontent, err := s.flushTemporary()\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 83 }
set tidb_cost_model_version=1;
set @@sql_mode = 'strict_trans_tables';
drop table if exists t;
create table t (a varchar(255) charset gbk, b varchar(255) charset ascii, c varchar(255) charset utf8);
insert into t values ('中文', 'asdf', '字符集');
-- error 1366
insert into t values ('À', 'ø', '😂');
-- error 1366
insert into t values ('中文À中文', 'asdføfdsa', '字符集😂字符集');
-- error 1366
insert into t values (0x4040ffff, 0x4040ffff, 0x4040ffff);
select * from t;
set @@sql_mode = '';
insert into t values ('À', 'ø', '😂');
insert into t values ('中文À中文', 'asdføfdsa', '字符集😂字符集');
insert into t values (0x4040ffff, 0x4040ffff, 0x4040ffff);
select * from t;
set @@sql_mode = default;
drop table t;
create table t(f set(0xD2BB, 0xC8FD), e enum(0xBAEC,0x6A59));
show create table t;
drop table t;
create table t( e enum(0xBAEC,0x6A59));
show create table t;
drop table t;
create table t(f set(0xD2BB, 0xC8FD), e enum(0xBAEC,0x6A59)) collate gbk_bin;
show create table t;
drop table t;
create table t( e enum(0xBAEC,0x6A59)) collate gbk_bin;
show create table t;
set @@sql_mode = '';
tests/integrationtest/t/new_character_set_invalid.test
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00017663175822235644, 0.00017141830176115036, 0.00016926575335673988, 0.00016988784773275256, 0.000003036081807294977 ]
{ "id": 6, "code_window": [ "\n", "func (s *StatsWriter) writeStatsFileAndClear(ctx context.Context, physicalID int64) error {\n", "\tfileName := getStatsFileName(physicalID)\n", "\tcontent, err := proto.Marshal(s.statsFile)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontent, err := s.flushTemporary()\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 83 }
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metrics

import (
	"github.com/pingcap/tidb/pkg/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

var (
	// MissCounter is the counter of missing cache.
	MissCounter prometheus.Counter
	// HitCounter is the counter of hitting cache.
	HitCounter prometheus.Counter
	// UpdateCounter is the counter of updating cache.
	UpdateCounter prometheus.Counter
	// DelCounter is the counter of deleting cache.
	DelCounter prometheus.Counter
	// EvictCounter is the counter of evicting cache.
	EvictCounter prometheus.Counter
	// RejectCounter is the counter of rejecting cache.
	RejectCounter prometheus.Counter
	// CostGauge is the gauge of cost time.
	CostGauge prometheus.Gauge
	// CapacityGauge is the gauge of capacity.
	CapacityGauge prometheus.Gauge
)

func init() {
	initMetricsVars()
}

// initMetricsVars initializes stats cache metrics vars.
func initMetricsVars() {
	metrics.StatsCacheCounter = metrics.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "statistics",
			Name:      "stats_cache_op",
			Help:      "Counter for statsCache operation",
		}, []string{metrics.LblType})

	metrics.StatsCacheGauge = metrics.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "tidb",
		Subsystem: "statistics",
		Name:      "stats_cache_val",
		Help:      "gauge of stats cache value",
	}, []string{metrics.LblType})

	MissCounter = metrics.StatsCacheCounter.WithLabelValues("miss")
	HitCounter = metrics.StatsCacheCounter.WithLabelValues("hit")
	UpdateCounter = metrics.StatsCacheCounter.WithLabelValues("update")
	DelCounter = metrics.StatsCacheCounter.WithLabelValues("del")
	EvictCounter = metrics.StatsCacheCounter.WithLabelValues("evict")
	RejectCounter = metrics.StatsCacheCounter.WithLabelValues("reject")
	CostGauge = metrics.StatsCacheGauge.WithLabelValues("track")
	CapacityGauge = metrics.StatsCacheGauge.WithLabelValues("capacity")
}
pkg/statistics/handle/cache/internal/metrics/metrics.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0005033437046222389, 0.00022021403128746897, 0.00016753097588662058, 0.00017509340250398964, 0.00011561808059923351 ]
{ "id": 7, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tchecksum := sha256.Sum256(content)\n", "\n", "\tencryptedContent, iv, err := Encrypt(content, s.cipher)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tsizeOri := uint64(len(content))\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 94 }
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gctuner

import (
	"math"
	"runtime/debug"
	"time"

	"github.com/pingcap/failpoint"
	"github.com/pingcap/tidb/pkg/util"
	"github.com/pingcap/tidb/pkg/util/intest"
	"github.com/pingcap/tidb/pkg/util/memory"
	atomicutil "go.uber.org/atomic"
)

// GlobalMemoryLimitTuner is the only memory limit tuner allowed in one process.
var GlobalMemoryLimitTuner = &memoryLimitTuner{}

// The Go runtime triggers GC when the memory limit managed via runtime/debug.SetMemoryLimit is hit,
// so we can change the memory limit dynamically to avoid frequent GC when memory usage is greater than the limit.
type memoryLimitTuner struct {
	finalizer                    *finalizer
	isValidValueSet              atomicutil.Bool
	percentage                   atomicutil.Float64
	adjustPercentageInProgress   atomicutil.Bool
	serverMemLimitBeforeAdjust   atomicutil.Uint64
	percentageBeforeAdjust       atomicutil.Float64
	nextGCTriggeredByMemoryLimit atomicutil.Bool
}

// fallbackPercentage indicates the fallback memory limit percentage when tuning.
const fallbackPercentage float64 = 1.1

var memoryGoroutineCntInTest = *atomicutil.NewInt64(0)

// WaitMemoryLimitTunerExitInTest is used to wait for the memory limit tuner to exit in tests.
func WaitMemoryLimitTunerExitInTest() {
	if intest.InTest {
		for memoryGoroutineCntInTest.Load() > 0 {
			time.Sleep(100 * time.Millisecond)
		}
	}
}

// tuning checks the memory nextGC and judges whether this GC was triggered by the memory limit.
// The Go runtime ensures that it will be called serially.
func (t *memoryLimitTuner) tuning() {
	if !t.isValidValueSet.Load() {
		return
	}
	r := memory.ForceReadMemStats()
	gogc := util.GetGOGC()
	ratio := float64(100+gogc) / 100
	// This `if` checks whether the **last** GC was triggered by MemoryLimit as far as possible.
	// If the **last** GC was triggered by MemoryLimit, we'll set MemoryLimit to MAXVALUE to return control back to GOGC
	// to avoid frequent GC when memory usage fluctuates above and below MemoryLimit.
	// The logic to judge whether the **last** GC was triggered by MemoryLimit is as follows:
	// suppose `NextGC` = `HeapInUse * (100 + GOGC) / 100`,
	// - If NextGC < MemoryLimit, the **next** GC will **not** be triggered by MemoryLimit thus we do not care about
	//   why the **last** GC is triggered. And MemoryLimit will not be reset this time.
	// - Only if NextGC >= MemoryLimit, the **next** GC will be triggered by MemoryLimit. Thus, we need to reset
	//   MemoryLimit after the **next** GC happens if needed.
	if float64(r.HeapInuse)*ratio > float64(debug.SetMemoryLimit(-1)) {
		if t.nextGCTriggeredByMemoryLimit.Load() && t.adjustPercentageInProgress.CompareAndSwap(false, true) {
			// It's ok to update `adjustPercentageInProgress`, `serverMemLimitBeforeAdjust` and `percentageBeforeAdjust` not in a transaction.
			// The update of memory limit is eventually consistent.
			t.serverMemLimitBeforeAdjust.Store(memory.ServerMemoryLimit.Load())
			t.percentageBeforeAdjust.Store(t.GetPercentage())
			go func() {
				if intest.InTest {
					memoryGoroutineCntInTest.Inc()
					defer memoryGoroutineCntInTest.Dec()
				}
				memory.MemoryLimitGCLast.Store(time.Now())
				memory.MemoryLimitGCTotal.Add(1)
				debug.SetMemoryLimit(t.calcMemoryLimit(fallbackPercentage))
				resetInterval := 1 * time.Minute // Wait 1 minute and set back, to avoid frequent GC
				if intest.InTest {
					resetInterval = 3 * time.Second
				}
				failpoint.Inject("mockUpdateGlobalVarDuringAdjustPercentage", func(val failpoint.Value) {
					if val, ok := val.(bool); val && ok {
						time.Sleep(300 * time.Millisecond)
						t.UpdateMemoryLimit()
					}
				})
				failpoint.Inject("testMemoryLimitTuner", func(val failpoint.Value) {
					if val, ok := val.(bool); val && ok {
						resetInterval = 1 * time.Second
					}
				})
				time.Sleep(resetInterval)
				debug.SetMemoryLimit(t.calcMemoryLimit(t.GetPercentage()))
				for !t.adjustPercentageInProgress.CompareAndSwap(true, false) {
					continue
				}
			}()
			memory.TriggerMemoryLimitGC.Store(true)
		}
		t.nextGCTriggeredByMemoryLimit.Store(true)
	} else {
		t.nextGCTriggeredByMemoryLimit.Store(false)
		memory.TriggerMemoryLimitGC.Store(false)
	}
}

// Start starts the memory limit tuner.
func (t *memoryLimitTuner) Start() {
	t.finalizer = newFinalizer(t.tuning) // Start tuning
}

// Stop stops the memory limit tuner.
func (t *memoryLimitTuner) Stop() {
	t.finalizer.stop()
}

// SetPercentage sets the percentage for the memory limit tuner.
func (t *memoryLimitTuner) SetPercentage(percentage float64) {
	t.percentage.Store(percentage)
}

// GetPercentage gets the percentage from the memory limit tuner.
func (t *memoryLimitTuner) GetPercentage() float64 {
	return t.percentage.Load()
}

// UpdateMemoryLimit updates the memory limit.
// This function should be called when `tidb_server_memory_limit` or `tidb_server_memory_limit_gc_trigger` is modified.
func (t *memoryLimitTuner) UpdateMemoryLimit() {
	if t.adjustPercentageInProgress.Load() {
		if t.serverMemLimitBeforeAdjust.Load() == memory.ServerMemoryLimit.Load() && t.percentageBeforeAdjust.Load() == t.GetPercentage() {
			return
		}
	}
	var memoryLimit = t.calcMemoryLimit(t.GetPercentage())
	if memoryLimit == math.MaxInt64 {
		t.isValidValueSet.Store(false)
		memoryLimit = initGOMemoryLimitValue
	} else {
		t.isValidValueSet.Store(true)
	}
	debug.SetMemoryLimit(memoryLimit)
}

func (*memoryLimitTuner) calcMemoryLimit(percentage float64) int64 {
	memoryLimit := int64(float64(memory.ServerMemoryLimit.Load()) * percentage) // `tidb_server_memory_limit` * `tidb_server_memory_limit_gc_trigger`
	if memoryLimit == 0 {
		memoryLimit = math.MaxInt64
	}
	return memoryLimit
}

var initGOMemoryLimitValue int64

func init() {
	initGOMemoryLimitValue = debug.SetMemoryLimit(-1)
	GlobalMemoryLimitTuner.Start()
}
pkg/util/gctuner/memory_limit_tuner.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00018140251631848514, 0.00017156587273348123, 0.00016661018889863044, 0.00017106311861425638, 0.00000409854510508012 ]
{ "id": 7, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tchecksum := sha256.Sum256(content)\n", "\n", "\tencryptedContent, iv, err := Encrypt(content, s.cipher)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tsizeOri := uint64(len(content))\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 94 }
#!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -euE

# Populate the mydumper source
DBPATH="$TEST_DIR/cp.mydump"
mkdir -p $DBPATH
echo 'CREATE DATABASE cp_tsr;' > "$DBPATH/cp_tsr-schema-create.sql"
echo "CREATE TABLE tbl(i TINYINT PRIMARY KEY, j INT);" > "$DBPATH/cp_tsr.tbl-schema.sql"
# the column order in the data file is different from the table schema order.
echo "INSERT INTO tbl (j, i) VALUES (3, 1),(4, 2);" > "$DBPATH/cp_tsr.tbl.sql"

# Set minDeliverBytes to a small enough number to write only 1 row each time
# Set the failpoint to kill the lightning instance as soon as one row is written
PKG="github.com/pingcap/tidb/br/pkg/lightning/importer"
export GO_FAILPOINTS="$PKG/SlowDownWriteRows=sleep(1000);$PKG/FailAfterWriteRows=panic;$PKG/SetMinDeliverBytes=return(1)"
# Check that after 1 row is written in the tidb backend, the finished progress is updated
export GO_FAILPOINTS="${GO_FAILPOINTS};github.com/pingcap/tidb/br/pkg/lightning/PrintStatus=return()"

# Start importing the tables.
run_sql 'DROP DATABASE IF EXISTS cp_tsr'
run_sql 'DROP DATABASE IF EXISTS tidb_lightning_checkpoint_test'

set +e
run_lightning -d "$DBPATH" --backend tidb --enable-checkpoint=1 2> /dev/null
set -e

run_sql 'SELECT count(*) FROM `cp_tsr`.tbl'
check_contains "count(*): 1"

# After FailAfterWriteRows, the finished bytes is 36 as the first row size
grep "PrintStatus Failpoint" "$TEST_DIR/lightning.log" | grep -q "finished=36"

# restart lightning from checkpoint, the second line should be written successfully
# also check that after restarting from the checkpoint, the final finished equals the total
export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/lightning/PrintStatus=return()"
set +e
run_lightning -d "$DBPATH" --backend tidb --enable-checkpoint=1 2> /dev/null
set -e

run_sql 'SELECT j FROM `cp_tsr`.tbl WHERE i = 2;'
check_contains "j: 4"
grep "PrintStatus Failpoint" "$TEST_DIR/lightning.log" | grep -q "equal=true"
br/tests/lightning_checkpoint_columns/run.sh
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.000183960422873497, 0.0001734180113999173, 0.00016564060933887959, 0.0001728769129840657, 0.000005893210527574411 ]
{ "id": 7, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tchecksum := sha256.Sum256(content)\n", "\n", "\tencryptedContent, iv, err := Encrypt(content, s.cipher)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tsizeOri := uint64(len(content))\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 94 }
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "isolation", srcs = [ "base.go", "optimistic.go", "readcommitted.go", "repeatable_read.go", "serializable.go", ], importpath = "github.com/pingcap/tidb/pkg/sessiontxn/isolation", visibility = ["//visibility:public"], deps = [ "//pkg/config", "//pkg/infoschema", "//pkg/kv", "//pkg/parser/ast", "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/planner/core", "//pkg/sessionctx", "//pkg/sessionctx/variable", "//pkg/sessiontxn", "//pkg/sessiontxn/internal", "//pkg/sessiontxn/isolation/metrics", "//pkg/sessiontxn/staleread", "//pkg/table/temptable", "//pkg/util/logutil", "//pkg/util/tracing", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_tikv_client_go_v2//error", "@com_github_tikv_client_go_v2//oracle", "@org_uber_go_zap//:zap", ], ) go_test( name = "isolation_test", timeout = "short", srcs = [ "main_test.go", "optimistic_test.go", "readcommitted_test.go", "repeatable_read_test.go", "serializable_test.go", ], flaky = True, shard_count = 28, deps = [ ":isolation", "//pkg/config", "//pkg/executor", "//pkg/expression", "//pkg/infoschema", "//pkg/kv", "//pkg/parser", "//pkg/parser/ast", "//pkg/planner", "//pkg/session", "//pkg/sessionctx", "//pkg/sessiontxn", "//pkg/testkit", "//pkg/testkit/testfork", "//pkg/testkit/testsetup", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//error", "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//tikv", "@org_uber_go_goleak//:goleak", ], )
pkg/sessiontxn/isolation/BUILD.bazel
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001743134780554101, 0.00017091116751544178, 0.00016684469301253557, 0.00017136616224888712, 0.000002407149850114365 ]
{ "id": 7, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tchecksum := sha256.Sum256(content)\n", "\n", "\tencryptedContent, iv, err := Encrypt(content, s.cipher)\n", "\tif err != nil {\n", "\t\treturn errors.Trace(err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tsizeOri := uint64(len(content))\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 94 }
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"flag"
	"os"

	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	"go.uber.org/zap"
)

func main() {
	cfg := NewConfig()
	err := cfg.Parse(os.Args[1:])
	switch errors.Cause(err) {
	case nil:
	case flag.ErrHelp:
		os.Exit(0)
	default:
		log.Error("parse cmd flags", zap.Error(err))
		os.Exit(2)
	}

	table := newTable()
	err = parseTableSQL(table, cfg.DDLCfg.TableSQL)
	if err != nil {
		log.Fatal(err.Error())
	}

	err = parseIndexSQL(table, cfg.DDLCfg.IndexSQL)
	if err != nil {
		log.Fatal(err.Error())
	}

	dbs, err := createDBs(cfg.DBCfg, cfg.SysCfg.WorkerCount)
	if err != nil {
		log.Fatal(err.Error())
	}
	defer closeDBs(dbs)

	if len(cfg.StatsCfg.Path) > 0 {
		statsInfo, err1 := loadStats(table.tblInfo, cfg.StatsCfg.Path)
		if err1 != nil {
			log.Fatal(err1.Error())
		}
		for _, idxInfo := range table.tblInfo.Indices {
			offset := idxInfo.Columns[0].Offset
			if hist, ok := statsInfo.Indices[idxInfo.ID]; ok && len(hist.Buckets) > 0 {
				table.columns[offset].hist = &histogram{
					Histogram: hist.Histogram,
					index:     hist.Info,
				}
			}
		}
		for i, colInfo := range table.tblInfo.Columns {
			if hist, ok := statsInfo.Columns[colInfo.ID]; ok && table.columns[i].hist == nil && len(hist.Buckets) > 0 {
				table.columns[i].hist = &histogram{
					Histogram: hist.Histogram,
				}
			}
		}
	}

	err = execSQL(dbs[0], cfg.DDLCfg.TableSQL)
	if err != nil {
		log.Fatal(err.Error())
	}

	err = execSQL(dbs[0], cfg.DDLCfg.IndexSQL)
	if err != nil {
		log.Fatal(err.Error())
	}

	doProcess(table, dbs, cfg.SysCfg.JobCount, cfg.SysCfg.WorkerCount, cfg.SysCfg.Batch)
}
cmd/importer/main.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00019242482085246593, 0.00017450639279559255, 0.00016272166976705194, 0.00017179826681967825, 0.000009848089575825725 ]
{ "id": 8, "code_window": [ "\ts.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{\n", "\t\tName: fileName,\n", "\t\tSha256: checksum[:],\n", "\t\tSizeEnc: uint64(len(encryptedContent)),\n", "\t\tSizeOri: uint64(len(content)),\n", "\t\tCipherIv: iv,\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\tSizeOri: sizeOri,\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 108 }
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.

package main

import (
	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	"github.com/pingcap/tidb/br/pkg/gluetikv"
	"github.com/pingcap/tidb/br/pkg/summary"
	"github.com/pingcap/tidb/br/pkg/task"
	"github.com/pingcap/tidb/br/pkg/trace"
	"github.com/pingcap/tidb/br/pkg/utils"
	"github.com/pingcap/tidb/br/pkg/version/build"
	"github.com/pingcap/tidb/pkg/config"
	"github.com/pingcap/tidb/pkg/session"
	"github.com/pingcap/tidb/pkg/util/metricsutil"
	"github.com/spf13/cobra"
	"go.uber.org/zap"
	"sourcegraph.com/sourcegraph/appdash"
)

func runBackupCommand(command *cobra.Command, cmdName string) error {
	cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}}
	if err := cfg.ParseFromFlags(command.Flags()); err != nil {
		command.SilenceUsage = false
		return errors.Trace(err)
	}

	if err := metricsutil.RegisterMetricsForBR(cfg.PD, cfg.KeyspaceName); err != nil {
		return errors.Trace(err)
	}

	ctx := GetDefaultContext()
	if cfg.EnableOpenTracing {
		var store *appdash.MemoryStore
		ctx, store = trace.TracerStartSpan(ctx)
		defer trace.TracerFinishSpan(ctx, store)
	}

	if cfg.FullBackupType == task.FullBackupTypeEBS {
		if err := task.RunBackupEBS(ctx, tidbGlue, &cfg); err != nil {
			log.Error("failed to backup", zap.Error(err))
			return errors.Trace(err)
		}
		return nil
	}

	// No need to cache the coprocessor result
	config.GetGlobalConfig().TiKVClient.CoprCache.CapacityMB = 0

	if err := task.RunBackup(ctx, tidbGlue, cmdName, &cfg); err != nil {
		log.Error("failed to backup", zap.Error(err))
		return errors.Trace(err)
	}
	return nil
}

func runBackupRawCommand(command *cobra.Command, cmdName string) error {
	cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}}
	if err := cfg.ParseBackupConfigFromFlags(command.Flags()); err != nil {
		command.SilenceUsage = false
		return errors.Trace(err)
	}
	ctx := GetDefaultContext()
	if cfg.EnableOpenTracing {
		var store *appdash.MemoryStore
		ctx, store = trace.TracerStartSpan(ctx)
		defer trace.TracerFinishSpan(ctx, store)
	}
	if err := task.RunBackupRaw(ctx, gluetikv.Glue{}, cmdName, &cfg); err != nil {
		log.Error("failed to backup raw kv", zap.Error(err))
		return errors.Trace(err)
	}
	return nil
}

func runBackupTxnCommand(command *cobra.Command, cmdName string) error {
	cfg := task.TxnKvConfig{Config: task.Config{LogProgress: HasLogFile()}}
	if err := cfg.ParseBackupConfigFromFlags(command.Flags()); err != nil {
		command.SilenceUsage = false
		return errors.Trace(err)
	}
	ctx := GetDefaultContext()
	if cfg.EnableOpenTracing {
		var store *appdash.MemoryStore
		ctx, store = trace.TracerStartSpan(ctx)
		defer trace.TracerFinishSpan(ctx, store)
	}
	if err := task.RunBackupTxn(ctx, gluetikv.Glue{}, cmdName, &cfg); err != nil {
		log.Error("failed to backup txn kv", zap.Error(err))
		return errors.Trace(err)
	}
	return nil
}

// NewBackupCommand returns a full backup subcommand.
func NewBackupCommand() *cobra.Command {
	command := &cobra.Command{
		Use:          "backup",
		Short:        "backup a TiDB/TiKV cluster",
		SilenceUsage: true,
		PersistentPreRunE: func(c *cobra.Command, args []string) error {
			if err := Init(c); err != nil {
				return errors.Trace(err)
			}
			build.LogInfo(build.BR)
			utils.LogEnvVariables()
			task.LogArguments(c)

			// Do not run stat worker in BR.
			session.DisableStats4Test()

			// Do not run ddl worker in BR.
			config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false)

			summary.SetUnit(summary.BackupUnit)
			return nil
		},
	}
	command.AddCommand(
		newFullBackupCommand(),
		newDBBackupCommand(),
		newTableBackupCommand(),
		newRawBackupCommand(),
		newTxnBackupCommand(),
	)
	task.DefineBackupFlags(command.PersistentFlags())
	return command
}

// newFullBackupCommand returns a full backup subcommand.
func newFullBackupCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "full",
		Short: "backup all database",
		// prevents incorrect usage like `--checksum false` instead of `--checksum=false`.
		// the former, according to pflag parsing rules, means `--checksum=true false`.
		Args: cobra.NoArgs,
		RunE: func(command *cobra.Command, _ []string) error {
			// empty db/table means full backup.
			return runBackupCommand(command, task.FullBackupCmd)
		},
	}
	task.DefineFilterFlags(command, acceptAllTables, false)
	task.DefineBackupEBSFlags(command.PersistentFlags())
	return command
}

// newDBBackupCommand returns a db backup subcommand.
func newDBBackupCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "db",
		Short: "backup a database",
		Args:  cobra.NoArgs,
		RunE: func(command *cobra.Command, _ []string) error {
			return runBackupCommand(command, task.DBBackupCmd)
		},
	}
	task.DefineDatabaseFlags(command)
	return command
}

// newTableBackupCommand returns a table backup subcommand.
func newTableBackupCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "table",
		Short: "backup a table",
		Args:  cobra.NoArgs,
		RunE: func(command *cobra.Command, _ []string) error {
			return runBackupCommand(command, task.TableBackupCmd)
		},
	}
	task.DefineTableFlags(command)
	return command
}

// newRawBackupCommand returns a raw kv range backup subcommand.
func newRawBackupCommand() *cobra.Command {
	// TODO: remove experimental tag if it's stable
	command := &cobra.Command{
		Use:   "raw",
		Short: "(experimental) backup a raw kv range from TiKV cluster",
		Args:  cobra.NoArgs,
		RunE: func(command *cobra.Command, _ []string) error {
			return runBackupRawCommand(command, task.RawBackupCmd)
		},
	}
	task.DefineRawBackupFlags(command)
	return command
}

// newTxnBackupCommand returns a txn kv range backup subcommand.
func newTxnBackupCommand() *cobra.Command {
	command := &cobra.Command{
		Use:   "txn",
		Short: "(experimental) backup a txn kv range from TiKV cluster",
		Args:  cobra.NoArgs,
		RunE: func(command *cobra.Command, _ []string) error {
			return runBackupTxnCommand(command, task.TxnBackupCmd)
		},
	}
	task.DefineTxnBackupFlags(command)
	return command
}
br/cmd/br/backup.go
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0003118567692581564, 0.00018096648273058236, 0.00016689003678038716, 0.00017279418534599245, 0.000031179886718746275 ]
{ "id": 8, "code_window": [ "\ts.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{\n", "\t\tName: fileName,\n", "\t\tSha256: checksum[:],\n", "\t\tSizeEnc: uint64(len(encryptedContent)),\n", "\t\tSizeOri: uint64(len(content)),\n", "\t\tCipherIv: iv,\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\tSizeOri: sizeOri,\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 108 }
4,1
5,2
6,3
br/tests/lightning_csv/data/no_auto_incr_id.clustered_cache1.0.csv
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001703801390249282, 0.0001703801390249282, 0.0001703801390249282, 0.0001703801390249282, 0 ]
{ "id": 8, "code_window": [ "\ts.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{\n", "\t\tName: fileName,\n", "\t\tSha256: checksum[:],\n", "\t\tSizeEnc: uint64(len(encryptedContent)),\n", "\t\tSizeOri: uint64(len(content)),\n", "\t\tCipherIv: iv,\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\tSizeOri: sizeOri,\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 108 }
package utils

import (
	"encoding/hex"
	"encoding/json"

	"github.com/pingcap/errors"
	backuppb "github.com/pingcap/kvproto/pkg/brpb"
)

// MarshalBackupMeta converts the backupmeta structure to JSON.
// Unlike json.Marshal, this function also formats some []byte fields for human reading.
func MarshalBackupMeta(meta *backuppb.BackupMeta) ([]byte, error) {
	result, err := makeJSONBackupMeta(meta)
	if err != nil {
		return nil, err
	}
	return json.Marshal(result)
}

// UnmarshalBackupMeta converts the prettified JSON format of backupmeta
// (made by MarshalBackupMeta) back to the Go structure.
func UnmarshalBackupMeta(data []byte) (*backuppb.BackupMeta, error) {
	jMeta := &jsonBackupMeta{}
	if err := json.Unmarshal(data, jMeta); err != nil {
		return nil, errors.Trace(err)
	}
	return fromJSONBackupMeta(jMeta)
}

type jsonValue any

type jsonFile struct {
	SHA256   string `json:"sha256,omitempty"`
	StartKey string `json:"start_key,omitempty"`
	EndKey   string `json:"end_key,omitempty"`
	*backuppb.File
}

func makeJSONFile(file *backuppb.File) *jsonFile {
	return &jsonFile{
		SHA256:   hex.EncodeToString(file.Sha256),
		StartKey: hex.EncodeToString(file.StartKey),
		EndKey:   hex.EncodeToString(file.EndKey),
		File:     file,
	}
}

func fromJSONFile(jFile *jsonFile) (*backuppb.File, error) {
	f := jFile.File
	var err error
	f.Sha256, err = hex.DecodeString(jFile.SHA256)
	if err != nil {
		return nil, errors.Trace(err)
	}
	f.StartKey, err = hex.DecodeString(jFile.StartKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	f.EndKey, err = hex.DecodeString(jFile.EndKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return f, nil
}

type jsonRawRange struct {
	StartKey string `json:"start_key,omitempty"`
	EndKey   string `json:"end_key,omitempty"`
	*backuppb.RawRange
}

func makeJSONRawRange(raw *backuppb.RawRange) *jsonRawRange {
	return &jsonRawRange{
		StartKey: hex.EncodeToString(raw.StartKey),
		EndKey:   hex.EncodeToString(raw.EndKey),
		RawRange: raw,
	}
}

func fromJSONRawRange(rng *jsonRawRange) (*backuppb.RawRange, error) {
	r := rng.RawRange
	var err error
	r.StartKey, err = hex.DecodeString(rng.StartKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	r.EndKey, err = hex.DecodeString(rng.EndKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return r, nil
}

type jsonSchema struct {
	Table jsonValue `json:"table,omitempty"`
	DB    jsonValue `json:"db,omitempty"`
	*backuppb.Schema
}

func makeJSONSchema(schema *backuppb.Schema) (*jsonSchema, error) {
	result := &jsonSchema{Schema: schema}
	if err := json.Unmarshal(schema.Db, &result.DB); err != nil {
		return nil, errors.Trace(err)
	}
	if schema.Table != nil {
		if err := json.Unmarshal(schema.Table, &result.Table); err != nil {
			return nil, errors.Trace(err)
		}
	}
	return result, nil
}

func fromJSONSchema(jSchema *jsonSchema) (*backuppb.Schema, error) {
	schema := jSchema.Schema
	if schema == nil {
		schema = &backuppb.Schema{}
	}
	var err error
	schema.Db, err = json.Marshal(jSchema.DB)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if jSchema.Table != nil {
		schema.Table, err = json.Marshal(jSchema.Table)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return schema, nil
}

type jsonBackupMeta struct {
	Files     []*jsonFile     `json:"files,omitempty"`
	RawRanges []*jsonRawRange `json:"raw_ranges,omitempty"`
	Schemas   []*jsonSchema   `json:"schemas,omitempty"`
	DDLs      jsonValue       `json:"ddls,omitempty"`
	*backuppb.BackupMeta
}

func makeJSONBackupMeta(meta *backuppb.BackupMeta) (*jsonBackupMeta, error) {
	result := &jsonBackupMeta{
		BackupMeta: meta,
	}
	for _, file := range meta.Files {
		result.Files = append(result.Files, makeJSONFile(file))
	}
	for _, rawRange := range meta.RawRanges {
		result.RawRanges = append(result.RawRanges, makeJSONRawRange(rawRange))
	}
	for _, schema := range meta.Schemas {
		s, err := makeJSONSchema(schema)
		if err != nil {
			return nil, err
		}
		result.Schemas = append(result.Schemas, s)
	}
	if err := json.Unmarshal(meta.Ddls, &result.DDLs); err != nil {
		return nil, errors.Trace(err)
	}
	return result, nil
}

func fromJSONBackupMeta(jMeta *jsonBackupMeta) (*backuppb.BackupMeta, error) {
	meta := jMeta.BackupMeta
	for _, schema := range jMeta.Schemas {
		s, err := fromJSONSchema(schema)
		if err != nil {
			return nil, err
		}
		meta.Schemas = append(meta.Schemas, s)
	}
	for _, file := range jMeta.Files {
		f, err := fromJSONFile(file)
		if err != nil {
			return nil, err
		}
		meta.Files = append(meta.Files, f)
	}
	for _, rawRange := range jMeta.RawRanges {
		rng, err := fromJSONRawRange(rawRange)
		if err != nil {
			return nil, err
		}
		meta.RawRanges = append(meta.RawRanges, rng)
	}
	var err error
	meta.Ddls, err = json.Marshal(jMeta.DDLs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return meta, nil
}
br/pkg/utils/json.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0012556860456243157, 0.00030715696630068123, 0.00016547454288229346, 0.00018323989934287965, 0.0002554325619712472 ]
{ "id": 8, "code_window": [ "\ts.statsFileIndexes = append(s.statsFileIndexes, &backuppb.StatsFileIndex{\n", "\t\tName: fileName,\n", "\t\tSha256: checksum[:],\n", "\t\tSizeEnc: uint64(len(encryptedContent)),\n", "\t\tSizeOri: uint64(len(content)),\n", "\t\tCipherIv: iv,\n", "\t})\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\t\tSizeOri: sizeOri,\n" ], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 108 }
set tidb_enable_global_index=true;
drop table if exists test_global;
create table test_global ( a int, b int, c int, unique key p_b(b))
partition by range( a ) (
	partition p1 values less than (10),
	partition p2 values less than (20)
);
insert into test_global values (1,2,2);
insert into test_global values (11,2,2);
Error 1062 (23000): Duplicate entry '2' for key 'test_global.p_b'
insert into test_global values (11,2,2);
Error 1062 (23000): Duplicate entry '2' for key 'test_global.p_b'
# NULL will not get 'duplicate key' error here
insert into test_global(a,c) values (1,2);
insert into test_global(a,c) values (11,2);
drop table if exists test_global;
create table test_global ( a int, b int, c int, primary key p_b(b) /*T![clustered_index] CLUSTERED */)
partition by range( a ) (
	partition p1 values less than (10),
	partition p2 values less than (20)
);
Error 1503 (HY000): A CLUSTERED INDEX must include all columns in the table's partitioning function
drop table if exists test_global;
create table test_global ( a int, b int, c int, primary key p_b_c(b, c) /*T![clustered_index] CLUSTERED */)
partition by range( a ) (
	partition p1 values less than (10),
	partition p2 values less than (20)
);
Error 1503 (HY000): A CLUSTERED INDEX must include all columns in the table's partitioning function
drop table if exists test_global;
create table test_global ( a int, b int, c int, primary key (b) /*T![clustered_index] NONCLUSTERED */)
partition by range( a ) (
	partition p1 values less than (10),
	partition p2 values less than (20)
);
insert into test_global values (1,2,2);
insert into test_global values (11,2,2);
Error 1062 (23000): Duplicate entry '2' for key 'test_global.PRIMARY'
insert into test_global values (11,2,2);
Error 1062 (23000): Duplicate entry '2' for key 'test_global.PRIMARY'
drop table if exists test_global;
create table test_global ( a int, b int, c int)
partition by range( a ) (
	partition p1 values less than (10),
	partition p2 values less than (20),
	partition p3 values less than (30)
);
alter table test_global add unique index idx_b (b);
insert into test_global values (1, 1, 1), (8, 8, 8), (11, 11, 11), (12, 12, 12);
update test_global set a = 2 where a = 11;
update test_global set a = 13 where a = 12;
analyze table test_global;
select * from test_global use index(idx_b) order by a;
a	b	c
1	1	1
2	11	11
8	8	8
13	12	12
tests/integrationtest/r/ddl/global_index.result
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001787155051715672, 0.00016833543486427516, 0.00016390811651945114, 0.0001656486710999161, 0.000005182841505302349 ]
{ "id": 9, "code_window": [ "\t\tCipherIv: iv,\n", "\t})\n", "\n", "\ts.clearTemporary()\n", "\treturn nil\n", "}\n", "\n", "func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 111 }
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test") go_library( name = "br_lib", srcs = [ "backup.go", "cmd.go", "debug.go", "main.go", "operator.go", "restore.go", "stream.go", ], importpath = "github.com/pingcap/tidb/br/cmd/br", visibility = ["//visibility:private"], deps = [ "//br/pkg/conn", "//br/pkg/errors", "//br/pkg/gluetidb", "//br/pkg/gluetikv", "//br/pkg/logutil", "//br/pkg/metautil", "//br/pkg/mock/mockid", "//br/pkg/redact", "//br/pkg/restore", "//br/pkg/rtree", "//br/pkg/streamhelper/config", "//br/pkg/summary", "//br/pkg/task", "//br/pkg/task/operator", "//br/pkg/trace", "//br/pkg/utils", "//br/pkg/version/build", "//pkg/config", "//pkg/parser/model", "//pkg/session", "//pkg/util", "//pkg/util/logutil", "//pkg/util/memory", "//pkg/util/metricsutil", "@com_github_gogo_protobuf//proto", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_pingcap_log//:log", "@com_github_spf13_cobra//:cobra", "@com_sourcegraph_sourcegraph_appdash//:appdash", "@org_uber_go_zap//:zap", ], ) go_binary( name = "br", embed = [":br_lib"], visibility = ["//visibility:public"], ) go_test( name = "br_test", timeout = "short", srcs = ["main_test.go"], embed = [":br_lib"], flaky = True, )
br/cmd/br/BUILD.bazel
1
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00027463946025818586, 0.0001845502556534484, 0.00016648108430672437, 0.00017022571410052478, 0.00003685706542455591 ]
{ "id": 9, "code_window": [ "\t\tCipherIv: iv,\n", "\t})\n", "\n", "\ts.clearTemporary()\n", "\treturn nil\n", "}\n", "\n", "func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 111 }
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package worker

import (
	"context"
	"time"

	"github.com/pingcap/tidb/br/pkg/lightning/metric"
)

// Pool is the worker pool.
type Pool struct {
	limit   int
	workers chan *Worker
	name    string
	metrics *metric.Metrics
}

// Worker is the worker struct.
type Worker struct {
	ID int64
}

// NewPool creates a new worker pool.
func NewPool(ctx context.Context, limit int, name string) *Pool {
	workers := make(chan *Worker, limit)
	for i := 0; i < limit; i++ {
		workers <- &Worker{ID: int64(i + 1)}
	}

	metrics, ok := metric.FromContext(ctx)
	if ok {
		metrics.IdleWorkersGauge.WithLabelValues(name).Set(float64(limit))
	}
	return &Pool{
		limit:   limit,
		workers: workers,
		name:    name,
		metrics: metrics,
	}
}

// Apply gets a worker from the pool.
func (pool *Pool) Apply() *Worker {
	start := time.Now()
	worker := <-pool.workers
	if pool.metrics != nil {
		pool.metrics.IdleWorkersGauge.WithLabelValues(pool.name).Set(float64(len(pool.workers)))
		pool.metrics.ApplyWorkerSecondsHistogram.WithLabelValues(pool.name).Observe(time.Since(start).Seconds())
	}
	return worker
}

// Recycle puts a worker back to the pool.
func (pool *Pool) Recycle(worker *Worker) {
	if worker == nil {
		panic("invalid restore worker")
	}
	pool.workers <- worker
	if pool.metrics != nil {
		pool.metrics.IdleWorkersGauge.WithLabelValues(pool.name).Set(float64(len(pool.workers)))
	}
}

// HasWorker returns whether the pool has a worker available.
func (pool *Pool) HasWorker() bool {
	return len(pool.workers) > 0
}
br/pkg/lightning/worker/worker.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0004896328318864107, 0.0002105024759657681, 0.00016777074779383838, 0.00017466070130467415, 0.00009892616799334064 ]
{ "id": 9, "code_window": [ "\t\tCipherIv: iv,\n", "\t})\n", "\n", "\ts.clearTemporary()\n", "\treturn nil\n", "}\n", "\n", "func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 111 }
[lightning]
task-info-schema-name = 'lightning_task_info'

[tikv-importer]
backend = 'local'
duplicate-resolution = 'replace'
add-index-by-sql = false

[checkpoint]
enable = false

[mydumper]
batch-size = 1 # ensure each file is its own engine to facilitate cross-engine detection.

[mydumper.csv]
header = true
br/tests/lightning_duplicate_resolution_replace_multiple_unique_keys_nonclustered_pk/config.toml
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.0001716890255920589, 0.00017164394375868142, 0.00017159886192530394, 0.00017164394375868142, 4.508183337748051e-8 ]
{ "id": 9, "code_window": [ "\t\tCipherIv: iv,\n", "\t})\n", "\n", "\ts.clearTemporary()\n", "\treturn nil\n", "}\n", "\n", "func (s *StatsWriter) BackupStats(ctx context.Context, jsonTable *statsutil.JSONTable, physicalID int64) error {\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "br/pkg/metautil/statsfile.go", "type": "replace", "edit_start_line_idx": 111 }
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"testing"

	"github.com/pingcap/tidb/pkg/testkit/testsetup"
	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	testsetup.SetupForCommonTest()
	opts := []goleak.Option{
		goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
		goleak.IgnoreTopFunction("github.com/bazelbuild/rules_go/go/tools/bzltestutil.RegisterTimeoutHandler.func1"),
		goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
		goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
	}
	goleak.VerifyTestMain(m, opts...)
}
pkg/util/sys/storage/main_test.go
0
https://github.com/pingcap/tidb/commit/8e7658d8db809fe17c8affd5b6787cd03871dff4
[ 0.00017576450773049146, 0.00017248251242563128, 0.0001699088461464271, 0.0001721283479128033, 0.0000022588667434320087 ]