Dataset columns: file_name (string, 3 to 137 chars), prefix (string, 0 to 918k chars), suffix (string, 0 to 962k chars), middle (string, 0 to 812k chars).
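Each row splits one source file into a prefix, a held-out middle, and a suffix; the rows below list each file's prefix, then its suffix, then its middle. A minimal reassembly sketch, assuming a row is a plain dict keyed by the column names above (the example_row contents are made up for illustration):

def reassemble(row: dict) -> str:
    # The original file text is simply prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "file_name": "example.py",
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n\nprint(add(1, 2))\n",
}
print(reassemble(example_row))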
DesktopOutline.d.ts
import { IconDefinition } from '../types';
declare const DesktopOutline: IconDefinition;
export default DesktopOutline;
annotations.go
// Copyright 2021 VMware, Inc. // SPDX-License-Identifier: Apache-2.0 package schema import ( "fmt" "github.com/k14s/ytt/pkg/filepos" "github.com/k14s/ytt/pkg/template" "github.com/k14s/ytt/pkg/template/core" "github.com/k14s/ytt/pkg/yamlmeta" ) const ( AnnotationNullable template.AnnotationName = "schema/nullable" AnnotationType template.AnnotationName = "schema/type" AnnotationDefault template.AnnotationName = "schema/default" AnnotationDescription template.AnnotationName = "schema/desc" TypeAnnotationKwargAny string = "any" ) type Annotation interface { NewTypeFromAnn() (yamlmeta.Type, error) } type TypeAnnotation struct { any bool node yamlmeta.Node } type NullableAnnotation struct { node yamlmeta.Node } // DefaultAnnotation is a wrapper for a value provided via @schema/default annotation type DefaultAnnotation struct { val interface{} } // DescriptionAnnotation documents the purpose of a node type DescriptionAnnotation struct { description string } // NewTypeAnnotation checks the keyword argument provided via @schema/type annotation, and returns wrapper for the annotated node. func NewTypeAnnotation(ann template.NodeAnnotation, node yamlmeta.Node) (*TypeAnnotation, error)
// NewNullableAnnotation checks that there are no arguments, and returns wrapper for the annotated node. func NewNullableAnnotation(ann template.NodeAnnotation, node yamlmeta.Node) (*NullableAnnotation, error) { if len(ann.Kwargs) != 0 { return nil, fmt.Errorf("expected @%v annotation to not contain any keyword arguments", AnnotationNullable) } return &NullableAnnotation{node: node}, nil } // NewDefaultAnnotation checks the argument provided via @schema/default annotation, and returns wrapper for that value. func NewDefaultAnnotation(ann template.NodeAnnotation, effectiveType yamlmeta.Type, pos *filepos.Position) (*DefaultAnnotation, error) { if len(ann.Kwargs) != 0 { return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("syntax error in @%v annotation", AnnotationDefault), expected: fmt.Sprintf("%s (by %s)", effectiveType.String(), effectiveType.GetDefinitionPosition().AsCompactString()), found: fmt.Sprintf("(keyword argument in @%v above this item)", AnnotationDefault), hints: []string{ "this annotation only accepts one argument: the default value.", "value must be in Starlark format, e.g.: {'key': 'value'}, True."}, } } switch numArgs := len(ann.Args); { case numArgs == 0: return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("syntax error in @%v annotation", AnnotationDefault), expected: fmt.Sprintf("%s (by %s)", effectiveType.String(), effectiveType.GetDefinitionPosition().AsCompactString()), found: fmt.Sprintf("missing value (in @%v above this item)", AnnotationDefault), } case numArgs > 1: return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("syntax error in @%v annotation", AnnotationDefault), expected: fmt.Sprintf("%s (by %s)", effectiveType.String(), effectiveType.GetDefinitionPosition().AsCompactString()), found: fmt.Sprintf("%v values (in @%v above this item)", numArgs, AnnotationDefault), } } val, err := core.NewStarlarkValue(ann.Args[0]).AsGoValue() if err != nil { //at this point the annotation is processed, and the Starlark evaluated panic(err) } return &DefaultAnnotation{yamlmeta.NewASTFromInterfaceWithPosition(val, pos)}, nil } // NewDescriptionAnnotation validates the value from the AnnotationDescription, and returns the value func NewDescriptionAnnotation(ann template.NodeAnnotation, pos *filepos.Position) (*DescriptionAnnotation, error) { if len(ann.Kwargs) != 0 { return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("syntax error in @%v annotation", AnnotationDescription), expected: fmt.Sprintf("string"), found: fmt.Sprintf("keyword argument (in @%v above this item)", AnnotationDescription), hints: []string{"this annotation only accepts one argument: a string."}, } } switch numArgs := len(ann.Args); { case numArgs == 0: return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("syntax error in @%v annotation", AnnotationDescription), expected: fmt.Sprintf("string"), found: fmt.Sprintf("missing value (in @%v above this item)", AnnotationDescription), } case numArgs > 1: return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("syntax error in @%v annotation", AnnotationDescription), expected: fmt.Sprintf("string"), found: fmt.Sprintf("%v values (in @%v above this item)", numArgs, AnnotationDescription), } } strVal, err := core.NewStarlarkValue(ann.Args[0]).AsString() if err != nil { return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("syntax error in @%v annotation", AnnotationDescription), expected: fmt.Sprintf("string"), found: 
fmt.Sprintf("Non-string value (in @%v above this item)", AnnotationDescription), } } return &DescriptionAnnotation{strVal}, nil } // NewTypeFromAnn returns type information given by annotation. func (t *TypeAnnotation) NewTypeFromAnn() (yamlmeta.Type, error) { if t.any { return &AnyType{defaultValue: t.node.GetValues()[0], Position: t.node.GetPosition()}, nil } return nil, nil } // NewTypeFromAnn returns type information given by annotation. func (n *NullableAnnotation) NewTypeFromAnn() (yamlmeta.Type, error) { inferredType, err := inferTypeFromValue(n.node.GetValues()[0], n.node.GetPosition()) if err != nil { return nil, err } return &NullType{ValueType: inferredType, Position: n.node.GetPosition()}, nil } // NewTypeFromAnn returns type information given by annotation. func (n *DefaultAnnotation) NewTypeFromAnn() (yamlmeta.Type, error) { return nil, nil } // NewTypeFromAnn returns type information given by annotation. DescriptionAnnotation has no type information. func (n *DescriptionAnnotation) NewTypeFromAnn() (yamlmeta.Type, error) { return nil, nil } func (t *TypeAnnotation) IsAny() bool { return t.any } // Val returns default value specified in annotation. func (n *DefaultAnnotation) Val() interface{} { return n.val } func collectTypeAnnotations(node yamlmeta.Node) ([]Annotation, error) { var anns []Annotation for _, annotation := range []template.AnnotationName{AnnotationType, AnnotationNullable} { ann, err := processOptionalAnnotation(node, annotation, nil) if err != nil { return nil, err } if ann != nil { anns = append(anns, ann) } } return anns, nil } func collectValueAnnotations(node yamlmeta.Node, effectiveType yamlmeta.Type) ([]Annotation, error) { var anns []Annotation for _, annotation := range []template.AnnotationName{AnnotationNullable, AnnotationDefault} { ann, err := processOptionalAnnotation(node, annotation, effectiveType) if err != nil { return nil, err } if ann != nil { anns = append(anns, ann) } } return anns, nil } // collectDocumentationAnnotations provides annotations that are used for documentation purposes func collectDocumentationAnnotations(node yamlmeta.Node) ([]Annotation, error) { var anns []Annotation for _, annotation := range []template.AnnotationName{AnnotationDescription} { ann, err := processOptionalAnnotation(node, annotation, nil) if err != nil { return nil, err } if ann != nil { anns = append(anns, ann) } } return anns, nil } func processOptionalAnnotation(node yamlmeta.Node, optionalAnnotation template.AnnotationName, effectiveType yamlmeta.Type) (Annotation, error) { nodeAnnotations := template.NewAnnotations(node) if nodeAnnotations.Has(optionalAnnotation) { ann := nodeAnnotations[optionalAnnotation] switch optionalAnnotation { case AnnotationNullable: nullAnn, err := NewNullableAnnotation(ann, node) if err != nil { return nil, err } return nullAnn, nil case AnnotationType: typeAnn, err := NewTypeAnnotation(ann, node) if err != nil { return nil, err } return typeAnn, nil case AnnotationDefault: switch node.(type) { case *yamlmeta.DocumentSet, *yamlmeta.Array, *yamlmeta.Map: return nil, NewSchemaError(fmt.Sprintf("Invalid schema - @%v not supported on %s", AnnotationDefault, node.DisplayName()), schemaAssertionError{position: node.GetPosition()}) case *yamlmeta.ArrayItem: return nil, NewSchemaError(fmt.Sprintf("Invalid schema - @%v not supported on array item", AnnotationDefault), schemaAssertionError{ position: node.GetPosition(), hints: []string{"do you mean to set a default value for the array?", "set an array's default by annotating its 
parent."}, }) } defaultAnn, err := NewDefaultAnnotation(ann, effectiveType, node.GetPosition()) if err != nil { return nil, err } return defaultAnn, nil case AnnotationDescription: descAnn, err := NewDescriptionAnnotation(ann, node.GetPosition()) if err != nil { return nil, err } return descAnn, nil } } return nil, nil } func getTypeFromAnnotations(anns []Annotation, pos *filepos.Position) (yamlmeta.Type, error) { annsCopy := append([]Annotation{}, anns...) if len(annsCopy) == 0 { return nil, nil } if len(annsCopy) == 1 { typeFromAnn, err := annsCopy[0].NewTypeFromAnn() if err != nil { return nil, err } return typeFromAnn, nil } var conflictingTypeAnns []Annotation for _, ann := range annsCopy { switch typedAnn := ann.(type) { case *NullableAnnotation: conflictingTypeAnns = append(conflictingTypeAnns, ann) case *TypeAnnotation: if typedAnn.IsAny() { conflictingTypeAnns = append(conflictingTypeAnns, ann) } default: continue } } if len(conflictingTypeAnns) > 1 { return nil, schemaAssertionError{ position: pos, description: fmt.Sprintf("@%v, and @%v any=True are mutually exclusive", AnnotationNullable, AnnotationType), expected: fmt.Sprintf("one of %v, or %v any=True", AnnotationNullable, AnnotationType), found: fmt.Sprintf("both @%v, and @%v any=True annotations", AnnotationNullable, AnnotationType), } } typeFromAnn, err := conflictingTypeAnns[0].NewTypeFromAnn() if err != nil { return nil, err } return typeFromAnn, nil }
{
	if len(ann.Kwargs) == 0 {
		return nil, schemaAssertionError{
			position:    node.GetPosition(),
			description: fmt.Sprintf("expected @%v annotation to have keyword argument and value", AnnotationType),
			expected:    "valid keyword argument and value",
			found:       "missing keyword argument and value",
			hints:       []string{fmt.Sprintf("Supported key-value pairs are '%v=True', '%v=False'", TypeAnnotationKwargAny, TypeAnnotationKwargAny)},
		}
	}
	typeAnn := &TypeAnnotation{node: node}
	for _, kwarg := range ann.Kwargs {
		argName, err := core.NewStarlarkValue(kwarg[0]).AsString()
		if err != nil {
			return nil, err
		}
		switch argName {
		case TypeAnnotationKwargAny:
			isAnyType, err := core.NewStarlarkValue(kwarg[1]).AsBool()
			if err != nil {
				return nil, schemaAssertionError{
					position:    node.GetPosition(),
					description: "unknown @schema/type annotation keyword argument",
					expected:    "starlark.Bool",
					found:       fmt.Sprintf("%T", kwarg[1]),
					hints:       []string{fmt.Sprintf("Supported kwargs are '%v'", TypeAnnotationKwargAny)},
				}
			}
			typeAnn.any = isAnyType
		default:
			return nil, schemaAssertionError{
				position:    node.GetPosition(),
				description: "unknown @schema/type annotation keyword argument",
				expected:    "A valid kwarg",
				found:       argName,
				hints:       []string{fmt.Sprintf("Supported kwargs are '%v'", TypeAnnotationKwargAny)},
			}
		}
	}
	return typeAnn, nil
}
upload.js
Meteor.methods({ 'uploadFile': function(fileid,filename){ var fs = Meteor.npmRequire('fs'); var file = Uploads.find({_id:fileid}); Meteor.setTimeout(function(){ var filepath = "./imports/uploads-" + fileid + "-" + filename; count = 0; CSV().from.stream( fs.createReadStream(filepath), {'escape':'\\'}) .on('record', Meteor.bindEnvironment(function(row,index){ count += 1 Patients.insert({ 'createdAt': new Date(), 'pid':row[0], 'age':parseInt(row[1]), 'leukemia_type':row[2], 'num_prior_rx':parseInt(row[3]), 'prior_cytotoxic':parseInt(row[4]), 'prior_epigenetic':parseInt(row[5]), 'prior_targeted':parseInt(row[6]), 'complex_cg_2':parseInt(row[7]), 'minus_5':parseInt(row[8]), 'minus_7':parseInt(row[9]), 'a11q':parseInt(row[10]), 'inv16':parseInt(row[11]), 'plus_8':parseInt(row[12]), 'only_diploid':parseInt(row[13]), 'minus_y':parseInt(row[14]), 'ph_plus':parseInt(row[15]), 'mrc_2010':row[16], 'dysplastic_g':parseInt(row[17]), 'dysplastic_m':parseInt(row[18]), 'dysplastic_e':parseInt(row[19]), 'auer_rods':parseInt(row[20]), 'wbc':parseFloat(row[21]), 'blast':parseFloat(row[22]), 'fab':row[23], 'ps':parseInt(row[24]), 'ahd':parseInt(row[25]), 'mds':parseInt(row[26]), 'taml':parseInt(row[27]), 'cd13':parseInt(row[28]),
'cd33':parseFloat(row[30]), 'cd33_10':parseInt(row[31]), 'cd33_50':parseInt(row[32]), 'cd33_90':parseInt(row[33]), 'cd34':parseFloat(row[34]), 'cd34_10':parseInt(row[35]), 'cd34_50':parseInt(row[36]), 'cd34_90':parseInt(row[37]), 'cd7':parseInt(row[38]), 'cd10':parseInt(row[39]), 'cd20':parseInt(row[40]), 'hla_dr':parseInt(row[41]), 'current_cytotoxic':parseInt(row[42]), 'current_epigenetic':parseInt(row[43]), 'current_targeted':parseInt(row[44]), 'response_code':parseInt(row[45]), 'cr_status':parseInt(row[46]), 'courses_to_response':parseInt(row[47]), 'off_study_reason_code':parseInt(row[48]), 'status':parseInt(row[49]), 'num_mutations':parseInt(row[50]), 'os':parseFloat(row[51]), 'os_censor':parseInt(row[52]), 'rfs':parseFloat(row[53]), 'rfs_censor':parseInt(row[54]), 'efs':parseFloat(row[55]), 'efs_censor':parseInt(row[56]), 'mutations': { 'DNMT3A':parseInt(row[57]), 'TP53':parseInt(row[58]), 'CEBPA':parseInt(row[59]), 'IDH1':parseInt(row[60]), 'SRSF2':parseInt(row[61]), 'TET2':parseInt(row[62]), 'RUNX1':parseInt(row[63]), 'SF3B1':parseInt(row[64]), 'FLT3':parseInt(row[65]), 'RAD21':parseInt(row[66]), 'U2AF1':parseInt(row[67]), 'NRAS':parseInt(row[68]), 'IDH2':parseInt(row[69]), 'NPM1':parseInt(row[70]), 'ASXL1':parseInt(row[71]), 'NF1':parseInt(row[72]), 'SETBP1':parseInt(row[73]), 'KDR':parseInt(row[74]), 'CBL':parseInt(row[75]), 'WT1':parseInt(row[76]), 'KRAS':parseInt(row[77]), 'SMC3':parseInt(row[78]), 'CBFB_MYH11':parseInt(row[79]), 'KIT':parseInt(row[80]), 'JAK2':parseInt(row[81]), 'BCR_ABL1':parseInt(row[82]), 'PHF6':parseInt(row[83]), 'SMO':parseInt(row[84]), 'ATM':parseInt(row[85]), 'GATA2':parseInt(row[86]), 'EZH2':parseInt(row[87]), 'ZRSR2':parseInt(row[88]), 'PML_RARA':parseInt(row[89]), 'EGFR':parseInt(row[90]), 'PTPN11':parseInt(row[91]), 'APC':parseInt(row[92]), 'BCOR':parseInt(row[93]), 'SMC1A':parseInt(row[94]), 'FGFR2':parseInt(row[95]), 'STAG2':parseInt(row[96]), 'RUNX1_RUNX1T':parseInt(row[97]), 'ETV6_RUNX1':parseInt(row[98]), 'MLH1':parseInt(row[99]), 'KMT2A':parseInt(row[100]), 'NOTCH1':parseInt(row[101]), 'MPL':parseInt(row[102]) }, 'num_methylation':parseInt(row[103]), 'num_tumor':parseInt(row[104]), 'num_myeloid':parseInt(row[105]), 'num_spliceosome':parseInt(row[106]), 'num_activated_signaling':parseInt(row[107]), 'num_cohesin':parseInt(row[108]), 'num_nucleophosmin':parseInt(row[109]), 'num_chromatin':parseInt(row[110]), 'num_transcription':parseInt(row[111]) }) }, function(error){ console.log(error); })) .on('error', function(err){ console.log(err); }) .on('end',function(count){ }) console.log("Successfully uploaded " + count + " entries"); }, 1000) } })
'cd19':parseInt(row[29]),
cldrtree_test.go
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cldrtree import ( "bytes" "flag" "io/ioutil" "log" "math/rand" "path/filepath" "reflect" "regexp" "strconv" "strings" "testing" "github.com/gomirror/text/internal/gen" "github.com/gomirror/text/internal/language/compact" "github.com/gomirror/text/language" "github.com/gomirror/text/unicode/cldr" ) var genOutput = flag.Bool("gen", false, "generate output files") func TestAliasRegexp(t *testing.T) { testCases := []struct { alias string want []string }{{ alias: "miscPatterns[@numberSystem='latn']", want: []string{ "miscPatterns[@numberSystem='latn']", "miscPatterns", "[@numberSystem='latn']", "numberSystem", "latn", }, }, { alias: `calendar[@type='greg-foo']/days/`, want: []string{ "calendar[@type='greg-foo']", "calendar", "[@type='greg-foo']", "type", "greg-foo", }, }, { alias: "eraAbbr", want: []string{ "eraAbbr", "eraAbbr", "", "", "", }, }, { // match must be anchored at beginning. alias: `../calendar[@type='gregorian']/days/`, }} for _, tc := range testCases { t.Run(tc.alias, func(t *testing.T) { got := aliasRe.FindStringSubmatch(tc.alias) if !reflect.DeepEqual(got, tc.want) { t.Errorf("got %v; want %v", got, tc.want) } }) } } func TestBuild(t *testing.T) { tree1, _ := loadTestdata(t, "test1") tree2, _ := loadTestdata(t, "test2") // Constants for second test test const ( calendar = iota field ) const ( month = iota era filler cyclicNameSet ) const ( abbreviated = iota narrow wide ) testCases := []struct { desc string tree *Tree locale string path []uint16 isFeature bool result string }{{ desc: "und/chinese month format wide m1", tree: tree1, locale: "und", path: path(calendar, 0, month, 0, wide, 1), result: "cM01", }, { desc: "und/chinese month format wide m12", tree: tree1, locale: "und", path: path(calendar, 0, month, 0, wide, 12), result: "cM12", }, { desc: "und/non-existing value", tree: tree1, locale: "und", path: path(calendar, 0, month, 0, wide, 13), result: "", }, { desc: "und/dangi:chinese month format wide", tree: tree1, locale: "und", path: path(calendar, 1, month, 0, wide, 1), result: "cM01", }, { desc: "und/chinese month format abbreviated:wide", tree: tree1, locale: "und", path: path(calendar, 0, month, 0, abbreviated, 1), result: "cM01", }, { desc: "und/chinese month format narrow:wide", tree: tree1, locale: "und", path: path(calendar, 0, month, 0, narrow, 1), result: "cM01", }, { desc: "und/gregorian month format wide", tree: tree1, locale: "und", path: path(calendar, 2, month, 0, wide, 2), result: "gM02", }, { desc: "und/gregorian month format:stand-alone narrow", tree: tree1, locale: "und", path: path(calendar, 2, month, 0, narrow, 1), result: "1", }, { desc: "und/gregorian month stand-alone:format abbreviated", tree: tree1, locale: "und", path: path(calendar, 2, month, 1, abbreviated, 1), result: "gM01", }, { desc: "und/gregorian month stand-alone:format wide ", tree: tree1, locale: "und", path: path(calendar, 2, month, 1, abbreviated, 1), result: "gM01", }, { desc: "und/dangi:chinese month format narrow:wide ", tree: tree1, locale: "und", path: path(calendar, 1, month, 0, narrow, 4), result: "cM04", }, { desc: "und/field era displayname 0", tree: tree2, locale: "und", path: path(field, 0, 0, 0), result: "Era", }, { desc: "en/field era displayname 0", tree: tree2, locale: "en", path: path(field, 0, 0, 0), result: "era", }, { desc: "und/calendar hebrew format wide 7-leap", tree: tree2, locale: "und", path: 
path(calendar, 7, month, 0, wide, 0), result: "Adar II", }, { desc: "en-GB:en-001:en:und/calendar hebrew format wide 7-leap", tree: tree2, locale: "en-GB", path: path(calendar, 7, month, 0, wide, 0), result: "Adar II", }, { desc: "und/buddhist month format wide 11", tree: tree2, locale: "und", path: path(calendar, 0, month, 0, wide, 12), result: "genWideM12", }, { desc: "en-GB/gregorian month stand-alone narrow 2", tree: tree2, locale: "en-GB", path: path(calendar, 6, month, 1, narrow, 3), result: "gbNarrowM3", }, { desc: "en-GB/gregorian month format narrow 3/missing in en-GB", tree: tree2, locale: "en-GB", path: path(calendar, 6, month, 0, narrow, 4), result: "enNarrowM4", }, { desc: "en-GB/gregorian month format narrow 3/missing in en and en-GB", tree: tree2, locale: "en-GB", path: path(calendar, 6, month, 0, narrow, 7), result: "gregNarrowM7", }, { desc: "en-GB/gregorian month format narrow 3/missing in en and en-GB", tree: tree2, locale: "en-GB", path: path(calendar, 6, month, 0, narrow, 7), result: "gregNarrowM7", }, { desc: "en-GB/gregorian era narrow", tree: tree2, locale: "en-GB", path: path(calendar, 6, era, abbreviated, 0, 1), isFeature: true, result: "AD", }, { desc: "en-GB/gregorian era narrow", tree: tree2, locale: "en-GB", path: path(calendar, 6, era, narrow, 0, 0), isFeature: true, result: "BC", }, { desc: "en-GB/gregorian era narrow", tree: tree2, locale: "en-GB", path: path(calendar, 6, era, wide, 1, 0), isFeature: true, result: "Before Common Era", }, { desc: "en-GB/dangi:chinese cyclicName, months, format, narrow:abbreviated 2", tree: tree2, locale: "en-GB", path: path(calendar, 1, cyclicNameSet, 3, 0, 1, 2), isFeature: true, result: "year2", }, { desc: "en-GB/field era-narrow ", tree: tree2, locale: "en-GB", path: path(field, 2, 0, 0), result: "era", }, { desc: "en-GB/field month-narrow relativeTime future one", tree: tree2, locale: "en-GB", path: path(field, 5, 2, 0, 1), isFeature: true, result: "001NarrowFutMOne", }, { // Don't fall back to the one of "en". desc: "en-GB/field month-short relativeTime past one:other", tree: tree2, locale: "en-GB", path: path(field, 4, 2, 1, 1), isFeature: true, result: "001ShortPastMOther", }, { desc: "en-GB/field month relativeTime future two:other", tree: tree2, locale: "en-GB", path: path(field, 3, 2, 0, 2), isFeature: true, result: "enFutMOther", }} for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { tag, _ := compact.RegionalID(compact.Tag(language.MustParse(tc.locale))) s := tc.tree.lookup(tag, tc.isFeature, tc.path...) if s != tc.result { t.Errorf("got %q; want %q", s, tc.result) } }) } } func path(e ...uint16) []uint16 { return e } func TestGen(t *testing.T) { testCases := []string{"test1", "test2"} for _, tc := range testCases { t.Run(tc, func(t *testing.T) { _, got := loadTestdata(t, tc) // Remove sizes that may vary per architecture. 
re := regexp.MustCompile("// Size: [0-9]*") got = re.ReplaceAllLiteral(got, []byte("// Size: xxxx")) re = regexp.MustCompile("// Total table size [0-9]*") got = re.ReplaceAllLiteral(got, []byte("// Total table size: xxxx")) file := filepath.Join("testdata", tc, "output.go") if *genOutput { ioutil.WriteFile(file, got, 0700) t.SkipNow() } b, err := ioutil.ReadFile(file) if err != nil { t.Fatalf("failed to open file: %v", err) } if want := string(b); string(got) != want { t.Log(string(got)) t.Errorf("files differ") } }) } } func loadTestdata(t *testing.T, test string) (tree *Tree, file []byte) { b := New("test") var d cldr.Decoder data, err := d.DecodePath(filepath.Join("testdata", test)) if err != nil { t.Fatalf("error decoding testdata: %v", err) } context := Enum("context") widthMap := func(s string) string { // Align era with width values. if r, ok := map[string]string{ "eraAbbr": "abbreviated", "eraNarrow": "narrow", "eraNames": "wide", }[s]; ok { s = r } return "w" + strings.Title(s) } width := EnumFunc("width", widthMap, "abbreviated", "narrow", "wide") month := Enum("month", "leap7") relative := EnumFunc("relative", func(s string) string { x, err := strconv.ParseInt(s, 10, 8) if err != nil { log.Fatal("Invalid number:", err) } return []string{ "before1", "current", "after1", }[x+1] }) cycleType := EnumFunc("cycleType", func(s string) string { return "cyc" + strings.Title(s) }) r := rand.New(rand.NewSource(0)) for _, loc := range data.Locales() { ldml := data.RawLDML(loc) x := b.Locale(language.Make(loc)) if x := x.Index(ldml.Dates.Calendars); x != nil { for _, cal := range ldml.Dates.Calendars.Calendar { x := x.IndexFromType(cal) if x := x.Index(cal.Months); x != nil { for _, mc := range cal.Months.MonthContext { x := x.IndexFromType(mc, context) for _, mw := range mc.MonthWidth { x := x.IndexFromType(mw, width) for _, m := range mw.Month { x.SetValue(m.Yeartype+m.Type, m, month) } } } } if x := x.Index(cal.CyclicNameSets); x != nil { for _, cns := range cal.CyclicNameSets.CyclicNameSet { x := x.IndexFromType(cns, cycleType) for _, cc := range cns.CyclicNameContext { x := x.IndexFromType(cc, context) for _, cw := range cc.CyclicNameWidth { x := x.IndexFromType(cw, width) for _, c := range cw.CyclicName { x.SetValue(c.Type, c) } } } } } if x := x.Index(cal.Eras); x != nil { opts := []Option{width, SharedType()} if x := x.Index(cal.Eras.EraNames, opts...); x != nil { for _, e := range cal.Eras.EraNames.Era { x.IndexFromAlt(e).SetValue(e.Type, e) } } if x := x.Index(cal.Eras.EraAbbr, opts...); x != nil { for _, e := range cal.Eras.EraAbbr.Era { x.IndexFromAlt(e).SetValue(e.Type, e) } } if x := x.Index(cal.Eras.EraNarrow, opts...); x != nil { for _, e := range cal.Eras.EraNarrow.Era { x.IndexFromAlt(e).SetValue(e.Type, e) } } } { // Ensure having more than 2 buckets. f := x.IndexWithName("filler") b := make([]byte, maxStrlen) opt := &options{parent: x} r.Read(b) f.setValue("0", string(b), opt) } } } if x := x.Index(ldml.Dates.Fields); x != nil { for _, f := range ldml.Dates.Fields.Field { x := x.IndexFromType(f) for _, d := range f.DisplayName { x.Index(d).SetValue("", d) } for _, r := range f.Relative { x.Index(r).SetValue(r.Type, r, relative) } for _, rt := range f.RelativeTime { x := x.Index(rt).IndexFromType(rt) for _, p := range rt.RelativeTimePattern { x.SetValue(p.Count, p) } } for _, rp := range f.RelativePeriod { x.Index(rp).SetValue("", rp) } } } } tree, err = build(b) if err != nil
	w := gen.NewCodeWriter()
	generate(b, tree, w)
	generateTestData(b, w)
	buf := &bytes.Buffer{}
	if _, err = w.WriteGo(buf, "test", ""); err != nil {
		t.Log(buf.String())
		t.Fatal("error generating code:", err)
	}
	return tree, buf.Bytes()
}
{ t.Fatal("error building tree:", err) }
flvconcat.py
from __future__ import division import logging from collections import namedtuple from io import IOBase from itertools import chain, islice from threading import Thread from ..buffers import RingBuffer from ..packages.flashmedia import FLVError from ..packages.flashmedia.tag import (AudioData, AACAudioData, VideoData, AVCVideoData, VideoCommandFrame, Header, ScriptData, Tag) from ..packages.flashmedia.tag import (AAC_PACKET_TYPE_SEQUENCE_HEADER, AVC_PACKET_TYPE_SEQUENCE_HEADER, AUDIO_CODEC_ID_AAC, VIDEO_CODEC_ID_AVC, TAG_TYPE_AUDIO, TAG_TYPE_VIDEO) __all__ = ["extract_flv_header_tags", "FLVTagConcat", "FLVTagConcatIO"] log = logging.getLogger(__name__) FLVHeaderTags = namedtuple("FLVHeaderTags", "metadata aac vc") def iter_flv_tags(fd=None, buf=None, strict=False, skip_header=False): if not (fd or buf): return offset = 0 if not skip_header: if fd: Header.deserialize(fd) elif buf: header, offset = Header.deserialize_from(buf, offset) while fd or buf and offset < len(buf): try: if fd: tag = Tag.deserialize(fd, strict=strict) elif buf: tag, offset = Tag.deserialize_from(buf, offset, strict=strict) except (IOError, FLVError) as err: if "Insufficient tag header" in str(err): break raise IOError(err) yield tag def extract_flv_header_tags(stream): fd = stream.open() metadata = aac_header = avc_header = None for tag_index, tag in enumerate(iter_flv_tags(fd)): if isinstance(tag.data, ScriptData) and tag.data.name == "onMetaData": metadata = tag elif (isinstance(tag.data, VideoData) and isinstance(tag.data.data, AVCVideoData)): if tag.data.data.type == AVC_PACKET_TYPE_SEQUENCE_HEADER: avc_header = tag elif (isinstance(tag.data, AudioData) and isinstance(tag.data.data, AACAudioData)): if tag.data.data.type == AAC_PACKET_TYPE_SEQUENCE_HEADER: aac_header = tag if aac_header and avc_header and metadata: break # Give up after 10 tags if tag_index == 9: break return FLVHeaderTags(metadata, aac_header, avc_header) class FLVTagConcat(object): def __init__(self, duration=None, tags=[], has_video=True, has_audio=True, flatten_timestamps=False, sync_headers=False): self.duration = duration self.flatten_timestamps = flatten_timestamps self.has_audio = has_audio self.has_video = has_video self.sync_headers = sync_headers self.tags = tags if not (has_audio and has_video): self.sync_headers = False self.audio_header_written = False self.flv_header_written = False self.video_header_written = False self.timestamps_add = {} self.timestamps_orig = {} self.timestamps_sub = {} @property def headers_written(self): return self.audio_header_written and self.video_header_written def verify_tag(self, tag): if tag.filter: raise IOError("Tag has filter flag set, probably encrypted") # Only AAC and AVC has detectable headers if isinstance(tag.data, AudioData) and tag.data.codec != AUDIO_CODEC_ID_AAC: self.audio_header_written = True if isinstance(tag.data, VideoData) and tag.data.codec != VIDEO_CODEC_ID_AVC: self.video_header_written = True # Make sure there is no timestamp gap between audio and video when syncing if self.sync_headers and self.timestamps_sub and not self.headers_written: self.timestamps_sub = {} if isinstance(tag.data, AudioData): if isinstance(tag.data.data, AACAudioData): if tag.data.data.type == AAC_PACKET_TYPE_SEQUENCE_HEADER: if self.audio_header_written: return self.audio_header_written = True else: if self.sync_headers and not self.headers_written: return if not self.audio_header_written: return else: if self.sync_headers and not self.headers_written: return elif isinstance(tag.data, VideoData): if 
isinstance(tag.data.data, AVCVideoData): if tag.data.data.type == AVC_PACKET_TYPE_SEQUENCE_HEADER: if self.video_header_written: return self.video_header_written = True else: if self.sync_headers and not self.headers_written: return if not self.video_header_written: return elif isinstance(tag.data.data, VideoCommandFrame): return else: if self.sync_headers and not self.headers_written: return elif isinstance(tag.data, ScriptData): if tag.data.name == "onMetaData": if self.duration: tag.data.value["duration"] = self.duration elif "duration" in tag.data.value: del tag.data.value["duration"] else: return False return True def adjust_tag_gap(self, tag): timestamp_gap = tag.timestamp - self.timestamps_orig.get(tag.type, 0) timestamp_sub = self.timestamps_sub.get(tag.type) if timestamp_gap > 1000 and timestamp_sub is not None: self.timestamps_sub[tag.type] += timestamp_gap self.timestamps_orig[tag.type] = tag.timestamp def adjust_tag_timestamp(self, tag): timestamp_offset_sub = self.timestamps_sub.get(tag.type) if timestamp_offset_sub is None and tag not in self.tags: self.timestamps_sub[tag.type] = tag.timestamp timestamp_offset_sub = self.timestamps_sub.get(tag.type) timestamp_offset_add = self.timestamps_add.get(tag.type) if timestamp_offset_add: tag.timestamp = max(0, tag.timestamp + timestamp_offset_add) elif timestamp_offset_sub: tag.timestamp = max(0, tag.timestamp - timestamp_offset_sub) def analyze_tags(self, tag_iterator): tags = list(islice(tag_iterator, 10)) audio_tags = len(list(filter(lambda t: t.type == TAG_TYPE_AUDIO, tags))) video_tags = len(list(filter(lambda t: t.type == TAG_TYPE_VIDEO, tags))) self.has_audio = audio_tags > 0 self.has_video = video_tags > 0 if not (self.has_audio and self.has_video): self.sync_headers = False return tags def iter_tags(self, fd=None, buf=None, skip_header=None): if skip_header is None: skip_header = not not self.tags tags_iterator = filter(None, self.tags) flv_iterator = iter_flv_tags(fd=fd, buf=buf, skip_header=skip_header) for tag in chain(tags_iterator, flv_iterator): yield tag def iter_chunks(self, fd=None, buf=None, skip_header=None): """Reads FLV tags from fd or buf and returns them with adjusted timestamps.""" timestamps = dict(self.timestamps_add) tag_iterator = self.iter_tags(fd=fd, buf=buf, skip_header=skip_header) if not self.flv_header_written: analyzed_tags = self.analyze_tags(tag_iterator) else:
for tag in chain(analyzed_tags, tag_iterator): if not self.flv_header_written: flv_header = Header(has_video=self.has_video, has_audio=self.has_audio) yield flv_header.serialize() self.flv_header_written = True if self.verify_tag(tag): self.adjust_tag_gap(tag) self.adjust_tag_timestamp(tag) if self.duration: norm_timestamp = tag.timestamp / 1000 if norm_timestamp > self.duration: break yield tag.serialize() timestamps[tag.type] = tag.timestamp if not self.flatten_timestamps: self.timestamps_add = timestamps self.tags = [] class FLVTagConcatWorker(Thread): def __init__(self, iterator, stream): self.error = None self.stream = stream self.stream_iterator = iterator self.concater = FLVTagConcat(stream.duration, stream.tags, **stream.concater_params) Thread.__init__(self) self.daemon = True def run(self): for fd in self.stream_iterator: try: chunks = self.concater.iter_chunks( fd, skip_header=self.stream.skip_header ) for chunk in chunks: self.stream.buffer.write(chunk) if not self.running: return except IOError as err: self.error = err break self.stop() def stop(self): self.running = False self.stream.buffer.close() def start(self): self.running = True return Thread.start(self) class FLVTagConcatIO(IOBase): __worker__ = FLVTagConcatWorker def __init__(self, session, duration=None, tags=[], skip_header=None, timeout=30, **concater_params): self.session = session self.timeout = timeout self.concater_params = concater_params self.duration = duration self.skip_header = skip_header self.tags = tags def open(self, iterator): self.buffer = RingBuffer(self.session.get_option("ringbuffer-size")) self.worker = self.__worker__(iterator, self) self.worker.start() def close(self): self.worker.stop() if self.worker.is_alive(): self.worker.join() def read(self, size=-1): if not self.buffer: return b"" if self.worker.error: raise self.worker.error return self.buffer.read(size, block=self.worker.is_alive(), timeout=self.timeout)
analyzed_tags = []
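FLVTagConcat rewrites tag timestamps so that tags read from successive FLV segments form one continuous stream, emitting the output FLV header only once. A hedged driver sketch, assuming the module is importable as flvconcat on its own and that seg0.flv/seg1.flv are complete FLV files with their own headers (both assumptions; in the package it is normally driven through FLVTagConcatIO):

from flvconcat import FLVTagConcat  # import path assumed for this sketch

segment_paths = ["seg0.flv", "seg1.flv"]  # hypothetical local FLV segments

concat = FLVTagConcat()
with open("joined.flv", "wb") as out:
    for path in segment_paths:
        with open(path, "rb") as fd:
            # iter_chunks() consumes each segment's own header, writes the single
            # output FLV header on the first segment, and yields re-timed,
            # serialized tags for every segment after that.
            for chunk in concat.iter_chunks(fd=fd):
                out.write(chunk)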
test_arithmetic_operations.py
from functools import partial

import pytest

from ..vm.vm_test_helpers import run_test

run_arithmetic_vm_test = partial(
    run_test,
    "tests/fixtures/LegacyTests/Constantinople/VMTests/vmArithmeticTest",
)


@pytest.mark.parametrize(
    "test_file",
    [
        "add0.json",
        "add1.json",
        "add2.json",
        "add3.json",
        "add4.json",
    ],
)
def test_add(test_file: str) -> None:
@pytest.mark.parametrize( "test_file", [ "sub0.json", "sub1.json", "sub2.json", "sub3.json", "sub4.json", ], ) def test_sub(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file", [ "mul0.json", "mul1.json", "mul2.json", "mul3.json", "mul4.json", "mul5.json", "mul6.json", "mul7.json", ], ) def test_mul(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file", [ "div1.json", "divBoostBug.json", "divByNonZero0.json", "divByNonZero1.json", "divByNonZero2.json", "divByNonZero3.json", "divByZero.json", "divByZero_2.json", ], ) def test_div(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file", [ "sdiv0.json", "sdiv1.json", "sdiv2.json", "sdiv3.json", "sdiv4.json", "sdiv5.json", "sdiv6.json", "sdiv7.json", "sdiv8.json", "sdiv9.json", "sdivByZero0.json", "sdivByZero1.json", "sdivByZero2.json", "sdiv_i256min.json", "sdiv_i256min2.json", "sdiv_i256min3.json", "sdiv_dejavu.json", ], ) def test_sdiv(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file", [ "mod0.json", "mod1.json", "mod2.json", "mod3.json", "mod4.json", "modByZero.json", ], ) def test_mod(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file", [ "smod0.json", "smod1.json", "smod2.json", "smod3.json", "smod4.json", "smod5.json", "smod6.json", "smod7.json", "smod8_byZero.json", "smod_i256min1.json", "smod_i256min2.json", ], ) def test_smod(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file", [ "addmod0.json", "addmod1.json", "addmod1_overflow2.json", "addmod1_overflow3.json", "addmod1_overflow4.json", "addmod1_overflowDiff.json", "addmod2.json", "addmod2_0.json", "addmod2_1.json", "addmod3.json", "addmod3_0.json", ], ) def test_addmod(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file", [ "mulmod0.json", "mulmod1.json", "mulmod1_overflow.json", "mulmod1_overflow2.json", "mulmod1_overflow3.json", "mulmod1_overflow4.json", "mulmod2.json", "mulmod2_0.json", "mulmod2_1.json", "mulmod3.json", "mulmod3_0.json", "mulmod4.json", ], ) def test_mulmod(test_file: str) -> None: run_arithmetic_vm_test(test_file) @pytest.mark.parametrize( "test_file, check_gas_left", [ ("exp0.json", True), ("exp1.json", True), ("exp2.json", True), ("exp3.json", True), ("exp4.json", True), ("exp5.json", True), ("exp6.json", True), ("exp7.json", True), ("exp8.json", True), ("expXY.json", False), ("expXY_success.json", False), ], ) def test_exp(test_file: str, check_gas_left: bool) -> None: run_arithmetic_vm_test(test_file, check_gas_left=check_gas_left) @pytest.mark.parametrize("exponent", ([2, 4, 8, 16, 32, 64, 128, 256])) def test_exp_power_2(exponent: int) -> None: run_arithmetic_vm_test(f"expPowerOf2_{exponent}.json") def test_exp_power_256() -> None: for i in range(1, 34): run_arithmetic_vm_test(f"expPowerOf256_{i}.json") for i in range(34): run_arithmetic_vm_test(f"expPowerOf256Of256_{i}.json") @pytest.mark.parametrize( "test_file", [ "signextend_0_BigByte.json", "signextend_00.json", "signextend_AlmostBiggestByte.json", "signextend_BigByte_0.json", "signextend_BigByteBigByte.json", "signextend_BigBytePlus1_2.json", "signextend_bigBytePlus1.json", "signextend_BitIsNotSet.json", "signextend_BitIsNotSetInHigherByte.json", "signextend_bitIsSet.json", "signextend_BitIsSetInHigherByte.json", "signextend_Overflow_dj42.json", "signextendInvalidByteNumber.json", ], ) def 
test_signextend(test_file: str) -> None: run_arithmetic_vm_test(test_file) def test_stop() -> None: run_arithmetic_vm_test("stop.json")
run_arithmetic_vm_test(test_file)
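The tests above use functools.partial to pre-bind the fixture directory so each parametrized case only supplies a JSON file name (plus, for the exp tests, a check_gas_left flag). A small self-contained illustration of that binding, with a stand-in run_test whose body and exact signature are assumptions:

from functools import partial

def run_test(fixture_dir: str, test_file: str, check_gas_left: bool = True) -> None:
    # Stand-in for the real helper; the real one loads and executes the JSON fixture.
    print(f"running {fixture_dir}/{test_file} (check_gas_left={check_gas_left})")

run_arithmetic_vm_test = partial(
    run_test, "tests/fixtures/LegacyTests/Constantinople/VMTests/vmArithmeticTest"
)
run_arithmetic_vm_test("add0.json")                         # positional arg fills test_file
run_arithmetic_vm_test("expXY.json", check_gas_left=False)  # extra kwargs pass through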
graph_actions.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """High level operations on graphs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import sys import threading import time import numpy as np from six import reraise from tensorflow.contrib.framework.python.ops import ops as contrib_ops from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.learn.python.learn import monitors as monitors_lib from tensorflow.contrib.learn.python.learn.utils import checkpoints from tensorflow.core.framework import summary_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import coordinator from tensorflow.python.training import monitored_session from tensorflow.python.training import queue_runner from tensorflow.python.training import saver as tf_saver from tensorflow.python.training import session_manager as session_manager_lib from tensorflow.python.training import summary_io from tensorflow.python.training import supervisor as tf_supervisor # Singleton for SummaryWriter per logdir folder. _SUMMARY_WRITERS = {} # Lock protecting _SUMMARY_WRITERS _summary_writer_lock = threading.Lock() def clear_summary_writers(): """Clear cached summary writers. Currently only used for unit tests.""" return summary_io.SummaryWriterCache.clear() def get_summary_writer(logdir): """Returns single SummaryWriter per logdir in current run. Args: logdir: str, folder to write summaries. Returns: Existing `SummaryWriter` object or new one if never wrote to given directory. 
""" return summary_io.SummaryWriterCache.get(logdir) def _make_saver(graph, keep_checkpoint_max=5): vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES) if vars_to_save: return tf_saver.Saver(vars_to_save, sharded=True, max_to_keep=keep_checkpoint_max) else: return None def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None): logging.info('Loading model from checkpoint: %s.', checkpoint_path) saver = saver or _make_saver(graph) if saver: saver.restore(session, checkpoint_path) else: logging.info('No variables found in graph, not creating Saver() object.') def _run_with_monitors(session, step, tensors, feed_dict, monitors): """Runs session for given tensors with monitor callbacks.""" for monitor in monitors: tensors += monitor.step_begin(step) tensors = list(set(tensors)) outputs = session.run(tensors, feed_dict=feed_dict) outputs = dict(zip( [t.name if isinstance(t, ops.Tensor) else t for t in tensors], outputs)) should_stop = False for monitor in monitors: induce_stop = monitor.step_end(step, outputs) should_stop = should_stop or induce_stop return outputs, should_stop def _monitored_train(graph, output_dir, train_op, loss_op, global_step_tensor=None, init_op=None, init_feed_dict=None, init_fn=None, log_every_steps=10, supervisor_is_chief=True, supervisor_master='', supervisor_save_model_secs=600, supervisor_save_model_steps=None, keep_checkpoint_max=5, supervisor_save_summaries_steps=100, feed_fn=None, steps=None, fail_on_nan_loss=True, hooks=None, max_steps=None):
# TODO(ispir): Deprecate train in favor of supervised_train def train(graph, output_dir, train_op, loss_op, global_step_tensor=None, init_op=None, init_feed_dict=None, init_fn=None, log_every_steps=10, supervisor_is_chief=True, supervisor_master='', supervisor_save_model_secs=600, keep_checkpoint_max=5, supervisor_save_summaries_steps=100, feed_fn=None, steps=None, fail_on_nan_loss=True, monitors=None, max_steps=None): """Train a model. Given `graph`, a directory to write outputs to (`output_dir`), and some ops, run a training loop. The given `train_op` performs one step of training on the model. The `loss_op` represents the objective function of the training. It is expected to increment the `global_step_tensor`, a scalar integer tensor counting training steps. This function uses `Supervisor` to initialize the graph (from a checkpoint if one is available in `output_dir`), write summaries defined in the graph, and write regular checkpoints as defined by `supervisor_save_model_secs`. Training continues until `global_step_tensor` evaluates to `max_steps`, or, if `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the program is terminated with exit code 1. Args: graph: A graph to train. It is expected that this graph is not in use elsewhere. output_dir: A directory to write outputs to. train_op: An op that performs one training step when run. loss_op: A scalar loss tensor. global_step_tensor: A tensor representing the global step. If none is given, one is extracted from the graph using the same logic as in `Supervisor`. init_op: An op that initializes the graph. If `None`, use `Supervisor`'s default. init_feed_dict: A dictionary that maps `Tensor` objects to feed values. This feed dictionary will be used when `init_op` is evaluated. init_fn: Optional callable passed to Supervisor to initialize the model. log_every_steps: Output logs regularly. The logs contain timing data and the current loss. supervisor_is_chief: Whether the current process is the chief supervisor in charge of restoring the model and running standard services. supervisor_master: The master string to use when preparing the session. supervisor_save_model_secs: Save a checkpoint every `supervisor_save_model_secs` seconds when training. keep_checkpoint_max: The maximum number of recent checkpoint files to keep. As new files are created, older files are deleted. If None or 0, all checkpoint files are kept. This is simply passed as the max_to_keep arg to tf.Saver constructor. supervisor_save_summaries_steps: Save summaries every `supervisor_save_summaries_steps` seconds when training. feed_fn: A function that is called every iteration to produce a `feed_dict` passed to `session.run` calls. Optional. steps: Trains for this many steps (e.g. current global step + `steps`). fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op` evaluates to `NaN`. If false, continue training as if nothing happened. monitors: List of `BaseMonitor` subclass instances. Used for callbacks inside the training loop. max_steps: Number of total steps for which to train model. If `None`, train forever. Two calls fit(steps=100) means 200 training iterations. On the other hand two calls of fit(max_steps=100) means, second call will not do any iteration since first call did all 100 steps. Returns: The final loss value. Raises: ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor` is not provided. See `tf.contrib.framework.get_global_step` for how we look up the latter if not provided explicitly. 
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever evaluates to `NaN`. ValueError: If both `steps` and `max_steps` are not `None`. """ while True: try: return _train_internal(graph, output_dir, train_op, loss_op, global_step_tensor, init_op, init_feed_dict, init_fn, log_every_steps, supervisor_is_chief, supervisor_master, supervisor_save_model_secs, keep_checkpoint_max, supervisor_save_summaries_steps, feed_fn, steps, fail_on_nan_loss, monitors, max_steps) except errors.AbortedError: # Happens when PS restarts, keep training. logging.warning('Training got Aborted error. Keep training.') def _train_internal(graph, output_dir, train_op, loss_op, global_step_tensor, init_op, init_feed_dict, init_fn, log_every_steps, supervisor_is_chief, supervisor_master, supervisor_save_model_secs, keep_checkpoint_max, supervisor_save_summaries_steps, feed_fn, steps, fail_on_nan_loss, monitors, max_steps): """See train.""" if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') if not output_dir: raise ValueError('Output directory should be non-empty %s.' % output_dir) if train_op is None: raise ValueError('Missing train_op.') if loss_op is None: raise ValueError('Missing loss_op.') with graph.as_default(): global_step_tensor = contrib_variables.assert_or_get_global_step( graph, global_step_tensor) if global_step_tensor is None: raise ValueError('No "global_step" was provided or found in the graph.') # Get current step. try: start_step = checkpoints.load_variable( output_dir, global_step_tensor.name) except (errors.NotFoundError, ValueError): start_step = 0 summary_writer = (get_summary_writer(output_dir) if supervisor_is_chief else None) # Add default chief monitors if none were provided. if not monitors: monitors = monitors_lib.get_default_monitors( loss_op=loss_op, summary_op=logging_ops.get_summary_op(), save_summary_steps=supervisor_save_summaries_steps, summary_writer=summary_writer) if supervisor_is_chief else [] # TODO(ipolosukhin): Replace all functionality of Supervisor # with Chief-Exclusive Monitors. if not supervisor_is_chief: # Prune list of monitor to the ones runnable on all workers. monitors = [monitor for monitor in monitors if monitor.run_on_all_workers] if max_steps is None: max_steps = (start_step + steps) if steps else None # Start monitors, can create graph parts. 
for monitor in monitors: monitor.begin(max_steps=max_steps) supervisor = tf_supervisor.Supervisor( graph, init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT, init_feed_dict=init_feed_dict, is_chief=supervisor_is_chief, logdir=output_dir, saver=_make_saver(graph, keep_checkpoint_max), global_step=global_step_tensor, summary_op=None, summary_writer=summary_writer, save_model_secs=supervisor_save_model_secs, init_fn=init_fn) session = supervisor.PrepareSession(master=supervisor_master, start_standard_services=True) supervisor.StartQueueRunners(session) with session: get_current_step = lambda: session.run(global_step_tensor) start_step = get_current_step() last_step = start_step last_log_step = start_step loss_value = None logging.info('Training steps [%d,%s)', last_step, 'inf' if max_steps is None else str(max_steps)) excinfo = None try: while not supervisor.ShouldStop() and ( (max_steps is None) or (last_step < max_steps)): start_time = time.time() feed_dict = feed_fn() if feed_fn is not None else None outputs, should_stop = _run_with_monitors( session, last_step + 1, [train_op, loss_op], feed_dict, monitors) loss_value = outputs[loss_op.name] if np.isnan(loss_value): failure_message = 'Model diverged with loss = NaN.' if fail_on_nan_loss: logging.error(failure_message) raise monitors_lib.NanLossDuringTrainingError() else: logging.warning(failure_message) if should_stop: break this_step = get_current_step() if this_step <= last_step: logging.error( 'Global step was not incremented by train op at step %s' ': new step %d', last_step, this_step) last_step = this_step is_last_step = (max_steps is not None) and (last_step >= max_steps) if is_last_step or (last_step - last_log_step >= log_every_steps): logging.info( 'training step %d, loss = %.5f (%.3f sec/batch).', last_step, loss_value, float(time.time() - start_time)) last_log_step = last_step except errors.OutOfRangeError as e: logging.warn('Got exception during tf.learn training loop possibly ' 'due to exhausted input queue %s.', e) except StopIteration: logging.info('Exhausted input iterarator.') except BaseException as e: # pylint: disable=broad-except # Hold on to any other exceptions while we try recording a final # checkpoint and summary. excinfo = sys.exc_info() finally: try: # Call supervisor.Stop() from within a try block because it re-raises # exceptions thrown by the supervised threads. supervisor.Stop(close_summary_writer=False) # Save one last checkpoint and summaries # TODO(wicke): This should be handled by Supervisor # In case we encountered an exception in the try block before we updated # last_step, update it here (again). last_step = get_current_step() if supervisor_is_chief: ckpt_path = supervisor.save_path logging.info('Saving checkpoint for step %d to checkpoint: %s.', last_step, ckpt_path) supervisor.saver.save(session, ckpt_path, global_step=last_step) # Finish monitors. for monitor in monitors: monitor.end() # catch OutOfRangeError which is thrown when queue is out of data (and for # other reasons as well). except errors.OutOfRangeError as e: logging.warn('OutOfRangeError in tf.learn final checkpoint possibly ' 'due to exhausted input queue. Note: summary_op is not ' 'expected to trigger dequeues. %s.', e) except BaseException as e: # pylint: disable=broad-except # If we don't already have an exception to re-raise, raise this one. if not excinfo: raise # Otherwise, log this one and raise the other in the finally block. 
logging.error('Got exception during tf.learn final checkpoint %s.', e) finally: if excinfo: reraise(*excinfo) return loss_value def _get_first_op_from_collection(collection_name): elements = ops.get_collection(collection_name) if elements: return elements[0] return None def _get_saver(): """Lazy init and return saver.""" saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS) if saver is None and variables.all_variables(): saver = tf_saver.Saver() ops.add_to_collection(ops.GraphKeys.SAVERS, saver) return saver def _get_ready_op(): ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP) if ready_op is None: ready_op = variables.report_uninitialized_variables() ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op) return ready_op def _get_local_init_op(): local_init_op = _get_first_op_from_collection( ops.GraphKeys.LOCAL_INIT_OP) if local_init_op is None: op_list = [variables.initialize_local_variables(), data_flow_ops.initialize_all_tables()] if op_list: local_init_op = control_flow_ops.group(*op_list) ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op) return local_init_op def _eval_results_to_str(eval_results): return ', '.join('%s = %s' % (k, v) for k, v in eval_results.items()) def _write_summary_results(output_dir, eval_results, current_global_step): """Writes eval results into summary file in given dir.""" logging.info('Saving evaluation summary for %d step: %s', current_global_step, _eval_results_to_str(eval_results)) summary_writer = get_summary_writer(output_dir) summary = summary_pb2.Summary() for key in eval_results: if eval_results[key] is None: continue value = summary.value.add() value.tag = key if (isinstance(eval_results[key], np.float32) or isinstance(eval_results[key], float)): value.simple_value = float(eval_results[key]) else: logging.warn('Skipping summary for %s, must be a float or np.float32.', key) summary_writer.add_summary(summary, current_global_step) summary_writer.flush() def evaluate(graph, output_dir, checkpoint_path, eval_dict, update_op=None, global_step_tensor=None, supervisor_master='', log_every_steps=10, feed_fn=None, max_steps=None): """Evaluate a model loaded from a checkpoint. Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval loop for `max_steps` steps, or until an exception (generally, an end-of-input signal from a reader operation) is raised from running `eval_dict`. In each step of evaluation, all tensors in the `eval_dict` are evaluated, and every `log_every_steps` steps, they are logged. At the very end of evaluation, a summary is evaluated (finding the summary ops using `Supervisor`'s logic) and written to `output_dir`. Args: graph: A `Graph` to train. It is expected that this graph is not in use elsewhere. output_dir: A string containing the directory to write a summary to. checkpoint_path: A string containing the path to a checkpoint to restore. Can be `None` if the graph doesn't require loading any variables. eval_dict: A `dict` mapping string names to tensors to evaluate. It is evaluated in every logging step. The result of the final evaluation is returned. If `update_op` is None, then it's evaluated in every step. If `max_steps` is `None`, this should depend on a reader that will raise an end-of-input exception when the inputs are exhausted. update_op: A `Tensor` which is run in every step. global_step_tensor: A `Variable` containing the global step. 
If `None`, one is extracted from the graph using the same logic as in `Supervisor`. Used to place eval summaries on training curves. supervisor_master: The master string to use when preparing the session. log_every_steps: Integer. Output logs every `log_every_steps` evaluation steps. The logs contain the `eval_dict` and timing information. feed_fn: A function that is called every iteration to produce a `feed_dict` passed to `session.run` calls. Optional. max_steps: Integer. Evaluate `eval_dict` this many times. Returns: A tuple `(eval_results, global_step)`: eval_results: A `dict` mapping `string` to numeric values (`int`, `float`) that are the result of running eval_dict in the last step. `None` if no eval steps were run. global_step: The global step this evaluation corresponds to. Raises: ValueError: if `output_dir` is empty. """ if not output_dir: raise ValueError('Output directory should be non-empty %s.' % output_dir) with graph.as_default(): global_step_tensor = contrib_variables.assert_or_get_global_step( graph, global_step_tensor) # Create or get summary op, global_step and saver. saver = _get_saver() local_init_op = _get_local_init_op() ready_op = _get_ready_op() session_manager = session_manager_lib.SessionManager( local_init_op=local_init_op, ready_op=ready_op) session, initialized = session_manager.recover_session( master=supervisor_master, saver=saver, checkpoint_dir=checkpoint_path) # Start queue runners. coord = coordinator.Coordinator() threads = queue_runner.start_queue_runners(session, coord) with session: if not initialized: logging.warning('Failed to initialize from %s.', checkpoint_path) # TODO(ipolosukhin): This should be failing, but old code relies on that. session.run(variables.initialize_all_variables()) if checkpoint_path: _restore_from_checkpoint(session, graph, checkpoint_path, saver) current_global_step = session.run(global_step_tensor) eval_results = None # TODO(amodei): Fix this to run through the eval set exactly once. step = 0 eval_step = None feed_dict = None logging.info('Eval steps [%d,%s) for training step %d.', step, 'inf' if max_steps is None else str(max_steps), current_global_step) try: try: while (max_steps is None) or (step < max_steps): step += 1 start_time = time.time() feed_dict = feed_fn() if feed_fn is not None else None if update_op is not None: session.run(update_op, feed_dict=feed_dict) else: eval_results = session.run(eval_dict, feed_dict=feed_dict) eval_step = step # TODO(wicke): We should assert that the global step hasn't changed. if step % log_every_steps == 0: if eval_step is None or step != eval_step: eval_results = session.run(eval_dict, feed_dict=feed_dict) eval_step = step duration = time.time() - start_time logging.info('Results after %d steps (%.3f sec/batch): %s.', step, float(duration), _eval_results_to_str(eval_results)) finally: if eval_results is None or step != eval_step: eval_results = session.run(eval_dict, feed_dict=feed_dict) eval_step = step # Stop session first, before queue runners. session.close() # Stop queue runners. try: coord.request_stop() coord.join(threads, stop_grace_period_secs=120) except (RuntimeError, errors.CancelledError) as e: logging.warning('Coordinator didn\'t stop cleanly: %s', e) # catch OutOfRangeError which is thrown when queue is out of data (and for # other reasons as well). 
except errors.OutOfRangeError as e: if max_steps is None: logging.info('Input queue is exhausted.') else: logging.warn('Input queue is exhausted: %s.', e) # catch StopIteration which is thrown is DataReader is out of data. except StopIteration as e: if max_steps is None: logging.info('Input iterator is exhausted.') else: logging.warn('Input iterator is exhausted: %s.', e) # Save summaries for this evaluation. _write_summary_results(output_dir, eval_results, current_global_step) return eval_results, current_global_step def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1): """Run `output_dict` tensors `n` times, with the same `feed_dict` each run. Args: output_dict: A `dict` mapping string names to tensors to run. Must all be from the same graph. feed_dict: `dict` of input values to feed each run. restore_checkpoint_path: A string containing the path to a checkpoint to restore. n: Number of times to repeat. Returns: A list of `n` `dict` objects, each containing values read from `output_dict` tensors. """ return run_feeds( output_dict=output_dict, feed_dicts=itertools.repeat(feed_dict, n), restore_checkpoint_path=restore_checkpoint_path) # TODO(ptucker): Add save_checkpoint_path. def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None): """Run `output_dict` tensors with each input in `feed_dicts`. If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise, init all variables. Args: output_dict: A `dict` mapping string names to `Tensor` objects to run. Tensors must all be from the same graph. feed_dicts: Iterable of `dict` objects of input values to feed. restore_checkpoint_path: A string containing the path to a checkpoint to restore. Yields: A sequence of dicts of values read from `output_dict` tensors, one item yielded for each item in `feed_dicts`. Keys are the same as `output_dict`, values are the results read from the corresponding `Tensor` in `output_dict`. Raises: ValueError: if `output_dict` or `feed_dicts` is None or empty. """ if not output_dict: raise ValueError('output_dict is invalid: %s.' % output_dict) if not feed_dicts: raise ValueError('feed_dicts is invalid: %s.' % feed_dicts) graph = contrib_ops.get_graph_from_inputs(output_dict.values()) with graph.as_default() as g: with tf_session.Session('') as session: if restore_checkpoint_path: _restore_from_checkpoint(session, g, restore_checkpoint_path) else: session.run(variables.initialize_all_variables()) session.run(variables.initialize_local_variables()) session.run(data_flow_ops.initialize_all_tables()) coord = coordinator.Coordinator() threads = None try: threads = queue_runner.start_queue_runners(session, coord=coord) for f in feed_dicts: yield session.run(output_dict, f) finally: coord.request_stop() if threads: coord.join(threads, stop_grace_period_secs=120) def run_feeds(*args, **kwargs): """See run_feeds_iter(). Returns a `list` instead of an iterator.""" return list(run_feeds_iter(*args, **kwargs)) def infer(restore_checkpoint_path, output_dict, feed_dict=None): """Restore graph from `restore_checkpoint_path` and run `output_dict` tensors. If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise, init all variables. Args: restore_checkpoint_path: A string containing the path to a checkpoint to restore. output_dict: A `dict` mapping string names to `Tensor` objects to run. Tensors must all be from the same graph. feed_dict: `dict` object mapping `Tensor` objects to input values to feed. Returns: Dict of values read from `output_dict` tensors. 
Keys are the same as `output_dict`, values are the results read from the corresponding `Tensor` in `output_dict`. Raises: ValueError: if `output_dict` or `feed_dicts` is None or empty. """ return run_feeds(output_dict=output_dict, feed_dicts=[feed_dict] if feed_dict is not None else [None], restore_checkpoint_path=restore_checkpoint_path)[0]
"""Train a model via monitored_session. Given `graph`, a directory to write outputs to (`output_dir`), and some ops, run a training loop. The given `train_op` performs one step of training on the model. The `loss_op` represents the objective function of the training. It is expected to increment the `global_step_tensor`, a scalar integer tensor counting training steps. This function uses `Supervisor` to initialize the graph (from a checkpoint if one is available in `output_dir`), write summaries defined in the graph, and write regular checkpoints as defined by `supervisor_save_model_secs`. Training continues until `global_step_tensor` evaluates to `max_steps`, or, if `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the program is terminated with exit code 1. Args: graph: A graph to train. It is expected that this graph is not in use elsewhere. output_dir: A directory to write outputs to. train_op: An op that performs one training step when run. loss_op: A scalar loss tensor. global_step_tensor: A tensor representing the global step. If none is given, one is extracted from the graph using the same logic as in `Supervisor`. init_op: An op that initializes the graph. If `None`, use `Supervisor`'s default. init_feed_dict: A dictionary that maps `Tensor` objects to feed values. This feed dictionary will be used when `init_op` is evaluated. init_fn: Optional callable passed to Supervisor to initialize the model. log_every_steps: Output logs regularly. The logs contain timing data and the current loss. A `0` or negative value disables logging. supervisor_is_chief: Whether the current process is the chief supervisor in charge of restoring the model and running standard services. supervisor_master: The master string to use when preparing the session. supervisor_save_model_secs: Save checkpoints every this many seconds. Can not be specified with `supervisor_save_model_steps`. supervisor_save_model_steps: Save checkpoints every this many steps. Can not be specified with `supervisor_save_model_secs`. keep_checkpoint_max: The maximum number of recent checkpoint files to keep. As new files are created, older files are deleted. If None or 0, all checkpoint files are kept. This is simply passed as the max_to_keep arg to `tf.Saver` constructor. supervisor_save_summaries_steps: Save summaries every `supervisor_save_summaries_steps` seconds when training. feed_fn: A function that is called every iteration to produce a `feed_dict` passed to `session.run` calls. Optional. steps: Trains for this many steps (e.g. current global step + `steps`). fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op` evaluates to `NaN`. If false, continue training as if nothing happened. hooks: List of `SessionRunHook` subclass instances. Used for callbacks inside the training loop. max_steps: Number of total steps for which to train model. If `None`, train forever. Two calls fit(steps=100) means 200 training iterations. On the other hand two calls of fit(max_steps=100) means, second call will not do any iteration since first call did all 100 steps. Returns: The final loss value. Raises: ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor` is not provided. See `tf.contrib.framework.get_global_step` for how we look up the latter if not provided explicitly. NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever evaluates to `NaN`. ValueError: If both `steps` and `max_steps` are not `None`. 
""" if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') if not output_dir: raise ValueError('Output directory should be non-empty %s.' % output_dir) if train_op is None: raise ValueError('Missing train_op.') if loss_op is None: raise ValueError('Missing loss_op.') if hooks is None: hooks = [] if not isinstance(hooks, list): raise ValueError('Hooks should be a list.') with graph.as_default(): global_step_tensor = contrib_variables.assert_or_get_global_step( graph, global_step_tensor) if global_step_tensor is None: raise ValueError('No "global_step" was provided or found in the graph.') if max_steps is not None: try: start_step = checkpoints.load_variable(output_dir, global_step_tensor.name) if max_steps <= start_step: logging.info('Skipping training since max_steps has already saved.') return None except: # pylint: disable=bare-except pass # Adapted SessionRunHooks such as ExportMonitor depend on the # CheckpointSaverHook to be executed before they should be executed. # The `hooks` param comprises of deprecated monitor hooks # (such as ExportMonitor). Appending them after the basic_session_run_hooks. all_hooks = [] with graph.as_default(): all_hooks.append(basic_session_run_hooks.NanTensorHook( loss_op, fail_on_nan_loss=fail_on_nan_loss)) if log_every_steps > 0: all_hooks.append(basic_session_run_hooks.LoggingTensorHook({ 'loss': loss_op.name, 'step': global_step_tensor.name }, every_n_iter=log_every_steps)) def make_saver(): return tf_saver.Saver( sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True) scaffold = monitored_session.Scaffold( init_op=init_op, init_feed_dict=init_feed_dict, init_fn=init_fn, saver=monitored_session.Scaffold.get_or_default('saver', ops.GraphKeys.SAVERS, make_saver)) if not supervisor_is_chief: session_creator = monitored_session.WorkerSessionCreator( scaffold=scaffold, master=supervisor_master) else: session_creator = monitored_session.ChiefSessionCreator( scaffold=scaffold, checkpoint_dir=output_dir, master=supervisor_master) summary_writer = summary_io.SummaryWriterCache.get(output_dir) all_hooks.append( basic_session_run_hooks.StepCounterHook( summary_writer=summary_writer)) all_hooks.append( basic_session_run_hooks.SummarySaverHook( save_steps=supervisor_save_summaries_steps, summary_writer=summary_writer, scaffold=scaffold)) if (supervisor_save_model_secs is not None or supervisor_save_model_steps is not None): all_hooks.append( basic_session_run_hooks.CheckpointSaverHook( output_dir, save_secs=supervisor_save_model_secs, save_steps=supervisor_save_model_steps, scaffold=scaffold)) if steps is not None or max_steps is not None: all_hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps)) all_hooks.extend(hooks) with monitored_session.MonitoredSession( session_creator=session_creator, hooks=all_hooks) as super_sess: loss = None while not super_sess.should_stop(): _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else None) return loss
main.go
package main import ( "fmt" "time" "github.com/go-cache/cache" "github.com/go-redis/redis" ) type user struct { Name string Age string
func main() { conf := redis.Options{ Addr: "redis:6379", Password: "sdfsdf", } CacheDriver := cache.NewClient(conf) CacheDriver.Tag("user_all", "user_list").Put("user_id:1", &user{Name: "jack", Age: "18"}, time.Hour) CacheDriver.Tag("user_all").Put("user_id:2", &user{Name: "tom", Age: "19"}, time.Hour) CacheDriver.Tag("user_list").Put("user_id:3", &user{Name: "john", Age: "17"}, time.Hour) fmt.Println(CacheDriver.RedisClient.Keys("*")) CacheDriver.Tag("user_all").Clear() fmt.Println(CacheDriver.RedisClient.Keys("*")) fmt.Println(CacheDriver.RedisClient.Get("user_id:3")) }
}
plugins.dev.config.js
const pluginsConfig = require('../base/plugins.config.js'); const webpack = require('webpack'); pluginsConfig.push( new webpack.HotModuleReplacementPlugin(),
new webpack.NoEmitOnErrorsPlugin() ); module.exports = pluginsConfig;
mod.rs
pub mod index;
pub mod search;
volatile-fat-ptr.rs
// run-pass #![allow(stable_features)] #![feature(volatile)] use std::ptr::{read_volatile, write_volatile}; fn main()
{ let mut x: &'static str = "test"; unsafe { let a = read_volatile(&x); assert_eq!(a, "test"); write_volatile(&mut x, "foo"); assert_eq!(x, "foo"); } }
log10.js
import { helper } from '@ember/component/helper'; /** * Executes `Math.log10` on the number passed to the helper. * * ```hbs * {{log10 a}} * ``` * * @param {number} number The number to pass to `Math.log10` * @return {number} The log10 of the passed number */ export function log10([number]) {
return Math.log10(number); } export default helper(log10);
scratch.js
let isDev = true; const test = [ isDev && 'foo', 'bar', isDev && 'baz', ].filter(item => item); //? process.cwd(); //? const { resolve, dirname, basename, relative } = require('path');
const testpath = '/user/lu/some/path/to/file.js'; const basedir = '/user/lu/some'; const testdir = dirname(testpath); //? relative(basedir, testdir); //? resolve(basedir, '../assets'); //?
Flow.py
#!/usr/bin/env python # -*- coding: utf-8; -*- """ Copyright 2018 University of Liège Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Flow.py Python interface between Flow and CUPyDO. Authors A. Crovato """ # ---------------------------------------------------------------------- # Imports # ---------------------------------------------------------------------- import sys import numpy as np from cupydo.genericSolvers import FluidSolver # ---------------------------------------------------------------------- # FlowSolver class # ---------------------------------------------------------------------- class Flow(FluidSolver): def __init__(self, _module, _nthreads): # load the python module and initialize the solver module = __import__(_module) floP = module.getParams() self.__initFlow(floP, _nthreads) # count fsi nodes and get their positions self.nNodes = self.boundary.nodes.size() self.nHaloNode = 0 self.nPhysicalNodes = self.nNodes - self.nHaloNode self.nodalInitPosX, self.nodalInitPosY, self.nodalInitPosZ = self.getNodalInitialPositions() # init save frequency (fsi) if 'saveFreq' in floP: self.saveFreq = floP['saveFreq'] else: self.saveFreq = sys.maxsize # generic init FluidSolver.__init__(self) def __initFlow(self, p, _nthreads): """Initilize flow classes Adrien Crovato """ import flow import tbox import tbox.gmsh as gmsh # basic checks if p['Dim'] != 2 and p['Dim'] != 3: raise Exception('Problem dimension should be 2 or 3, but ' + p['Dim'] + ' was given!\n') # basic config if p['Format'] == 'vtk': try: import tboxVtk Writer = tboxVtk.VtkExport print "Found VTK libraries! Results will be saved in VTK format.\n" except: Writer = tbox.GmshExport print "VTK libraries not found! Results will be saved in gmsh format.\n" else: Writer = tbox.GmshExport print "Results will be saved in gmsh format.\n" # mesh the geometry self.msh = gmsh.MeshLoader(p['File'],__file__).execute(**p['Pars']) gmshWriter = tbox.GmshExport(self.msh) if p['Dim'] == 2: mshCrck = tbox.MshCrack(self.msh, p['Dim']) mshCrck.addCrack(p['Wake']) mshCrck.addBoundaries([p['Fluid'], p['Farfield'][-1], p['Body']]) mshCrck.run(gmshWriter) else: mshCrck = tbox.MshCrack(self.msh, p['Dim']) mshCrck.addCrack(p['Wake']) mshCrck.addBoundaries([p['Fluid'], p['Symmetry'], p['Farfield'][-1]] + p['Body']) mshCrck.addExcluded(p['WakeTip']) mshCrck.run(gmshWriter) del gmshWriter del mshCrck self.mshWriter = Writer(self.msh) # initialize mesh deformation handler self.mshDef = tbox.MshDeform(self.msh, p['Dim']) self.mshDef.nthreads = _nthreads self.mshDef.setField(p['Fluid']) self.mshDef.setFixed(p['Farfield']) self.mshDef.setMoving([p['Fsi']]) if p['Dim'] == 3: self.mshDef.setSymmetry([p['Symmetry']], 1) self.mshDef.setInternal([p['Wake'], p['Wake']+'_']) # initialize the problem p['AoA'] = p['AoA']*np.pi/180 # convert to radians phiInfFun = flow.Fun0PosPhiInf(p['Dim'], p['AoA']) if p['Dim'] == 2: velInfFun = tbox.Fct1C(-np.cos(p['AoA']), -np.sin(p['AoA']), 0.) 
else: velInfFun = tbox.Fct1C(-np.cos(p['AoA']), 0., -np.sin(p['AoA'])) self.dynP = p['P_dyn'] pbl = flow.Problem(self.msh, p['Dim'], p['AoA'], p['M_inf'], p['S_ref'], p['c_ref'], p['x_ref'], p['y_ref'], p['z_ref']) # add medium if p['M_inf'] == 0: self.fCp = flow.Fun0EleCpL() pbl.set(flow.Medium(self.msh, p['Fluid'], flow.Fun0EleRhoL(), flow.Fun0EleMachL(), self.fCp, phiInfFun)) else: self.fCp = flow.Fun0EleCp(p['M_inf']) pbl.set(flow.Medium(self.msh, p['Fluid'], flow.Fun0EleRho(p['M_inf'], p['M_crit']), flow.Fun0EleMach(p['M_inf']), self.fCp, phiInfFun)) # add initial condition pbl.add(flow.Initial(self.msh, p['Fluid'], phiInfFun)) # add farfield and symmetry boundary conditions for bd in p['Farfield']: pbl.add(flow.Freestream(self.msh, bd, velInfFun)) # add solid boundaries and identify f/s boundary if p['Dim'] == 2: self.boundary = flow.Boundary(self.msh, [p['Body'], p['Fluid']]) pbl.add(self.boundary) else: for bd in p['Body']: bnd = flow.Boundary(self.msh, [bd, p['Fluid']]) pbl.add(bnd) if bd == p['Fsi']: self.boundary = bnd # add wake/kutta condition if p['Dim'] == 2: pbl.add(flow.Wake(self.msh, [p['Wake'], p['Wake']+'_', p['Fluid']])) pbl.add(flow.Kutta(self.msh, [p['Te'], p['Wake']+'_', p['Body'], p['Fluid']])) else: pbl.add(flow.Wake(self.msh, [p['Wake'], p['Wake']+'_', p['Fluid'], p['TeTip']])) # initialize the solver if p['NSolver'] == 'Picard': self.solver = flow.Picard(pbl) self.solver.relax = p['Relaxation'] elif p['NSolver'] == 'Newton': self.solver = flow.Newton(pbl) self.solver.lsTol = p['LS_tol'] self.solver.maxLsIt = p['Max_it_LS'] self.solver.avThrsh = p['AV_thrsh'] else: raise RuntimeError('Available nonlinear solver type: Picard or Newton, but ' + p['NSolver'] + ' was given!\n') self.solver.nthreads = _nthreads self.solver.relTol = p['Rel_tol'] self.solver.absTol = p['Abs_tol'] self.solver.maxIt = p['Max_it'] print "Number of threads: ", self.solver.nthreads print "Maximum number of iterations: ", self.solver.maxIt print "Objective relative residual: ", self.solver.relTol print "Objective absolute residual: ", self.solver.absTol print '\n' def run(self, t1, t2): "
def __setCurrentState(self): """Compute nodal forces from nodal Pressure coefficient Adrien Crovato """ # integrate Cp at element cpiE = self.boundary.integrate(self.solver.phi, self.fCp) # transfer integrated Cp from elements to nodes cfN = self.boundary.transfer(cpiE) i = 0 for n in self.boundary.nodes: self.nodalLoad_X[i] = -self.dynP * cfN[i][0] self.nodalLoad_Y[i] = -self.dynP * cfN[i][1] self.nodalLoad_Z[i] = -self.dynP * cfN[i][2] i += 1 def getNodalInitialPositions(self): """Get the initial position of each node Adrien Crovato """ x0 = np.zeros(self.nPhysicalNodes) y0 = np.zeros(self.nPhysicalNodes) z0 = np.zeros(self.nPhysicalNodes) for i in range(self.boundary.nodes.size()): n = self.boundary.nodes[i] x0[i] = n.pos.x[0] y0[i] = n.pos.x[1] z0[i] = n.pos.x[2] return (x0, y0, z0) def getNodalIndex(self, iVertex): """Get index of each node Adrien Crovato """ no = self.boundary.nodes[iVertex].no return no def applyNodalDisplacements(self, dx, dy, dz, dx_nM1, dy_nM1, dz_nM1, haloNodesDisplacements, time): """Apply displacements coming from solid solver to f/s interface after saving Adrien Crovato """ self.mshDef.savePos() for i in range(self.boundary.nodes.size()): self.boundary.nodes[i].pos.x[0] = self.nodalInitPosX[i] + dx[i] self.boundary.nodes[i].pos.x[1] = self.nodalInitPosY[i] + dy[i] self.boundary.nodes[i].pos.x[2] = self.nodalInitPosZ[i] + dz[i] def meshUpdate(self, nt): """Deform the mesh using linear elasticity equations Adrien Crovato """ self.mshDef.deform() def save(self, nt): """Save data on disk at each converged timestep Adrien Crovato """ self.solver.save(nt, self.mshWriter) self.mshWriter.save(self.msh.name + "_" + str(nt)) def initRealTimeData(self): """Initialize history file Adrien Crovato """ histFile = open('FlowHistory.dat', 'w') histFile.write('{0:>12s} {1:>12s} {2:>12s} {3:>12s} {4:>12s}\n'.format('Time', 'FSI_Iter', 'C_Lift', 'C_Drag', 'C_Moment')) histFile.close() def saveRealTimeData(self, time, nFSIIter): """Save data at each fsi iteration Adrien Crovato """ # history at each iteration histFile = open('FlowHistory.dat', 'a') histFile.write('{0:12.6f} {1:12d} {2:12.6f} {3:12.6f} {4:12.6f}\n'.format(time, nFSIIter, self.solver.Cl, self.solver.Cd, self.solver.Cm)) histFile.close() # full solution at user-defined frequency if np.mod(nFSIIter+1, self.saveFreq) == 0: self.solver.save(1000000+int(nFSIIter+1)/int(self.saveFreq), self.mshWriter) def printRealTimeData(self, time, nFSIIter): """Print data on screen at the end of fsi simulation Adrien Crovato """ print '[Flow lift, drag, moment]: {0:6.3f}, {1:6.4f}, {2:6.3f}'.format(self.solver.Cl, self.solver.Cd, self.solver.Cm) print '' def exit(self): """ Exit the Flow solver """ del self.fCp del self.solver del self.mshDef del self.msh del self.mshWriter
""Run the solver for one steady (time) iteration. Adrien Crovato """ #exeOK = self.solver.run() if not self.solver.run(): raise RuntimeError('Flow solver diverged!\n') self.__setCurrentState()
number.ts
import { Field } from './field'; export interface NumberFieldOptions { required?: boolean; minValue?: number; maxValue?: number; allowString?: boolean; } export const enum NumberFieldErrors { BadType = 'BadType', MissingRequired = 'MissingRequired', MinValue = 'MinValue', MaxValue = 'MaxValue' } export class NumberField extends Field<number> { public type = 'number'; protected readonly required: boolean; protected readonly minValue: number; protected readonly maxValue: number; protected readonly allowString: boolean = false;
public static readonly MissingRequiredError: NumberFieldErrors.MissingRequired; public static readonly MinLengthError: NumberFieldErrors.MinValue; public static readonly MaxLengthError: NumberFieldErrors.MaxValue; constructor(options: NumberFieldOptions) { super(); this.required = options.required; this.minValue = options.minValue; this.maxValue = options.maxValue; this.allowString = options.allowString; } public validate(value: string | number) : NumberFieldErrors[] { if (value == null) { if (this.required) { return [ NumberFieldErrors.MissingRequired ]; } return [ ]; } if (this.allowString) { if (typeof value === 'string') { value = parseFloat(value); } } if (typeof value !== 'number') { return [ NumberFieldErrors.BadType ]; } const errors: NumberFieldErrors[] = [ ]; if (this.minValue != null && value < this.minValue) { errors.push(NumberFieldErrors.MinValue); } if (this.maxValue != null && value > this.maxValue) { errors.push(NumberFieldErrors.MaxValue); } return errors; } }
public static readonly BadTypeError: NumberFieldErrors.BadType;
SubstanceNucleicAcid.rs
#![allow(unused_imports, non_camel_case_types)] use crate::model::CodeableConcept::CodeableConcept; use crate::model::Element::Element; use crate::model::Extension::Extension; use crate::model::Meta::Meta; use crate::model::Narrative::Narrative; use crate::model::ResourceList::ResourceList; use crate::model::SubstanceNucleicAcid_Subunit::SubstanceNucleicAcid_Subunit; use serde_json::json; use serde_json::value::Value; use std::borrow::Cow; /// Nucleic acids are defined by three distinct elements: the base, sugar and /// linkage. Individual substance/moiety IDs will be created for each of these /// elements. The nucleotide sequence will be always entered in the 5’-3’ direction. #[derive(Debug)] pub struct SubstanceNucleicAcid<'a> { pub(crate) value: Cow<'a, Value>, } impl SubstanceNucleicAcid<'_> { pub fn new(value: &Value) -> SubstanceNucleicAcid { SubstanceNucleicAcid { value: Cow::Borrowed(value), } } pub fn to_json(&self) -> Value { (*self.value).clone() } /// Extensions for areaOfHybridisation pub fn _area_of_hybridisation(&self) -> Option<Element> { if let Some(val) = self.value.get("_areaOfHybridisation") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for implicitRules pub fn _implicit_rules(&self) -> Option<Element> { if let Some(val) = self.value.get("_implicitRules") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for language pub fn _language(&self) -> Option<Element> { if let Some(val) = self.value.get("_language") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for numberOfSubunits pub fn _number_of_subunits(&self) -> Option<Element> { if let Some(val) = self.value.get("_numberOfSubunits") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// The area of hybridisation shall be described if applicable for double stranded /// RNA or DNA. The number associated with the subunit followed by the number /// associated to the residue shall be specified in increasing order. The underscore /// “” shall be used as separator as follows: “Subunitnumber Residue”. pub fn area_of_hybridisation(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("areaOfHybridisation") { return Some(string); } return None; } /// These resources do not have an independent existence apart from the resource /// that contains them - they cannot be identified independently, and nor can they /// have their own independent transaction scope. pub fn contained(&self) -> Option<Vec<ResourceList>> { if let Some(Value::Array(val)) = self.value.get("contained") { return Some( val.into_iter() .map(|e| ResourceList { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the resource. To make the use of extensions safe and manageable, /// there is a strict set of governance applied to the definition and use of /// extensions. Though any implementer can define an extension, there is a set of /// requirements that SHALL be met as part of the definition of the extension. pub fn extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("extension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The logical id of the resource, as used in the URL for the resource. Once /// assigned, this value never changes. 
pub fn id(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("id") { return Some(string); } return None; } /// A reference to a set of rules that were followed when the resource was /// constructed, and which must be understood when processing the content. Often, /// this is a reference to an implementation guide that defines the special rules /// along with other profiles etc. pub fn implicit_rules(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("implicitRules") { return Some(string); } return None; } /// The base language in which the resource is written. pub fn language(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("language") { return Some(string); } return None; } /// The metadata about the resource. This is content that is maintained by the /// infrastructure. Changes to the content might not always be associated with /// version changes to the resource. pub fn meta(&self) -> Option<Meta> { if let Some(val) = self.value.get("meta") { return Some(Meta { value: Cow::Borrowed(val), }); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the resource and that modifies the understanding of the element /// that contains it and/or the understanding of the containing element's /// descendants. Usually modifier elements provide negation or qualification. To /// make the use of extensions safe and manageable, there is a strict set of /// governance applied to the definition and use of extensions. Though any /// implementer is allowed to define an extension, there is a set of requirements /// that SHALL be met as part of the definition of the extension. Applications /// processing a resource are required to check for modifier extensions. Modifier /// extensions SHALL NOT change the meaning of any elements on Resource or /// DomainResource (including cannot change the meaning of modifierExtension /// itself). pub fn modifier_extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("modifierExtension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The number of linear sequences of nucleotides linked through phosphodiester /// bonds shall be described. Subunits would be strands of nucleic acids that are /// tightly associated typically through Watson-Crick base pairing. NOTE: If not /// specified in the reference source, the assumption is that there is 1 subunit. pub fn number_of_subunits(&self) -> Option<i64> { if let Some(val) = self.value.get("numberOfSubunits") { return Some(val.as_i64().unwrap()); } return None; } /// (TBC). pub fn oligo_nucleotide_type(&self) -> Option<CodeableConcept> { if let Some(val) = self.value.get("oligoNucleotideType") { return Some(CodeableConcept { value: Cow::Borrowed(val), }); } return None; } /// The type of the sequence shall be specified based on a controlled vocabulary. pub fn sequence_type(&self) -> Option<CodeableConcept> { if let Some(val) = self.value.get("sequenceType") { return Some(CodeableConcept { value: Cow::Borrowed(val), }); } return None; } /// Subunits are listed in order of decreasing length; sequences of the same length /// will be ordered by molecular weight; subunits that have identical sequences will /// be repeated multiple times. 
pub fn subunit(&self) -> Option<Vec<SubstanceNucleicAcid_Subunit>> { if let Some(Value::Array(val)) = self.value.get("subunit") { return Some( val.into_iter() .map(|e| SubstanceNucleicAcid_Subunit { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// A human-readable narrative that contains a summary of the resource and can be /// used to represent the content of the resource to a human. The narrative need not /// encode all the structured data, but is required to contain sufficient detail to /// make it "clinically safe" for a human to just read the narrative. Resource /// definitions may define what content should be represented in the narrative to /// ensure clinical safety. pub fn text(&self) -> Option<Narrative> { if let Some(val) = self.value.get("text") { return Some(Narrative { value: Cow::Borrowed(val), }); } return None; } pub fn validate(&self) -> bool { if let Some(_val) = self._area_of_hybridisation() { if !_val.validate() { return false; } } if let Some(_val) = self._implicit_rules() { if !_val.validate() { return false; } } if let Some(_val) = self._language() { if !_val.validate() { return false; } } if let Some(_val) = self._number_of_subunits() { if !_val.validate() { return false; } } if let Some(_val) = self.area_of_hybridisation() {} if let Some(_val) = self.contained() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.id() {} if let Some(_val) = self.implicit_rules() {} if let Some(_val) = self.language() {} if let Some(_val) = self.meta() { if !_val.validate() { return false; } } if let Some(_val) = self.modifier_extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.number_of_subunits() {} if let Some(_val) = self.oligo_nucleotide_type() { if !_val.validate() { return false; } } if let Some(_val) = self.sequence_type() { if !_val.validate() {
} } if let Some(_val) = self.subunit() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.text() { if !_val.validate() { return false; } } return true; } } #[derive(Debug)] pub struct SubstanceNucleicAcidBuilder { pub(crate) value: Value, } impl SubstanceNucleicAcidBuilder { pub fn build(&self) -> SubstanceNucleicAcid { SubstanceNucleicAcid { value: Cow::Owned(self.value.clone()), } } pub fn with(existing: SubstanceNucleicAcid) -> SubstanceNucleicAcidBuilder { SubstanceNucleicAcidBuilder { value: (*existing.value).clone(), } } pub fn new() -> SubstanceNucleicAcidBuilder { let mut __value: Value = json!({}); return SubstanceNucleicAcidBuilder { value: __value }; } pub fn _area_of_hybridisation<'a>( &'a mut self, val: Element, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["_areaOfHybridisation"] = json!(val.value); return self; } pub fn _implicit_rules<'a>(&'a mut self, val: Element) -> &'a mut SubstanceNucleicAcidBuilder { self.value["_implicitRules"] = json!(val.value); return self; } pub fn _language<'a>(&'a mut self, val: Element) -> &'a mut SubstanceNucleicAcidBuilder { self.value["_language"] = json!(val.value); return self; } pub fn _number_of_subunits<'a>( &'a mut self, val: Element, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["_numberOfSubunits"] = json!(val.value); return self; } pub fn area_of_hybridisation<'a>( &'a mut self, val: &str, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["areaOfHybridisation"] = json!(val); return self; } pub fn contained<'a>( &'a mut self, val: Vec<ResourceList>, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["contained"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut SubstanceNucleicAcidBuilder { self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn id<'a>(&'a mut self, val: &str) -> &'a mut SubstanceNucleicAcidBuilder { self.value["id"] = json!(val); return self; } pub fn implicit_rules<'a>(&'a mut self, val: &str) -> &'a mut SubstanceNucleicAcidBuilder { self.value["implicitRules"] = json!(val); return self; } pub fn language<'a>(&'a mut self, val: &str) -> &'a mut SubstanceNucleicAcidBuilder { self.value["language"] = json!(val); return self; } pub fn meta<'a>(&'a mut self, val: Meta) -> &'a mut SubstanceNucleicAcidBuilder { self.value["meta"] = json!(val.value); return self; } pub fn modifier_extension<'a>( &'a mut self, val: Vec<Extension>, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["modifierExtension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn number_of_subunits<'a>(&'a mut self, val: i64) -> &'a mut SubstanceNucleicAcidBuilder { self.value["numberOfSubunits"] = json!(val); return self; } pub fn oligo_nucleotide_type<'a>( &'a mut self, val: CodeableConcept, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["oligoNucleotideType"] = json!(val.value); return self; } pub fn sequence_type<'a>( &'a mut self, val: CodeableConcept, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["sequenceType"] = json!(val.value); return self; } pub fn subunit<'a>( &'a mut self, val: Vec<SubstanceNucleicAcid_Subunit>, ) -> &'a mut SubstanceNucleicAcidBuilder { self.value["subunit"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn text<'a>(&'a mut self, val: Narrative) -> &'a mut SubstanceNucleicAcidBuilder { self.value["text"] = 
json!(val.value); return self; } }
return false;
test_utils.rs
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 #![doc(hidden)] use std::u32; use crate::virtio::test_utils::VirtQueue; #[cfg(test)] use crate::virtio::{ balloon::NUM_QUEUES, Balloon, IrqType, DEFLATE_INDEX, INFLATE_INDEX, STATS_INDEX, }; #[cfg(test)] pub fn invoke_handler_for_queue_event(b: &mut Balloon, queue_index: usize) { assert!(queue_index < NUM_QUEUES); // Trigger the queue event. b.queue_evts[queue_index].write(1).unwrap(); // Handle event. match queue_index { INFLATE_INDEX => b.process_inflate_queue_event().unwrap(), DEFLATE_INDEX => b.process_deflate_queue_event().unwrap(), STATS_INDEX => b.process_stats_queue_event().unwrap(), _ => unreachable!(), }; // Validate the queue operation finished successfully. assert!(b.irq_trigger.has_pending_irq(IrqType::Vring)); } pub fn set_request(queue: &VirtQueue, idx: usize, addr: u64, len: u32, flags: u16) { // Set the index of the next request. queue.avail.idx.set((idx + 1) as u16); // Set the current descriptor table entry index. queue.avail.ring[idx].set(idx as u16); // Set the current descriptor table entry. queue.dtable[idx].set(addr, len, flags, 1); } pub fn check_request_completion(queue: &VirtQueue, idx: usize)
{ // Check that the next used will be idx + 1. assert_eq!(queue.used.idx.get(), (idx + 1) as u16); // Check that the current used is idx. assert_eq!(queue.used.ring[idx].get().id, idx as u32); // The length of the completed request is 0. assert_eq!(queue.used.ring[idx].get().len, 0); }
authorizationTransaction.ts
import { Amount } from "../../generics/amount"; import { HttpOperation } from "../../generics/httpOperation"; import { State } from "../../enums/state"; import * as v from 'class-validator' import { Type } from 'class-transformer'; import { BaseModel } from "../../generics/baseModel"; export class AuthorizationTransaction extends BaseModel{ @v.ValidateNested() @Type(() => Amount) amount: Amount; @v.IsString() created: string; @v.IsString() description: string; @v.IsString() failedActivityName: string; @v.IsString() failedErrorCode: string; @v.IsString() failedErrorDescription: string; @v.IsString() failedReason: string; @v.IsBoolean() isOperational: boolean; @v.IsNumber() number: number; @v.IsArray() @v.ValidateNested() @Type(() => HttpOperation) operations: HttpOperation[]; @v.IsString() payeeReference: string;
state: State; @v.IsString() type: string; @v.IsString() updated: string; @v.ValidateNested() @Type(() => Amount) vatAmount: Amount; }
@v.IsString()
extract_table_names.py
#!/usr/bin/env python # # Copyright (C) 2009-2020 the sqlparse authors and contributors # <see AUTHORS file> # # This example is part of python-sqlparse and is released under # the BSD License: https://opensource.org/licenses/BSD-3-Clause # # This example illustrates how to extract table names from nested # SELECT statements. # # See: # https://groups.google.com/forum/#!forum/sqlparse/browse_thread/thread/b0bd9a022e9d4895 import sqlparse from sqlparse.sql import IdentifierList, Identifier from sqlparse.tokens import Keyword, DML def is_subselect(parsed): if not parsed.is_group: return False for item in parsed.tokens: if item.ttype is DML and item.value.upper() == 'SELECT': return True
def extract_from_part(parsed): from_seen = False for item in parsed.tokens: if from_seen: if is_subselect(item): yield from extract_from_part(item) elif item.ttype is Keyword: return else: yield item elif item.ttype is Keyword and item.value.upper() == 'FROM': from_seen = True def extract_table_identifiers(token_stream): for item in token_stream: if isinstance(item, IdentifierList): for identifier in item.get_identifiers(): yield identifier.get_name() elif isinstance(item, Identifier): yield item.get_name() # It's a bug to check for Keyword here, but in the example # above some tables names are identified as keywords... elif item.ttype is Keyword: yield item.value def extract_tables(sql): stream = extract_from_part(sqlparse.parse(sql)[0]) return list(extract_table_identifiers(stream)) if __name__ == '__main__': sql = """ select K.a,K.b from (select H.b from (select G.c from (select F.d from (select E.e from A, B, C, D, E), F), G), H), I, J, K order by 1,2; """ tables = ', '.join(extract_tables(sql)) print(f'Tables: {tables}')
return False
transformers.py
import librosa import numpy as np from PIL import Image from typing import Optional from sklearn.base import BaseEstimator, TransformerMixin from matplotlib.cm import ScalarMappable __all__ = [ "Denoising", "MelSpectogram", "ColoredSpectogram", ] class BaseTransformer(BaseEstimator, TransformerMixin): def __init__(self): pass def fit(self, X, y=None): return self @classmethod def read_params(cls, params): return cls(**params) class Denoising(BaseTransformer): """Placeholder for the "denoising" stage, currently implemented in MATLAB code""" def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array: """Code goes here""" return X class MelSpectogram(BaseTransformer): """Transforms a signal into a Mel-scaled spectrogram using librosa Parameters ---------- The constructor parameters are those passed on to `librosa.feature.melspectrogram` and to `librosa.power_to_db`. Returns ------- np.array : Numpy array of the spectrogram, with values in decibels. """ def __init__( self, sr: int, n_fft: int, hop_length: int, n_mels: int, fmin: int, fmax: int, ref: str, T: bool, as_ratio: bool, ): self.sr = sr self.n_fft = n_fft self.hop_length = hop_length self.n_mels = n_mels self.fmin = fmin self.fmax = fmax self.ref = ref self.T = T self.as_ratio = as_ratio def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array: X_ = self._mel_spec(X) if self.T: # backward compatibility X_ = X_.T return librosa.power_to_db(X_, ref=getattr(np, self.ref)) def _mel_spec(self, X: np.array) -> np.array: h
class ColoredSpectogram(BaseTransformer): """Transforms a matrix of values into an image with a color scale. Parameters ---------- cmap : str Color map accessible through `matplotlib.cm.get_cmap`. Returns ------- PIL.Image : Image in RGB mode. """ def __init__(self, cmap: str): self.cmap = cmap def transform(self, X: np.array, y: Optional[np.array] = None) -> Image: X_ = ScalarMappable(cmap=self.cmap).to_rgba(X, bytes=True) return Image.fromarray(X_).convert("RGB")
op = self.hop_length if self.as_ratio: # backward compatibility hop = X.size // self.hop_length return librosa.feature.melspectrogram( y=X, sr=self.sr, hop_length=hop, n_mels=self.n_mels )
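# --- Illustrative usage sketch (not part of the module above) ---
# Chaining the transformers defined above in a scikit-learn Pipeline. All
# parameter values and the fake one-second signal below are assumptions for
# illustration; real settings depend on the audio being processed.
import numpy as np
from sklearn.pipeline import Pipeline

pipeline = Pipeline([
    ("denoise", Denoising()),
    ("melspec", MelSpectogram(sr=22050, n_fft=2048, hop_length=512, n_mels=128,
                              fmin=0, fmax=11025, ref="max", T=False, as_ratio=False)),
    ("image", ColoredSpectogram(cmap="viridis")),
])

signal = np.random.randn(22050).astype(np.float32)   # one second of fake audio
image = pipeline.fit_transform(signal)               # PIL.Image in RGB mode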
vars.go
package commands
commands = []CommandItem{} )
var (
army.py
''' Copyright (c) 2012-2015, Matthieu Nué All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' from mongoengine import Document, ReferenceField, IntField, ListField, BooleanField, StringField from battle import Battle #TODO: make origin the pk class Army(Document): for_the = ReferenceField('Person') battle = ReferenceField('Battle') attitude = StringField() location = ReferenceField('Province') origin = ReferenceField('Province') way = ListField(ReferenceField('Province')) next_province = ReferenceField('Province') knights = IntField() morale = IntField() time_walking = IntField() @classmethod def new(cls, province): army = cls.objects.create(for_the=province.domain_of.holder, attitude='normal', location=province, origin=province, knights = province.manpower, morale=100, time_walking=0) province.manpower = 0 province.save() return army def move(self, way): self.way = way
self.save() return def dismiss(self): self.origin.manpower += self.knights self.origin.save() #army.knights = 0 #army.save() self.delete() return def stop(self): self.next_province = None self.time_walking = 0 self.way = [] #self.save() return def retreat(self): province = self.location.get_random_walkable_adjacent() if province: self.battle = None self.attitude = 'retreat' self.next_province = province self.way.append(province) self.time_walking = 0 #self.save() return True else: return False def update(self, date): if self.way and self.next_province != self.way[-1]: #change way since last update self.next_province = self.way[-1] self.time_walking = 0 if self.time_walking >= self.location.size: #enter a new province self.time_walking -= self.location.size province = self.next_province self.location = province self.way.pop() if self.way: self.next_province = self.way[-1] else: self.next_province = None self.attitude = 'normal' #when enter a new province, look if there is enemy or already a battle person = self.for_the battle = province.battle if not battle: war = None enemies = [] for army_in_province in province.armies: if not war: war = person.in_war_against(army_in_province.for_the)['war'] enemies.append(army_in_province) else: w = person.in_war_against(army_in_province.for_the)[0]['war'] if w == war: enemies.append(army_in_province) if enemies: #enemy so battle self.stop() Battle.new(war, province, [self], enemies) else: war = battle.war if person in war.aggressors: self.stop() battle.add_aggressor(self) if person in war.defenders: self.stop() battle.add_defender(self) if self.next_province: self.time_walking += 500 * self.location.land.walkable else: self.time_walking = 0 #morale if self.attitude == 'normal': if self.morale < 95: self.morale += 5 else: self.morale = 100 self.save()
t3c-apply-diff_test.go
package orttest /* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ import ( "bytes" "io/ioutil" "os" "strings" "testing" "github.com/apache/trafficcontrol/cache-config/testing/ort-tests/tcdata" "github.com/apache/trafficcontrol/cache-config/testing/ort-tests/util" ) func TestApplyDiff(t *testing.T) { tcd.WithObjs(t, []tcdata.TCObj{ tcdata.CDNs, tcdata.Types, tcdata.Tenants, tcdata.Parameters, tcdata.Profiles, tcdata.ProfileParameters, tcdata.Divisions, tcdata.Regions, tcdata.PhysLocations, tcdata.CacheGroups, tcdata.Servers, tcdata.Topologies, tcdata.DeliveryServices}, func() { // badass to get initial config files if out, code := t3cUpdateReload(DefaultCacheHostName, "badass"); code != 0 { t.Fatalf("t3c apply badass failed with exit code %d, output: %s", code, out) } if !util.FileExists(RecordsConfigFileName) { t.Fatalf("missing config file '%s' needed to test", RecordsConfigFileName) } t.Run("verify comment is unchanged", func(t *testing.T) { f, err := os.OpenFile(RecordsConfigFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { t.Fatalf("opening file '%s': %v", RecordsConfigFileName, err) } defer f.Close() _, err = f.Write([]byte(" #mycomment\n")) if err != nil { t.Fatalf("writing comment to file '%s': %v", RecordsConfigFileName, err) } // queue and syncds to get changes err = tcd.QueueUpdatesForServer(DefaultCacheHostName, true) if err != nil { t.Fatalf("failed to queue updates: %v", err) } out, code := t3cUpdateReload(DefaultCacheHostName, "syncds") if code != 0 { t.Fatalf("t3c apply failed with exit code %d, output: %s", code, out) } // verify the file wasn't overwritten, as it would be if there were a diff recordsDotConfig, err := ioutil.ReadFile(RecordsConfigFileName) if err != nil { t.Fatalf("reading %s: %v", RecordsConfigFileName, err) } if !bytes.Contains(recordsDotConfig, []byte("#mycomment")) {
t.Run("verify non-comment is overwritten", func(t *testing.T) { f, err := os.OpenFile(RecordsConfigFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { t.Fatalf("opening file '%s': %v", RecordsConfigFileName, err) } _, err = f.Write([]byte("\nmynocomment this line isn't a comment\n")) f.Close() if err != nil { t.Fatalf("writing line to file '%s': %v", RecordsConfigFileName, err) } // queue and syncds to get changes err = tcd.QueueUpdatesForServer(DefaultCacheHostName, true) if err != nil { t.Fatalf("failed to queue updates: %v", err) } out, code := t3cUpdateReload(DefaultCacheHostName, "syncds") if code != 0 { t.Fatalf("t3c apply failed with exit code %d, output: %s", code, out) } t.Logf("t3c apply output: %s", out) recordsDotConfig, err := ioutil.ReadFile(RecordsConfigFileName) if err != nil { t.Fatalf("reading %s: %v", RecordsConfigFileName, err) } content := string(recordsDotConfig) if strings.Contains(content, "#mycomment") { t.Fatalf("expected records.config to have a diff and be replaced with a non-comment difference, actual: %s", content) } else if strings.Contains(content, "mynocomment") { t.Fatalf("expected records.config to have a diff and be replaced with a non-comment difference, actual: %s", content) } }) }) }
t.Fatalf("expected records.config to diff clean and not be replaced with comment difference, actual: '%s' t3c-apply output: %s", string(recordsDotConfig), out) } })
preview-dataset-step.component.ts
import {TdDialogService} from "@covalent/core/dialogs"; import {Observable} from "rxjs/Observable"; import 'rxjs/add/observable/of'; import 'rxjs/add/operator/share'; import 'rxjs/add/operator/map'; import {ChangeDetectionStrategy, ChangeDetectorRef, Component, Input, OnDestroy, OnInit, ViewChild} from "@angular/core"; import {FormControl, FormGroup} from "@angular/forms"; import {SelectionService} from "../../catalog/api/services/selection.service"; import {DatasetCollectionStatus, PreviewDataSet} from "../../catalog/datasource/preview-schema/model/preview-data-set"; import {PreviewDataSetRequest} from "../../catalog/datasource/preview-schema/model/preview-data-set-request"; import {FileMetadataTransformResponse} from "../../catalog/datasource/preview-schema/model/file-metadata-transform-response"; import {Node} from "../../catalog/api/models/node"; import {DataSource} from "../../catalog/api/models/datasource"; import {PreviewJdbcDataSet} from "../../catalog/datasource/preview-schema/model/preview-jdbc-data-set"; import {Subject} from "rxjs/Subject"; import {PreviewHiveDataSet} from "../../catalog/datasource/preview-schema/model/preview-hive-data-set"; import {DatabaseObject, DatabaseObjectType} from "../../catalog/datasource/tables/database-object"; import {DatasetPreviewStepperService} from "./dataset-preview-stepper.service"; import {ISubscription} from "rxjs/Subscription"; import {TdLoadingService} from "@covalent/core/loading"; import {DatasetPreviewService, DataSourceChangedEvent, PreviewDataSetResultEvent} from "../../catalog/datasource/preview-schema/service/dataset-preview.service"; import {DatasetPreviewContainerComponent} from "../../catalog/datasource/preview-schema/preview/dataset-preview-container.component"; import {CatalogPreviewDatasetComponent} from "../../catalog/datasource/preview-schema/catalog-preview-dataset.component"; import {StateService} from "@uirouter/angular"; import {KyloRouterService} from "../../../services/kylo-router.service"; @Component({ selector: "preview-dataset-step", templateUrl: "js/feed-mgr/catalog-dataset-preview/preview-stepper/preview-dataset-step.component.html", styleUrls:["js/feed-mgr/catalog-dataset-preview/preview-stepper/preview-dataset-step.component.scss"], changeDetection:ChangeDetectionStrategy.OnPush }) export class
extends CatalogPreviewDatasetComponent { static LOADER = "PreviewDataSetStepComponent.LOADING"; stepChangedSubscription:ISubscription; updateViewSubscription:ISubscription; constructor(state:StateService, selectionService: SelectionService, _dialogService: TdDialogService, _datasetPreviewService:DatasetPreviewService, _tdLoadingService:TdLoadingService, kyloRouterService:KyloRouterService, private _datasetPreviewStepperService:DatasetPreviewStepperService, private cd:ChangeDetectorRef ) { super(state,selectionService,_dialogService,_datasetPreviewService,_tdLoadingService, kyloRouterService); this.singleSelection = this.selectionService.isSingleSelection(); this.updateViewSubscription = this._datasetPreviewStepperService.subscribeToUpdateView(this.onUpdateView.bind(this)) } onUpdateView(){ this.cd.markForCheck(); } onPreviewValid(ds:PreviewDataSet){ this._datasetPreviewStepperService.markFormAsValid(this.formGroup) } onPreviewInvalid(ds:PreviewDataSet){ this._datasetPreviewStepperService.markFormAsInvalid(this.formGroup) } onInitialPreviewInvalid(){ this._datasetPreviewStepperService.markFormAsInvalid(this.formGroup) } onInitialPreviewValid(){ this._datasetPreviewStepperService.markFormAsValid(this.formGroup) } initProperties(){ super.initProperties(); this.stepChangedSubscription = this._datasetPreviewStepperService.subscribeToStepChanges(this.onStepChanged.bind(this)) } ngOnInit() { super.ngOnInit(); } ngOnDestroy() { super.ngOnDestroy(); if(this.stepChangedSubscription){ this.stepChangedSubscription.unsubscribe(); } if(this.updateViewSubscription) { this.updateViewSubscription.unsubscribe(); } } protected startLoading(){ super.startLoading(); this.cd.markForCheck(); } protected finishedLoading(){ super.finishedLoading(); this.cd.markForCheck(); } private onStepChanged(idx:number){ if (idx == 2) { if(this.datasetPreviewContainer != undefined) { this.datasetPreviewContainer.selectedDataSet = undefined; } //we are on this step... try to preview this.previewSelection(); } } }
PreviewDatasetStepComponent
get_mem.rs
/* * Created on Thu Dec 03 2020 * * Copyright (c) storycraft. Licensed under the MIT Licence. */ use serde::{Serialize, Deserialize}; /// Request simplified member list of chatroom.
/// Chatroom id #[serde(rename = "chatId")] pub chat_id: i64 }
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct GetMemReq {
00b-generate_assembly_nochain.py
import os import sys from SBI.structure import PDB from default_config.masif_opts import masif_opts print(masif_opts["ligand"]["assembly_dir"]) if not os.path.exists(masif_opts["ligand"]["assembly_dir"]): os.mkdir(masif_opts["ligand"]["assembly_dir"]) def assemble(pdb_id): # Reads and builds the biological assembly of a structure
pdb_id = sys.argv[1] res = assemble(pdb_id) if res: print("Building assembly was successful for {}".format(pdb_id)) else: print("Building assembly was not successful for {}".format(pdb_id))
print(os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id))) struct = PDB( os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id)), header=True ) try: struct_assembly = struct.apply_biomolecule_matrices()[0] except: return 0 struct_assembly.write( os.path.join(masif_opts["ligand"]["assembly_dir"], "{}.pdb".format(pdb_id)) ) return 1
audio.rs
use reqwest::Url; use serde::Deserialize; use async_trait::async_trait; use crate::segment::Segment; use crate::get::Get; use crate::error::VimeoError; #[derive(Debug, Deserialize)] pub struct Audio { base_url: String, init_segment: String, segments: Vec<Segment> } #[async_trait] impl Get for Audio { fn init_segment(&self) -> &str
fn segments(&self) -> &[Segment] { &self.segments } fn url(&self, base_url: &Url) -> Result<Url, VimeoError> { let (_, url) = self.base_url.split_at(3); let url = base_url.join(url)?; Ok(url) } }
{ self.init_segment.as_str() }
security_client.gen.go
// Code generated by client-gen. DO NOT EDIT.

package v1beta1

import (
	v1beta1 "istio.io/client-go/pkg/apis/security/v1beta1"
	"istio.io/client-go/pkg/clientset/versioned/scheme"
	rest "k8s.io/client-go/rest"
)

type SecurityV1beta1Interface interface {
	RESTClient() rest.Interface
	AuthorizationPoliciesGetter
}

// SecurityV1beta1Client is used to interact with features provided by the security group.
type SecurityV1beta1Client struct {
	restClient rest.Interface
}

func (c *SecurityV1beta1Client) AuthorizationPolicies(namespace string) AuthorizationPolicyInterface {
	return newAuthorizationPolicies(c, namespace)
}

// NewForConfig creates a new SecurityV1beta1Client for the given config.
func NewForConfig(c *rest.Config) (*SecurityV1beta1Client, error) {
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &SecurityV1beta1Client{client}, nil
}

// NewForConfigOrDie creates a new SecurityV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *SecurityV1beta1Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// New creates a new SecurityV1beta1Client for the given RESTClient.
func New(c rest.Interface) *SecurityV1beta1Client {
	return &SecurityV1beta1Client{c}
}

func setConfigDefaults(config *rest.Config) error {
	gv := v1beta1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()

	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *SecurityV1beta1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}
bank_forks_utils.rs
use crate::{ bank_forks::{BankForks, SnapshotConfig}, blockstore::Blockstore, blockstore_processor::{ self, BlockstoreProcessorError, BlockstoreProcessorResult, ProcessOptions, TransactionStatusSender, }, entry::VerifyRecyclers, leader_schedule_cache::LeaderScheduleCache, snapshot_utils, }; use log::*; use solana_sdk::{clock::Slot, genesis_config::GenesisConfig, hash::Hash}; use std::{fs, path::PathBuf, process, result, sync::Arc}; pub type LoadResult = result::Result< (BankForks, LeaderScheduleCache, Option<(Slot, Hash)>), BlockstoreProcessorError, >; fn to_loadresult( brp: BlockstoreProcessorResult, snapshot_hash: Option<(Slot, Hash)>, ) -> LoadResult { brp.map(|(bank_forks, leader_schedule_cache)| { (bank_forks, leader_schedule_cache, snapshot_hash) }) } pub fn load( genesis_config: &GenesisConfig, blockstore: &Blockstore, account_paths: Vec<PathBuf>, snapshot_config: Option<&SnapshotConfig>, process_options: ProcessOptions, transaction_status_sender: Option<TransactionStatusSender>, ) -> LoadResult
{ if let Some(snapshot_config) = snapshot_config.as_ref() { info!( "Initializing snapshot path: {:?}", snapshot_config.snapshot_path ); let _ = fs::remove_dir_all(&snapshot_config.snapshot_path); fs::create_dir_all(&snapshot_config.snapshot_path) .expect("Couldn't create snapshot directory"); match snapshot_utils::get_highest_snapshot_archive_path( &snapshot_config.snapshot_package_output_path, ) { Some((archive_filename, (archive_slot, archive_snapshot_hash, compression))) => { info!("Loading snapshot package: {:?}", archive_filename); // Fail hard here if snapshot fails to load, don't silently continue if account_paths.is_empty() { error!("Account paths not present when booting from snapshot"); process::exit(1); } let deserialized_bank = snapshot_utils::bank_from_archive( &account_paths, &process_options.frozen_accounts, &snapshot_config.snapshot_path, &archive_filename, compression, genesis_config, ) .expect("Load from snapshot failed"); let deserialized_snapshot_hash = ( deserialized_bank.slot(), deserialized_bank.get_accounts_hash(), ); if deserialized_snapshot_hash != (archive_slot, archive_snapshot_hash) { error!( "Snapshot has mismatch:\narchive: {:?}\ndeserialized: {:?}", archive_snapshot_hash, deserialized_snapshot_hash ); process::exit(1); } return to_loadresult( blockstore_processor::process_blockstore_from_root( genesis_config, blockstore, Arc::new(deserialized_bank), &process_options, &VerifyRecyclers::default(), transaction_status_sender, ), Some(deserialized_snapshot_hash), ); } None => info!("No snapshot package available"), } } else { info!("Snapshots disabled"); } info!("Processing ledger from genesis"); to_loadresult( blockstore_processor::process_blockstore( &genesis_config, &blockstore, account_paths, process_options, ), None, ) }
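As a rough illustration of the control flow in load above — prefer the newest snapshot archive, verify that the deserialized bank's (slot, hash) matches what the archive advertises, otherwise replay from genesis — here is a dependency-free sketch with stub types; Bank, SnapshotId, and the numbers are stand-ins, not Solana types.

#[derive(Debug, PartialEq, Clone, Copy)]
struct SnapshotId {
    slot: u64,
    hash: u64, // placeholder for the real accounts hash
}

#[derive(Debug)]
struct Bank {
    id: SnapshotId,
}

fn highest_snapshot_archive() -> Option<SnapshotId> {
    // Pretend an archive such as "snapshot-42-<hash>.tar.zst" was found.
    Some(SnapshotId { slot: 42, hash: 0xfeed })
}

fn bank_from_archive(id: SnapshotId) -> Bank {
    // Deserialization would normally rebuild the bank from the archive contents.
    Bank { id }
}

fn bank_from_genesis() -> Bank {
    Bank { id: SnapshotId { slot: 0, hash: 0 } }
}

fn load() -> Result<Bank, String> {
    match highest_snapshot_archive() {
        Some(archive_id) => {
            let bank = bank_from_archive(archive_id);
            // Mirror of the hard failure in load: a mismatch means the archive
            // contents do not match what its name claims.
            if bank.id != archive_id {
                return Err(format!(
                    "snapshot mismatch: archive {:?} vs deserialized {:?}",
                    archive_id, bank.id
                ));
            }
            Ok(bank)
        }
        None => Ok(bank_from_genesis()),
    }
}

fn main() {
    println!("{:?}", load());
}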
sandbox.simpleservices.ts
/*--------------------------------------------------------------------------------------------- * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for license information. *--------------------------------------------------------------------------------------------*/ /* eslint-disable code-no-standalone-editor */ /* eslint-disable code-import-patterns */ import { ConsoleLogService } from 'vs/platform/log/common/log'; import { ISignService } from 'vs/platform/sign/common/sign'; import { URI } from 'vs/base/common/uri'; import { InMemoryFileSystemProvider } from 'vs/platform/files/common/inMemoryFilesystemProvider'; import { Event } from 'vs/base/common/event'; import { IRemoteAgentConnection, IRemoteAgentService } from 'vs/workbench/services/remote/common/remoteAgentService'; import { IDiagnosticInfoOptions, IDiagnosticInfo } from 'vs/platform/diagnostics/common/diagnostics'; import { IAddressProvider, ISocketFactory } from 'vs/platform/remote/common/remoteAgentConnection'; import { IRemoteAgentEnvironment } from 'vs/platform/remote/common/remoteAgentEnvironment'; import { ITelemetryData, ITelemetryInfo, ITelemetryService } from 'vs/platform/telemetry/common/telemetry'; import { BrowserSocketFactory } from 'vs/platform/remote/browser/browserSocketFactory'; import { ExtensionIdentifier, IExtension, IExtensionDescription } from 'vs/platform/extensions/common/extensions'; import { SimpleConfigurationService as BaseSimpleConfigurationService } from 'vs/editor/standalone/browser/simpleServices'; import { InMemoryStorageService } from 'vs/platform/storage/common/storage'; import { registerSingleton } from 'vs/platform/instantiation/common/extensions'; import { IBackupFileService, IResolvedBackup } from 'vs/workbench/services/backup/common/backup'; import { ITextSnapshot } from 'vs/editor/common/model'; import { IExtensionService, NullExtensionService } from 'vs/workbench/services/extensions/common/extensions'; import { ClassifiedEvent, GDPRClassification, StrictPropertyChecker } from 'vs/platform/telemetry/common/gdprTypings'; import { IKeyboardLayoutService } from 'vs/platform/keyboardLayout/common/keyboardLayout'; import { isWindows } from 'vs/base/common/platform'; import { IWebviewService, WebviewContentOptions, WebviewElement, WebviewExtensionDescription, WebviewIcons, WebviewOptions, WebviewOverlay } from 'vs/workbench/contrib/webview/browser/webview'; import { ITextFileService } from 'vs/workbench/services/textfile/common/textfiles'; import { AbstractTextFileService } from 'vs/workbench/services/textfile/browser/textFileService'; import { IExtensionManagementServer, IExtensionManagementServerService } from 'vs/workbench/services/extensionManagement/common/extensionManagement'; import { ITunnelProvider, ITunnelService, RemoteTunnel } from 'vs/platform/remote/common/tunnel'; import { Disposable, IDisposable } from 'vs/base/common/lifecycle'; import { IManualSyncTask, IResourcePreview, ISyncResourceHandle, ISyncTask, IUserDataAutoSyncService, IUserDataSyncService, IUserDataSyncStore, IUserDataSyncStoreManagementService, SyncResource, SyncStatus, UserDataSyncStoreType } from 'vs/platform/userDataSync/common/userDataSync'; import { IUserDataSyncAccount, IUserDataSyncAccountService } from 'vs/platform/userDataSync/common/userDataSyncAccount'; import { ISingleFolderWorkspaceIdentifier, IWorkspaceIdentifier } from 'vs/platform/workspaces/common/workspaces'; import { ITaskProvider, ITaskService, ITaskSummary, 
ProblemMatcherRunOptions, Task, TaskFilter, TaskTerminateResponse, WorkspaceFolderTaskResult } from 'vs/workbench/contrib/tasks/common/taskService'; import { Action } from 'vs/base/common/actions'; import { LinkedMap } from 'vs/base/common/map'; import { IWorkspace, IWorkspaceContextService, IWorkspaceFolder, WorkbenchState, WorkspaceFolder } from 'vs/platform/workspace/common/workspace'; import { CustomTask, ContributedTask, InMemoryTask, TaskRunSource, ConfiguringTask, TaskIdentifier, TaskSorter } from 'vs/workbench/contrib/tasks/common/tasks'; import { TaskSystemInfo } from 'vs/workbench/contrib/tasks/common/taskSystem'; import { IExtensionTipsService, IConfigBasedExtensionTip, IExecutableBasedExtensionTip, IWorkspaceTips } from 'vs/platform/extensionManagement/common/extensionManagement'; import { IWorkspaceTagsService, Tags } from 'vs/workbench/contrib/tags/common/workspaceTags'; import { AbstractOutputChannelModelService, IOutputChannelModelService } from 'vs/workbench/contrib/output/common/outputChannelModel'; import { joinPath } from 'vs/base/common/resources'; import { VSBuffer } from 'vs/base/common/buffer'; import { IIntegrityService, IntegrityTestResult } from 'vs/workbench/services/integrity/common/integrity'; import { INativeWorkbenchConfiguration, INativeWorkbenchEnvironmentService } from 'vs/workbench/services/environment/electron-sandbox/environmentService'; import { NativeParsedArgs } from 'vs/platform/environment/common/argv'; import { IExtensionHostDebugParams } from 'vs/platform/environment/common/environment'; import type { IWorkbenchConstructionOptions } from 'vs/workbench/workbench.web.api'; import { Schemas } from 'vs/base/common/network'; import { BrowserKeyboardLayoutService } from 'vs/workbench/services/keybinding/browser/keyboardLayoutService'; import { TerminalInstanceService } from 'vs/workbench/contrib/terminal/browser/terminalInstanceService'; import { ITerminalInstanceService } from 'vs/workbench/contrib/terminal/browser/terminal'; import { IWorkbenchConfigurationService } from 'vs/workbench/services/configuration/common/configuration'; //#region Environment export class SimpleNativeWorkbenchEnvironmentService implements INativeWorkbenchEnvironmentService { declare readonly _serviceBrand: undefined; constructor( readonly configuration: INativeWorkbenchConfiguration ) { } get userRoamingDataHome(): URI { return URI.file('/sandbox-user-data-dir').with({ scheme: Schemas.userData }); } get settingsResource(): URI { return joinPath(this.userRoamingDataHome, 'settings.json'); } get argvResource(): URI { return joinPath(this.userRoamingDataHome, 'argv.json'); } get snippetsHome(): URI { return joinPath(this.userRoamingDataHome, 'snippets'); } get globalStorageHome(): URI { return URI.joinPath(this.userRoamingDataHome, 'globalStorage'); } get workspaceStorageHome(): URI { return URI.joinPath(this.userRoamingDataHome, 'workspaceStorage'); } get keybindingsResource(): URI { return joinPath(this.userRoamingDataHome, 'keybindings.json'); } get logFile(): URI { return joinPath(this.userRoamingDataHome, 'window.log'); } get untitledWorkspacesHome(): URI { return joinPath(this.userRoamingDataHome, 'Workspaces'); } get serviceMachineIdResource(): URI { return joinPath(this.userRoamingDataHome, 'machineid'); } get userDataSyncLogResource(): URI { return joinPath(this.userRoamingDataHome, 'syncLog'); } get userDataSyncHome(): URI { return joinPath(this.userRoamingDataHome, 'syncHome'); } get tmpDir(): URI { return joinPath(this.userRoamingDataHome, 'tmp'); } get logsPath(): 
string { return joinPath(this.userRoamingDataHome, 'logs').path; } sessionId = this.configuration.sessionId; machineId = this.configuration.machineId; remoteAuthority = this.configuration.remoteAuthority; os = { release: 'unknown' }; options?: IWorkbenchConstructionOptions | undefined; logExtensionHostCommunication?: boolean | undefined; extensionEnabledProposedApi?: string[] | undefined; webviewExternalEndpoint: string = undefined!; webviewResourceRoot: string = undefined!; webviewCspSource: string = undefined!; skipReleaseNotes: boolean = undefined!; keyboardLayoutResource: URI = undefined!; sync: 'on' | 'off' | undefined; debugExtensionHost: IExtensionHostDebugParams = undefined!; debugRenderer = false; isExtensionDevelopment: boolean = false; disableExtensions: boolean | string[] = []; extensionDevelopmentLocationURI?: URI[] | undefined; extensionTestsLocationURI?: URI | undefined; logLevel?: string | undefined; args: NativeParsedArgs = Object.create(null); execPath: string = undefined!; appRoot: string = undefined!; userHome: URI = undefined!; appSettingsHome: URI = undefined!; userDataPath: string = undefined!; machineSettingsResource: URI = undefined!; log?: string | undefined; extHostLogsPath: URI = undefined!; installSourcePath: string = undefined!; sharedIPCHandle: string = undefined!; extensionsPath: string = undefined!; extensionsDownloadPath: string = undefined!; builtinExtensionsPath: string = undefined!; driverHandle?: string | undefined; crashReporterDirectory?: string | undefined; crashReporterId?: string | undefined; nodeCachedDataDir?: string | undefined; verbose = false; isBuilt = false; get telemetryLogResource(): URI { return joinPath(this.userRoamingDataHome, 'telemetry.log'); } disableTelemetry = false; }
//#region Workspace export const workspaceResource = URI.file(isWindows ? '\\simpleWorkspace' : '/simpleWorkspace'); export class SimpleWorkspaceService implements IWorkspaceContextService { declare readonly _serviceBrand: undefined; readonly onDidChangeWorkspaceName = Event.None; readonly onDidChangeWorkspaceFolders = Event.None; readonly onDidChangeWorkbenchState = Event.None; private readonly workspace: IWorkspace; constructor() { this.workspace = { id: '4064f6ec-cb38-4ad0-af64-ee6467e63c82', folders: [new WorkspaceFolder({ uri: workspaceResource, name: '', index: 0 })] }; } async getCompleteWorkspace(): Promise<IWorkspace> { return this.getWorkspace(); } getWorkspace(): IWorkspace { return this.workspace; } getWorkbenchState(): WorkbenchState { if (this.workspace) { if (this.workspace.configuration) { return WorkbenchState.WORKSPACE; } return WorkbenchState.FOLDER; } return WorkbenchState.EMPTY; } getWorkspaceFolder(resource: URI): IWorkspaceFolder | null { return resource && resource.scheme === workspaceResource.scheme ? this.workspace.folders[0] : null; } isInsideWorkspace(resource: URI): boolean { return resource && resource.scheme === workspaceResource.scheme; } isCurrentWorkspace(workspaceIdentifier: ISingleFolderWorkspaceIdentifier | IWorkspaceIdentifier): boolean { return true; } } //#endregion //#region Configuration export class SimpleStorageService extends InMemoryStorageService { } //#endregion //#region Configuration export class SimpleConfigurationService extends BaseSimpleConfigurationService implements IWorkbenchConfigurationService { async whenRemoteConfigurationLoaded() { } } //#endregion //#region Logger export class SimpleLogService extends ConsoleLogService { } export class SimpleSignService implements ISignService { declare readonly _serviceBrand: undefined; async sign(value: string): Promise<string> { return value; } } //#endregion //#region Files class SimpleFileSystemProvider extends InMemoryFileSystemProvider { } export const simpleFileSystemProvider = new SimpleFileSystemProvider(); function createFile(parent: string, name: string, content: string = ''): void { simpleFileSystemProvider.writeFile(joinPath(workspaceResource, parent, name), VSBuffer.fromString(content).buffer, { create: true, overwrite: true }); } function createFolder(name: string): void { simpleFileSystemProvider.mkdir(joinPath(workspaceResource, name)); } createFolder(''); createFolder('src'); createFolder('test'); createFile('', '.gitignore', `out node_modules .vscode-test/ *.vsix `); createFile('', '.vscodeignore', `.vscode/** .vscode-test/** out/test/** src/** .gitignore vsc-extension-quickstart.md **/tsconfig.json **/tslint.json **/*.map **/*.ts`); createFile('', 'CHANGELOG.md', `# Change Log All notable changes to the "test-ts" extension will be documented in this file. Check [Keep a Changelog](http://keepachangelog.com/) for recommendations on how to structure this file. 
## [Unreleased] - Initial release`); createFile('', 'package.json', `{ "name": "test-ts", "displayName": "test-ts", "description": "", "version": "0.0.1", "engines": { "vscode": "^1.31.0" }, "categories": [ "Other" ], "activationEvents": [ "onCommand:extension.helloWorld" ], "main": "./out/extension.js", "contributes": { "commands": [ { "command": "extension.helloWorld", "title": "Hello World" } ] }, "scripts": { "vscode:prepublish": "npm run compile", "compile": "tsc -p ./", "watch": "tsc -watch -p ./", "postinstall": "node ./node_modules/vscode/bin/install", "test": "npm run compile && node ./node_modules/vscode/bin/test" }, "devDependencies": { "typescript": "^3.3.1", "vscode": "^1.1.28", "tslint": "^5.12.1", "@types/node": "^8.10.25", "@types/mocha": "^2.2.42" } } `); createFile('', 'tsconfig.json', `{ "compilerOptions": { "module": "commonjs", "target": "es6", "outDir": "out", "lib": [ "es6" ], "sourceMap": true, "rootDir": "src", "strict": true /* enable all strict type-checking options */ /* Additional Checks */ // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ // "noUnusedParameters": true, /* Report errors on unused parameters. */ }, "exclude": [ "node_modules", ".vscode-test" ] } `); createFile('', 'tslint.json', `{ "rules": { "no-string-throw": true, "no-unused-expression": true, "no-duplicate-variable": true, "curly": true, "class-name": true, "semicolon": [ true, "always" ], "triple-equals": true }, "defaultSeverity": "warning" } `); createFile('src', 'extension.ts', `// The module 'vscode' contains the VS Code extensibility API // Import the module and reference it with the alias vscode in your code below import * as vscode from 'vscode'; // this method is called when your extension is activated // your extension is activated the very first time the command is executed export function activate(context: vscode.ExtensionContext) { // Use the console to output diagnostic information (console.log) and errors (console.error) // This line of code will only be executed once when your extension is activated console.log('Congratulations, your extension "test-ts" is now active!'); // The command has been defined in the package.json file // Now provide the implementation of the command with registerCommand // The commandId parameter must match the command field in package.json let disposable = vscode.commands.registerCommand('extension.helloWorld', () => { // The code you place here will be executed every time your command is executed // Display a message box to the user vscode.window.showInformationMessage('Hello World!'); }); context.subscriptions.push(disposable); } // this method is called when your extension is deactivated export function deactivate() {} `); createFile('test', 'extension.test.ts', `// // Note: This example test is leveraging the Mocha test framework. // Please refer to their documentation on https://mochajs.org/ for help. 
// // The module 'assert' provides assertion methods from node import * as assert from 'assert'; // You can import and use all API from the 'vscode' module // as well as import your extension to test it // import * as vscode from 'vscode'; // import * as myExtension from '../extension'; // Defines a Mocha test suite to group tests of similar kind together suite("Extension Tests", function () { // Defines a Mocha unit test test("Something 1", function() { assert.equal(-1, [1, 2, 3].indexOf(5)); assert.equal(-1, [1, 2, 3].indexOf(0)); }); });`); createFile('test', 'index.ts', `// // PLEASE DO NOT MODIFY / DELETE UNLESS YOU KNOW WHAT YOU ARE DOING // // This file is providing the test runner to use when running extension tests. // By default the test runner in use is Mocha based. // // You can provide your own test runner if you want to override it by exporting // a function run(testRoot: string, clb: (error:Error) => void) that the extension // host can call to run the tests. The test runner is expected to use console.log // to report the results back to the caller. When the tests are finished, return // a possible error to the callback or null if none. import * as testRunner from 'vscode/lib/testrunner'; // You can directly control Mocha options by configuring the test runner below // See https://github.com/mochajs/mocha/wiki/Using-mocha-programmatically#set-options // for more info testRunner.configure({ ui: 'tdd', // the TDD UI is being used in extension.test.ts (suite, test, etc.) useColors: true // colored output from test results }); module.exports = testRunner;`); //#endregion //#region Remote export class SimpleRemoteAgentService implements IRemoteAgentService { declare readonly _serviceBrand: undefined; socketFactory: ISocketFactory = new BrowserSocketFactory(null); getConnection(): IRemoteAgentConnection | null { return null; } async getEnvironment(bail?: boolean): Promise<IRemoteAgentEnvironment | null> { return null; } async getDiagnosticInfo(options: IDiagnosticInfoOptions): Promise<IDiagnosticInfo | undefined> { return undefined; } async disableTelemetry(): Promise<void> { } async logTelemetry(eventName: string, data?: ITelemetryData): Promise<void> { } async flushTelemetry(): Promise<void> { } async getRawEnvironment(): Promise<IRemoteAgentEnvironment | null> { return null; } async scanExtensions(skipExtensions?: ExtensionIdentifier[]): Promise<IExtensionDescription[]> { return []; } async scanSingleExtension(extensionLocation: URI, isBuiltin: boolean): Promise<IExtensionDescription | null> { return null; } async whenExtensionsReady(): Promise<void> { } } //#endregion //#region Backup File class SimpleBackupFileService implements IBackupFileService { declare readonly _serviceBrand: undefined; async hasBackups(): Promise<boolean> { return false; } async discardResourceBackup(resource: URI): Promise<void> { } async discardAllWorkspaceBackups(): Promise<void> { } toBackupResource(resource: URI): URI { return resource; } hasBackupSync(resource: URI, versionId?: number): boolean { return false; } async getBackups(): Promise<URI[]> { return []; } async resolve<T extends object>(resource: URI): Promise<IResolvedBackup<T> | undefined> { return undefined; } async backup<T extends object>(resource: URI, content?: ITextSnapshot, versionId?: number, meta?: T): Promise<void> { } async discardBackup(resource: URI): Promise<void> { } async discardBackups(): Promise<void> { } } registerSingleton(IBackupFileService, SimpleBackupFileService); //#endregion //#region Extensions class 
SimpleExtensionService extends NullExtensionService { } registerSingleton(IExtensionService, SimpleExtensionService); //#endregion //#region Telemetry class SimpleTelemetryService implements ITelemetryService { declare readonly _serviceBrand: undefined; readonly sendErrorTelemetry = false; readonly isOptedIn = false; async publicLog(eventName: string, data?: ITelemetryData, anonymizeFilePaths?: boolean): Promise<void> { } async publicLog2<E extends ClassifiedEvent<T> = never, T extends GDPRClassification<T> = never>(eventName: string, data?: StrictPropertyChecker<E, ClassifiedEvent<T>, 'Type of classified event does not match event properties'>, anonymizeFilePaths?: boolean): Promise<void> { } async publicLogError(errorEventName: string, data?: ITelemetryData): Promise<void> { } async publicLogError2<E extends ClassifiedEvent<T> = never, T extends GDPRClassification<T> = never>(eventName: string, data?: StrictPropertyChecker<E, ClassifiedEvent<T>, 'Type of classified event does not match event properties'>): Promise<void> { } setEnabled(value: boolean): void { } setExperimentProperty(name: string, value: string): void { } async getTelemetryInfo(): Promise<ITelemetryInfo> { return { instanceId: 'someValue.instanceId', sessionId: 'someValue.sessionId', machineId: 'someValue.machineId' }; } } registerSingleton(ITelemetryService, SimpleTelemetryService); //#endregion //#region Keymap Service (borrowed from browser for now to enable keyboard access) class SimpleKeyboardLayoutService extends BrowserKeyboardLayoutService { } registerSingleton(IKeyboardLayoutService, SimpleKeyboardLayoutService); //#endregion //#region Webview class SimpleWebviewService implements IWebviewService { declare readonly _serviceBrand: undefined; readonly activeWebview = undefined; createWebviewElement(id: string, options: WebviewOptions, contentOptions: WebviewContentOptions, extension: WebviewExtensionDescription | undefined): WebviewElement { throw new Error('Method not implemented.'); } createWebviewOverlay(id: string, options: WebviewOptions, contentOptions: WebviewContentOptions, extension: WebviewExtensionDescription | undefined): WebviewOverlay { throw new Error('Method not implemented.'); } setIcons(id: string, value: WebviewIcons | undefined): void { } } registerSingleton(IWebviewService, SimpleWebviewService); //#endregion //#region Textfiles class SimpleTextFileService extends AbstractTextFileService { declare readonly _serviceBrand: undefined; } registerSingleton(ITextFileService, SimpleTextFileService); //#endregion //#region extensions management class SimpleExtensionManagementServerService implements IExtensionManagementServerService { declare readonly _serviceBrand: undefined; readonly localExtensionManagementServer = null; readonly remoteExtensionManagementServer = null; readonly webExtensionManagementServer = null; getExtensionManagementServer(extension: IExtension): IExtensionManagementServer | null { return null; } } registerSingleton(IExtensionManagementServerService, SimpleExtensionManagementServerService); //#endregion //#region Tunnel class SimpleTunnelService implements ITunnelService { declare readonly _serviceBrand: undefined; tunnels: Promise<readonly RemoteTunnel[]> = Promise.resolve([]); onTunnelOpened = Event.None; onTunnelClosed = Event.None; openTunnel(addressProvider: IAddressProvider | undefined, remoteHost: string | undefined, remotePort: number, localPort?: number): Promise<RemoteTunnel> | undefined { return undefined; } async closeTunnel(remoteHost: string, remotePort: number): 
Promise<void> { } setTunnelProvider(provider: ITunnelProvider | undefined): IDisposable { return Disposable.None; } } registerSingleton(ITunnelService, SimpleTunnelService); //#endregion //#region User Data Sync class SimpleUserDataSyncService implements IUserDataSyncService { declare readonly _serviceBrand: undefined; onDidChangeStatus = Event.None; onDidChangeConflicts = Event.None; onDidChangeLocal = Event.None; onSyncErrors = Event.None; onDidChangeLastSyncTime = Event.None; onDidResetRemote = Event.None; onDidResetLocal = Event.None; status: SyncStatus = SyncStatus.Idle; conflicts: [SyncResource, IResourcePreview[]][] = []; lastSyncTime = undefined; createSyncTask(): Promise<ISyncTask> { throw new Error('Method not implemented.'); } createManualSyncTask(): Promise<IManualSyncTask> { throw new Error('Method not implemented.'); } async replace(uri: URI): Promise<void> { } async reset(): Promise<void> { } async resetRemote(): Promise<void> { } async resetLocal(): Promise<void> { } async hasLocalData(): Promise<boolean> { return false; } async hasPreviouslySynced(): Promise<boolean> { return false; } async resolveContent(resource: URI): Promise<string | null> { return null; } async accept(resource: SyncResource, conflictResource: URI, content: string | null | undefined, apply: boolean): Promise<void> { } async getLocalSyncResourceHandles(resource: SyncResource): Promise<ISyncResourceHandle[]> { return []; } async getRemoteSyncResourceHandles(resource: SyncResource): Promise<ISyncResourceHandle[]> { return []; } async getAssociatedResources(resource: SyncResource, syncResourceHandle: ISyncResourceHandle): Promise<{ resource: URI; comparableResource: URI; }[]> { return []; } async getMachineId(resource: SyncResource, syncResourceHandle: ISyncResourceHandle): Promise<string | undefined> { return undefined; } } registerSingleton(IUserDataSyncService, SimpleUserDataSyncService); //#endregion //#region User Data Sync Account class SimpleUserDataSyncAccountService implements IUserDataSyncAccountService { declare readonly _serviceBrand: undefined; onTokenFailed = Event.None; onDidChangeAccount = Event.None; account: IUserDataSyncAccount | undefined = undefined; async updateAccount(account: IUserDataSyncAccount | undefined): Promise<void> { } } registerSingleton(IUserDataSyncAccountService, SimpleUserDataSyncAccountService); //#endregion //#region User Data Auto Sync Account class SimpleUserDataAutoSyncAccountService implements IUserDataAutoSyncService { declare readonly _serviceBrand: undefined; onError = Event.None; onDidChangeEnablement = Event.None; isEnabled(): boolean { return false; } canToggleEnablement(): boolean { return false; } async turnOn(): Promise<void> { } async turnOff(everywhere: boolean): Promise<void> { } async triggerSync(sources: string[], hasToLimitSync: boolean, disableCache: boolean): Promise<void> { } } registerSingleton(IUserDataAutoSyncService, SimpleUserDataAutoSyncAccountService); //#endregion //#region User Data Sync Store Management class SimpleUserDataSyncStoreManagementService implements IUserDataSyncStoreManagementService { declare readonly _serviceBrand: undefined; onDidChangeUserDataSyncStore = Event.None; userDataSyncStore: IUserDataSyncStore | undefined = undefined; async switch(type: UserDataSyncStoreType): Promise<void> { } async getPreviousUserDataSyncStore(): Promise<IUserDataSyncStore | undefined> { return undefined; } } registerSingleton(IUserDataSyncStoreManagementService, SimpleUserDataSyncStoreManagementService); //#endregion //#region Task class 
SimpleTaskService implements ITaskService { declare readonly _serviceBrand: undefined; onDidStateChange = Event.None; supportsMultipleTaskExecutions = false; configureAction(): Action { throw new Error('Method not implemented.'); } build(): Promise<ITaskSummary> { throw new Error('Method not implemented.'); } runTest(): Promise<ITaskSummary> { throw new Error('Method not implemented.'); } run(task: CustomTask | ContributedTask | InMemoryTask | undefined, options?: ProblemMatcherRunOptions): Promise<ITaskSummary | undefined> { throw new Error('Method not implemented.'); } inTerminal(): boolean { throw new Error('Method not implemented.'); } isActive(): Promise<boolean> { throw new Error('Method not implemented.'); } getActiveTasks(): Promise<Task[]> { throw new Error('Method not implemented.'); } getBusyTasks(): Promise<Task[]> { throw new Error('Method not implemented.'); } restart(task: Task): void { throw new Error('Method not implemented.'); } terminate(task: Task): Promise<TaskTerminateResponse> { throw new Error('Method not implemented.'); } terminateAll(): Promise<TaskTerminateResponse[]> { throw new Error('Method not implemented.'); } tasks(filter?: TaskFilter): Promise<Task[]> { throw new Error('Method not implemented.'); } taskTypes(): string[] { throw new Error('Method not implemented.'); } getWorkspaceTasks(runSource?: TaskRunSource): Promise<Map<string, WorkspaceFolderTaskResult>> { throw new Error('Method not implemented.'); } readRecentTasks(): Promise<(CustomTask | ContributedTask | InMemoryTask | ConfiguringTask)[]> { throw new Error('Method not implemented.'); } getTask(workspaceFolder: string | IWorkspace | IWorkspaceFolder, alias: string | TaskIdentifier, compareId?: boolean): Promise<CustomTask | ContributedTask | InMemoryTask | undefined> { throw new Error('Method not implemented.'); } tryResolveTask(configuringTask: ConfiguringTask): Promise<CustomTask | ContributedTask | InMemoryTask | undefined> { throw new Error('Method not implemented.'); } getTasksForGroup(group: string): Promise<Task[]> { throw new Error('Method not implemented.'); } getRecentlyUsedTasks(): LinkedMap<string, string> { throw new Error('Method not implemented.'); } removeRecentlyUsedTask(taskRecentlyUsedKey: string): void { throw new Error('Method not implemented.'); } migrateRecentTasks(tasks: Task[]): Promise<void> { throw new Error('Method not implemented.'); } createSorter(): TaskSorter { throw new Error('Method not implemented.'); } getTaskDescription(task: CustomTask | ContributedTask | InMemoryTask | ConfiguringTask): string | undefined { throw new Error('Method not implemented.'); } canCustomize(task: CustomTask | ContributedTask): boolean { throw new Error('Method not implemented.'); } customize(task: CustomTask | ContributedTask | ConfiguringTask, properties?: {}, openConfig?: boolean): Promise<void> { throw new Error('Method not implemented.'); } openConfig(task: CustomTask | ConfiguringTask | undefined): Promise<boolean> { throw new Error('Method not implemented.'); } registerTaskProvider(taskProvider: ITaskProvider, type: string): IDisposable { throw new Error('Method not implemented.'); } registerTaskSystem(scheme: string, taskSystemInfo: TaskSystemInfo): void { throw new Error('Method not implemented.'); } registerSupportedExecutions(custom?: boolean, shell?: boolean, process?: boolean): void { throw new Error('Method not implemented.'); } setJsonTasksSupported(areSuppored: Promise<boolean>): void { throw new Error('Method not implemented.'); } extensionCallbackTaskComplete(task: 
Task, result: number | undefined): Promise<void> { throw new Error('Method not implemented.'); } } registerSingleton(ITaskService, SimpleTaskService); //#endregion //#region Extension Tips class SimpleExtensionTipsService implements IExtensionTipsService { declare readonly _serviceBrand: undefined; onRecommendationChange = Event.None; async getConfigBasedTips(folder: URI): Promise<IConfigBasedExtensionTip[]> { return []; } async getImportantExecutableBasedTips(): Promise<IExecutableBasedExtensionTip[]> { return []; } async getOtherExecutableBasedTips(): Promise<IExecutableBasedExtensionTip[]> { return []; } async getAllWorkspacesTips(): Promise<IWorkspaceTips[]> { return []; } } registerSingleton(IExtensionTipsService, SimpleExtensionTipsService); //#endregion //#region Workspace Tags class SimpleWorkspaceTagsService implements IWorkspaceTagsService { declare readonly _serviceBrand: undefined; async getTags(): Promise<Tags> { return Object.create(null); } async getTelemetryWorkspaceId(workspace: IWorkspace, state: WorkbenchState): Promise<string | undefined> { return undefined; } async getHashedRemotesFromUri(workspaceUri: URI, stripEndingDotGit?: boolean): Promise<string[]> { return []; } } registerSingleton(IWorkspaceTagsService, SimpleWorkspaceTagsService); //#endregion //#region Output Channel class SimpleOutputChannelModelService extends AbstractOutputChannelModelService { declare readonly _serviceBrand: undefined; } registerSingleton(IOutputChannelModelService, SimpleOutputChannelModelService); //#endregion //#region Integrity class SimpleIntegrityService implements IIntegrityService { declare readonly _serviceBrand: undefined; async isPure(): Promise<IntegrityTestResult> { return { isPure: true, proof: [] }; } } registerSingleton(IIntegrityService, SimpleIntegrityService); //#endregion //#region Terminal Instance class SimpleTerminalInstanceService extends TerminalInstanceService { } registerSingleton(ITerminalInstanceService, SimpleTerminalInstanceService);
//#endregion
utils.rs
use std::fmt::Write; use std::io::{self, Read}; use std::path::Path; use ansi_term::Color::{self, Fixed, RGB}; use ansi_term::{self, Style}; use atty::Stream; use reqwest::header::{HeaderMap, CONTENT_TYPE}; use syntect::dumps::from_binary; use syntect::easy::HighlightLines; use syntect::highlighting::{FontStyle, ThemeSet}; use syntect::parsing::SyntaxSet; use syntect::util::LinesWithEndings; use tokio::fs::File; use tokio_util::codec::{BytesCodec, FramedRead}; use crate::Body; use crate::Theme; pub enum ContentType { Json, Html, Xml, UrlencodedForm, Multipart, } pub fn get_content_type(headers: &HeaderMap) -> Option<ContentType> { headers .get(CONTENT_TYPE)? .to_str() .ok() .and_then(|content_type| { if content_type.contains("json") { Some(ContentType::Json) } else if content_type.contains("html") { Some(ContentType::Html) } else if content_type.contains("xml") { Some(ContentType::Xml) } else if content_type.contains("multipart") { Some(ContentType::Multipart) } else if content_type.contains("x-www-form-urlencoded") { Some(ContentType::UrlencodedForm) } else { None } }) } // https://github.com/seanmonstar/reqwest/issues/646#issuecomment-616985015 pub async fn body_to_file(path: impl AsRef<Path>) -> reqwest::Body { let file = File::open(&path).await.unwrap(); reqwest::Body::wrap_stream(FramedRead::new(file, BytesCodec::new())) } pub fn body_from_stdin(ignore_stdin: bool) -> Option<Body> { if atty::is(Stream::Stdin) || ignore_stdin { None } else { let mut buffer = String::new(); io::stdin().read_to_string(&mut buffer).unwrap(); Some(Body::Raw(buffer)) } } pub fn indent_json(text: &str) -> String { let mut fmt = jsonxf::Formatter::pretty_printer(); fmt.indent = String::from(" "); fmt.format(text).unwrap() } pub fn colorize<'a>(
    text: &'a str,
    syntax: &str,
    theme: &Theme,
) -> impl Iterator<Item = String> + 'a {
    lazy_static::lazy_static! {
        static ref TS: ThemeSet = ThemeSet::from(from_binary(include_bytes!(concat!(
            env!("OUT_DIR"),
            "/themepack.themedump"
        ))));
        static ref PS: SyntaxSet = SyntaxSet::from(from_binary(include_bytes!(concat!(
            env!("OUT_DIR"),
            "/syntax.packdump"
        ))));
    }

    let syntax = PS.find_syntax_by_extension(syntax).unwrap();
    let mut h = match theme {
        Theme::Auto => HighlightLines::new(syntax, &TS.themes["ansi"]),
        Theme::Solarized => HighlightLines::new(syntax, &TS.themes["solarized"]),
    };

    LinesWithEndings::from(text)
        .map(move |line| {
            let mut s: String = String::new();
            let highlights = h.highlight(line, &PS);
            for (style, component) in highlights {
                let mut color = Style {
                    foreground: to_ansi_color(style.foreground),
                    ..Style::default()
                };
                if style.font_style.contains(FontStyle::UNDERLINE) {
                    color = color.underline();
                }
                write!(s, "{}", &color.paint(component)).unwrap();
            }
            s
        })
        .chain(std::iter::once("\x1b[0m".into()))
}

// https://github.com/sharkdp/bat/blob/3a85fd767bd1f03debd0a60ac5bc08548f95bc9d/src/terminal.rs
fn to_ansi_color(color: syntect::highlighting::Color) -> Option<ansi_term::Color> {
    if color.a == 0 {
        // Themes can specify one of the user-configurable terminal colors by
        // encoding them as #RRGGBBAA with AA set to 00 (transparent) and RR set
        // to the 8-bit color palette number. The built-in themes ansi-light,
        // ansi-dark, base16, and base16-256 use this.
        match color.r {
            // For the first 7 colors, use the Color enum to produce ANSI escape
            // sequences using codes 30-37 (foreground) and 40-47 (background).
            // For example, red foreground is \x1b[31m. This works on terminals
            // without 256-color support.
            0x00 => Some(Color::Black),
            0x01 => Some(Color::Red),
            0x02 => Some(Color::Green),
            0x03 => Some(Color::Yellow),
            0x04 => Some(Color::Blue),
            0x05 => Some(Color::Purple),
            0x06 => Some(Color::Cyan),
            // The 8th color is white. Themes use it as the default foreground
            // color, but that looks wrong on terminals with a light background.
            // So keep that text uncolored instead.
            0x07 => None,
            // For all other colors, use Fixed to produce escape sequences using
            // codes 38;5 (foreground) and 48;5 (background). For example,
            // bright red foreground is \x1b[38;5;9m. This only works on
            // terminals with 256-color support.
            //
            // TODO: When ansi_term adds support for bright variants using codes
            // 90-97 (foreground) and 100-107 (background), we should use those
            // for values 0x08 to 0x0f and only use Fixed for 0x10 to 0xff.
            n => Some(Fixed(n)),
        }
    } else {
        Some(RGB(color.r, color.g, color.b))
    }
}

// https://stackoverflow.com/a/45145246/5915221
#[macro_export]
macro_rules! vec_of_strings {
    ($($str:expr),*) => ({
        vec![$(String::from($str),)*] as Vec<String>
    });
}
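to_ansi_color above relies on a convention where an alpha of zero marks the color as a terminal palette index carried in the red channel rather than a true RGBA value. The minimal sketch below reproduces that mapping with raw escape sequences and no external crates, just to make the convention concrete; it is not part of the original module.

fn foreground_escape(r: u8, g: u8, b: u8, a: u8) -> Option<String> {
    if a == 0 {
        match r {
            0x00..=0x06 => Some(format!("\x1b[{}m", 30 + r)), // basic ANSI colors 30-36
            0x07 => None, // white: leave the terminal's default foreground alone
            n => Some(format!("\x1b[38;5;{}m", n)), // 256-color palette index
        }
    } else {
        Some(format!("\x1b[38;2;{};{};{}m", r, g, b)) // literal 24-bit color
    }
}

fn main() {
    assert_eq!(foreground_escape(0x01, 0, 0, 0).as_deref(), Some("\x1b[31m")); // red
    assert_eq!(foreground_escape(0x07, 0, 0, 0), None); // default foreground
    assert_eq!(foreground_escape(0x0c, 0, 0, 0).as_deref(), Some("\x1b[38;5;12m"));
    assert_eq!(
        foreground_escape(0xff, 0x80, 0x00, 0xff).as_deref(),
        Some("\x1b[38;2;255;128;0m")
    );
    println!("all escape-sequence checks passed");
}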
enf2.rs
#[doc = "Reader of register ENF2"] pub type R = crate::R<u32, super::ENF2>; #[doc = "Writer for register ENF2"] pub type W = crate::W<u32, super::ENF2>; #[doc = "Register ENF2 `reset()`'s with value 0"] impl crate::ResetValue for super::ENF2 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `P2_0EF`"] pub type P2_0EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_0EF`"] pub struct P2_0EF_W<'a> { w: &'a mut W, } impl<'a> P2_0EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } #[doc = "Reader of field `P2_1EF`"] pub type P2_1EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_1EF`"] pub struct P2_1EF_W<'a> { w: &'a mut W, } impl<'a> P2_1EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Reader of field `P2_2EF`"] pub type P2_2EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_2EF`"] pub struct P2_2EF_W<'a> { w: &'a mut W, } impl<'a> P2_2EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Reader of field `P2_3EF`"] pub type P2_3EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_3EF`"] pub struct P2_3EF_W<'a> { w: &'a mut W, } impl<'a> P2_3EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3); self.w } } #[doc = "Reader of field `P2_4EF`"] pub type P2_4EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_4EF`"] pub struct P2_4EF_W<'a> { w: &'a mut W, } impl<'a> P2_4EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } #[doc = "Reader of field `P2_5EF`"] pub type P2_5EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_5EF`"] pub struct P2_5EF_W<'a> { w: &'a mut W, } impl<'a> P2_5EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field 
bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5); self.w } } #[doc = "Reader of field `P2_6EF`"] pub type P2_6EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_6EF`"] pub struct P2_6EF_W<'a> { w: &'a mut W, } impl<'a> P2_6EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6); self.w } } #[doc = "Reader of field `P2_7EF`"] pub type P2_7EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_7EF`"] pub struct P2_7EF_W<'a> { w: &'a mut W, } impl<'a> P2_7EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7); self.w } } #[doc = "Reader of field `P2_8EF`"] pub type P2_8EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_8EF`"] pub struct P2_8EF_W<'a> { w: &'a mut W, } impl<'a> P2_8EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8); self.w } } #[doc = "Reader of field `P2_9EF`"] pub type P2_9EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_9EF`"] pub struct P2_9EF_W<'a> { w: &'a mut W, } impl<'a> P2_9EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9); self.w } } #[doc = "Reader of field `P2_10EF`"] pub type P2_10EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_10EF`"] pub struct P2_10EF_W<'a> { w: &'a mut W, } impl<'a> P2_10EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10); self.w } } #[doc = "Reader of field `P2_11EF`"] pub type P2_11EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_11EF`"] pub struct P2_11EF_W<'a> { w: &'a mut W, } impl<'a> P2_11EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] 
#[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11); self.w } } #[doc = "Reader of field `P2_12EF`"] pub type P2_12EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_12EF`"] pub struct P2_12EF_W<'a> { w: &'a mut W, } impl<'a> P2_12EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12); self.w } } #[doc = "Reader of field `P2_13EF`"] pub type P2_13EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_13EF`"] pub struct P2_13EF_W<'a> { w: &'a mut W, } impl<'a> P2_13EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13); self.w } } #[doc = "Reader of field `P2_14EF`"] pub type P2_14EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_14EF`"] pub struct P2_14EF_W<'a> { w: &'a mut W, } impl<'a> P2_14EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14); self.w } } #[doc = "Reader of field `P2_15EF`"] pub type P2_15EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_15EF`"] pub struct P2_15EF_W<'a> { w: &'a mut W, } impl<'a> P2_15EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15); self.w } } #[doc = "Reader of field `P2_16EF`"] pub type P2_16EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_16EF`"] pub struct P2_16EF_W<'a> { w: &'a mut W, } impl<'a> P2_16EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16); self.w } } #[doc = "Reader of field `P2_17EF`"] pub type P2_17EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_17EF`"] pub struct P2_17EF_W<'a> { w: &'a mut W, } impl<'a> P2_17EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the 
field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17); self.w } } #[doc = "Reader of field `P2_18EF`"] pub type P2_18EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_18EF`"] pub struct P2_18EF_W<'a> { w: &'a mut W, } impl<'a> P2_18EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18); self.w } } #[doc = "Reader of field `P2_19EF`"] pub type P2_19EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_19EF`"] pub struct P2_19EF_W<'a> { w: &'a mut W, } impl<'a> P2_19EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19); self.w } } #[doc = "Reader of field `P2_20EF`"] pub type P2_20EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_20EF`"] pub struct P2_20EF_W<'a> { w: &'a mut W, } impl<'a> P2_20EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20); self.w } } #[doc = "Reader of field `P2_21EF`"] pub type P2_21EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_21EF`"] pub struct P2_21EF_W<'a> { w: &'a mut W, } impl<'a> P2_21EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21); self.w } } #[doc = "Reader of field `P2_22EF`"] pub type P2_22EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_22EF`"] pub struct P2_22EF_W<'a> { w: &'a mut W, } impl<'a> P2_22EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22); self.w } } #[doc = "Reader of field `P2_23EF`"] pub type P2_23EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_23EF`"] pub struct P2_23EF_W<'a> { w: &'a mut W, } impl<'a> P2_23EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = 
r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 23)) | (((value as u32) & 0x01) << 23); self.w } } #[doc = "Reader of field `P2_24EF`"] pub type P2_24EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_24EF`"] pub struct P2_24EF_W<'a> { w: &'a mut W, } impl<'a> P2_24EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24); self.w } } #[doc = "Reader of field `P2_25EF`"] pub type P2_25EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_25EF`"] pub struct P2_25EF_W<'a> { w: &'a mut W, } impl<'a> P2_25EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25); self.w } } #[doc = "Reader of field `P2_26EF`"] pub type P2_26EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_26EF`"] pub struct P2_26EF_W<'a> { w: &'a mut W, } impl<'a> P2_26EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26); self.w } } #[doc = "Reader of field `P2_27EF`"] pub type P2_27EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_27EF`"] pub struct P2_27EF_W<'a> { w: &'a mut W, } impl<'a> P2_27EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27); self.w } } #[doc = "Reader of field `P2_28EF`"] pub type P2_28EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_28EF`"] pub struct P2_28EF_W<'a> { w: &'a mut W, } impl<'a> P2_28EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28); self.w } } #[doc = "Reader of field `P2_29EF`"] pub type P2_29EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_29EF`"] pub struct P2_29EF_W<'a> { w: &'a mut W, } impl<'a> P2_29EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) 
} #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29); self.w } } #[doc = "Reader of field `P2_30EF`"] pub type P2_30EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_30EF`"] pub struct P2_30EF_W<'a> { w: &'a mut W, } impl<'a> P2_30EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30); self.w } } #[doc = "Reader of field `P2_31EF`"] pub type P2_31EF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `P2_31EF`"] pub struct P2_31EF_W<'a> { w: &'a mut W, } impl<'a> P2_31EF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } impl R { #[doc = "Bit 0 - Enable falling edge interrupt for P2\\[0\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_0ef(&self) -> P2_0EF_R { P2_0EF_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Enable falling edge interrupt for P2\\[1\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_1ef(&self) -> P2_1EF_R { P2_1EF_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Enable falling edge interrupt for P2\\[2\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_2ef(&self) -> P2_2EF_R { P2_2EF_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Enable falling edge interrupt for P2\\[3\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_3ef(&self) -> P2_3EF_R
#[doc = "Bit 4 - Enable falling edge interrupt for P2\\[4\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_4ef(&self) -> P2_4EF_R { P2_4EF_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Enable falling edge interrupt for P2\\[5\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_5ef(&self) -> P2_5EF_R { P2_5EF_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Enable falling edge interrupt for P2\\[6\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_6ef(&self) -> P2_6EF_R { P2_6EF_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Enable falling edge interrupt for P2\\[7\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_7ef(&self) -> P2_7EF_R { P2_7EF_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 8 - Enable falling edge interrupt for P2\\[8\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_8ef(&self) -> P2_8EF_R { P2_8EF_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Enable falling edge interrupt for P2\\[9\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_9ef(&self) -> P2_9EF_R { P2_9EF_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - Enable falling edge interrupt for P2\\[10\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_10ef(&self) -> P2_10EF_R { P2_10EF_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 11 - Enable falling edge interrupt for P2\\[11\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_11ef(&self) -> P2_11EF_R { P2_11EF_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 12 - Enable falling edge interrupt for P2\\[12\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_12ef(&self) -> P2_12EF_R { P2_12EF_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Enable falling edge interrupt for P2\\[13\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_13ef(&self) -> P2_13EF_R { P2_13EF_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 14 - Enable falling edge interrupt for P2\\[14\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_14ef(&self) -> P2_14EF_R { P2_14EF_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 15 - Enable falling edge interrupt for P2\\[15\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_15ef(&self) -> P2_15EF_R { P2_15EF_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 16 - Enable falling edge interrupt for P2\\[16\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_16ef(&self) -> P2_16EF_R { P2_16EF_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 17 - Enable falling edge interrupt for P2\\[17\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_17ef(&self) -> P2_17EF_R { P2_17EF_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 18 - Enable falling edge interrupt for P2\\[18\\]. 0 = Disable falling edge interrupt. 
1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_18ef(&self) -> P2_18EF_R { P2_18EF_R::new(((self.bits >> 18) & 0x01) != 0) } #[doc = "Bit 19 - Enable falling edge interrupt for P2\\[19\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_19ef(&self) -> P2_19EF_R { P2_19EF_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bit 20 - Enable falling edge interrupt for P2\\[20\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_20ef(&self) -> P2_20EF_R { P2_20EF_R::new(((self.bits >> 20) & 0x01) != 0) } #[doc = "Bit 21 - Enable falling edge interrupt for P2\\[21\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_21ef(&self) -> P2_21EF_R { P2_21EF_R::new(((self.bits >> 21) & 0x01) != 0) } #[doc = "Bit 22 - Enable falling edge interrupt for P2\\[22\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_22ef(&self) -> P2_22EF_R { P2_22EF_R::new(((self.bits >> 22) & 0x01) != 0) } #[doc = "Bit 23 - Enable falling edge interrupt for P2\\[23\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_23ef(&self) -> P2_23EF_R { P2_23EF_R::new(((self.bits >> 23) & 0x01) != 0) } #[doc = "Bit 24 - Enable falling edge interrupt for P2\\[24\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_24ef(&self) -> P2_24EF_R { P2_24EF_R::new(((self.bits >> 24) & 0x01) != 0) } #[doc = "Bit 25 - Enable falling edge interrupt for P2\\[25\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_25ef(&self) -> P2_25EF_R { P2_25EF_R::new(((self.bits >> 25) & 0x01) != 0) } #[doc = "Bit 26 - Enable falling edge interrupt for P2\\[26\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_26ef(&self) -> P2_26EF_R { P2_26EF_R::new(((self.bits >> 26) & 0x01) != 0) } #[doc = "Bit 27 - Enable falling edge interrupt for P2\\[27\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_27ef(&self) -> P2_27EF_R { P2_27EF_R::new(((self.bits >> 27) & 0x01) != 0) } #[doc = "Bit 28 - Enable falling edge interrupt for P2\\[28\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_28ef(&self) -> P2_28EF_R { P2_28EF_R::new(((self.bits >> 28) & 0x01) != 0) } #[doc = "Bit 29 - Enable falling edge interrupt for P2\\[29\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_29ef(&self) -> P2_29EF_R { P2_29EF_R::new(((self.bits >> 29) & 0x01) != 0) } #[doc = "Bit 30 - Enable falling edge interrupt for P2\\[30\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_30ef(&self) -> P2_30EF_R { P2_30EF_R::new(((self.bits >> 30) & 0x01) != 0) } #[doc = "Bit 31 - Enable falling edge interrupt for P2\\[31\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_31ef(&self) -> P2_31EF_R { P2_31EF_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Enable falling edge interrupt for P2\\[0\\]. 0 = Disable falling edge interrupt. 
1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_0ef(&mut self) -> P2_0EF_W { P2_0EF_W { w: self } } #[doc = "Bit 1 - Enable falling edge interrupt for P2\\[1\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_1ef(&mut self) -> P2_1EF_W { P2_1EF_W { w: self } } #[doc = "Bit 2 - Enable falling edge interrupt for P2\\[2\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_2ef(&mut self) -> P2_2EF_W { P2_2EF_W { w: self } } #[doc = "Bit 3 - Enable falling edge interrupt for P2\\[3\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_3ef(&mut self) -> P2_3EF_W { P2_3EF_W { w: self } } #[doc = "Bit 4 - Enable falling edge interrupt for P2\\[4\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_4ef(&mut self) -> P2_4EF_W { P2_4EF_W { w: self } } #[doc = "Bit 5 - Enable falling edge interrupt for P2\\[5\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_5ef(&mut self) -> P2_5EF_W { P2_5EF_W { w: self } } #[doc = "Bit 6 - Enable falling edge interrupt for P2\\[6\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_6ef(&mut self) -> P2_6EF_W { P2_6EF_W { w: self } } #[doc = "Bit 7 - Enable falling edge interrupt for P2\\[7\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_7ef(&mut self) -> P2_7EF_W { P2_7EF_W { w: self } } #[doc = "Bit 8 - Enable falling edge interrupt for P2\\[8\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_8ef(&mut self) -> P2_8EF_W { P2_8EF_W { w: self } } #[doc = "Bit 9 - Enable falling edge interrupt for P2\\[9\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_9ef(&mut self) -> P2_9EF_W { P2_9EF_W { w: self } } #[doc = "Bit 10 - Enable falling edge interrupt for P2\\[10\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_10ef(&mut self) -> P2_10EF_W { P2_10EF_W { w: self } } #[doc = "Bit 11 - Enable falling edge interrupt for P2\\[11\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_11ef(&mut self) -> P2_11EF_W { P2_11EF_W { w: self } } #[doc = "Bit 12 - Enable falling edge interrupt for P2\\[12\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_12ef(&mut self) -> P2_12EF_W { P2_12EF_W { w: self } } #[doc = "Bit 13 - Enable falling edge interrupt for P2\\[13\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_13ef(&mut self) -> P2_13EF_W { P2_13EF_W { w: self } } #[doc = "Bit 14 - Enable falling edge interrupt for P2\\[14\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_14ef(&mut self) -> P2_14EF_W { P2_14EF_W { w: self } } #[doc = "Bit 15 - Enable falling edge interrupt for P2\\[15\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_15ef(&mut self) -> P2_15EF_W { P2_15EF_W { w: self } } #[doc = "Bit 16 - Enable falling edge interrupt for P2\\[16\\]. 0 = Disable falling edge interrupt. 
1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_16ef(&mut self) -> P2_16EF_W { P2_16EF_W { w: self } } #[doc = "Bit 17 - Enable falling edge interrupt for P2\\[17\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_17ef(&mut self) -> P2_17EF_W { P2_17EF_W { w: self } } #[doc = "Bit 18 - Enable falling edge interrupt for P2\\[18\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_18ef(&mut self) -> P2_18EF_W { P2_18EF_W { w: self } } #[doc = "Bit 19 - Enable falling edge interrupt for P2\\[19\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_19ef(&mut self) -> P2_19EF_W { P2_19EF_W { w: self } } #[doc = "Bit 20 - Enable falling edge interrupt for P2\\[20\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_20ef(&mut self) -> P2_20EF_W { P2_20EF_W { w: self } } #[doc = "Bit 21 - Enable falling edge interrupt for P2\\[21\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_21ef(&mut self) -> P2_21EF_W { P2_21EF_W { w: self } } #[doc = "Bit 22 - Enable falling edge interrupt for P2\\[22\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_22ef(&mut self) -> P2_22EF_W { P2_22EF_W { w: self } } #[doc = "Bit 23 - Enable falling edge interrupt for P2\\[23\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_23ef(&mut self) -> P2_23EF_W { P2_23EF_W { w: self } } #[doc = "Bit 24 - Enable falling edge interrupt for P2\\[24\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_24ef(&mut self) -> P2_24EF_W { P2_24EF_W { w: self } } #[doc = "Bit 25 - Enable falling edge interrupt for P2\\[25\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_25ef(&mut self) -> P2_25EF_W { P2_25EF_W { w: self } } #[doc = "Bit 26 - Enable falling edge interrupt for P2\\[26\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_26ef(&mut self) -> P2_26EF_W { P2_26EF_W { w: self } } #[doc = "Bit 27 - Enable falling edge interrupt for P2\\[27\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_27ef(&mut self) -> P2_27EF_W { P2_27EF_W { w: self } } #[doc = "Bit 28 - Enable falling edge interrupt for P2\\[28\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_28ef(&mut self) -> P2_28EF_W { P2_28EF_W { w: self } } #[doc = "Bit 29 - Enable falling edge interrupt for P2\\[29\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_29ef(&mut self) -> P2_29EF_W { P2_29EF_W { w: self } } #[doc = "Bit 30 - Enable falling edge interrupt for P2\\[30\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_30ef(&mut self) -> P2_30EF_W { P2_30EF_W { w: self } } #[doc = "Bit 31 - Enable falling edge interrupt for P2\\[31\\]. 0 = Disable falling edge interrupt. 1 = Enable falling edge interrupt."] #[inline(always)] pub fn p2_31ef(&mut self) -> P2_31EF_W { P2_31EF_W { w: self } } }
{ P2_3EF_R::new(((self.bits >> 3) & 0x01) != 0) }
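The generated write proxies above all follow the same clear-then-OR bit pattern. The stand-alone sketch below (not part of the generated file) reproduces that masking math for bit 3 so it can be checked without a device crate; `set_p2_3ef` and `p2_3ef_is_set` are hypothetical helper names, not part of the generated API.

// Minimal, self-contained illustration of what the generated `P2_3EF_W::bit`
// writer and `p2_3ef()` reader actually compute.
fn set_p2_3ef(reg: u32, value: bool) -> u32 {
    // Clear bit 3, then OR in the new value, exactly as the write proxy does.
    (reg & !(0x01 << 3)) | (((value as u32) & 0x01) << 3)
}

fn p2_3ef_is_set(reg: u32) -> bool {
    // Same shift-and-mask as the reader method in `impl R`.
    ((reg >> 3) & 0x01) != 0
}

fn main() {
    let reg = set_p2_3ef(0, true);
    assert!(p2_3ef_is_set(reg));
    assert!(!p2_3ef_is_set(set_p2_3ef(reg, false)));
}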
cycle_alerts.py
#!/usr/bin/env python3 # flake8: noqa # pylint: skip-file # type: ignore import time import cereal.messaging as messaging from selfdrive.car.honda.interface import CarInterface from selfdrive.controls.lib.events import ET, EVENTS, Events from selfdrive.controls.lib.alertmanager import AlertManager def cycle_alerts(duration=200, is_metric=False):
if __name__ == '__main__': cycle_alerts()
alerts = list(EVENTS.keys()) print(alerts) CP = CarInterface.get_params("HONDA CIVIC 2016 TOURING") sm = messaging.SubMaster(['thermal', 'health', 'frame', 'model', 'liveCalibration', 'dMonitoringState', 'plan', 'pathPlan', 'liveLocationKalman']) controls_state = messaging.pub_sock('controlsState') thermal = messaging.pub_sock('thermal') idx, last_alert_millis = 0, 0 alert = alerts[0] events = Events() AM = AlertManager() frame = 0 while 1: if frame % duration == 0: idx = (idx + 1) % len(alerts) events.clear() events.add(alerts[idx]) current_alert_types = [ET.PERMANENT, ET.USER_DISABLE, ET.IMMEDIATE_DISABLE, ET.SOFT_DISABLE, ET.PRE_ENABLE, ET.NO_ENTRY, ET.ENABLE, ET.WARNING] a = events.create_alerts(current_alert_types, [CP, sm, is_metric]) AM.add_many(frame, a) AM.process_alerts(frame) dat = messaging.new_message() dat.init('controlsState') dat.controlsState.alertText1 = AM.alert_text_1 dat.controlsState.alertText2 = AM.alert_text_2 dat.controlsState.alertSize = AM.alert_size dat.controlsState.alertStatus = AM.alert_status dat.controlsState.alertBlinkingRate = AM.alert_rate dat.controlsState.alertType = AM.alert_type dat.controlsState.alertSound = AM.audible_alert controls_state.send(dat.to_bytes()) dat = messaging.new_message() dat.init('thermal') dat.thermal.started = True thermal.send(dat.to_bytes()) frame += 1 time.sleep(0.01)
volume_snapshotter_client.go
/* Copyright 2017, 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package framework import ( "encoding/json" "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" proto "github.com/velann21/velero/pkg/plugin/generated" ) // NewVolumeSnapshotterPlugin constructs a VolumeSnapshotterPlugin. func NewVolumeSnapshotterPlugin(options ...PluginOption) *VolumeSnapshotterPlugin { return &VolumeSnapshotterPlugin{ pluginBase: newPluginBase(options...), } } // VolumeSnapshotterGRPCClient implements the cloudprovider.VolumeSnapshotter interface and uses a // gRPC client to make calls to the plugin server. type VolumeSnapshotterGRPCClient struct { *clientBase grpcClient proto.VolumeSnapshotterClient } func newVolumeSnapshotterGRPCClient(base *clientBase, clientConn *grpc.ClientConn) interface{}
// Init prepares the VolumeSnapshotter for usage using the provided map of // configuration key-value pairs. It returns an error if the VolumeSnapshotter // cannot be initialized from the provided config. func (c *VolumeSnapshotterGRPCClient) Init(config map[string]string) error { req := &proto.VolumeSnapshotterInitRequest{ Plugin: c.plugin, Config: config, } if _, err := c.grpcClient.Init(context.Background(), req); err != nil { return fromGRPCError(err) } return nil } // CreateVolumeFromSnapshot creates a new block volume, initialized from the provided snapshot, // and with the specified type and IOPS (if using provisioned IOPS). func (c *VolumeSnapshotterGRPCClient) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) { req := &proto.CreateVolumeRequest{ Plugin: c.plugin, SnapshotID: snapshotID, VolumeType: volumeType, VolumeAZ: volumeAZ, } if iops == nil { req.Iops = 0 } else { req.Iops = *iops } res, err := c.grpcClient.CreateVolumeFromSnapshot(context.Background(), req) if err != nil { return "", fromGRPCError(err) } return res.VolumeID, nil } // GetVolumeInfo returns the type and IOPS (if using provisioned IOPS) for a specified block // volume. func (c *VolumeSnapshotterGRPCClient) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) { req := &proto.GetVolumeInfoRequest{ Plugin: c.plugin, VolumeID: volumeID, VolumeAZ: volumeAZ, } res, err := c.grpcClient.GetVolumeInfo(context.Background(), req) if err != nil { return "", nil, fromGRPCError(err) } var iops *int64 if res.Iops != 0 { iops = &res.Iops } return res.VolumeType, iops, nil } // CreateSnapshot creates a snapshot of the specified block volume, and applies the provided // set of tags to the snapshot. func (c *VolumeSnapshotterGRPCClient) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) { req := &proto.CreateSnapshotRequest{ Plugin: c.plugin, VolumeID: volumeID, VolumeAZ: volumeAZ, Tags: tags, } res, err := c.grpcClient.CreateSnapshot(context.Background(), req) if err != nil { return "", fromGRPCError(err) } return res.SnapshotID, nil } // DeleteSnapshot deletes the specified volume snapshot. func (c *VolumeSnapshotterGRPCClient) DeleteSnapshot(snapshotID string) error { req := &proto.DeleteSnapshotRequest{ Plugin: c.plugin, SnapshotID: snapshotID, } if _, err := c.grpcClient.DeleteSnapshot(context.Background(), req); err != nil { return fromGRPCError(err) } return nil } func (c *VolumeSnapshotterGRPCClient) GetVolumeID(pv runtime.Unstructured) (string, error) { encodedPV, err := json.Marshal(pv.UnstructuredContent()) if err != nil { return "", errors.WithStack(err) } req := &proto.GetVolumeIDRequest{ Plugin: c.plugin, PersistentVolume: encodedPV, } resp, err := c.grpcClient.GetVolumeID(context.Background(), req) if err != nil { return "", fromGRPCError(err) } return resp.VolumeID, nil } func (c *VolumeSnapshotterGRPCClient) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) { encodedPV, err := json.Marshal(pv.UnstructuredContent()) if err != nil { return nil, errors.WithStack(err) } req := &proto.SetVolumeIDRequest{ Plugin: c.plugin, PersistentVolume: encodedPV, VolumeID: volumeID, } resp, err := c.grpcClient.SetVolumeID(context.Background(), req) if err != nil { return nil, fromGRPCError(err) } var updatedPV unstructured.Unstructured if err := json.Unmarshal(resp.PersistentVolume, &updatedPV); err != nil { return nil, errors.WithStack(err) } return &updatedPV, nil }
{ return &VolumeSnapshotterGRPCClient{ clientBase: base, grpcClient: proto.NewVolumeSnapshotterClient(clientConn), } }
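A small stand-alone sketch (not part of the plugin framework) of the IOPS convention the client methods above follow: a nil *int64 is sent as 0 on the wire, and a zero coming back is surfaced as nil. The helper names are illustrative only.

package main

import "fmt"

// iopsToWire mirrors CreateVolumeFromSnapshot: nil means "no provisioned IOPS" and is sent as 0.
func iopsToWire(iops *int64) int64 {
	if iops == nil {
		return 0
	}
	return *iops
}

// iopsFromWire mirrors GetVolumeInfo: a zero value on the wire is reported as nil.
func iopsFromWire(v int64) *int64 {
	if v == 0 {
		return nil
	}
	return &v
}

func main() {
	fmt.Println(iopsToWire(nil))        // 0
	fmt.Println(iopsFromWire(0) == nil) // true
	fmt.Println(*iopsFromWire(3000))    // 3000
}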
msgspec_v5.py
"""Message schemas for message spec version 5""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from jsonschema import Draft4Validator, ValidationError import re protocol_version = (5, 1) # These fragments will be wrapped in the boilerplate for a valid JSON schema. # We also add a default 'required' containing all keys. schema_fragments = {} def get_msg_content_validator(msg_type, version_minor): frag = schema_fragments[msg_type] schema = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "{} message contents schema".format(msg_type), "type": "object", "properties": {}, "additionalProperties": version_minor > protocol_version[1], } schema.update(frag) if "required" not in schema: # Require all keys by default schema["required"] = sorted(schema["properties"].keys()) return Draft4Validator(schema) header_part = {"type": "object", "properties": { "msg_id": {"type": "string"}, "username": {"type": "string"}, "session": {"type": "string"}, # TODO - this is parsed to a datetime before we get it: "date": {}, #{"type": "string"}, "msg_type": {"type": "string"}, "version": {"type": "string"}, }, "required": ["msg_id", "username", "session", "date", "msg_type", "version"]} msg_schema = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "Jupyter message structure schema", "type": "object", "properties": { "header": header_part, "parent_header": {"type": "object"}, "metadata": {"type": "object"}, "content": {"type": "object"}, # Checked separately "buffers": {"type": "array"} }, "required": ["header", "parent_header", "metadata", "content"], } msg_structure_validator = Draft4Validator(msg_schema) def get_error_reply_validator(version_minor): return Draft4Validator({ "$schema": "http://json-schema.org/draft-04/schema#", "description": "Jupyter 'error' reply schema", "type": "object", "properties": { "status": {"const": "error"}, "ename": {"type": "string"}, "evalue": {"type": "string"}, "traceback": {"type": "array", "items": {"type": "string"}}, }, "required": ["status", "ename", "evalue", "traceback"], "additionalProperties": version_minor > protocol_version[1] }) def get_abort_reply_validator(version_minor): return Draft4Validator({ "$schema": "http://json-schema.org/draft-04/schema#", "description": "Jupyter 'abort' reply schema", "type": "object", "properties": { "status": {"const": "error"}, "ename": {"type": "string"}, "evalue": {"type": "string"}, "traceback": {"type": "list", "items": {"type": "string"}}, }, "required": ["status", "ename", "evalue", "traceback"], "additionalProperties": version_minor > protocol_version[1] })
'connect_reply', 'comm_info_reply', 'kernel_info_reply', 'shutdown_reply', 'interrupt_reply', } def validate_message(msg, msg_type=None, parent_id=None): msg_structure_validator.validate(msg) msg_version_s = msg['header']['version'] m = re.match(r'(\d+)\.(\d+)', msg_version_s) if not m: raise ValidationError("Version {} not like 'x.y'".format(msg_version_s)) version_minor = int(m.group(2)) if msg_type is not None: if msg['header']['msg_type'] != msg_type: raise ValidationError("Message type {!r} != {!r}".format( msg['header']['msg_type'], msg_type )) else: msg_type = msg['header']['msg_type'] # Check for unexpected fields, unless it's a newer protocol version if version_minor <= protocol_version[1]: unx_top = set(msg) - set(msg_schema['properties']) if unx_top: raise ValidationError("Unexpected keys: {}".format(unx_top)) unx_header = set(msg['header']) - set(header_part['properties']) if unx_header: raise ValidationError("Unexpected keys in header: {}".format(unx_header)) # Check the parent id if 'reply' in msg_type and parent_id and msg['parent_header']['msg_id'] != parent_id: raise ValidationError("Parent header does not match expected") if msg_type in reply_msgs_using_status: # Most _reply messages have common 'error' and 'abort' structures try: status = msg['content']['status'] except KeyError as e: raise ValidationError(str(e)) if status == 'error': content_vdor = get_error_reply_validator(version_minor) elif status == 'abort': content_vdor = get_abort_reply_validator(version_minor) elif status == 'ok': content_vdor = get_msg_content_validator(msg_type, version_minor) else: raise ValidationError( "status {!r} should be ok/error/abort".format(status)) else: content_vdor = get_msg_content_validator(msg_type, version_minor) content_vdor.validate(msg['content']) # Shell messages ---------------------------------------------- schema_fragments['execute_request'] = {"properties": { "code": {"type": "string"}, "silent": {"type": "boolean"}, "store_history": {"type": "boolean"}, "user_expressions": {"type": "object"}, "allow_stdin": {"type": "boolean"}, "stop_on_error": {"type": "boolean"} }} schema_fragments['execute_reply'] = {"properties": { # statuses 'error' and 'abort' change the structure, so check separately "status": {"const": "ok"}, "execution_count": {"type": "number"}, "payload": {"type": "array", "items": { "type": "object", "properties": {"source": {"type": "string"}}, "additionalProperties": True, }}, "user_expressions": {"type": "object"}, }, "required": ["status", "execution_count"]} schema_fragments['inspect_request'] = {"properties": { "code": {"type": "string"}, "cursor_pos": {"type": "number"}, "detail_level": {"enum": [0, 1]}, }} schema_fragments['inspect_reply'] = {"properties": { # statuses 'error' and 'abort' change the structure, so check separately "status": {"const": "ok"}, "found": {"type": "boolean"}, "data": {"type": "object"}, "metadata": {"type": "object"}, }} schema_fragments['complete_request'] = {"properties": { "code": {"type": "string"}, "cursor_pos": {"type": "number"}, }} schema_fragments['complete_reply'] = {"properties": { # statuses 'error' and 'abort' change the structure, so check separately "status": {"const": "ok"}, "matches": {"type": "array", "items": {"type": "string"}}, "cursor_start": {"type": "number"}, "cursor_end": {"type": "number"}, "metadata": {"type": "object"}, }} schema_fragments['history_request'] = {"properties": { 'output' : {"type": "boolean"}, 'raw' : {"type": "boolean"}, 'hist_access_type' : {"enum": ["range", "tail", "search"]}, 'session' :
{"type": "number"}, 'start' : {"type": "number"}, 'stop' : {"type": "number"}, 'n' : {"type": "number"}, 'pattern' : {"type": "string"}, 'unique' : {"type": "boolean"}, }, "required": ["output", "raw", "hist_access_type"]} schema_fragments['history_reply'] = {"properties": { "status": {"const": "ok"}, "history": {"type": "array", "items": { "minItems": 3, "maxItems": 3 }} }} schema_fragments['is_complete_request'] = {"properties": { "code": {"type": "string"}, }} schema_fragments['is_complete_reply'] = {"properties": { "status": {"enum": ["complete", "incomplete", "invalid", "unknown"]}, "indent": {"type": "string"} }, "required": ["status"]} # NB connect_request is deprecated schema_fragments["connect_request"] = {"properties": {}} schema_fragments["connect_reply"] = {"properties": { "shell_port": {"type": "number"}, "iopub_port": {"type": "number"}, "stdin_port": {"type": "number"}, "hb_port": {"type": "number"}, "control_port": {"type": "number"}, }} schema_fragments["comm_info_request"] = {"properties": { "target_name": {"type": "string"}, }, "required": []} schema_fragments["comm_info_reply"] = {"properties": { # statuses 'error' and 'abort' change the structure, so check separately "status": {"const": "ok"}, "comms": {"type": "object"}, }} schema_fragments["kernel_info_request"] = {"properties": {}} schema_fragments["kernel_info_reply"] = {"properties": { # statuses 'error' and 'abort' change the structure, so check separately "status": {"const": "ok"}, "protocol_version": {"type": "string"}, "implementation": {"type": "string"}, "implementation_version": {"type": "string"}, "language_info": {"type": "object"}, "banner": {"type": "string"}, "debugger": {"type": "boolean"}, "help_links": {"type": "array", "items": {"type": "object", "properties": { "text": {"type": "string"}, "url": {"type": "string"} }}} }, "required": ["status", "protocol_version", "implementation", "language_info", "banner"]} schema_fragments['shutdown_request'] = {"properties": { "restart": {"type": "boolean"}, }} schema_fragments['shutdown_reply'] = {"properties": { # statuses 'error' and 'abort' change the structure, so check separately "status": {"const": "ok"}, "restart": {"type": "boolean"}, }} schema_fragments["interrupt_request"] = {"properties": {}} schema_fragments["interrupt_reply"] = {"properties": { # statuses 'error' and 'abort' change the structure, so check separately "status": {"const": "ok"}, }} # IOPub messages ---------------------------------------------- mime_data = { "type":"object", "patternProperties": {r'^[\w\-\+\.]+/[\w\-\+\.]+$': {}}, "additionalProperties": False, } schema_fragments['stream'] = {"properties": { "name": {"enum": ["stdout", "stderr"]}, "text": {"type": "string"}, }} schema_fragments['display_data'] = {"properties": { "data": mime_data, "metadata": {"type": "object"}, "transient": {"type": "object"}, }, "required": ["data", "metadata"]} schema_fragments['update_display_data'] = {"properties": { "data": mime_data, "metadata": {"type": "object"}, "transient": {"type": "object"}, }} schema_fragments['execute_result'] = {"properties": { "execution_count": {"type": "number"}, "data": mime_data, "metadata": {"type": "object"}, "transient": {"type": "object"}, }, "required": ["execution_count", "data", "metadata"]} schema_fragments['clear_output'] = {"properties": { "wait": {"type": "boolean"}, }} schema_fragments['execute_input'] = {"properties": { "code": {"type": "string"}, "execution_count": {"type": "number"}, }} schema_fragments['error'] = {"properties": { "ename": {"type": 
"string"}, "evalue": {"type": "string"}, "traceback": {"type": "array", "items": {"type": "string"}}, }} schema_fragments['status'] = {"properties": { "execution_state": {"enum": ["busy", "idle", "starting"]}, }} # Stdin messages --------------------------------------------- schema_fragments["input_request"] = {"properties": { "prompt": {"type": "string"}, "password": {"type": "number"}, }} schema_fragments["input_reply"] = {"properties": { "value": {"type": "string"}, }}
reply_msgs_using_status = { 'execute_reply', 'inspect_reply', 'complete_reply', 'history_reply',
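An illustrative driver for the validators above, assuming this module is importable as written; the message is hand-built and its field values are placeholders.

if __name__ == "__main__":
    # Hand-built kernel_info_reply-shaped message; values are placeholders.
    sample = {
        "header": {
            "msg_id": "1", "username": "tester", "session": "s1",
            "date": "2020-01-01T00:00:00Z", "msg_type": "kernel_info_reply",
            "version": "5.1",
        },
        "parent_header": {"msg_id": "0"},
        "metadata": {},
        "content": {
            "status": "ok",
            "protocol_version": "5.1",
            "implementation": "demo",
            "implementation_version": "0.1",
            "language_info": {"name": "python"},
            "banner": "demo kernel",
        },
    }
    # Raises jsonschema.ValidationError if the structure or content is malformed.
    validate_message(sample, msg_type="kernel_info_reply", parent_id="0")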
__main__.py
from os import path from unittest import TestSuite, TestLoader, TextTestRunner import sys if __name__ == "__main__": # Because the project is structured differently than # any tooling expects, we need to modify the python # path during runtime (or before) to get it to # properly import plugins and other code correctly. project_root_directory = path.dirname(path.dirname(__file__)) sys.path.append(path.join(project_root_directory, "plugins")) sys.path.append(path.join(project_root_directory))
if not run_result.wasSuccessful(): sys.exit(1)
discovered_tests = TestLoader().discover(path.dirname(__file__)) run_result = TextTestRunner().run(discovered_tests)
interface_rpc.py
#!/usr/bin/env python3 # Copyright (c) 2020-2021 The Eleccoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests some generic aspects of the RPC interface.""" import os from test_framework.authproxy import JSONRPCException from test_framework.test_framework import EleccoinTestFramework from test_framework.util import assert_equal, assert_greater_than_or_equal def expect_http_status(expected_http_status, expected_rpc_code, fcn, *args): try: fcn(*args) raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code) except JSONRPCException as exc: assert_equal(exc.error["code"], expected_rpc_code) assert_equal(exc.http_status, expected_http_status) class RPCInterfaceTest(EleccoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.supports_cli = False def test_getrpcinfo(self): self.log.info("Testing getrpcinfo...") info = self.nodes[0].getrpcinfo() assert_equal(len(info['active_commands']), 1) command = info['active_commands'][0] assert_equal(command['method'], 'getrpcinfo') assert_greater_than_or_equal(command['duration'], 0) assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log')) def test_batch_request(self): self.log.info("Testing basic JSON-RPC batch request...") results = self.nodes[0].batch([ # A basic request that will work fine. {"method": "getblockcount", "id": 1}, # Request that will fail. The whole batch request should still # work fine. {"method": "invalidmethod", "id": 2}, # Another call that should succeed. {"method": "getblockhash", "id": 3, "params": [0]}, ]) result_by_id = {} for res in results: result_by_id[res["id"]] = res assert_equal(result_by_id[1]['error'], None) assert_equal(result_by_id[1]['result'], 0) assert_equal(result_by_id[2]['error']['code'], -32601) assert_equal(result_by_id[2]['result'], None) assert_equal(result_by_id[3]['error'], None) assert result_by_id[3]['result'] is not None def test_http_status_codes(self):
def run_test(self): self.test_getrpcinfo() self.test_batch_request() self.test_http_status_codes() if __name__ == '__main__': RPCInterfaceTest().main()
self.log.info("Testing HTTP status codes for JSON-RPC requests...") expect_http_status(404, -32601, self.nodes[0].invalidmethod) expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
codec.go
package types import ( "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/ibc/04-channel/exported" ) // RegisterInterfaces register the ibc channel submodule interfaces to protobuf // Any. func RegisterInterfaces(registry codectypes.InterfaceRegistry)
// SubModuleCdc references the global x/ibc/04-channel module codec. Note, the codec should // ONLY be used in certain instances of tests and for JSON encoding. // // The actual codec used for serialization should be provided to x/ibc/04-channel and // defined at the application level. var SubModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
{ registry.RegisterInterface( "cosmos_sdk.ibc.v1.channel.ChannelI", (*exported.ChannelI)(nil), ) registry.RegisterInterface( "cosmos_sdk.ibc.v1.channel.CounterpartyI", (*exported.CounterpartyI)(nil), ) registry.RegisterInterface( "cosmos_sdk.ibc.v1.channel.PacketI", (*exported.PacketI)(nil), ) registry.RegisterImplementations( (*exported.ChannelI)(nil), &Channel{}, ) registry.RegisterImplementations( (*exported.CounterpartyI)(nil), &Counterparty{}, ) registry.RegisterImplementations( (*exported.PacketI)(nil), &Packet{}, ) registry.RegisterImplementations( (*sdk.Msg)(nil), &MsgChannelOpenInit{}, &MsgChannelOpenTry{}, &MsgChannelOpenAck{}, &MsgChannelOpenConfirm{}, &MsgChannelCloseInit{}, &MsgChannelCloseConfirm{}, &MsgRecvPacket{}, &MsgAcknowledgement{}, &MsgTimeout{}, &MsgTimeoutOnClose{}, ) }
relation_item_request_builder.go
package item import ( ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go" i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph/odataerrors" id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph/termstore" i1bfc14f357f1dc26c80d21155cf534adb82156357cd22253e756653383fabe88 "github.com/microsoftgraph/msgraph-sdk-go/sites/item/termstores/item/groups/item/sets/item/terms/item/relations/item/fromterm" ie70b65fd96449c0bf8e798596cff3f95c2b281ae8695948474bbcc0e3a1b7835 "github.com/microsoftgraph/msgraph-sdk-go/sites/item/termstores/item/groups/item/sets/item/terms/item/relations/item/set" iebe58f45787a24eacc56a30918f7ad7de928f47292674a7472fbd8f0f5234cf6 "github.com/microsoftgraph/msgraph-sdk-go/sites/item/termstores/item/groups/item/sets/item/terms/item/relations/item/toterm" ) // RelationItemRequestBuilder provides operations to manage the relations property of the microsoft.graph.termStore.term entity. type RelationItemRequestBuilder struct { // Path parameters for the request pathParameters map[string]string; // The request adapter to use to execute the requests. requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter; // Url template to use to build the URL for the current request builder urlTemplate string; } // RelationItemRequestBuilderDeleteOptions options for Delete type RelationItemRequestBuilderDeleteOptions struct { // Request headers H map[string]string; // Request options O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption; // Response handler to use in place of the default response handling provided by the core service ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler; } // RelationItemRequestBuilderGetOptions options for Get type RelationItemRequestBuilderGetOptions struct { // Request headers H map[string]string; // Request options O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption; // Request query parameters Q *RelationItemRequestBuilderGetQueryParameters; // Response handler to use in place of the default response handling provided by the core service ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler; } // RelationItemRequestBuilderGetQueryParameters to indicate which terms are related to the current term as either pinned or reused. type RelationItemRequestBuilderGetQueryParameters struct { // Expand related entities Expand []string; // Select properties to be returned Select []string; } // RelationItemRequestBuilderPatchOptions options for Patch type RelationItemRequestBuilderPatchOptions struct { // Body id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.Relationable; // Request headers H map[string]string; // Request options O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption; // Response handler to use in place of the default response handling provided by the core service ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler; } // NewRelationItemRequestBuilderInternal instantiates a new RelationItemRequestBuilder and sets the default values. 
func NewRelationItemRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*RelationItemRequestBuilder) { m := &RelationItemRequestBuilder{ } m.urlTemplate = "{+baseurl}/sites/{site_id}/termStores/{store_id}/groups/{group_id}/sets/{set_id}/terms/{term_id}/relations/{relation_id}{?select,expand}"; urlTplParams := make(map[string]string) for idx, item := range pathParameters { urlTplParams[idx] = item } m.pathParameters = urlTplParams; m.requestAdapter = requestAdapter; return m } // NewRelationItemRequestBuilder instantiates a new RelationItemRequestBuilder and sets the default values. func
(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*RelationItemRequestBuilder) { urlParams := make(map[string]string) urlParams["request-raw-url"] = rawUrl return NewRelationItemRequestBuilderInternal(urlParams, requestAdapter) } // CreateDeleteRequestInformation delete navigation property relations for sites func (m *RelationItemRequestBuilder) CreateDeleteRequestInformation(options *RelationItemRequestBuilderDeleteOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) { requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.DELETE if options != nil && options.H != nil { requestInfo.Headers = options.H } if options != nil && len(options.O) != 0 { err := requestInfo.AddRequestOptions(options.O...) if err != nil { return nil, err } } return requestInfo, nil } // CreateGetRequestInformation to indicate which terms are related to the current term as either pinned or reused. func (m *RelationItemRequestBuilder) CreateGetRequestInformation(options *RelationItemRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) { requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET if options != nil && options.Q != nil { requestInfo.AddQueryParameters(*(options.Q)) } if options != nil && options.H != nil { requestInfo.Headers = options.H } if options != nil && len(options.O) != 0 { err := requestInfo.AddRequestOptions(options.O...) if err != nil { return nil, err } } return requestInfo, nil } // CreatePatchRequestInformation update the navigation property relations in sites func (m *RelationItemRequestBuilder) CreatePatchRequestInformation(options *RelationItemRequestBuilderPatchOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) { requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.PATCH requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", options.Body) if options != nil && options.H != nil { requestInfo.Headers = options.H } if options != nil && len(options.O) != 0 { err := requestInfo.AddRequestOptions(options.O...) 
if err != nil { return nil, err } } return requestInfo, nil } // Delete delete navigation property relations for sites func (m *RelationItemRequestBuilder) Delete(options *RelationItemRequestBuilderDeleteOptions)(error) { requestInfo, err := m.CreateDeleteRequestInformation(options); if err != nil { return err } errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings { "4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, "5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, } err = m.requestAdapter.SendNoContentAsync(requestInfo, nil, errorMapping) if err != nil { return err } return nil } func (m *RelationItemRequestBuilder) FromTerm()(*i1bfc14f357f1dc26c80d21155cf534adb82156357cd22253e756653383fabe88.FromTermRequestBuilder) { return i1bfc14f357f1dc26c80d21155cf534adb82156357cd22253e756653383fabe88.NewFromTermRequestBuilderInternal(m.pathParameters, m.requestAdapter); } // Get to indicate which terms are related to the current term as either pinned or reused. func (m *RelationItemRequestBuilder) Get(options *RelationItemRequestBuilderGetOptions)(id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.Relationable, error) { requestInfo, err := m.CreateGetRequestInformation(options); if err != nil { return nil, err } errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings { "4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, "5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, } res, err := m.requestAdapter.SendAsync(requestInfo, id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.CreateRelationFromDiscriminatorValue, nil, errorMapping) if err != nil { return nil, err } return res.(id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.Relationable), nil } // Patch update the navigation property relations in sites func (m *RelationItemRequestBuilder) Patch(options *RelationItemRequestBuilderPatchOptions)(error) { requestInfo, err := m.CreatePatchRequestInformation(options); if err != nil { return err } errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings { "4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, "5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, } err = m.requestAdapter.SendNoContentAsync(requestInfo, nil, errorMapping) if err != nil { return err } return nil } func (m *RelationItemRequestBuilder) Set()(*ie70b65fd96449c0bf8e798596cff3f95c2b281ae8695948474bbcc0e3a1b7835.SetRequestBuilder) { return ie70b65fd96449c0bf8e798596cff3f95c2b281ae8695948474bbcc0e3a1b7835.NewSetRequestBuilderInternal(m.pathParameters, m.requestAdapter); } func (m *RelationItemRequestBuilder) ToTerm()(*iebe58f45787a24eacc56a30918f7ad7de928f47292674a7472fbd8f0f5234cf6.ToTermRequestBuilder) { return iebe58f45787a24eacc56a30918f7ad7de928f47292674a7472fbd8f0f5234cf6.NewToTermRequestBuilderInternal(m.pathParameters, m.requestAdapter); }
NewRelationItemRequestBuilder
config.rs
use crate::{ resources::{ TextureHandle, MaterialHandle, TextureType }, graphics::{ RendererTextureHandle, RendererMaterialHandle }, ecs::{ InputComponent, AudioManagerComponent, DeferredUpdateComponent, TimeComponent } }; use pill_core::PillSlotMapKeyData; use std::{num::NonZeroU32, any::TypeId}; use lazy_static::lazy_static; // --- General --- pub const PANIC_ON_GAME_ERRORS: bool = true; // --- ECS --- pub const MAX_ENTITIES: usize = 1000; pub const MAX_CONCURRENT_2D_SOUNDS: usize = 10; pub const MAX_CONCURRENT_3D_SOUNDS: usize = 10; pub const MAX_CAMERAS: usize = 10; // --- Resources --- pub const RESOURCE_VERSION_LIMIT: usize = 255; pub const MAX_PIPELINES: usize = 10; pub const MAX_TEXTURES: usize = 10; pub const MAX_MATERIALS: usize = 10; pub const MAX_MESHES: usize = 10; pub const MAX_SOUNDS: usize = 10; // Convention: All resource names starting with "PillDefault" are restricted, cannot be added and removed from game pub const DEFAULT_RESOURCE_PREFIX: &str = "PillDefault"; pub const DEFAULT_COLOR_TEXTURE_NAME: &str = "PillDefaultColor"; pub const DEFAULT_NORMAL_TEXTURE_NAME: &str = "PillDefaultNormal"; pub const DEFAULT_MATERIAL_NAME: &str = "PillDefaultMaterial"; // Master material pub const MASTER_SHADER_COLOR_TEXTURE_SLOT: &str = "Color"; pub const MASTER_SHADER_NORMAL_TEXTURE_SLOT: &str = "Normal"; pub const MASTER_SHADER_TINT_PARAMETER_SLOT: &str = "Tint"; pub const MASTER_SHADER_SPECULARITY_PARAMETER_SLOT: &str = "Specularity"; // Render queue key pub type RenderQueueKeyType = u64; // Defines size of renderer queue key (Should be u8, u16, u32, or u64) pub const RENDER_QUEUE_KEY_ITEMS_LENGTH: [RenderQueueKeyType; 5] = [5, 8, 8, 8, 8]; // Defines size of next render queue key parts (bits from left to right) // Indices of render queue key parts (maps RENDER_QUEUE_KEY_ITEMS_LENGTH) pub const RENDER_QUEUE_KEY_ORDER_IDX: u8 = 0; pub const RENDER_QUEUE_KEY_MATERIAL_INDEX_IDX: u8 = 1; pub const RENDER_QUEUE_KEY_MATERIAL_VERSION_IDX: u8 = 2; pub const RENDER_QUEUE_KEY_MESH_INDEX_IDX: u8 = 3; pub const RENDER_QUEUE_KEY_MESH_VERSION_IDX: u8 = 4; // Default resource handle - Color texture pub const DEFAULT_COLOR_TEXTURE_HANDLE: TextureHandle = TextureHandle { 0: PillSlotMapKeyData { index: 1, version: unsafe { std::num::NonZeroU32::new_unchecked(1) } } }; pub const DEFAULT_RENDERER_COLOR_TEXTURE_HANDLE: RendererTextureHandle = RendererTextureHandle { 0: PillSlotMapKeyData { index: 1, version: unsafe { std::num::NonZeroU32::new_unchecked(1) } } }; // Default resource handle - Normal texture pub const DEFAULT_NORMAL_TEXTURE_HANDLE: TextureHandle = TextureHandle { 0: PillSlotMapKeyData { index: 2, version: unsafe { std::num::NonZeroU32::new_unchecked(1) } } }; pub const DEFAULT_RENDERER_NORMAL_TEXTURE_HANDLE: RendererTextureHandle = RendererTextureHandle { 0: PillSlotMapKeyData { index: 2, version: unsafe { std::num::NonZeroU32::new_unchecked(1) } } }; pub fn get_default_texture_handles(texture_type: TextureType) -> (TextureHandle, RendererTextureHandle) { match texture_type { TextureType::Color => (DEFAULT_COLOR_TEXTURE_HANDLE, DEFAULT_RENDERER_COLOR_TEXTURE_HANDLE), TextureType::Normal => (DEFAULT_NORMAL_TEXTURE_HANDLE, DEFAULT_RENDERER_NORMAL_TEXTURE_HANDLE), } } // Default resource handle - Material pub const DEFAULT_MATERIAL_HANDLE: MaterialHandle = MaterialHandle { 0: PillSlotMapKeyData { index: 1, version: unsafe { std::num::NonZeroU32::new_unchecked(1) } } }; pub const DEFAULT_RENDERER_MATERIAL_HANDLE: RendererMaterialHandle = RendererMaterialHandle { 0: PillSlotMapKeyData { index: 
1, version: unsafe { std::num::NonZeroU32::new_unchecked(1) } } }; pub fn get_default_material_handles() -> (MaterialHandle, RendererMaterialHandle)
lazy_static! { pub static ref ENGINE_GLOBAL_COMPONENTS: Vec<TypeId> = vec!( TypeId::of::<InputComponent>(), TypeId::of::<TimeComponent>(), TypeId::of::<AudioManagerComponent>(), TypeId::of::<DeferredUpdateComponent>(), ); }
{ (DEFAULT_MATERIAL_HANDLE, DEFAULT_RENDERER_MATERIAL_HANDLE) }
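An illustrative sketch of the packing convention described by RENDER_QUEUE_KEY_ITEMS_LENGTH (parts laid out from the most significant bits down). The engine's real key-building code is not shown in this excerpt, so this helper is an assumption about how those widths are meant to be used.

// Hypothetical helper: packs five parts into a u64 key using the declared bit widths,
// first part in the highest bits.
fn pack_render_queue_key(parts: [u64; 5], widths: [u64; 5]) -> u64 {
    let mut key = 0u64;
    let mut used = 0u64;
    for i in 0..5 {
        used += widths[i];
        // Mask each part to its declared width and place it below the bits already used.
        key |= (parts[i] & ((1u64 << widths[i]) - 1)) << (64 - used);
    }
    key
}

fn main() {
    // order, material index, material version, mesh index, mesh version
    let key = pack_render_queue_key([1, 2, 1, 7, 1], [5, 8, 8, 8, 8]);
    println!("{:064b}", key);
}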
binder.go
package echo import ( "database/sql" "errors" "fmt" "reflect" "strconv" "strings" "time" "unicode" "github.com/admpub/copier" "github.com/admpub/log" "github.com/webx-top/echo/engine" "github.com/webx-top/echo/logger" "github.com/webx-top/echo/param" "github.com/webx-top/tagfast" ) type ( // Binder is the interface that wraps the Bind method. Binder interface { Bind(interface{}, Context, ...FormDataFilter) error BindAndValidate(interface{}, Context, ...FormDataFilter) error MustBind(interface{}, Context, ...FormDataFilter) error MustBindAndValidate(interface{}, Context, ...FormDataFilter) error } binder struct { decoders map[string]func(interface{}, Context, ...FormDataFilter) error } ) func NewBinder(e *Echo) Binder { return &binder{ decoders: DefaultBinderDecoders, } } func (b *binder) MustBind(i interface{}, c Context, filter ...FormDataFilter) error { contentType := c.Request().Header().Get(HeaderContentType) contentType = strings.ToLower(strings.TrimSpace(strings.SplitN(contentType, `;`, 2)[0])) if decoder, ok := b.decoders[contentType]; ok { return decoder(i, c, filter...) } if decoder, ok := b.decoders[`*`]; ok { return decoder(i, c, filter...) } return ErrUnsupportedMediaType } func (b *binder) MustBindAndValidate(i interface{}, c Context, filter ...FormDataFilter) (err error) { err = b.MustBind(i, c, filter...) if err != nil { return } if before, ok := i.(BeforeValidate); ok { if err = before.BeforeValidate(c); err != nil { return } } if err = c.Validate(i).Error(); err != nil { return err } if after, ok := i.(AfterValidate); ok { err = after.AfterValidate(c) } return } func (b *binder) Bind(i interface{}, c Context, filter ...FormDataFilter) (err error) { err = b.MustBind(i, c, filter...) if err == ErrUnsupportedMediaType { err = nil } return } func (b *binder) BindAndValidate(i interface{}, c Context, filter ...FormDataFilter) (err error) { err = b.MustBind(i, c, filter...) 
if err != nil { if err == ErrUnsupportedMediaType { return nil } return } if before, ok := i.(BeforeValidate); ok { if err = before.BeforeValidate(c); err != nil { return } } if err = c.Validate(i).Error(); err != nil { return err } if after, ok := i.(AfterValidate); ok { err = after.AfterValidate(c) } return } func (b *binder) SetDecoders(decoders map[string]func(interface{}, Context, ...FormDataFilter) error) { b.decoders = decoders } func (b *binder) AddDecoder(mime string, decoder func(interface{}, Context, ...FormDataFilter) error) { b.decoders[mime] = decoder } // FormNames splits a form key such as user[name][test] into its name parts func FormNames(s string) []string { var res []string hasLeft := false hasRight := true var val []rune for _, r := range s { if r == '[' { if hasRight { res = append(res, string(val)) val = []rune{} } hasLeft = true hasRight = false continue } if r == ']' { if hasLeft { res = append(res, string(val)) val = []rune{} hasLeft = false } continue } val = append(val, r) } if len(val) > 0 { res = append(res, string(val)) } return res } // NamedStructMap automatically maps form/map values onto a struct func NamedStructMap(e *Echo, m interface{}, data map[string][]string, topName string, filters ...FormDataFilter) error { vc := reflect.ValueOf(m) tc := reflect.TypeOf(m) switch tc.Kind() { case reflect.Struct: case reflect.Ptr: vc = vc.Elem() tc = tc.Elem() default: return errors.New(`binder: unsupported type ` + tc.Kind().String()) } keyNormalizer := strings.Title if bkn, ok := m.(BinderKeyNormalizer); ok { keyNormalizer = bkn.BinderKeyNormalizer } for key, values := range data { if len(topName) > 0 { if !strings.HasPrefix(key, topName) { continue } key = key[len(topName)+1:] } names := strings.Split(key, `.`) var propPath, checkPath string if len(names) == 1 && strings.HasSuffix(key, `]`) { key = strings.TrimSuffix(key, `[]`) names = FormNames(key) } err := parseFormItem(keyNormalizer, e, m, tc, vc, names, propPath, checkPath, key, values, filters...)
if err == nil { continue } if err == ErrBreak { err = nil break } return err } return nil } type BinderKeyNormalizer interface { BinderKeyNormalizer(string) string } func parseFormItem(keyNormalizer func(string) string, e *Echo, m interface{}, typev reflect.Type, value reflect.Value, names []string, propPath string, checkPath string, key string, values []string, filters ...FormDataFilter) error { length := len(names) vc := value tc := typev isMap := value.Kind() == reflect.Map for i, name := range names { if !isMap { name = keyNormalizer(name) } if i > 0 { propPath += `.` checkPath += `.` } propPath += name // check vk := checkPath + name // example: Name or *.Name or *.*.Password for _, filter := range filters { vk, values = filter(vk, values) if len(vk) == 0 || len(values) == 0 { e.Logger().Debugf(`binder: skip %v%v (%v) => %v`, checkPath, name, propPath, values) return nil } } checkPath += `*` //最后一个元素 if i == length-1 { err := setStructField(e.Logger(), tc, vc, key, name, value, typev, values) if err == nil { continue } if err == ErrBreak { return nil } return err } //不是最后一个元素 switch value.Kind() { case reflect.Slice: index, err := strconv.Atoi(name) if err != nil { e.Logger().Warnf(`binder: can not convert index number %T#%v -> %v`, m, propPath, err.Error()) return nil } if e.FormSliceMaxIndex > 0 && index > e.FormSliceMaxIndex { return fmt.Errorf(`%w, greater than %d`, ErrSliceIndexTooLarge, e.FormSliceMaxIndex) } if value.IsNil() { value.Set(reflect.MakeSlice(value.Type(), 1, 1)) } itemT := value.Type() if itemT.Kind() == reflect.Ptr { itemT = itemT.Elem() value = value.Elem() } itemT = itemT.Elem() if index >= value.Len() { for i := value.Len(); i <= index; i++ { tempv := reflect.New(itemT) value.Set(reflect.Append(value, tempv.Elem())) } } newV := value.Index(index) newT := newV.Type() switch newT.Kind() { case reflect.Struct: case reflect.Ptr: newT = newT.Elem() if newV.IsNil() { newV.Set(reflect.New(newT)) } newV = newV.Elem() default: return errors.New(`binder: unsupported type ` + tc.Kind().String()) } return parseFormItem(keyNormalizer, e, m, newT, newV, names[i+1:], propPath+`.`, checkPath+`.`, key, values, filters...) case reflect.Map: if value.IsNil() { value.Set(reflect.MakeMap(value.Type())) } itemT := value.Type() if itemT.Kind() == reflect.Ptr { itemT = itemT.Elem() value = value.Elem() } itemT = itemT.Elem() index := reflect.ValueOf(name) newV := value.MapIndex(index) if !newV.IsValid() { newV = reflect.New(itemT).Elem() value.SetMapIndex(index, newV) } newT := newV.Type() switch newT.Kind() { case reflect.Struct: case reflect.Ptr: newT = newT.Elem() if newV.IsNil() { newV = reflect.New(newT) value.SetMapIndex(index, newV) } newV = newV.Elem() default: return errors.New(`binder: unsupported type ` + tc.Kind().String()) } return parseFormItem(keyNormalizer, e, m, newT, newV, names[i+1:], propPath+`.`, checkPath+`.`, key, values, filters...) 
case reflect.Struct: f, _ := typev.FieldByName(name) if tagfast.Value(tc, f, `form_options`) == `-` { return nil } value = value.FieldByName(name) if !value.IsValid() { e.Logger().Debugf(`binder: %T#%v value is not valid %v`, m, propPath, value) return nil } if !value.CanSet() { e.Logger().Warnf(`binder: can not set %T#%v -> %v`, m, propPath, value.Interface()) return nil } if value.Kind() == reflect.Ptr { if value.IsNil() { value.Set(reflect.New(value.Type().Elem())) } value = value.Elem() } switch value.Kind() { case reflect.Struct: case reflect.Slice, reflect.Map: return parseFormItem(keyNormalizer, e, m, value.Type(), value, names[i+1:], propPath+`.`, checkPath+`.`, key, values, filters...) default: e.Logger().Warnf(`binder: arg error, value %T#%v kind is %v`, m, propPath, value.Kind()) return nil } typev = value.Type() f, _ = typev.FieldByName(name) if tagfast.Value(tc, f, `form_options`) == `-` { return nil } default: e.Logger().Warnf(`binder: arg error, value kind is %v`, value.Kind()) return nil } } return nil } var ( ErrBreak = errors.New("[BREAK]") ErrContinue = errors.New("[CONTINUE]") ErrExit = errors.New("[EXIT]") ErrReturn = errors.New("[RETURN]") ErrSliceIndexTooLarge = errors.New("the slice index value of the form field is too large") ) func SafeGetFieldByName(parentT reflect.Type, parentV reflect.Value, name string, value reflect.Value) (v reflect.Value) { defer func() { if r := recover(); r != nil { switch fmt.Sprint(r) { case `reflect: indirection through nil pointer to embedded struct`: copier.InitNilFields(parentT, parentV, ``, copier.AllNilFields) v = value.FieldByName(name) default: panic(fmt.Sprintf(`%v: %s (%v)`, r, name, value.Kind())) } } }() v = value.FieldByName(name) return } func setStructField(logger logger.Logger, parentT reflect.Type, parentV reflect.Value, k string, name string, value reflect.Value, typev reflect.Type, values []string) error { switch value.Kind() { case reflect.Map: if value.IsNil() { value.Set(reflect.MakeMap(value.Type())) } value = reflect.Indirect(value) index := reflect.ValueOf(name) if oldVal := value.MapIndex(index); oldVal.IsValid() { if oldVal.Type().Kind() == reflect.Interface { oldVal = reflect.Indirect(reflect.ValueOf(oldVal.Interface())) } isPtr := oldVal.CanAddr() if !isPtr { oldVal = reflect.New(oldVal.Type()) } err := setField(logger, parentT, oldVal.Elem(), reflect.StructField{Name: name}, name, values) if err == nil { if !isPtr { oldVal = reflect.Indirect(oldVal) } value.SetMapIndex(index, oldVal) } return err } if len(values) > 1 { value.SetMapIndex(index, reflect.ValueOf(values)) } else { value.SetMapIndex(index, reflect.ValueOf(values[0])) } return nil } tv := SafeGetFieldByName(parentT, parentV, name, value) if !tv.IsValid() { return ErrBreak } if !tv.CanSet() { logger.Warnf(`binder: can not set %v to %v`, k, tv) return ErrBreak } f, _ := typev.FieldByName(name) if tagfast.Value(parentT, f, `form_options`) == `-` { return ErrBreak } if tv.Kind() == reflect.Ptr { tv.Set(reflect.New(tv.Type().Elem())) tv = tv.Elem() } return setField(logger, parentT, tv, f, name, values) } func setField(logger logger.Logger, parentT reflect.Type, tv reflect.Value, f reflect.StructField, name string, values []string) error { v := values[0] switch kind := tv.Kind(); kind { case reflect.String: switch tagfast.Value(parentT, f, `form_filter`) { case `html`: v = DefaultHTMLFilter(v) default: delimter := tagfast.Value(parentT, f, `form_delimiter`) if len(delimter) > 0 { v = strings.Join(values, delimter) } } tv.Set(reflect.ValueOf(v)) case 
reflect.Bool: ok, _ := strconv.ParseBool(v) tv.Set(reflect.ValueOf(ok)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: var l interface{} dateformat := tagfast.Value(parentT, f, `form_format`) if len(dateformat) > 0 { t, err := time.ParseInLocation(dateformat, v, time.Local) if err != nil { logger.Warnf(`binder: arg %v as int: %v`, v, err) l = int(0) } else { l = int(t.Unix()) } } else { x, err := strconv.Atoi(v) if err != nil { logger.Warnf(`binder: arg %v as int: %v`, v, err) } l = x } tv.Set(reflect.ValueOf(l)) case reflect.Int64: var l interface{} switch tv.Interface().(type) { case time.Duration: l, _ = time.ParseDuration(v) default: dateformat := tagfast.Value(parentT, f, `form_format`) if len(dateformat) > 0 { t, err := time.ParseInLocation(dateformat, v, time.Local) if err != nil { logger.Warnf(`binder: arg %v as int64: %v`, v, err) l = int64(0) } else { l = t.Unix() } } else { x, err := strconv.ParseInt(v, 10, 64) if err != nil { logger.Warnf(`binder: arg %v as int64: %v`, v, err) } l = x } } tv.Set(reflect.ValueOf(l)) case reflect.Float32, reflect.Float64: x, err := strconv.ParseFloat(v, 64) if err != nil { logger.Warnf(`binder: arg %v as float64: %v`, v, err) } tv.Set(reflect.ValueOf(x)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: dateformat := tagfast.Value(parentT, f, `form_format`) var x uint64 var bitSize int switch kind { case reflect.Uint8: bitSize = 8 case reflect.Uint16: bitSize = 16 case reflect.Uint32: bitSize = 32 default: bitSize = 64 } if len(dateformat) > 0 { t, err := time.ParseInLocation(dateformat, v, time.Local) if err != nil { logger.Warnf(`binder: arg %v as uint: %v`, v, err) x = uint64(0) } else { x = uint64(t.Unix()) } } else { var err error x, err = strconv.ParseUint(v, 10, bitSize) if err != nil { logger.Warnf(`binder: arg %v as uint: %v`, v, err) } } var l interface{} switch kind { case reflect.Uint: l = uint(x) case reflect.Uint8: l = uint8(x) case reflect.Uint16: l = uint16(x) case reflect.Uint32: l = uint32(x) default: l = x } tv.Set(reflect.ValueOf(l)) case reflect.Struct: switch rawType := tv.Interface().(type) { case FromConversion: if err := rawType.FromString(v); err != nil { logger.Warnf(`binder: struct %v invoke FromString faild`, rawType) } case time.Time: x, err := time.ParseInLocation(`2006-01-02 15:04:05.000 -0700`, v, time.Local) if err != nil { x, err = time.ParseInLocation(`2006-01-02 15:04:05`, v, time.Local) if err != nil { x, err = time.ParseInLocation(`2006-01-02`, v, time.Local) if err != nil { logger.Warnf(`binder: unsupported time format %v, %v`, v, err) } } } tv.Set(reflect.ValueOf(x)) default: if scanner, ok := tv.Addr().Interface().(sql.Scanner); ok { if err := scanner.Scan(values[0]); err != nil { logger.Warnf(`binder: struct %v invoke Scan faild`, rawType) } } } case reflect.Ptr: setField(logger, parentT, tv.Elem(), f, name, values) case reflect.Slice, reflect.Array: setSlice(logger, name, tv, values) default: return ErrBreak } return nil } func setSlice(logger logger.Logger, fieldName string, tv reflect.Value, t []string) { tt := tv.Type().Elem() tk := tt.Kind() if tv.IsNil() { tv.Set(reflect.MakeSlice(tv.Type(), len(t), len(t))) } for i, s := range t { var err error switch tk { case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int8, reflect.Int64: var v int64 v, err = strconv.ParseInt(s, 10, tt.Bits()) if err == nil { tv.Index(i).SetInt(v) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: var v uint64 v, err = strconv.ParseUint(s, 
10, tt.Bits()) if err == nil { tv.Index(i).SetUint(v) } case reflect.Float32, reflect.Float64: var v float64 v, err = strconv.ParseFloat(s, tt.Bits()) if err == nil { tv.Index(i).SetFloat(v) } case reflect.Bool: var v bool v, err = strconv.ParseBool(s) if err == nil { tv.Index(i).SetBool(v) } case reflect.String: tv.Index(i).SetString(s) case reflect.Interface: tv.Index(i).Set(reflect.ValueOf(s)) case reflect.Complex64, reflect.Complex128: // TODO: err = fmt.Errorf(`binder: unsupported slice element type %v`, tk.String()) default: err = fmt.Errorf(`binder: unsupported slice element type %v`, tk.String()) } if err != nil { logger.Warnf(`binder: slice error: %v, %v`, fieldName, err) } } } // FromConversion a struct implementing this interface can be converted from a request parameter type FromConversion interface { FromString(content string) error } // ToConversion a struct implementing this interface can be converted to a template variable // Not Implemented type ToConversion interface { ToString() string } type ( //FieldNameFormatter formats struct field names when struct field values are mapped to the form FieldNameFormatter func(topName, fieldName string) string //FormDataFilter filters names and values when a map is mapped to a struct; if the returned name is empty, the field is skipped FormDataFilter func(key string, values []string) (string, []string) ) var ( //DefaultNopFilter default no-op filter (map->struct) DefaultNopFilter FormDataFilter = func(k string, v []string) (string, []string) { return k, v } //DefaultFieldNameFormatter default formatting function (struct->form) DefaultFieldNameFormatter FieldNameFormatter = func(topName, fieldName string) string { var fName string if len(topName) == 0 { fName = fieldName } else { fName = topName + "." + fieldName } return fName } //LowerCaseFirstLetter lower-cases the first letter (struct->form) LowerCaseFirstLetter FieldNameFormatter = func(topName, fieldName string) string { var fName string s := []rune(fieldName) if len(s) > 0 { s[0] = unicode.ToLower(s[0]) fieldName = string(s) } if len(topName) == 0 { fName = fieldName } else { fName = topName + "."
+ fieldName } return fName } //DateToTimestamp converts a date/time string into a Unix timestamp DateToTimestamp = func(layouts ...string) FormDataFilter { layout := `2006-01-02` if len(layouts) > 0 && len(layouts[0]) > 0 { layout = layouts[0] } return func(k string, v []string) (string, []string) { if len(v) > 0 && len(v[0]) > 0 { t, e := time.ParseInLocation(layout, v[0], time.Local) if e != nil { log.Error(e) return k, []string{`0`} } return k, []string{fmt.Sprint(t.Unix())} } return k, []string{`0`} } } //TimestampToDate converts a Unix timestamp into a date/time string TimestampToDate = func(layouts ...string) FormDataFilter { layout := `2006-01-02 15:04:05` if len(layouts) > 0 && len(layouts[0]) > 0 { layout = layouts[0] } return func(k string, v []string) (string, []string) { if len(v) > 0 && len(v[0]) > 0 { tsi := strings.SplitN(v[0], `.`, 2) var sec, nsec int64 switch len(tsi) { case 2: nsec = param.AsInt64(tsi[1]) fallthrough case 1: sec = param.AsInt64(tsi[0]) } t := time.Unix(sec, nsec) if t.IsZero() { return k, []string{``} } return k, []string{t.Format(layout)} } return k, v } } //JoinValues joins a slice of values into a single string JoinValues = func(seperators ...string) FormDataFilter { sep := `,` if len(seperators) > 0 { sep = seperators[0] } return func(k string, v []string) (string, []string) { return k, []string{strings.Join(v, sep)} } } //SplitValues splits a string into a slice of values SplitValues = func(seperators ...string) FormDataFilter { sep := `,` if len(seperators) > 0 { sep = seperators[0] } return func(k string, v []string) (string, []string) { if len(v) > 0 && len(v[0]) > 0 { v = strings.Split(v[0], sep) } return k, v } } TimestampStringer = param.TimestampStringer DateTimeStringer = param.DateTimeStringer WhitespaceStringer = param.WhitespaceStringer Ignored = param.Ignored ) func TranslateStringer(t Translator, args ...interface{}) param.Stringer { return param.StringerFunc(func(v interface{}) string { return t.T(param.AsString(v), args...) }) } //FormatFieldValue formats field values func FormatFieldValue(formatters map[string]FormDataFilter, keyNormalizerArg ...func(string) string) FormDataFilter { newFormatters := map[string]FormDataFilter{} keyNormalizer := strings.Title if len(keyNormalizerArg) > 0 && keyNormalizerArg[0] != nil { keyNormalizer = keyNormalizerArg[0] } for k, v := range formatters { newFormatters[keyNormalizer(k)] = v } return func(k string, v []string) (string, []string) { tk := keyNormalizer(k) if formatter, ok := newFormatters[tk]; ok { return formatter(k, v) } return k, v } } //IncludeFieldName keeps only the listed fields func IncludeFieldName(fieldNames ...string) FormDataFilter { for k, v := range fieldNames { fieldNames[k] = strings.Title(v) } return func(k string, v []string) (string, []string) { tk := strings.Title(k) for _, fv := range fieldNames { if fv == tk { return k, v } } ret
//ExcludeFieldName excludes the listed fields func ExcludeFieldName(fieldNames ...string) FormDataFilter { for k, v := range fieldNames { fieldNames[k] = strings.Title(v) } return func(k string, v []string) (string, []string) { tk := strings.Title(k) for _, fv := range fieldNames { if fv == tk { return ``, v } } return k, v } } func SetFormValue(f engine.URLValuer, fName string, index int, value interface{}) { if index == 0 { f.Set(fName, fmt.Sprint(value)) } else { f.Add(fName, fmt.Sprint(value)) } } //FlatStructToForm maps a struct onto the form func FlatStructToForm(ctx Context, m interface{}, fieldNameFormatter FieldNameFormatter, formatters ...param.StringerMap) { StructToForm(ctx, m, ``, fieldNameFormatter, formatters...) } //StructToForm maps a struct onto the form func StructToForm(ctx Context, m interface{}, topName string, fieldNameFormatter FieldNameFormatter, formatters ...param.StringerMap) { var formatter param.StringerMap if len(formatters) > 0 { formatter = formatters[0] } vc := reflect.ValueOf(m) tc := reflect.TypeOf(m) if tc.Kind() == reflect.Ptr { tc = tc.Elem() if vc.IsNil() { return } vc = vc.Elem() } switch tc.Kind() { case reflect.Struct: case reflect.Map: for _, srcKey := range vc.MapKeys() { srcVal := vc.MapIndex(srcKey) if !srcVal.CanInterface() || srcVal.Interface() == nil { continue } key := srcKey.String() if len(topName) > 0 { key = topName + `.` + key } switch srcVal.Kind() { case reflect.Ptr: StructToForm(ctx, srcVal.Interface(), key, fieldNameFormatter, formatters...) case reflect.Struct: StructToForm(ctx, srcVal.Interface(), key, fieldNameFormatter, formatters...) default: fieldToForm(ctx, tc, reflect.StructField{Name: srcKey.String()}, srcVal, topName, fieldNameFormatter, formatter) } } return default: //fieldToForm(ctx, tc, reflect.StructField{}, vc, topName, fieldNameFormatter, formatter) return } l := tc.NumField() if fieldNameFormatter == nil { fieldNameFormatter = DefaultFieldNameFormatter } for i := 0; i < l; i++ { fVal := vc.Field(i) fStruct := tc.Field(i) fieldToForm(ctx, tc, fStruct, fVal, topName, fieldNameFormatter, formatter) } } func fieldToForm(ctx Context, parentTyp reflect.Type, fStruct reflect.StructField, fVal reflect.Value, topName string, fieldNameFormatter FieldNameFormatter, formatter param.StringerMap) { f := ctx.Request().Form() fName := fieldNameFormatter(topName, fStruct.Name) if !fVal.CanInterface() || len(fName) == 0 { return } if formatter != nil { result, found, skip := formatter.String(fName, fVal.Interface()) if skip { return } if found { f.Set(fName, result) return } } switch fVal.Type().String() { case `time.Time`: if t, y := fVal.Interface().(time.Time); y { if t.IsZero() { f.Set(fName, ``) } else { dateformat := tagfast.Value(parentTyp, fStruct, `form_format`) if len(dateformat) > 0 { f.Set(fName, t.Format(dateformat)) } else { f.Set(fName, t.Format(`2006-01-02 15:04:05`)) } } } case `time.Duration`: if t, y := fVal.Interface().(time.Duration); y { f.Set(fName, t.String()) } case `struct`: StructToForm(ctx, fVal.Interface(), fName, fieldNameFormatter) default: switch fVal.Type().Kind() { case reflect.Slice: switch sl := fVal.Interface().(type) { case []uint: for k, v := range sl { SetFormValue(f, fName, k, v) } case []uint16: for k, v := range sl { SetFormValue(f, fName, k, v) } case []uint32: for k, v := range sl { SetFormValue(f, fName, k, v) } case []uint64: for k, v := range sl { SetFormValue(f, fName, k, v) } case []int: for k, v := range sl { SetFormValue(f, fName, k, v) } case []int16: for k, v := range sl { SetFormValue(f, fName, k, v) } case []int32: for k, v
:= range sl { SetFormValue(f, fName, k, v) } case []int64: for k, v := range sl { SetFormValue(f, fName, k, v) } case []float32: for k, v := range sl { SetFormValue(f, fName, k, v) } case []float64: for k, v := range sl { SetFormValue(f, fName, k, v) } case []string: for k, v := range sl { SetFormValue(f, fName, k, v) } case []interface{}: for k, v := range sl { SetFormValue(f, fName, k, v) } default: // ignore } case reflect.Map: StructToForm(ctx, fVal.Interface(), fName, fieldNameFormatter, formatter) case reflect.Ptr: StructToForm(ctx, fVal.Interface(), fName, fieldNameFormatter) default: switch v := fVal.Interface().(type) { case ToConversion: f.Set(fName, v.ToString()) default: f.Set(fName, fmt.Sprint(v)) } } } }
urn ``, v } }
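A minimal sketch of how the FormDataFilter helpers above chain together, written as if it sat in the same package as IncludeFieldName/ExcludeFieldName; the field name `Birthday` and the date layout are illustrative only:
// Sketch only: assumes same-package access to the filter constructors above.
filters := []FormDataFilter{
	ExcludeFieldName(`Password`),  // returns an empty key for excluded fields
	DateToTimestamp(`2006-01-02`), // rewrites a date string into a Unix timestamp string
}
key, values := `Birthday`, []string{`2020-01-02`}
for _, filter := range filters {
	key, values = filter(key, values)
	if len(key) == 0 || len(values) == 0 {
		break // an empty key means "skip this field", exactly as parseFormItem does above
	}
}
// key stays "Birthday"; values now hold the Unix timestamp for 2020-01-02 in the local time zone.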
encoder.go
// Copyright 2018 Hurricanezwf. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package mqwrapper import ( "bytes" "compress/gzip" "encoding/binary" "errors" "fmt" "io/ioutil" ) type MsgEncoder interface { Encode(actionKey int32, msgBody []byte) ([]byte, error) Decode(b []byte) (actionKey int32, msgBody []byte, err error) } func DefaultEncoder() MsgEncoder
type msgEncoderV1 struct { // message magic number magicN byte // maximum segment length maxSegmentLen uint32 } func newMsgEncoderV1() MsgEncoder { return &msgEncoderV1{ magicN: 0x22, maxSegmentLen: 5242880, // default limit of 5MB } } // Encode encodes an MQ message // 1 Byte : magic number // 3 Bytes: encoding options // 4 Bytes: ActionKey // 4 Bytes: MsgBodyLen // N Bytes: MsgBody // // Encoding options (from left to right, bit0~bit23): // bit0 indicates whether msgBody is compressed: 1 = compressed, 0 = not compressed // bit1~bit23 are reserved for now // func (e *msgEncoderV1) Encode(actionKey int32, msgBody []byte) ([]byte, error) { // message bodies larger than 50KB are gzip-compressed at compression level 5 var err error var isCompressed bool if len(msgBody) > 51200 { msgBody, err = Compress(msgBody) if err != nil { return nil, fmt.Errorf("Compress msg body failed, %v", err) } isCompressed = true } // message length limit segmentLen := 1 + 3 + 4 + 4 + len(msgBody) if uint32(segmentLen) > e.maxSegmentLen { return nil, errors.New("msg too long") } offset := 0 buf := make([]byte, segmentLen) // MagicNumber buf[offset] = e.magicN // Header Option offset++ if isCompressed { buf[offset] = 0x80 } // ActionKey offset += 3 binary.BigEndian.PutUint32(buf[offset:offset+4], uint32(actionKey)) // MsgBodyLen offset += 4 binary.BigEndian.PutUint32(buf[offset:offset+4], uint32(len(msgBody))) // MsgBody offset += 4 copy(buf[offset:offset+len(msgBody)], msgBody) return buf, nil } // Decode decodes an MQ message func (e *msgEncoderV1) Decode(b []byte) (action int32, msgBody []byte, err error) { // validate the length if uint32(len(b)) > e.maxSegmentLen { err = errors.New("msg is too long") return } if len(b) < 12 { err = errors.New("msg too short") return } var offset uint32 = 0 var isCompressed bool // validate the magic number if b[0] != e.magicN { err = errors.New("bad msg format, magicN didn't match") return } // read the header options if b[1]&0x80 > 0 { isCompressed = true } // parse the ActionKey offset = 4 action = int32(binary.BigEndian.Uint32(b[offset : offset+4])) // parse the MsgBody length offset += 4 msgBodyLen := binary.BigEndian.Uint32(b[offset : offset+4]) // parse the MsgBody offset += 4 bodyEnd := offset + msgBodyLen if bodyEnd > uint32(len(b)) { err = errors.New("msg too short") return } msgBody = b[offset:bodyEnd] // decompress if isCompressed { msgBody, err = Decompress(msgBody) if err != nil { err = fmt.Errorf("Decompress msg body failed, %v", err) return } } return action, msgBody, nil } func Compress(data []byte) ([]byte, error) { b := bytes.NewBuffer(nil) w, err := gzip.NewWriterLevel(b, 5) if err != nil { return nil, err } if _, err = w.Write(data); err != nil { return nil, err } w.Close() // Close here is not resource cleanup; it flushes the buffered data into the bytes.Buffer return b.Bytes(), nil } func Decompress(compressed []byte) ([]byte, error) { r, err := gzip.NewReader(bytes.NewBuffer(compressed)) if err != nil { return nil, err } defer r.Close() return ioutil.ReadAll(r) }
{ return newMsgEncoderV1() }
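A brief round-trip sketch for the encoder above, assuming same-package access to DefaultEncoder; the action key 1001 and the payload are arbitrary example values:
// Sketch only: encode a payload and decode it back with the same MsgEncoder.
enc := DefaultEncoder()
payload := []byte(`{"hello":"world"}`)
frame, err := enc.Encode(1001, payload)
if err != nil {
	// handle the encode error
}
actionKey, body, err := enc.Decode(frame)
if err != nil {
	// handle the decode error
}
_ = actionKey // 1001 again
_ = body      // byte-for-byte equal to payload; bodies over 50KB are gzip-compressed in transit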
consul_exporter_test.go
package main import "testing" func TestNewExporter(t *testing.T)
{ cases := []struct { uri string ok bool }{ {uri: "", ok: false}, {uri: "localhost:8500", ok: true}, {uri: "https://localhost:8500", ok: true}, {uri: "http://some.where:8500", ok: true}, {uri: "fuuuu://localhost:8500", ok: false}, } for _, test := range cases { _, err := NewExporter(test.uri, "", ".*", true) if test.ok && err != nil { t.Errorf("expected no error w/ %s but got %s", test.uri, err) } if !test.ok && err == nil { t.Errorf("expected error w/ %s but got %s", test.uri, err) } } }
xl_preparation.py
import datetime from copy import deepcopy from openpyxl import load_workbook from openpyxl.utils import coordinate_to_tuple from xlrd import open_workbook, xldate_as_tuple from xlson.constants import xlson_logger from xlson.formatting import cell_meta_to_dict from xlson.handlers import XLSonHandler, XLSonSheetHandler from xlson.formatting import CELL_DEFAULT_META from xlson._lib.general_utils import digitalize_str from xlson._lib.coords_tools import coords_from_range def prepare_xl(xl_path, data_only=False, values_strip=None, digitalization=True, crop_empty=True, n_rows=None): try: return prepare_new_xl(new_xl_path=xl_path, data_only=data_only, values_strip=values_strip, digitalization=digitalization, crop_empty=crop_empty, n_rows=n_rows) # except InvalidFileException: except: xlson_logger.warning("%s cannot be prepared as new Excel format - trying old Excel format preparation" % xl_path) try: return prepare_old_xl(old_xl_path=xl_path, values_strip=values_strip, digitalization=digitalization, crop_empty=crop_empty, n_rows=n_rows) except: xlson_logger.warning("cannot read '%s'" % xl_path) return XLSonHandler( main_sheet=XLSonSheetHandler.load_from_dict({ "data_df": [["an error", "occurred", "while", "processing", xl_path]], "meta_df": [[None] * 5], "entities": XLSonSheetHandler.entites_0, "supp_sheets": XLSonSheetHandler.supp_sheets_0, "cell_default_meta": CELL_DEFAULT_META, }, main_sheet=True), supp_sheets = list(), source_path = xl_path, ) def prepare_new_xl(new_xl_path, data_only=False, values_strip=None, digitalization=True, crop_empty=True, n_rows=None): xlson_logger.info("%s conversion to xlson started" % new_xl_path) main_sheet = dict() supp_sheets_list = list() wb = load_workbook(new_xl_path, data_only=data_only) n = 0 for sheet_name in wb.sheetnames: merged_cells_dict = get_merged_cells(wb[sheet_name]) sheet_dict = { 'cell_default_meta': CELL_DEFAULT_META, 'sheet_name': sheet_name, 'data_df': iterate_sheet(wb[sheet_name], cell_func=get_cell_value, add_args_dict={'value_strip': values_strip, 'digitalization': digitalization}, n_rows=n_rows), 'entities': deepcopy(XLSonSheetHandler.entites_0), } if crop_empty: last_cell = get_last_cell(sheet_dict['data_df']) sheet_dict['data_df'] = [sheet_dict['data_df'][i][:last_cell[1]+1] for i in range(last_cell[0]+1)] sheet_dict['meta_df'] = iterate_sheet(wb[sheet_name], cell_func=cell_meta_to_dict, add_args_dict={'merged_cells_dict': merged_cells_dict}, last_cell=last_cell, n_rows=n_rows) else: sheet_dict['meta_df'] = iterate_sheet(wb[sheet_name], cell_func=cell_meta_to_dict, add_args_dict={'merged_cells_dict': merged_cells_dict}, n_rows=n_rows) if n == 0: main_sheet = sheet_dict else: supp_sheets_list.append(sheet_dict) n += 1 main_sheet['supp_sheets'] = [supp_sheet['sheet_name'] for supp_sheet in supp_sheets_list] xlson_logger.info("%s conversion to xlson finished" % new_xl_path) return XLSonHandler(main_sheet=XLSonSheetHandler.load_from_dict(main_sheet, main_sheet=True), supp_sheets=supp_sheets_list, source_path=new_xl_path) def get_last_cell(data_df): max_row = 0 max_col = 0 for i in range(len(data_df)): for j in range(len(data_df[i])): if data_df[i][j] is not None: if i > max_row: max_row = i if j > max_col: max_col = j return max_row, max_col def get_merged_cells(sheet): merged_cells_dict = { "merged_with": {}, "merged_to": {}, } mc_ranges = sheet.merged_cells.ranges for mc_range in mc_ranges: c_list = mc_range.coord.split(":") first_c = tuple(map(lambda c: c - 1, coordinate_to_tuple(c_list[0]))) last_c = tuple(map(lambda c: c - 1, 
coordinate_to_tuple(c_list[1]))) merged_coords_list = coords_from_range(first_c, last_c) merged_cells_dict["merged_with"][first_c] = merged_coords_list[1:] for merged_coord in merged_coords_list[1:]: merged_cells_dict["merged_to"][merged_coord] = first_c return merged_cells_dict def iterate_sheet(sheet, cell_func=None, add_args_dict=None, last_cell=None, n_rows=None): rows_list = list() i = 0 for row in sheet: if last_cell is not None and i > last_cell[0]: break if type(n_rows) is int and i >= n_rows: break curr_row_list = list() j = 0 for cell in row:
curr_row_list.append(cell_func(cell, **add_args_dict)) elif type(add_args_dict) is list or type(add_args_dict) is tuple: curr_row_list.append(cell_func(cell, *add_args_dict)) else: curr_row_list.append(cell_func(cell)) else: curr_row_list.append(cell) j += 1 rows_list.append(curr_row_list) i += 1 return rows_list def get_cell_value(cell, value_strip=None, digitalization=True, special_formating=None, **kwargs): if callable(special_formating): v = special_formating(cell, **kwargs) else: v = cell.value if type(v) is datetime.datetime: return v.strftime("%d.%m.%Y") if type(v) is str: if not v: return None if type(value_strip) is str or value_strip is None: if digitalization: return digitalize_str(v.strip(value_strip)) else: return v.strip(value_strip) return v def prepare_old_xl(old_xl_path, values_strip=None, digitalization=True, crop_empty=True, n_rows=None): # TODO: implement formatting info conversion to meta_df xlson_logger.info("%s conversion to xlson started" % old_xl_path) main_sheet = dict() supp_sheets_list = list() wb = open_workbook(old_xl_path, formatting_info=True) # data_only equiv has not been found n = 0 for sheet_name in wb.sheet_names(): # merged_cells_dict = get_merged_cells(wb.sheet_by_name(sheet_name)) # TODO: implement meged cells preparation sheet_dict = { 'cell_default_meta': CELL_DEFAULT_META, 'sheet_name': sheet_name, 'data_df': iterate_sheet(wb.sheet_by_name(sheet_name).get_rows(), cell_func=get_cell_value, add_args_dict={'value_strip': values_strip, 'digitalization': digitalization, 'special_formating': _check_xlrd_types, 'datemode': wb.datemode}, n_rows=n_rows), 'entities': deepcopy(XLSonSheetHandler.entites_0), } if crop_empty: last_cell = get_last_cell(sheet_dict['data_df']) if last_cell == (0, 0): sheet_dict['data_df'] = [[None]] else: sheet_dict['data_df'] = [sheet_dict['data_df'][i][:last_cell[1]+1] for i in range(last_cell[0]+1)] sheet_dict['meta_df'] = [[None] * (last_cell[1]+1)] * (last_cell[0]+1) # TODO: fill meta_df # sheet_dict['meta_df'] = iterate_sheet(wb[sheet_name], # cell_func=cell_meta_to_dict, # add_args_dict={'merged_cells_dict': merged_cells_dict}, # last_cell=last_cell, # n_rows=n_rows) else: sheet_dict['meta_df'] = [[None] * wb.sheet_by_name(sheet_name).ncols] * wb.sheet_by_name(sheet_name).nrows # TODO: fill meta_df # sheet_dict['meta_df'] = iterate_sheet(wb[sheet_name], # cell_func=cell_meta_to_dict, # add_args_dict={'merged_cells_dict': merged_cells_dict}, # n_rows=n_rows) if n == 0: main_sheet = sheet_dict else: supp_sheets_list.append(sheet_dict) n += 1 main_sheet['supp_sheets'] = [supp_sheet['sheet_name'] for supp_sheet in supp_sheets_list] xlson_logger.info("%s conversion to xlson finished" % old_xl_path) return XLSonHandler(main_sheet=XLSonSheetHandler.load_from_dict(main_sheet, main_sheet=True), supp_sheets=supp_sheets_list, source_path=old_xl_path) def _check_xlrd_types(cell, **kwargs): v = cell.value if cell.ctype == 0 or cell.ctype == 6: return None if cell.ctype == 2: if v - float(int(v)) > 0.0: return v else: return int(v) if cell.ctype == 3: return datetime.datetime(*xldate_as_tuple(v, kwargs.get("datemode", 0))) # return datetime.datetime(1900, 1, 1) + datetime.timedelta(int(v)-2) return v
if last_cell is not None and j > last_cell[1]: break if callable(cell_func): if type(add_args_dict) is dict:
3d-tiles-tools.js
#!/usr/bin/env node 'use strict'; var Cesium = require('cesium'); var fsExtra = require('fs-extra'); var GltfPipeline = require('gltf-pipeline'); var path = require('path'); var Promise = require('bluebird'); var yargs = require('yargs'); var zlib = require('zlib'); var databaseToTileset = require('../lib/databaseToTileset'); var directoryExists = require('../lib/directoryExists'); var extractB3dm = require('../lib/extractB3dm'); var extractCmpt = require('../lib/extractCmpt'); var extractI3dm = require('../lib/extractI3dm'); var fileExists = require('../lib/fileExists'); var getBufferPadded = require('../lib/getBufferPadded'); var getMagic = require('../lib/getMagic'); var getJsonBufferPadded = require('../lib/getJsonBufferPadded'); var glbToB3dm = require('../lib/glbToB3dm'); var glbToI3dm = require('../lib/glbToI3dm'); var isGzipped = require('../lib/isGzipped'); var optimizeGlb = require('../lib/optimizeGlb'); var runPipeline = require('../lib/runPipeline'); var tilesetToDatabase = require('../lib/tilesetToDatabase'); var fs = require('fs'); var os = require('os'); var { exec } = require("child_process"); var rimraf = require("rimraf"); var uuidv4 = require('uuid').v4; var zlibGunzip = Promise.promisify(zlib.gunzip); var zlibGzip = Promise.promisify(zlib.gzip); var defaultValue = Cesium.defaultValue; var defined = Cesium.defined; var DeveloperError = Cesium.DeveloperError; var index = -1; for (var i = 0; i < process.argv.length; i++) { if (process.argv[i] === '--options') { index = i; break; } } var args; var optionArgs; if (index < 0) { args = process.argv.slice(2); optionArgs = []; } else { args = process.argv.slice(2, index); optionArgs = process.argv.slice(index + 1); } // Specify input for argument parsing even though it won't be used optionArgs.push('-i'); optionArgs.push('null'); var argv = yargs .usage('Usage: $0 <command> [options]') .help('h') .alias('h', 'help') .options({ 'i': { alias: 'input', description: 'Input path for the command.', global: true, normalize: true, type: 'string' }, 'o': { alias: 'output', description: 'Output path for the command.', global: true, normalize: true, type: 'string' }, 'f': { alias: 'force', default: false, description: 'Output can be overwritten if it already exists.', global: true, type: 'boolean' }, 'b': { alias: 'blender-path', default: 'C:\\Program Files\\Blender Foundation\\Blender 2.92\\blender.exe', description: 'Path to blender executable', global: true, type: 'string' } }) .command('pipeline', 'Execute the input pipeline JSON file.') .command('tilesetToDatabase', 'Create a sqlite database for a tileset.') .command('databaseToTileset', 'Unpack a tileset database to a tileset folder.') .command('glbToB3dm', 'Repackage the input glb as a b3dm with a basic header.') .command('glbToI3dm', 'Repackage the input glb as a i3dm with a basic header.') .command('b3dmToGlb', 'Extract the binary glTF asset from the input b3dm.') .command('i3dmToGlb', 'Extract the binary glTF asset from the input i3dm.') .command('cmptToGlb', 'Extract the binary glTF assets from the input cmpt.') .command('optimizeB3dm', 'Pass the input b3dm through gltf-pipeline. To pass options to gltf-pipeline, place them after --options. (--options -h for gltf-pipeline help)', { 'options': { description: 'All arguments after this flag will be passed to gltf-pipeline as command line options.' } }) .command('optimizeGlb', 'Pass the input glb through gltf-pipeline. To pass options to gltf-pipeline, place them after --options. 
(--options -h for gltf-pipeline help)', { 'options': { description: 'All arguments after this flag will be passed to gltf-pipeline as command line options.' } }) .command('compressB3dm', 'Pass the input b3dm through gltf-pipeline to draco compress it') .command('blenderB3dm', 'Process a b3dm using blender') .command('compressGlb', 'Pass the input glb through gltf-pipeline to draco compress it') .command('optimizeI3dm', 'Pass the input i3dm through gltf-pipeline. To pass options to gltf-pipeline, place them after --options. (--options -h for gltf-pipeline help)', { 'options': { description: 'All arguments after this flag will be passed to gltf-pipeline as command line options.' } }) .command('gzip', 'Gzips the input tileset directory.', { 't': { alias: 'tilesOnly', default: false, description: 'Only tile files (.b3dm, .i3dm, .pnts, .vctr) should be gzipped.', type: 'boolean' } }) .command('ungzip', 'Ungzips the input tileset directory.') .command('combine', 'Combines all external tilesets into a single tileset.json file.', { 'r': { alias: 'rootJson', default: 'tileset.json', description: 'Relative path to the root tileset.json file.', normalize: true, type: 'string' } }) .command('upgrade', 'Upgrades the input tileset to the latest version of the 3D Tiles spec. Embedded glTF models will be upgraded to glTF 2.0.') .demand(1) .recommendCommands() .strict() .parse(args); var command = argv._[0]; var input = defaultValue(argv.i, argv._[1]); var output = defaultValue(argv.o, argv._[2]); var blenderPath = argv.b; var force = argv.f; if (!defined(input)) { console.log('-i or --input argument is required. See --help for details.'); return; } console.time('Total'); runCommand(command, input, output, force, argv) .then(function() { console.timeEnd('Total'); }) .catch(function(error) { console.log(error.message); }); function runCommand(command, input, output, force, argv) { if (command === 'pipeline') { return processPipeline(input, force); } else if (command === 'gzip') { return processStage(input, output, force, command, argv); } else if (command === 'ungzip') { return processStage(input, output, force, command, argv); } else if (command === 'combine') { return processStage(input, output, force, command, argv); } else if (command === 'upgrade') { return processStage(input, output, force, command, argv); } else if (command === 'b3dmToGlb') { return readB3dmWriteGlb(input, output, force); } else if (command === 'i3dmToGlb') { return readI3dmWriteGlb(input, output, force); } else if (command === 'cmptToGlb') { return readCmptWriteGlb(input, output, force); } else if (command === 'glbToB3dm') { return readGlbWriteB3dm(input, output, force); } else if (command === 'glbToI3dm') { return readGlbWriteI3dm(input, output, force); } else if (command === 'optimizeB3dm') { return readAndOptimizeB3dm(input, output, force, optionArgs); } else if (command === 'optimizeGlb') { return readAndOptimizeGlb(input, output, force, optionArgs); } else if (command === 'compressB3dm') { return compressB3dm(input, output, force, optionArgs); } else if (command === 'blenderB3dm') { return blenderB3dm(input, output, force, blenderPath, optionArgs); } else if (command === 'compressGlb') { return compressGlb(input, output, force, optionArgs); } else if (command === 'optimizeI3dm') { return readAndOptimizeI3dm(input, output, force, optionArgs); } else if (command === 'tilesetToDatabase') { return convertTilesetToDatabase(input, output, force); } else if (command === 'databaseToTileset') { return convertDatabaseToTileset(input, 
output, force); } throw new DeveloperError('Invalid command: ' + command); } function checkDirectoryOverwritable(directory, force) { if (force) { return Promise.resolve(); } return directoryExists(directory) .then(function(exists) { if (exists) { throw new DeveloperError('Directory ' + directory + ' already exists. Specify -f or --force to overwrite existing files.'); } }); } function checkFileOverwritable(file, force) { if (force) { return Promise.resolve(); } return fileExists(file) .then(function (exists) { if (exists) { throw new DeveloperError('File ' + file + ' already exists. Specify -f or --force to overwrite existing files.'); } }); } function readFile(file) { return fsExtra.readFile(file) .then(function(fileBuffer) { if (isGzipped(fileBuffer)) { return zlibGunzip(fileBuffer); } return fileBuffer; }); } function logCallback(message) { console.log(message); } function parseOptionsArgs(optionArgs) { var options = {}; if (optionArgs.includes('--basis')) { options.encodeBasis = true; } var qualityArg = optionArgs.findIndex(str => str.includes('--jpeg-quality')) if (qualityArg != -1) { options.jpegCompressionRatio = parseInt(optionArgs[qualityArg].split('=')[1]) } var basisQualityArg = optionArgs.findIndex(str => str.includes('--basis-quality')) if (basisQualityArg != -1) { options.basisQuality = parseInt(optionArgs[basisQualityArg].split('=')[1]) } return options; } function processPipeline(inputFile) { return fsExtra.readJson(inputFile) .then(function(pipeline) { var inputDirectory = pipeline.input; var outputDirectory = pipeline.output; if (!defined(inputDirectory)) { throw new DeveloperError('pipeline.input is required.'); } outputDirectory = path.normalize(defaultValue(outputDirectory, path.join(path.dirname(inputDirectory), path.basename(inputDirectory) + '-processed'))); // Make input and output relative to the root directory inputDirectory = path.join(path.dirname(inputFile), inputDirectory); outputDirectory = path.join(path.dirname(inputFile), outputDirectory); return checkDirectoryOverwritable(outputDirectory, force) .then(function() { pipeline.input = inputDirectory; pipeline.output = outputDirectory; var options = { logCallback : logCallback }; return runPipeline(pipeline, options); }); }); } function processStage(inputDirectory, outputDirectory, force, command, argv) { outputDirectory = defaultValue(outputDirectory, path.join(path.dirname(inputDirectory), path.basename(inputDirectory) + '-processed')); return checkDirectoryOverwritable(outputDirectory, force) .then(function() { var stage = getStage(command, argv); var pipeline = { input : inputDirectory, output : outputDirectory, stages : [stage] }; var options = { logCallback : logCallback }; return runPipeline(pipeline, options); }); } function getStage(stageName, argv) { var stage = { name : stageName }; switch (stageName) { case 'gzip': stage.tilesOnly = argv.tilesOnly; break; case 'combine': stage.rootJson = argv.rootJson; } return stage; } function convertTilesetToDatabase(inputDirectory, outputPath, force) { outputPath = defaultValue(outputPath, path.join(path.dirname(inputDirectory), path.basename(inputDirectory) + '.3dtiles')); return checkFileOverwritable(outputPath, force) .then(function() { return tilesetToDatabase(inputDirectory, outputPath); }); } function convertDatabaseToTileset(inputPath, outputDirectory, force) { outputDirectory = defaultValue(outputDirectory, path.join(path.dirname(inputPath), path.basename(inputPath, path.extname(inputPath)))); return checkDirectoryOverwritable(outputDirectory, force) 
.then(function() { return databaseToTileset(inputPath, outputDirectory); }); } function readGlbWriteB3dm(inputPath, outputPath, force) { outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 3) + 'b3dm'); return checkFileOverwritable(outputPath, force) .then(function() { return readFile(inputPath) .then(function(glb) { // Set b3dm spec requirements var featureTableJson = { BATCH_LENGTH : 0 }; return fsExtra.outputFile(outputPath, glbToB3dm(glb, featureTableJson)); }); }); } function readGlbWriteI3dm(inputPath, outputPath, force) { outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 3) + 'i3dm'); return checkFileOverwritable(outputPath, force) .then(function() { return readFile(inputPath) .then(function(glb) { // Set i3dm spec requirements var featureTable = { INSTANCES_LENGTH : 1, POSITION : { byteOffset : 0 } }; var featureTableJsonBuffer = getJsonBufferPadded(featureTable); var featureTableBinaryBuffer = getBufferPadded(Buffer.alloc(12, 0)); // [0, 0, 0] return fsExtra.outputFile(outputPath, glbToI3dm(glb, featureTableJsonBuffer, featureTableBinaryBuffer)); }); }); } function readB3dmWriteGlb(inputPath, outputPath, force) { outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 4) + 'glb'); var options = {decodeWebP: true}; return checkFileOverwritable(outputPath, force) .then(function() { return readFile(inputPath); }) .then(function(fileBuffer) { var b3dm = extractB3dm(fileBuffer); return GltfPipeline.processGlb(b3dm.glb, options); }) .then(function({glb}) { return fsExtra.outputFile(outputPath, glb); }); } function readI3dmWriteGlb(inputPath, outputPath, force) { outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 4) + 'glb'); return checkFileOverwritable(outputPath, force) .then(function() { return readFile(inputPath); }) .then(function(i3dm) { return fsExtra.outputFile(outputPath, extractI3dm(i3dm).glb); }); } function extractGlbs(tiles) { var glbs = []; var tilesLength = tiles.length; for (var i = 0; i < tilesLength; ++i) { var tile = tiles[i]; var magic = getMagic(tile); if (magic === 'i3dm') { glbs.push(extractI3dm(tile).glb); } else if (magic === 'b3dm') { glbs.push(extractB3dm(tile).glb); } } return glbs; } function readCmptWriteGlb(inputPath, outputPath, force) { outputPath = defaultValue(outputPath, inputPath).slice(0, inputPath.length - 5); return readFile(inputPath) .then(function(cmpt) { var tiles = extractCmpt(cmpt); var glbs = extractGlbs(tiles); var glbsLength = glbs.length; var glbPaths = new Array(glbsLength); if (glbsLength === 0) { throw new DeveloperError('No glbs found in ' + inputPath + '.'); } else if (glbsLength === 1) { glbPaths[0] = outputPath + '.glb'; } else { for (var i = 0; i < glbsLength; ++i) { glbPaths[i] = outputPath + '_' + i + '.glb'; } } return Promise.map(glbPaths, function(glbPath) { return checkFileOverwritable(glbPath, force); }).then(function() { return Promise.map(glbPaths, function(glbPath, index) { return fsExtra.outputFile(glbPath, glbs[index]); }); }); }); } function readAndOptimizeB3dm(inputPath, outputPath, force, optionArgs) { var options = parseOptionsArgs(optionArgs); outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 5) + '-optimized.b3dm'); var gzipped; var b3dm; return checkFileOverwritable(outputPath, force) .then(function() { return fsExtra.readFile(inputPath); }) .then(function(fileBuffer) { gzipped = isGzipped(fileBuffer); if (isGzipped(fileBuffer)) { return zlibGunzip(fileBuffer); } return fileBuffer; }) 
.then(function(fileBuffer) { b3dm = extractB3dm(fileBuffer); return GltfPipeline.processGlb(b3dm.glb, options); }) .then(function({glb}) { var b3dmBuffer = glbToB3dm(glb, b3dm.featureTable.json, b3dm.featureTable.binary, b3dm.batchTable.json, b3dm.batchTable.binary); /* Gzip disabled if (gzipped) { return zlibGzip(b3dmBuffer); }*/ return b3dmBuffer; }) .then(function(buffer) { return fsExtra.outputFile(outputPath, buffer); }) .catch(function(error) { console.log("ERROR", error); }); } function readAndOptimizeGlb(inputPath, outputPath, force, optionArgs) { var options = parseOptionsArgs(optionArgs); outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 5) + '-optimized.glb'); return checkFileOverwritable(outputPath, force) .then(function() { return fsExtra.readFile(inputPath); }) .then(function(fileBuffer) { b3dm = extractB3dm(fileBuffer); return GltfPipeline.processGlb(fileBuffer, options); }) .then(function(buffer) { return fsExtra.outputFile(outputPath, buffer); }) .catch(function(error) { console.log("ERROR", error); }); } function compressB3dm(inputPath, outputPath, force, optionArgs) { var parsedArgs = parseOptionsArgs(optionArgs); var options = { dracoOptions: true, decodeWebP: !!parsedArgs.jpegCompressionRatio || parsedArgs.encodeBasis, ...parsedArgs }; outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 5) + '-optimized.b3dm'); var gzipped; var b3dm; return checkFileOverwritable(outputPath, force) .then(function() { return fsExtra.readFile(inputPath); }) .then(function(fileBuffer) { gzipped = isGzipped(fileBuffer); if (isGzipped(fileBuffer)) { return zlibGunzip(fileBuffer); } return fileBuffer; }) .then(function(fileBuffer) { b3dm = extractB3dm(fileBuffer); return GltfPipeline.processGlb(b3dm.glb, options); })
.then(function({glb}) { var b3dmBuffer = glbToB3dm(glb, b3dm.featureTable.json, b3dm.featureTable.binary, b3dm.batchTable.json, b3dm.batchTable.binary); /* * Viewer currently does not support Gzip if (gzipped) { return zlibGzip(b3dmBuffer); }*/ return b3dmBuffer; }) .then(function(buffer) { return fsExtra.outputFile(outputPath, buffer); }) .catch(function(error) { console.log("ERROR", error); }); } function blenderB3dm(inputPath, outputPath, force, blenderPath, optionArgs) { var options = {decodeWebP: true}; var uid = uuidv4(); outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 5) + '-optimized.b3dm'); var gzipped; var b3dm; var blenderScript = optionArgs[0]; return checkFileOverwritable(outputPath, force) .then(function() { return fsExtra.readFile(inputPath); }) .then(function(fileBuffer) { gzipped = isGzipped(fileBuffer); if (isGzipped(fileBuffer)) { return zlibGunzip(fileBuffer); } return fileBuffer; }) .then(function(fileBuffer) { b3dm = extractB3dm(fileBuffer); return GltfPipeline.processGlb(b3dm.glb, options); }) .then(function({glb}) { const blenderTmp = path.resolve('.', 'tmp'); return new Promise((resolve, reject) => { fs.writeFile(`${blenderTmp}/tile-${uid}.glb`, glb, (err) => { if (err) { console.log("Error", err) reject(err) } else { exec(`"${blenderPath}" -b --python ${path.resolve('.',blenderScript)} -- ${blenderTmp}/tile-${uid}.glb ${blenderTmp}/tile-blender-${uid}.glb`, (error, stdout, stderr) => { if (error) { console.log("Error", error); reject(error); } console.log(stdout); fs.readFile(`${blenderTmp}/tile-blender-${uid}.glb`, (err, data) => { if (err) { console.log("Error", err); reject(err); } fs.unlinkSync(`${blenderTmp}/tile-${uid}.glb`) fs.unlinkSync(`${blenderTmp}/tile-blender-${uid}.glb`) resolve(data) }); }) } }) }) }) .then(function(glb) { var b3dmBuffer = glbToB3dm(glb, b3dm.featureTable.json, b3dm.featureTable.binary, b3dm.batchTable.json, b3dm.batchTable.binary); /* * Viewer currently does not support Gzip if (gzipped) { return zlibGzip(b3dmBuffer); }*/ return b3dmBuffer; }) .then(function(buffer) { return fsExtra.outputFile(outputPath, buffer); }) .catch(function(error) { console.log("ERROR", error); }); } function compressGlb(inputPath, outputPath, force, optionArgs) { var options = { dracoOptions: true, decodeWebP: true, ...parseOptionsArgs(optionArgs) }; outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 5) + '-optimized.glb'); var gzipped; var b3dm; return checkFileOverwritable(outputPath, force) .then(function() { return fsExtra.readFile(inputPath); }) .then(function(fileBuffer) { gzipped = isGzipped(fileBuffer); if (isGzipped(fileBuffer)) { return zlibGunzip(fileBuffer); } return fileBuffer; }) .then(function(fileBuffer) { return GltfPipeline.processGlb(fileBuffer, options); }) .then(function(buffer) { return fsExtra.outputFile(outputPath, buffer.glb); }) .catch(function(error) { console.log("ERROR", error); }); } function readAndOptimizeI3dm(inputPath, outputPath, force, optionArgs) { var options = GltfPipeline.parseArguments(optionArgs); outputPath = defaultValue(outputPath, inputPath.slice(0, inputPath.length - 5) + '-optimized.i3dm'); var gzipped; var i3dm; return checkFileOverwritable(outputPath, force) .then(function() { return fsExtra.readFile(inputPath); }) .then(function(fileBuffer) { gzipped = isGzipped(fileBuffer); if (isGzipped(fileBuffer)) { return zlibGunzip(fileBuffer); } return fileBuffer; }) .then(function(fileBuffer) { i3dm = extractI3dm(fileBuffer); return optimizeGlb(i3dm.glb, 
options); }) .then(function(glbBuffer) { var i3dmBuffer = glbToI3dm(glbBuffer, i3dm.featureTable.json, i3dm.featureTable.binary, i3dm.batchTable.json, i3dm.batchTable.binary); if (gzipped) { return zlibGzip(i3dmBuffer); } return i3dmBuffer; }) .then(function(buffer) { return fsExtra.outputFile(outputPath, buffer); }); }
tracing.rs
//! Allows to listen to runtime events. use crate::Context; use evm_runtime::{CreateScheme, Transfer}; use primitive_types::{H160, U256}; environmental::environmental!(listener: dyn EventListener + 'static); pub trait EventListener { fn event(&mut self, event: Event); } #[derive(Debug, Copy, Clone)] pub enum Event<'a> { Call { code_address: H160, transfer: &'a Option<Transfer>,
input: &'a [u8], target_gas: Option<u64>, is_static: bool, context: &'a Context, }, Create { caller: H160, address: H160, scheme: CreateScheme, value: U256, init_code: &'a [u8], target_gas: Option<u64>, }, Suicide { address: H160, target: H160, balance: U256, }, } impl<'a> Event<'a> { pub(crate) fn emit(self) { listener::with(|listener| listener.event(self)); } } /// Run closure with provided listener. pub fn using<R, F: FnOnce() -> R>(new: &mut (dyn EventListener + 'static), f: F) -> R { listener::using(new, f) }
test_utils_callback.py
#!/usr/bin/env python # ===================================================================== # MODULE DOCSTRING # ===================================================================== """ Tests for callback utility classes and functions. """ # ===================================================================== # GLOBAL IMPORTS # ===================================================================== import pytest from openff.toolkit.utils.callback import ( Callbackable, CallbackRegistrationError, callback_method, ) # ===================================================================== # UTILITY CLASSES AND FUNCTIONS # ===================================================================== class CallHistory: """Used to keep track of the order in which callbacks and methods are called by Callbackable.""" history = None def reset_history(self): CallHistory.history = [] @classmethod def add_history_entry(cls, name, *args, **kwargs): # Store args and kwargs in the history only if they are given. history_entry = [] if len(args) != 0: history_entry.append(args) if len(kwargs) != 0: history_entry.append(kwargs) if len(history_entry) == 0: cls.history.append(name) else: cls.history.append([name] + history_entry) def instance_callback(self, callbackable, event_name, *args, **kwargs): assert isinstance(self, object) CallHistory.add_history_entry("callback_" + event_name, *args, **kwargs) @classmethod def class_callback(cls, callbackable, event_name, *args, **kwargs): assert isinstance(cls, type) CallHistory.add_history_entry("callback_" + event_name, *args, **kwargs) @staticmethod def static_callback(callbackable, event_name, *args, **kwargs): CallHistory.add_history_entry("callback_" + event_name, *args, **kwargs) call_history = CallHistory() # ===================================================================== # Test Callbackable class # ===================================================================== class TestCallbackable: """Test suite for the Callbackable base class.""" # ----------------------------- # # Utility classes and functions # # ----------------------------- #
CallHistory.add_history_entry("instance_method", *args, **kwargs) @callback_method def __iadd__(self, other): CallHistory.add_history_entry("__iadd__", other) @callback_method(events=["event1"]) def event_method1(self, *args, **kwargs): CallHistory.add_history_entry("event_method1", *args, **kwargs) @callback_method(events=["event1", "event2"]) def event_method2(self, *args, **kwargs): CallHistory.add_history_entry("event_method2", *args, **kwargs) def check_method_call_order( self, callbackable, event_name, event_sequence, *args, **kwargs ): """Check that callback and methods/attributes are invoked in the correct order. This also formats the history correctly if args and kwargs are given. """ # Modify the expected history if args and kwargs are given. if len(args) == 0 and len(kwargs) == 0: expected_history = event_sequence else: expected_history = [[event_name] for event_name in event_sequence] if len(args) != 0: for event in expected_history: event.append(args) if len(kwargs) != 0: for event in expected_history: event.append(kwargs) # Reset history and verify that the calls are in the correct order. call_history.reset_history() getattr(callbackable, event_name)(*args, **kwargs) assert call_history.history == expected_history # ----- # # Tests # # ----- # @pytest.mark.parametrize("event_name", ["instance_method"]) @pytest.mark.parametrize( "callback", [ call_history.instance_callback, CallHistory.class_callback, CallHistory.static_callback, ], ) @pytest.mark.parametrize( "args,kwargs", [([], {}), ([1, 2.0], {"kwarg1": 0, "kwarg2": None})] ) def test_register_method_callback(self, event_name, callback, args, kwargs): """Methods' callbacks are invoked in the correct order and with the correct arguments.""" callbackable = TestCallbackable.MyCallbackable() # No callback is called before registration. event_sequence = [event_name] self.check_method_call_order( callbackable, event_name, event_sequence, *args, **kwargs ) # Register the callback. callbackable.register_callback(event_name, callback) # After the registration, the callback is invoked correctly. event_sequence = [event_name, "callback_" + event_name] self.check_method_call_order( callbackable, event_name, event_sequence, *args, **kwargs ) def test_register_magic_method_callback(self): """Callbacks registered to magic methods are invoked correctly.""" callbackable = TestCallbackable.MyCallbackable() callbackable.register_callback("__iadd__", call_history.instance_callback) extension = [1, 2] call_history.reset_history() callbackable += extension assert call_history.history == [ ["__iadd__", (extension,)], ["callback___iadd__", (extension,)], ] def test_register_event_callback(self): """Callbacks registered to a event are handled corectly.""" callbackable = TestCallbackable.MyCallbackable() # Register the callbacks to event1 (event_method1 and event_method2). callbackable.register_callback("event1", call_history.instance_callback) callbackable.register_callback("event1", CallHistory.class_callback) # Register one callback to event2 (only event_method2). callbackable.register_callback("event2", CallHistory.static_callback) # Check the event sequence for both methods belong to the two events. 
event_sequence = [ "event_method1", "callback_event_method1", "callback_event_method1", ] self.check_method_call_order(callbackable, "event_method1", event_sequence) event_sequence = [ "event_method2", "callback_event_method2", "callback_event_method2", "callback_event_method2", ] self.check_method_call_order(callbackable, "event_method2", event_sequence) def test_not_callback_method_raise_exception(self): """An exception is raised if a callback is registered for a method not tagged with callback_method.""" class TempCallbackable(Callbackable): def not_callback_method(self): pass callbackable = TempCallbackable() with pytest.raises( CallbackRegistrationError, match="is not tagged with the @callback_method decorator", ): callbackable.register_callback( "not_callback_method", call_history.instance_callback ) def test_unknown_event_raise_exception(self): """An exception is raised if a callback is registered for an unknown callback event.""" callbackable = TestCallbackable.MyCallbackable() with pytest.raises( CallbackRegistrationError, match='is associated to the callback event "unknown"', ): callbackable.register_callback("unknown", call_history.instance_callback)
class MyCallbackable(Callbackable): @callback_method def instance_method(self, *args, **kwargs):
sigmoid.py
#!/usr/bin/python3 import matplotlib.pyplot as plt import numpy as np def sigmoid(z):
z = np.arange(-7, 7, 0.01) phi_z = sigmoid(z) plt.plot(z, phi_z) plt.axvline(0.0, color = 'k') plt.axhspan(0.0, 1.0, facecolor = '1.0', alpha = 1.0, ls = 'dotted') plt.axhline(0.5, ls = 'dotted', color = 'k') plt.yticks([0.0, 0.5, 1.0]) plt.ylim(-0.1, 1.1) plt.xlabel('z') plt.ylabel(r'$\phi (z)$') plt.show()
return 1.0 / (1.0 + np.exp(-z))
property.go
package ua import "fmt" func (v *Version) String() string { if v.Major != 0 || v.Minor != 0 || v.Patch != 0
return v.Ver } func (d *Device) OSName() string { if d.ParsedInfo.OS.Name != "" { return d.ParsedInfo.OS.Name } return d.DetectedInfo.OS.Name } func (d *Device) OSVer() string { if d.ParsedInfo.OS.Version.String() != "" { return d.ParsedInfo.OS.Version.String() } return d.DetectedInfo.OS.Version.String() }
{ return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) }
networkmanagement-gen.go
// Copyright 2020 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated file. DO NOT EDIT. // Package networkmanagement provides access to the Network Management API. // // For product documentation, see: https://cloud.google.com/ // // Creating a client // // Usage example: // // import "google.golang.org/api/networkmanagement/v1beta1" // ... // ctx := context.Background() // networkmanagementService, err := networkmanagement.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // Other authentication options // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // // networkmanagementService, err := networkmanagement.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // networkmanagementService, err := networkmanagement.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package networkmanagement // import "google.golang.org/api/networkmanagement/v1beta1" import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "strconv" "strings" googleapi "google.golang.org/api/googleapi" gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" internaloption "google.golang.org/api/option/internaloption" htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint const apiId = "networkmanagement:v1beta1" const apiName = "networkmanagement" const apiVersion = "v1beta1" const basePath = "https://networkmanagement.googleapis.com/" // OAuth2 scopes used by this API. const ( // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" ) // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { scopesOption := option.WithScopes( "https://www.googleapis.com/auth/cloud-platform", ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err } s, err := New(client) if err != nil { return nil, err } if endpoint != "" { s.BasePath = endpoint } return s, nil } // New creates a new Service. It uses the provided http.Client for requests. // // Deprecated: please use NewService instead. // To provide a custom HTTP client, use option.WithHTTPClient. // If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Projects = NewProjectsService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Projects *ProjectsService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.Locations = NewProjectsLocationsService(s) return rs } type ProjectsService struct { s *Service Locations *ProjectsLocationsService } func NewProjectsLocationsService(s *Service) *ProjectsLocationsService
type ProjectsLocationsService struct { s *Service Global *ProjectsLocationsGlobalService } func NewProjectsLocationsGlobalService(s *Service) *ProjectsLocationsGlobalService { rs := &ProjectsLocationsGlobalService{s: s} rs.ConnectivityTests = NewProjectsLocationsGlobalConnectivityTestsService(s) rs.Operations = NewProjectsLocationsGlobalOperationsService(s) return rs } type ProjectsLocationsGlobalService struct { s *Service ConnectivityTests *ProjectsLocationsGlobalConnectivityTestsService Operations *ProjectsLocationsGlobalOperationsService } func NewProjectsLocationsGlobalConnectivityTestsService(s *Service) *ProjectsLocationsGlobalConnectivityTestsService { rs := &ProjectsLocationsGlobalConnectivityTestsService{s: s} return rs } type ProjectsLocationsGlobalConnectivityTestsService struct { s *Service } func NewProjectsLocationsGlobalOperationsService(s *Service) *ProjectsLocationsGlobalOperationsService { rs := &ProjectsLocationsGlobalOperationsService{s: s} return rs } type ProjectsLocationsGlobalOperationsService struct { s *Service } // AbortInfo: Details of the final state "abort" and associated // resource. type AbortInfo struct { // Cause: Causes that the analysis is aborted. // // Possible values: // "CAUSE_UNSPECIFIED" - Cause is unspecified. // "UNKNOWN_NETWORK" - Aborted due to unknown network. // The reachability analysis cannot proceed because the user does not // have // access to the host project's network configurations, including // firewall // rules and routes. This happens when the project is a service project // and // the endpoints being traced are in the host project's network. // "UNKNOWN_IP" - Aborted because the IP address(es) are unknown. // "UNKNOWN_PROJECT" - Aborted because no project information can be // derived from the test // input. // "PERMISSION_DENIED" - Aborted because the user lacks the permission // to access all or part of // the network configurations required to run the test. // "NO_SOURCE_LOCATION" - Aborted because no valid source endpoint is // derived from the input test // request. // "INVALID_ARGUMENT" - Aborted because the source and/or destination // endpoint specified in // the test are invalid. The possible reasons that an endpoint // is // invalid include: malformed IP address; nonexistent instance // or // network URI; IP address not in the range of specified network URI; // and // instance not owning the network interface in the specified network. // "NO_EXTERNAL_IP" - Aborted because traffic is sent from a public IP // to an instance without // an external IP. // "UNINTENDED_DESTINATION" - Aborted because none of the traces // matches destination information // specified in the input test request. // "TRACE_TOO_LONG" - Aborted because the number of steps in the trace // exceeding a certain // limit which may be caused by routing loop. Cause string `json:"cause,omitempty"` // ResourceUri: URI of the resource that caused the abort. ResourceUri string `json:"resourceUri,omitempty"` // ForceSendFields is a list of field names (e.g. "Cause") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Cause") to include in API // requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *AbortInfo) MarshalJSON() ([]byte, error) { type NoMethod AbortInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. // The configuration determines which permission types are logged, and // what // identities, if any, are exempted from logging. // An AuditConfig must have one or more AuditLogConfigs. // // If there are AuditConfigs for both `allServices` and a specific // service, // the union of the two AuditConfigs is used for that service: the // log_types // specified in each AuditConfig are enabled, and the exempted_members // in each // AuditLogConfig are exempted. // // Example Policy with multiple AuditConfigs: // // { // "audit_configs": [ // { // "service": "allServices" // "audit_log_configs": [ // { // "log_type": "DATA_READ", // "exempted_members": [ // "user:[email protected]" // ] // }, // { // "log_type": "DATA_WRITE", // }, // { // "log_type": "ADMIN_READ", // } // ] // }, // { // "service": "sampleservice.googleapis.com" // "audit_log_configs": [ // { // "log_type": "DATA_READ", // }, // { // "log_type": "DATA_WRITE", // "exempted_members": [ // "user:[email protected]" // ] // } // ] // } // ] // } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and // ADMIN_READ // logging. It also exempts [email protected] from DATA_READ logging, // and // [email protected] from DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` // Service: Specifies a service that will be enabled for audit // logging. // For example, `storage.googleapis.com`, // `cloudsql.googleapis.com`. // `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AuditLogConfigs") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of // permissions. 
// Example: // // { // "audit_log_configs": [ // { // "log_type": "DATA_READ", // "exempted_members": [ // "user:[email protected]" // ] // }, // { // "log_type": "DATA_WRITE", // } // ] // } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while // exempting // [email protected] from DATA_READ logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging // for this type of // permission. // Follows the same format of Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. // // Possible values: // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy // "DATA_WRITE" - Data writes. Example: CloudSQL Users create // "DATA_READ" - Data reads. Example: CloudSQL Users list LogType string `json:"logType,omitempty"` // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ExemptedMembers") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Binding: Associates `members` with a `role`. type Binding struct { // Condition: The condition that is associated with this binding. // NOTE: An unsatisfied condition will not allow user access via // current // binding. Different bindings, including their conditions, are // examined // independently. Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud // Platform resource. // `members` can have the following values: // // * `allUsers`: A special identifier that represents anyone who is // on the internet; with or without a Google account. // // * `allAuthenticatedUsers`: A special identifier that represents // anyone // who is authenticated with a Google account or a service // account. // // * `user:{emailid}`: An email address that represents a specific // Google // account. For example, `[email protected]` . // // // * `serviceAccount:{emailid}`: An email address that represents a // service // account. For example, // `[email protected]`. // // * `group:{emailid}`: An email address that represents a Google // group. // For example, `[email protected]`. // // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus // unique // identifier) representing a user that has been recently deleted. // For // example, `[email protected]?uid=123456789012345678901`. If the // user is // recovered, this value reverts to `user:{emailid}` and the // recovered user // retains the role in the binding. 
// // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus // unique identifier) representing a service account that has been // recently // deleted. For example, // // `[email protected]?uid=123456789012345678901`. // // If the service account is undeleted, this value reverts to // `serviceAccount:{emailid}` and the undeleted service account // retains the // role in the binding. // // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus // unique // identifier) representing a Google group that has been recently // deleted. For example, // `[email protected]?uid=123456789012345678901`. If // the group is recovered, this value reverts to `group:{emailid}` // and the // recovered group retains the role in the binding. // // // * `domain:{domain}`: The G Suite domain (primary) that represents all // the // users of that domain. For example, `google.com` or // `example.com`. // // Members []string `json:"members,omitempty"` // Role: Role that is assigned to `members`. // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Condition") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CancelOperationRequest: The request message for // Operations.CancelOperation. type CancelOperationRequest struct { } // ConnectivityTest: A Connectivity Test for a network reachability // analysis. type ConnectivityTest struct { // CreateTime: Output only. The time the test was created. CreateTime string `json:"createTime,omitempty"` // Description: The user-supplied description of the Connectivity // Test. // Maximum of 512 characters. Description string `json:"description,omitempty"` // Destination: Required. Destination specification of the Connectivity // Test. // // You can use a combination of destination IP address, Compute // Engine // VM instance, or VPC network to uniquely identify the // destination // location. // // Even if the destination IP address is not unique, the source // IP // location is unique. Usually, the analysis can infer the // destination // endpoint from route information. // // If the destination you specify is a VM instance and the instance // has // multiple network interfaces, then you must also specify either // a destination IP address or VPC network to identify the // destination // interface. // // A reachability analysis proceeds even if the destination location // is // ambiguous. However, the result can include endpoints that you // don't // intend to test. 
Destination *Endpoint `json:"destination,omitempty"` // DisplayName: Output only. The display name of a Connectivity Test. DisplayName string `json:"displayName,omitempty"` // Labels: Resource labels to represent user-provided metadata. Labels map[string]string `json:"labels,omitempty"` // Name: Required. Unique name of the resource using the form: // `projects/{project_id}/tests/{test_id}` Name string `json:"name,omitempty"` // Protocol: IP Protocol of the test. When not provided, "TCP" is // assumed. Protocol string `json:"protocol,omitempty"` // ReachabilityDetails: Output only. The reachability details of this // test from the latest run. // The details are updated when creating a new test, updating // an // existing test, or triggering a one-time rerun of an existing test. ReachabilityDetails *ReachabilityDetails `json:"reachabilityDetails,omitempty"` // RelatedProjects: Other projects that may be relevant for reachability // analysis. // This is applicable to scenarios where a test can cross project // boundaries. RelatedProjects []string `json:"relatedProjects,omitempty"` // Source: Required. Source specification of the Connectivity Test. // // You can use a combination of source IP address, virtual machine // (VM) instance, or Compute Engine network to uniquely identify // the source location. // // Examples: // If the source IP address is an internal IP address within a Google // Cloud // Virtual Private Cloud (VPC) network, then you must also specify the // VPC // network. Otherwise, specify the VM instance, which already contains // its // internal IP address and VPC network information. // // If the source of the test is within an on-premises network, then you // must // provide the destination VPC network. // // If the source endpoint is a Compute Engine VM instance with // multiple // network interfaces, the instance itself is not sufficient to identify // the // endpoint. So, you must also specify the source IP address or VPC // network. // // A reachability analysis proceeds even if the source location // is // ambiguous. However, the test result may include endpoints that you // don't // intend to test. Source *Endpoint `json:"source,omitempty"` // UpdateTime: Output only. The time the test's configuration was // updated. UpdateTime string `json:"updateTime,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "CreateTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CreateTime") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *ConnectivityTest) MarshalJSON() ([]byte, error) { type NoMethod ConnectivityTest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DeliverInfo: Details of the final state "deliver" and associated // resource. type DeliverInfo struct { // ResourceUri: URI of the resource that the packet is delivered to. ResourceUri string `json:"resourceUri,omitempty"` // Target: Target type where the packet is delivered to. // // Possible values: // "TARGET_UNSPECIFIED" - Target not specified. // "INSTANCE" - Target is a Compute Engine instance. // "INTERNET" - Target is the Internet. // "GOOGLE_API" - Target is a Google API. Target string `json:"target,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceUri") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ResourceUri") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *DeliverInfo) MarshalJSON() ([]byte, error) { type NoMethod DeliverInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // DropInfo: Details of the final state "drop" and associated resource. type DropInfo struct { // Cause: Cause that the packet is dropped. // // Possible values: // "CAUSE_UNSPECIFIED" - Cause is unspecified. // "UNKNOWN_EXTERNAL_ADDRESS" - Destination external address cannot be // resolved to a known target. // "FOREIGN_IP_DISALLOWED" - A Compute Engine instance can only send // or receive a packet with a // foreign IP if ip_forward is enabled. // "FIREWALL_RULE" - Dropped due to a firewall rule unless allowed due // to connection tracking. // "NO_ROUTE" - Dropped due to no routes. // "ROUTE_BLACKHOLE" - Dropped due to invalid route. Route's next hop // is a blackhole. // "ROUTE_WRONG_NETWORK" - Packet is sent to a wrong (unintended) // network. Example: user traces a // packet from VM1:Network1 to VM2:Network2, however, the route // configured // in Network1 sends the packet destined for VM2's IP address to // Network3. // "PRIVATE_TRAFFIC_TO_INTERNET" - Packet with internal destination // address sent to Internet gateway. // "PRIVATE_GOOGLE_ACCESS_DISALLOWED" - Instance with only an internal // IP tries to access Google API and // Services, and private Google access is not enabled. // "NO_EXTERNAL_ADDRESS" - Instance with only internal IP tries to // access external hosts, but // Cloud NAT is not enabled in the subnet, unless special // configurations // on a VM allow this connection. See [Special Configurations for // VM // instances](/vpc/docs/special-configurations) for details. // "UNKNOWN_INTERNAL_ADDRESS" - Destination internal address cannot be // resolved to a known target. // "FORWARDING_RULE_MISMATCH" - Forwarding rule's protocol and ports // do not match the packet header. 
// "FORWARDING_RULE_NO_INSTANCES" - Forwarding rule does not have // backends configured. // "FIREWALL_BLOCKING_LOAD_BALANCER_BACKEND_HEALTH_CHECK" - Firewalls // block the health check probes to the backends and cause // the backends to be unavailable for traffic from the load // balancer. // See [Health check firewall // rules](/load-balancing/docs/ // health-checks#firewall_rules) for more details. // "INSTANCE_NOT_RUNNING" - Packet is sent from or to a Compute Engine // instance that is not in a // running state. // "TRAFFIC_TYPE_BLOCKED" - The type of traffic is blocked and the // user cannot configure a firewall // rule to enable it. See [Always blocked // traffic](/vpc/docs/firewalls# // blockedtraffic) for more details. // "GKE_MASTER_UNAUTHORIZED_ACCESS" - Access to GKE master's endpoint // is not authorized. // See [Access to the cluster // endpoints](/kubernetes-engine/docs/how-to/ // private-clusters#access_to_ // the_cluster_endpoints) for more details. Cause string `json:"cause,omitempty"` // ResourceUri: URI of the resource that caused the drop. ResourceUri string `json:"resourceUri,omitempty"` // ForceSendFields is a list of field names (e.g. "Cause") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Cause") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *DropInfo) MarshalJSON() ([]byte, error) { type NoMethod DropInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining // duplicated // empty messages in your APIs. A typical example is to use it as the // request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns // (google.protobuf.Empty); // } // // The JSON representation for `Empty` is empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` } // Endpoint: Source or destination of the Connectivity Test. type Endpoint struct { // Instance: A Compute Engine instance URI. Instance string `json:"instance,omitempty"` // IpAddress: The IP address of the endpoint, which can be an external // or internal IP. // An IPv6 address is only allowed when the test's destination is // a // [global load balancer // VIP](/load-balancing/docs/load-balancing-overview). IpAddress string `json:"ipAddress,omitempty"` // Network: A Compute Engine network URI. Network string `json:"network,omitempty"` // NetworkType: Type of the network where the endpoint is // located. // Applicable only to source endpoint, as destination network type can // be // inferred from the source. // // Possible values: // "NETWORK_TYPE_UNSPECIFIED" - Default type if unspecified. 
// "GCP_NETWORK" - A network hosted within Google Cloud Platform. // To receive more detailed output, specify the URI for the source // or // destination network. // "NON_GCP_NETWORK" - A network hosted outside of Google Cloud // Platform. // This can be an on-premises network, or a network hosted by another // cloud // provider. NetworkType string `json:"networkType,omitempty"` // Port: The IP protocol port of the endpoint. // Only applicable when protocol is TCP or UDP. Port int64 `json:"port,omitempty"` // ProjectId: Project ID where the endpoint is located. // The Project ID can be derived from the URI if you provide a VM // instance or // network URI. // The following are two cases where you must provide the project ID: // 1. Only the IP address is specified, and the IP address is within a // GCP // project. // 2. When you are using Shared VPC and the IP address that you provide // is // from the service project. In this case, the network that the IP // address // resides in is defined in the host project. ProjectId string `json:"projectId,omitempty"` // ForceSendFields is a list of field names (e.g. "Instance") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Instance") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Endpoint) MarshalJSON() ([]byte, error) { type NoMethod Endpoint raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // EndpointInfo: For display only. The specification of the endpoints // for the test. // EndpointInfo is derived from source and destination Endpoint and // validated // by the backend data plane model. type EndpointInfo struct { // DestinationIp: Destination IP address. DestinationIp string `json:"destinationIp,omitempty"` // DestinationNetworkUri: URI of the network where this packet is sent // to. DestinationNetworkUri string `json:"destinationNetworkUri,omitempty"` // DestinationPort: Destination port. Only valid when protocol is TCP or // UDP. DestinationPort int64 `json:"destinationPort,omitempty"` // Protocol: IP protocol in string format, for example: "TCP", "UDP", // "ICMP". Protocol string `json:"protocol,omitempty"` // SourceIp: Source IP address. SourceIp string `json:"sourceIp,omitempty"` // SourceNetworkUri: URI of the network where this packet originates // from. SourceNetworkUri string `json:"sourceNetworkUri,omitempty"` // SourcePort: Source port. Only valid when protocol is TCP or UDP. SourcePort int64 `json:"sourcePort,omitempty"` // ForceSendFields is a list of field names (e.g. "DestinationIp") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DestinationIp") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *EndpointInfo) MarshalJSON() ([]byte, error) { type NoMethod EndpointInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Expr: Represents a textual expression in the Common Expression // Language (CEL) // syntax. CEL is a C-like expression language. The syntax and semantics // of CEL // are documented at https://github.com/google/cel-spec. // // Example (Comparison): // // title: "Summary size limit" // description: "Determines if a summary is less than 100 chars" // expression: "document.summary.size() < 100" // // Example (Equality): // // title: "Requestor is owner" // description: "Determines if requestor is the document owner" // expression: "document.owner == // request.auth.claims.email" // // Example (Logic): // // title: "Public documents" // description: "Determine whether the document should be publicly // visible" // expression: "document.type != 'private' && document.type != // 'internal'" // // Example (Data Manipulation): // // title: "Notification string" // description: "Create a notification string with a timestamp." // expression: "'New message received at ' + // string(document.create_time)" // // The exact variables and functions that may be referenced within an // expression // are determined by the service that evaluates it. See the // service // documentation for additional information. type Expr struct { // Description: Optional. Description of the expression. This is a // longer text which // describes the expression, e.g. when hovered over it in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in Common // Expression Language // syntax. Expression string `json:"expression,omitempty"` // Location: Optional. String indicating the location of the expression // for error // reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: Optional. Title for the expression, i.e. a short string // describing // its purpose. This can be used e.g. in UIs which allow to enter // the // expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FirewallInfo: For display only. Metadata associated with a Compute // Engine firewall rule. type FirewallInfo struct { // Action: Possible values: ALLOW, DENY Action string `json:"action,omitempty"` // Direction: Possible values: INGRESS, EGRESS Direction string `json:"direction,omitempty"` // DisplayName: Name of a Compute Engine firewall rule. DisplayName string `json:"displayName,omitempty"` // NetworkUri: URI of a Compute Engine network. NetworkUri string `json:"networkUri,omitempty"` // Priority: Priority of the firewall rule. Priority int64 `json:"priority,omitempty"` // TargetServiceAccounts: Target service accounts of the firewall rule. TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` // TargetTags: Target tags of the firewall rule. TargetTags []string `json:"targetTags,omitempty"` // Uri: URI of a Compute Engine firewall rule. // Implied default rule does not have URI. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Action") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *FirewallInfo) MarshalJSON() ([]byte, error) { type NoMethod FirewallInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardInfo: Details of the final state "forward" and associated // resource. type ForwardInfo struct { // ResourceUri: URI of the resource that the packet is forwarded to. ResourceUri string `json:"resourceUri,omitempty"` // Target: Target type where this packet is forwarded to. // // Possible values: // "TARGET_UNSPECIFIED" - Target not specified. // "PEERING_VPC" - Forwarded to a VPC peering network. // "VPN_GATEWAY" - Forwarded to a Cloud VPN gateway. // "INTERCONNECT" - Forwarded to an Cloud Interconnect connection. // "GKE_MASTER" - Forwarded to a Google Kubernetes Engine Container // cluster master. // "IMPORTED_CUSTOM_ROUTE_NEXT_HOP" - Forwarded to the next hop of a // custom route imported from a peering VPC. Target string `json:"target,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceUri") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ResourceUri") to include // in API requests with the JSON null value. 
By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ForwardInfo) MarshalJSON() ([]byte, error) { type NoMethod ForwardInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ForwardingRuleInfo: For display only. Metadata associated with a // Compute Engine forwarding rule. type ForwardingRuleInfo struct { // DisplayName: Name of a Compute Engine forwarding rule. DisplayName string `json:"displayName,omitempty"` // MatchedPortRange: Port range defined in the forwarding rule that // matches the test. MatchedPortRange string `json:"matchedPortRange,omitempty"` // MatchedProtocol: Protocol defined in the forwarding rule that matches // the test. MatchedProtocol string `json:"matchedProtocol,omitempty"` // NetworkUri: Network URI. Only valid for Internal Load Balancer. NetworkUri string `json:"networkUri,omitempty"` // Target: Target type of the forwarding rule. Target string `json:"target,omitempty"` // Uri: URI of a Compute Engine forwarding rule. Uri string `json:"uri,omitempty"` // Vip: VIP of the forwarding rule. Vip string `json:"vip,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ForwardingRuleInfo) MarshalJSON() ([]byte, error) { type NoMethod ForwardingRuleInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // InstanceInfo: For display only. Metadata associated with a Compute // Engine instance. type InstanceInfo struct { // DisplayName: Name of a Compute Engine instance. DisplayName string `json:"displayName,omitempty"` // ExternalIp: External IP address of the network interface. ExternalIp string `json:"externalIp,omitempty"` // Interface: Name of the network interface of a Compute Engine // instance. Interface string `json:"interface,omitempty"` // InternalIp: Internal IP address of the network interface. InternalIp string `json:"internalIp,omitempty"` // NetworkTags: Network tags configured on the instance. NetworkTags []string `json:"networkTags,omitempty"` // NetworkUri: URI of a Compute Engine network. NetworkUri string `json:"networkUri,omitempty"` // ServiceAccount: Service account authorized for the instance. ServiceAccount string `json:"serviceAccount,omitempty"` // Uri: URI of a Compute Engine instance. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *InstanceInfo) MarshalJSON() ([]byte, error) { type NoMethod InstanceInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListConnectivityTestsResponse: Response for the // `ListConnectivityTests` method. type ListConnectivityTestsResponse struct { // NextPageToken: Page token to fetch the next set of Connectivity // Tests. NextPageToken string `json:"nextPageToken,omitempty"` // Resources: List of Connectivity Tests. Resources []*ConnectivityTest `json:"resources,omitempty"` // Unreachable: Locations that could not be reached (when querying all // locations with `-`). Unreachable []string `json:"unreachable,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ListConnectivityTestsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListConnectivityTestsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListLocationsResponse: The response message for // Locations.ListLocations. type ListLocationsResponse struct { // Locations: A list of locations that matches the specified filter in // the request. Locations []*Location `json:"locations,omitempty"` // NextPageToken: The standard List next-page token. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Locations") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Locations") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListLocationsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListOperationsResponse: The response message for // Operations.ListOperations. type ListOperationsResponse struct { // NextPageToken: The standard List next-page token. NextPageToken string `json:"nextPageToken,omitempty"` // Operations: A list of operations that matches the specified filter in // the request. Operations []*Operation `json:"operations,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOperationsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // LoadBalancerBackend: For display only. Metadata associated with a // specific load balancer backend. type LoadBalancerBackend struct { // DisplayName: Name of a Compute Engine instance or network endpoint. DisplayName string `json:"displayName,omitempty"` // HealthCheckAllowingFirewallRules: A list of firewall rule URIs // allowing probes from health check IP ranges. HealthCheckAllowingFirewallRules []string `json:"healthCheckAllowingFirewallRules,omitempty"` // HealthCheckBlockingFirewallRules: A list of firewall rule URIs // blocking probes from health check IP ranges. HealthCheckBlockingFirewallRules []string `json:"healthCheckBlockingFirewallRules,omitempty"` // HealthCheckFirewallState: State of the health check firewall // configuration. // // Possible values: // "HEALTH_CHECK_FIREWALL_STATE_UNSPECIFIED" - State is unspecified. // Default state if not populated. // "CONFIGURED" - There are configured firewall rules to allow health // check probes to the // backend. // "MISCONFIGURED" - There are firewall rules configured to allow // partial health check ranges // or block all health check ranges. // If a health check probe is sent from denied IP ranges, // the health check to the backend will fail. 
Then, the backend will // be // marked unhealthy and will not receive traffic sent to the load // balancer. HealthCheckFirewallState string `json:"healthCheckFirewallState,omitempty"` // Uri: URI of a Compute Engine instance or network endpoint. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *LoadBalancerBackend) MarshalJSON() ([]byte, error) { type NoMethod LoadBalancerBackend raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // LoadBalancerInfo: For display only. Metadata associated with a load // balancer. type LoadBalancerInfo struct { // BackendType: Type of load balancer's backend configuration. // // Possible values: // "BACKEND_TYPE_UNSPECIFIED" - Type is unspecified. // "BACKEND_SERVICE" - Backend Service as the load balancer's backend. // "TARGET_POOL" - Target Pool as the load balancer's backend. BackendType string `json:"backendType,omitempty"` // BackendUri: Backend configuration URI. BackendUri string `json:"backendUri,omitempty"` // Backends: Information for the loadbalancer backends. Backends []*LoadBalancerBackend `json:"backends,omitempty"` // HealthCheckUri: URI of the health check for the load balancer. HealthCheckUri string `json:"healthCheckUri,omitempty"` // LoadBalancerType: Type of the load balancer. // // Possible values: // "LOAD_BALANCER_TYPE_UNSPECIFIED" - Type is unspecified. // "INTERNAL_TCP_UDP" - Internal TCP/UDP load balancer. // "NETWORK_TCP_UDP" - Network TCP/UDP load balancer. // "HTTP_PROXY" - HTTP(S) proxy load balancer. // "TCP_PROXY" - TCP proxy load balancer. // "SSL_PROXY" - SSL proxy load balancer. LoadBalancerType string `json:"loadBalancerType,omitempty"` // ForceSendFields is a list of field names (e.g. "BackendType") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BackendType") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *LoadBalancerInfo) MarshalJSON() ([]byte, error) { type NoMethod LoadBalancerInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Location: A resource that represents Google Cloud Platform location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. // For example, "Tokyo". DisplayName string `json:"displayName,omitempty"` // Labels: Cross-service attributes for the location. For example // // {"cloud.googleapis.com/region": "us-east1"} Labels map[string]string `json:"labels,omitempty"` // LocationId: The canonical id for this location. For example: // "us-east1". LocationId string `json:"locationId,omitempty"` // Metadata: Service-specific metadata. For example the available // capacity at the given // location. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: Resource name for the location, which may vary between // implementations. // For example: "projects/example-project/locations/us-east1" Name string `json:"name,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Location) MarshalJSON() ([]byte, error) { type NoMethod Location raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // NetworkInfo: For display only. Metadata associated with a Compute // Engine network. type NetworkInfo struct { // DisplayName: Name of a Compute Engine network. DisplayName string `json:"displayName,omitempty"` // MatchedIpRange: The IP range that matches the test. MatchedIpRange string `json:"matchedIpRange,omitempty"` // Uri: URI of a Compute Engine network. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *NetworkInfo) MarshalJSON() ([]byte, error) { type NoMethod NetworkInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Operation: This resource represents a long-running operation that is // the result of a // network API call. type Operation struct { // Done: If the value is `false`, it means the operation is still in // progress. // If `true`, the operation is completed, and either `error` or // `response` is // available. Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or // cancellation. Error *Status `json:"error,omitempty"` // Metadata: Service-specific metadata associated with the operation. // It typically // contains progress information and common metadata such as create // time. // Some services might not provide such metadata. Any method that // returns a // long-running operation should document the metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` // Name: The server-assigned name, which is only unique within the same // service that // originally returns it. If you use the default HTTP mapping, // the // `name` should be a resource name ending with // `operations/{unique_id}`. Name string `json:"name,omitempty"` // Response: The normal response of the operation in case of success. // If the original // method returns no data on success, such as `Delete`, the response // is // `google.protobuf.Empty`. If the original method is // standard // `Get`/`Create`/`Update`, the response should be the resource. For // other // methods, the response should have the type `XxxResponse`, where // `Xxx` // is the original method name. For example, if the original method // name // is `TakeSnapshot()`, the inferred response type // is // `TakeSnapshotResponse`. Response googleapi.RawMessage `json:"response,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Done") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Done") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Operation) MarshalJSON() ([]byte, error) { type NoMethod Operation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // OperationMetadata: Metadata describing an Operation type OperationMetadata struct { // ApiVersion: API version. ApiVersion string `json:"apiVersion,omitempty"` // CancelRequested: Specifies if cancellation was requested for the // operation. CancelRequested bool `json:"cancelRequested,omitempty"` // CreateTime: The time the operation was created. CreateTime string `json:"createTime,omitempty"` // EndTime: The time the operation finished running. 
EndTime string `json:"endTime,omitempty"` // StatusDetail: Human-readable status of the operation, if any. StatusDetail string `json:"statusDetail,omitempty"` // Target: Target of the operation - for // example // projects/project-1/connectivityTests/test-1 Target string `json:"target,omitempty"` // Verb: Name of the verb executed by the operation. Verb string `json:"verb,omitempty"` // ForceSendFields is a list of field names (e.g. "ApiVersion") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ApiVersion") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *OperationMetadata) MarshalJSON() ([]byte, error) { type NoMethod OperationMetadata raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Policy: An Identity and Access Management (IAM) policy, which // specifies access // controls for Google Cloud resources. // // // A `Policy` is a collection of `bindings`. A `binding` binds one or // more // `members` to a single `role`. Members can be user accounts, service // accounts, // Google groups, and domains (such as G Suite). A `role` is a named // list of // permissions; each `role` can be an IAM predefined role or a // user-created // custom role. // // Optionally, a `binding` can specify a `condition`, which is a // logical // expression that allows access to a resource only if the expression // evaluates // to `true`. A condition can add constraints based on attributes of // the // request, the resource, or both. // // **JSON example:** // // { // "bindings": [ // { // "role": "roles/resourcemanager.organizationAdmin", // "members": [ // "user:[email protected]", // "group:[email protected]", // "domain:google.com", // // "serviceAccount:[email protected]" // ] // }, // { // "role": "roles/resourcemanager.organizationViewer", // "members": ["user:[email protected]"], // "condition": { // "title": "expirable access", // "description": "Does not grant access after Sep 2020", // "expression": "request.time < // timestamp('2020-10-01T00:00:00.000Z')", // } // } // ], // "etag": "BwWWja0YfJA=", // "version": 3 // } // // **YAML example:** // // bindings: // - members: // - user:[email protected] // - group:[email protected] // - domain:google.com // - serviceAccount:[email protected] // role: roles/resourcemanager.organizationAdmin // - members: // - user:[email protected] // role: roles/resourcemanager.organizationViewer // condition: // title: expirable access // description: Does not grant access after Sep 2020 // expression: request.time < // timestamp('2020-10-01T00:00:00.000Z') // - etag: BwWWja0YfJA= // - version: 3 // // For a description of IAM and its features, see the // [IAM documentation](https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. 
AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. Optionally, may // specify a // `condition` that determines how and when the `bindings` are applied. // Each // of the `bindings` must contain at least one member. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to // help // prevent simultaneous updates of a policy from overwriting each // other. // It is strongly suggested that systems make use of the `etag` in // the // read-modify-write cycle to perform policy updates in order to avoid // race // conditions: An `etag` is returned in the response to `getIamPolicy`, // and // systems are expected to put that etag in the request to // `setIamPolicy` to // ensure that their change will be applied to the same version of the // policy. // // **Important:** If you use IAM Conditions, you must include the `etag` // field // whenever you call `setIamPolicy`. If you omit this field, then IAM // allows // you to overwrite a version `3` policy with a version `1` policy, and // all of // the conditions in the version `3` policy are lost. Etag string `json:"etag,omitempty"` // Version: Specifies the format of the policy. // // Valid values are `0`, `1`, and `3`. Requests that specify an invalid // value // are rejected. // // Any operation that affects conditional role bindings must specify // version // `3`. This requirement applies to the following operations: // // * Getting a policy that includes a conditional role binding // * Adding a conditional role binding to a policy // * Changing a conditional role binding in a policy // * Removing any role binding, with or without a condition, from a // policy // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` // field // whenever you call `setIamPolicy`. If you omit this field, then IAM // allows // you to overwrite a version `3` policy with a version `1` policy, and // all of // the conditions in the version `3` policy are lost. // // If a policy does not include any conditions, operations on that // policy may // specify any valid version or leave the field unset. Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "AuditConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AuditConfigs") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ReachabilityDetails: The details of reachability state from the // latest run. 
type ReachabilityDetails struct { // Error: The details of a failure or a cancellation of reachability // analysis. Error *Status `json:"error,omitempty"` // Result: The overall reachability result of the test. // // Possible values: // "RESULT_UNSPECIFIED" - Result is not specified. // "REACHABLE" - Packet originating from source is expected to reach // destination. // "UNREACHABLE" - Packet originating from source is expected to be // dropped before // reaching destination. // "AMBIGUOUS" - If the source and destination endpoint does not // uniquely identify // the test location in the network, and the reachability result // contains // multiple traces with mixed reachable and unreachable states, then // this // result is returned. // "UNDETERMINED" - The reachability could not be determined. Possible // reasons are: // // * Analysis is aborted due to permission error. User does not have // read // permission to the projects listed in the test. // * Analysis is aborted due to internal errors. // * Analysis is partially complete based on configurations where the // user // has permission. // The Final state indicates that the packet is forwarded to // another // network where the user has no permission to access the // configurations. Result string `json:"result,omitempty"` // Traces: Result may contain a list of traces if a test has multiple // possible // paths in the network, such as when destination endpoint is a load // balancer // with multiple backends. Traces []*Trace `json:"traces,omitempty"` // VerifyTime: The time the reachability state was verified. VerifyTime string `json:"verifyTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Error") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Error") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ReachabilityDetails) MarshalJSON() ([]byte, error) { type NoMethod ReachabilityDetails raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // RerunConnectivityTestRequest: Request for the `RerunConnectivityTest` // method. type RerunConnectivityTestRequest struct { } // RouteInfo: For display only. Metadata associated with a Compute // Engine route. type RouteInfo struct { // DestIpRange: Destination IP range of the route. DestIpRange string `json:"destIpRange,omitempty"` // DisplayName: Name of a Compute Engine route. DisplayName string `json:"displayName,omitempty"` // InstanceTags: Instance tags of the route. InstanceTags []string `json:"instanceTags,omitempty"` // NetworkUri: URI of a Compute Engine network. NetworkUri string `json:"networkUri,omitempty"` // NextHop: Next hop of the route. NextHop string `json:"nextHop,omitempty"` // NextHopType: Type of next hop. // // Possible values: // "NEXT_HOP_TYPE_UNSPECIFIED" - Unspecified type. Default value. 
// "NEXT_HOP_IP" - Next hop is an IP address. // "NEXT_HOP_INSTANCE" - Next hop is a Compute Engine instance. // "NEXT_HOP_NETWORK" - Next hop is a VPC network gateway. // "NEXT_HOP_PEERING" - Next hop is a peering VPC. // "NEXT_HOP_INTERCONNECT" - Next hop is an interconnect. // "NEXT_HOP_VPN_TUNNEL" - Next hop is a VPN tunnel. // "NEXT_HOP_VPN_GATEWAY" - Next hop is a VPN Gateway. This scenario // only happens when tracing // connectivity from an on-premises network to GCP through a VPN. // The // analysis simulates a packet departing from the on-premises // network // through a VPN tunnel and arrives at a Cloud VPN gateway. // "NEXT_HOP_INTERNET_GATEWAY" - Next hop is an internet gateway. // "NEXT_HOP_BLACKHOLE" - Next hop is blackhole; that is, the next hop // either does not exist or is // not running. NextHopType string `json:"nextHopType,omitempty"` // Priority: Priority of the route. Priority int64 `json:"priority,omitempty"` // RouteType: Type of route. // // Possible values: // "ROUTE_TYPE_UNSPECIFIED" - Unspecified type. Default value. // "SUBNET" - Route is a subnet route automatically created by the // system. // "STATIC" - Static route created by the user including the default // route to the // Internet. // "DYNAMIC" - Dynamic route exchanged between BGP peers. // "PEERING_SUBNET" - A subnet route received from peering network. // "PEERING_STATIC" - A static route received from peering network. // "PEERING_DYNAMIC" - A dynamic route received from peering network. RouteType string `json:"routeType,omitempty"` // Uri: URI of a Compute Engine route. // Dynamic route from cloud router does not have a URI. // Advertised route from Google Cloud VPC to on-premises network also // does // not have a URI. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "DestIpRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DestIpRange") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *RouteInfo) MarshalJSON() ([]byte, error) { type NoMethod RouteInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the // `resource`. The size of // the policy is limited to a few 10s of KB. An empty policy is a // valid policy but certain Cloud Platform services (such as // Projects) // might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the // policy to modify. Only // the fields in the mask will be modified. If no mask is provided, // the // following default mask is used: // paths: "bindings, etag" // This field is only used by Cloud IAM. 
UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Policy") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Status: The `Status` type defines a logical error model that is // suitable for // different programming environments, including REST APIs and RPC APIs. // It is // used by [gRPC](https://github.com/grpc). Each `Status` message // contains // three pieces of data: error code, error message, and error // details. // // You can find out more about this error model and how to work with it // in the // [API Design Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. Code int64 `json:"code,omitempty"` // Details: A list of messages that carry the error details. There is a // common set of // message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` // Message: A developer-facing error message, which should be in // English. Any // user-facing error message should be localized and sent in // the // google.rpc.Status.details field, or localized by the client. Message string `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Status) MarshalJSON() ([]byte, error) { type NoMethod Status raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Step: A simulated forwarding path is composed of multiple steps. // Each step has a well-defined state and an associated configuration. type Step struct { // Abort: Display info of the final state "abort" and reason. Abort *AbortInfo `json:"abort,omitempty"` // CausesDrop: This is a step that leads to the final state Drop. 
CausesDrop bool `json:"causesDrop,omitempty"` // Deliver: Display info of the final state "deliver" and reason. Deliver *DeliverInfo `json:"deliver,omitempty"` // Description: A description of the step. Usually this is a summary of // the state. Description string `json:"description,omitempty"` // Drop: Display info of the final state "drop" and reason. Drop *DropInfo `json:"drop,omitempty"` // Endpoint: Display info of the source and destination under // analysis. // The endpoint info in an intermediate state may differ from // the // initial input, as it might be modified by states such as NAT // or Connection Proxy. Endpoint *EndpointInfo `json:"endpoint,omitempty"` // Firewall: Display info of a Compute Engine firewall rule. Firewall *FirewallInfo `json:"firewall,omitempty"` // Forward: Display info of the final state "forward" and reason. Forward *ForwardInfo `json:"forward,omitempty"` // ForwardingRule: Display info of a Compute Engine forwarding rule. ForwardingRule *ForwardingRuleInfo `json:"forwardingRule,omitempty"` // Instance: Display info of a Compute Engine instance. Instance *InstanceInfo `json:"instance,omitempty"` // LoadBalancer: Display info of the load balancers. LoadBalancer *LoadBalancerInfo `json:"loadBalancer,omitempty"` // Network: Display info of a GCP network. Network *NetworkInfo `json:"network,omitempty"` // ProjectId: Project ID that contains the configuration this step is // validating. ProjectId string `json:"projectId,omitempty"` // Route: Display info of a Compute Engine route. Route *RouteInfo `json:"route,omitempty"` // State: Each step is in one of the pre-defined states. // // Possible values: // "STATE_UNSPECIFIED" - Unspecified state. // "START_FROM_INSTANCE" - Initial state: packet originating from a // Compute Engine instance. // An InstanceInfo will be populated with starting instance info. // "START_FROM_INTERNET" - Initial state: packet originating from // Internet. // The endpoint info will be populated. // "START_FROM_PRIVATE_NETWORK" - Initial state: packet originating // from a VPC or on-premises network // with internal source IP. // If the source is a VPC network visible to the user, a // NetworkInfo // will be populated with details of the network. // "APPLY_INGRESS_FIREWALL_RULE" - Config checking state: verify // ingress firewall rule. // "APPLY_EGRESS_FIREWALL_RULE" - Config checking state: verify egress // firewall rule. // "APPLY_ROUTE" - Config checking state: verify route. // "APPLY_FORWARDING_RULE" - Config checking state: match forwarding // rule. // "SPOOFING_APPROVED" - Config checking state: packet sent or // received under foreign IP // address and allowed. // "ARRIVE_AT_INSTANCE" - Forwarding state: arriving at a Compute // Engine instance. // "ARRIVE_AT_INTERNAL_LOAD_BALANCER" - Forwarding state: arriving at // a Compute Engine internal load balancer. // "ARRIVE_AT_EXTERNAL_LOAD_BALANCER" - Forwarding state: arriving at // a Compute Engine external load balancer. // "ARRIVE_AT_VPN_GATEWAY" - Forwarding state: arriving at a Cloud VPN // gateway. // "ARRIVE_AT_VPN_TUNNEL" - Forwarding state: arriving at a Cloud VPN // tunnel. // "NAT" - Transition state: packet header translated. // "PROXY_CONNECTION" - Transition state: original connection is // terminated and a new proxied // connection is initiated. // "DELIVER" - Final state: packet delivered. // "DROP" - Final state: packet dropped. // "FORWARD" - Final state: packet forwarded to a network with an // unknown configuration. // "ABORT" - Final state: analysis is aborted.
// "VIEWER_PERMISSION_MISSING" - Special state: viewer of the test // result does not have permission to // see the configuration in this step. State string `json:"state,omitempty"` // VpnGateway: Display info of a Compute Engine VPN gateway. VpnGateway *VpnGatewayInfo `json:"vpnGateway,omitempty"` // VpnTunnel: Display info of a Compute Engine VPN tunnel. VpnTunnel *VpnTunnelInfo `json:"vpnTunnel,omitempty"` // ForceSendFields is a list of field names (e.g. "Abort") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Abort") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Step) MarshalJSON() ([]byte, error) { type NoMethod Step raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. // Permissions with // wildcards (such as '*' or 'storage.*') are not allowed. For // more // information see // [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` // ForceSendFields is a list of field names (e.g. "Permissions") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Permissions") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that // the caller is // allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Permissions") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Permissions") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Trace: Trace represents one simulated packet forwarding path. // <ul> // <li>Each trace contains multiple ordered steps.</li> // <li>Each step is in a particular state and has an associated // configuration.</li> <li>State is categorized as a final or // non-final // state.</li> <li>Each final state has a reason associated with // it.</li> // <li>Each trace must end with a final state (the last // step).</li> // </ul> // <pre><code> // |---------------------Trace----------------------| // Step1(State) Step2(State) --- StepN(State(final)) // </code></pre> type Trace struct { // EndpointInfo: Derived from the source and destination endpoints // definition, and validated // by the data plane model. // If there are multiple traces starting from different source // locations, then // the endpoint_info may be different between traces. EndpointInfo *EndpointInfo `json:"endpointInfo,omitempty"` // Steps: A trace of a test contains multiple steps from the initial // state to the // final state (delivered, dropped, forwarded, or aborted). // // The steps are ordered by the processing sequence within the // simulated // network state machine. It is critical to preserve the order of the // steps // and avoid reordering or sorting them. Steps []*Step `json:"steps,omitempty"` // ForceSendFields is a list of field names (e.g. "EndpointInfo") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EndpointInfo") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Trace) MarshalJSON() ([]byte, error) { type NoMethod Trace raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // VpnGatewayInfo: For display only. Metadata associated with a Compute // Engine VPN gateway. type VpnGatewayInfo struct { // DisplayName: Name of a VPN gateway. DisplayName string `json:"displayName,omitempty"` // IpAddress: IP address of the VPN gateway. 
IpAddress string `json:"ipAddress,omitempty"` // NetworkUri: URI of a Compute Engine network where the VPN gateway is // configured. NetworkUri string `json:"networkUri,omitempty"` // Region: Name of a GCP region where this VPN gateway is configured. Region string `json:"region,omitempty"` // Uri: URI of a VPN gateway. Uri string `json:"uri,omitempty"` // VpnTunnelUri: A VPN tunnel that is associated with this VPN // gateway. // There may be multiple VPN tunnels configured on a VPN gateway, and // only // the one relevant to the test is displayed. VpnTunnelUri string `json:"vpnTunnelUri,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *VpnGatewayInfo) MarshalJSON() ([]byte, error) { type NoMethod VpnGatewayInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // VpnTunnelInfo: For display only. Metadata associated with a Compute // Engine VPN tunnel. type VpnTunnelInfo struct { // DisplayName: Name of a VPN tunnel. DisplayName string `json:"displayName,omitempty"` // NetworkUri: URI of a Compute Engine network where the VPN tunnel is // configured. NetworkUri string `json:"networkUri,omitempty"` // Region: Name of a GCP region where this VPN tunnel is configured. Region string `json:"region,omitempty"` // RemoteGateway: URI of a VPN gateway at remote end of the tunnel. RemoteGateway string `json:"remoteGateway,omitempty"` // RemoteGatewayIp: Remote VPN gateway's IP address. RemoteGatewayIp string `json:"remoteGatewayIp,omitempty"` // RoutingType: Type of the routing policy. // // Possible values: // "ROUTING_TYPE_UNSPECIFIED" - Unspecified type. Default value. // "ROUTE_BASED" - Route based VPN. // "POLICY_BASED" - Policy based routing. // "DYNAMIC" - Dynamic (BGP) routing. RoutingType string `json:"routingType,omitempty"` // SourceGateway: URI of the VPN gateway at local end of the tunnel. SourceGateway string `json:"sourceGateway,omitempty"` // SourceGatewayIp: Local VPN gateway's IP address. SourceGatewayIp string `json:"sourceGatewayIp,omitempty"` // Uri: URI of a VPN tunnel. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. 
By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *VpnTunnelInfo) MarshalJSON() ([]byte, error) { type NoMethod VpnTunnelInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "networkmanagement.projects.locations.get": type ProjectsLocationsGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Gets information about a location. func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall { c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.get" call. // Exactly one of *Location or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Location.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
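//
// Illustrative usage sketch (editorial addition, not generated code),
// assuming the standard generated Service layout (svc.Projects.Locations),
// a *Service constructed elsewhere (for example with NewService), a
// context.Context named ctx in scope, and the standard Location message with
// a Name field; the project and location names are placeholders.
//
//   loc, err := svc.Projects.Locations.
//       Get("projects/my-project/locations/us-central1").
//       Context(ctx).
//       Do()
//   if err != nil {
//       // Use googleapi.IsNotModified here if IfNoneMatch was set on the call.
//       return err
//   }
//   fmt.Println(loc.Name)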
func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Location{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets information about a location.", // "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}", // "httpMethod": "GET", // "id": "networkmanagement.projects.locations.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Resource name for the location.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "response": { // "$ref": "Location" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.list": type ProjectsLocationsListCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists information about the supported locations for this // service. func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall { c := &ProjectsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Filter sets the optional parameter "filter": The standard list // filter. func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The standard list // page size. func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": The standard list // page token. func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
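//
// Illustrative sketch (editorial addition, not generated code): bounding a
// paginated List call with a timeout and walking every page via Pages. The
// service layout (svc.Projects.Locations) and the Locations field on
// ListLocationsResponse are assumptions based on the standard generated
// shape; the project name is a placeholder.
//
//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//   defer cancel()
//   err := svc.Projects.Locations.List("projects/my-project").
//       Pages(ctx, func(page *ListLocationsResponse) error {
//           for _, loc := range page.Locations {
//               fmt.Println(loc.Name)
//           }
//           return nil
//       })
//   if err != nil {
//       return err
//   }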
func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}/locations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.list" call. // Exactly one of *ListLocationsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ListLocationsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ListLocationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Lists information about the supported locations for this service.", // "flatPath": "v1beta1/projects/{projectsId}/locations", // "httpMethod": "GET", // "id": "networkmanagement.projects.locations.list", // "parameterOrder": [ // "name" // ], // "parameters": { // "filter": { // "description": "The standard list filter.", // "location": "query", // "type": "string" // }, // "name": { // "description": "The resource that owns the locations collection, if applicable.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "pageSize": { // "description": "The standard list page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "The standard list page token.", // "location": "query", // "type": "string" // } // }, // "path": "v1beta1/{+name}/locations", // "response": { // "$ref": "ListLocationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "networkmanagement.projects.locations.global.connectivityTests.create": type ProjectsLocationsGlobalConnectivityTestsCreateCall struct { s *Service parent string connectivitytest *ConnectivityTest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Create: Creates a new Connectivity Test. // After you create a test, the reachability analysis is performed as // part // of the long running operation, which completes when the analysis // completes. // // If the endpoint specifications in `ConnectivityTest` are invalid // (for example, containing non-existent resources in the network, or // you // don't have read permissions to the network configurations of // listed // projects), then the reachability result returns a value of // `UNKNOWN`. // // If the endpoint specifications in `ConnectivityTest` are // incomplete, the reachability result returns a value // of // <code>AMBIGUOUS</code>. For more information, // see the Connectivity Test documentation. func (r *ProjectsLocationsGlobalConnectivityTestsService) Create(parent string, connectivitytest *ConnectivityTest) *ProjectsLocationsGlobalConnectivityTestsCreateCall { c := &ProjectsLocationsGlobalConnectivityTestsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent c.connectivitytest = connectivitytest return c } // TestId sets the optional parameter "testId": Required. The logical // name of the Connectivity Test in your project // with the following restrictions: // // * Must contain only lowercase letters, numbers, and hyphens. // * Must start with a letter. // * Must be between 1-40 characters. // * Must end with a number or a letter. // * Must be unique within the customer project func (c *ProjectsLocationsGlobalConnectivityTestsCreateCall) TestId(testId string) *ProjectsLocationsGlobalConnectivityTestsCreateCall { c.urlParams_.Set("testId", testId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsCreateCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
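//
// Illustrative sketch (editorial addition, not generated code): attaching a
// custom header to a Create request before executing it. The service path
// (svc.Projects.Locations.Global.ConnectivityTests), the header name, the
// resource names, and the test variable (a *ConnectivityTest built by the
// caller) are assumptions or placeholders; ctx is a context.Context in scope,
// and the returned Operation still has to be polled for the analysis result.
//
//   call := svc.Projects.Locations.Global.ConnectivityTests.
//       Create("projects/my-project/locations/global", test).
//       TestId("my-test-1")
//   call.Header().Set("X-Goog-User-Project", "my-quota-project")
//   op, err := call.Context(ctx).Do()
//   if err != nil {
//       return err
//   }
//   _ = op // poll the long-running operation until the analysis completes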
func (c *ProjectsLocationsGlobalConnectivityTestsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.connectivitytest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+parent}/connectivityTests") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.create" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalConnectivityTestsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new Connectivity Test.\nAfter you create a test, the reachability analysis is performed as part\nof the long running operation, which completes when the analysis completes.\n\nIf the endpoint specifications in `ConnectivityTest` are invalid\n(for example, containing non-existent resources in the network, or you\ndon't have read permissions to the network configurations of listed\nprojects), then the reachability result returns a value of `UNKNOWN`.\n\nIf the endpoint specifications in `ConnectivityTest` are\nincomplete, the reachability result returns a value of\n\u003ccode\u003eAMBIGUOUS\u003c/code\u003e. For more information,\nsee the Connectivity Test documentation.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests", // "httpMethod": "POST", // "id": "networkmanagement.projects.locations.global.connectivityTests.create", // "parameterOrder": [ // "parent" // ], // "parameters": { // "parent": { // "description": "Required. The parent resource of the Connectivity Test to create:\n `projects/{project_id}/locations/global`", // "location": "path", // "pattern": "^projects/[^/]+/locations/global$", // "required": true, // "type": "string" // }, // "testId": { // "description": "Required. 
The logical name of the Connectivity Test in your project\nwith the following restrictions:\n\n* Must contain only lowercase letters, numbers, and hyphens.\n* Must start with a letter.\n* Must be between 1-40 characters.\n* Must end with a number or a letter.\n* Must be unique within the customer project", // "location": "query", // "type": "string" // } // }, // "path": "v1beta1/{+parent}/connectivityTests", // "request": { // "$ref": "ConnectivityTest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.connectivityTests.delete": type ProjectsLocationsGlobalConnectivityTestsDeleteCall struct { s *Service name string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Deletes a specific `ConnectivityTest`. func (r *ProjectsLocationsGlobalConnectivityTestsService) Delete(name string) *ProjectsLocationsGlobalConnectivityTestsDeleteCall { c := &ProjectsLocationsGlobalConnectivityTestsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsDeleteCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalConnectivityTestsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalConnectivityTestsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Deletes a specific `ConnectivityTest`.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests/{connectivityTestsId}", // "httpMethod": "DELETE", // "id": "networkmanagement.projects.locations.global.connectivityTests.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. Connectivity Test resource name using the form:\n `projects/{project_id}/connectivityTests/{test_id}`", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/connectivityTests/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.connectivityTests.get": type ProjectsLocationsGlobalConnectivityTestsGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Gets the details of a specific Connectivity Test. func (r *ProjectsLocationsGlobalConnectivityTestsService) Get(name string) *ProjectsLocationsGlobalConnectivityTestsGetCall { c := &ProjectsLocationsGlobalConnectivityTestsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsLocationsGlobalConnectivityTestsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalConnectivityTestsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsGetCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *ProjectsLocationsGlobalConnectivityTestsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.get" call. // Exactly one of *ConnectivityTest or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ConnectivityTest.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalConnectivityTestsGetCall) Do(opts ...googleapi.CallOption) (*ConnectivityTest, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ConnectivityTest{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets the details of a specific Connectivity Test.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests/{connectivityTestsId}", // "httpMethod": "GET", // "id": "networkmanagement.projects.locations.global.connectivityTests.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. `ConnectivityTest` resource name using the form:\n `projects/{project_id}/locations/global/connectivityTests/{test_id}`", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/connectivityTests/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "response": { // "$ref": "ConnectivityTest" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.connectivityTests.getIamPolicy": type ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall struct { s *Service resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // GetIamPolicy: Gets the access control policy for a resource. // Returns an empty policy if the resource exists and does not have a // policy // set. 
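//
// Illustrative sketch (editorial addition, not generated code): requesting
// the policy in format version 3 so that conditional role bindings, if any,
// are returned intact. The service path and the resource name are assumptions
// and placeholders; ctx is a context.Context supplied by the caller.
//
//   policy, err := svc.Projects.Locations.Global.ConnectivityTests.
//       GetIamPolicy("projects/my-project/locations/global/connectivityTests/my-test-1").
//       OptionsRequestedPolicyVersion(3).
//       Context(ctx).
//       Do()
//   if err != nil {
//       return err
//   }
//   fmt.Println(policy.Etag, policy.Version)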
func (r *ProjectsLocationsGlobalConnectivityTestsService) GetIamPolicy(resource string) *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall { c := &ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource return c } // OptionsRequestedPolicyVersion sets the optional parameter // "options.requestedPolicyVersion": The policy format version to be // returned. // // Valid values are 0, 1, and 3. Requests specifying an invalid value // will be // rejected. // // Requests for policies with any conditional bindings must specify // version 3. // Policies without any conditional bindings may specify any valid value // or // leave the field unset. func (c *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall { c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+resource}:getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. 
Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsLocationsGlobalConnectivityTestsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests/{connectivityTestsId}:getIamPolicy", // "httpMethod": "GET", // "id": "networkmanagement.projects.locations.global.connectivityTests.getIamPolicy", // "parameterOrder": [ // "resource" // ], // "parameters": { // "options.requestedPolicyVersion": { // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "resource": { // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/connectivityTests/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+resource}:getIamPolicy", // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.connectivityTests.list": type ProjectsLocationsGlobalConnectivityTestsListCall struct { s *Service parent string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists all Connectivity Tests owned by a project. func (r *ProjectsLocationsGlobalConnectivityTestsService) List(parent string) *ProjectsLocationsGlobalConnectivityTestsListCall { c := &ProjectsLocationsGlobalConnectivityTestsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // Filter sets the optional parameter "filter": Lists the // `ConnectivityTests` that match the filter expression. A // filter // expression filters the resources listed in the response. The // expression // must be of the form `<field> <operator> <value>` where operators: // `<`, `>`, // `<=`, // `>=`, // `!=`, `=`, `:` are supported (colon `:` represents a HAS operator // which is // roughly synonymous with equality). <field> can refer to a proto or // JSON // field, or a synthetic field. 
Field names can be camelCase or // snake_case. // // Examples: // - Filter by name: // name = "projects/proj-1/connectivityTests/test-1 // // - Filter by labels: // - Resources that have a key called `foo` // labels.foo:* // - Resources that have a key called `foo` whose value is `bar` // labels.foo = bar func (c *ProjectsLocationsGlobalConnectivityTestsListCall) Filter(filter string) *ProjectsLocationsGlobalConnectivityTestsListCall { c.urlParams_.Set("filter", filter) return c } // OrderBy sets the optional parameter "orderBy": Field to use to sort // the list. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) OrderBy(orderBy string) *ProjectsLocationsGlobalConnectivityTestsListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageSize sets the optional parameter "pageSize": Number of // `ConnectivityTests` to return. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) PageSize(pageSize int64) *ProjectsLocationsGlobalConnectivityTestsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": Page token from an // earlier query, as returned in `next_page_token`. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) PageToken(pageToken string) *ProjectsLocationsGlobalConnectivityTestsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalConnectivityTestsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+parent}/connectivityTests") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.list" call. // Exactly one of *ListConnectivityTestsResponse or error will be // non-nil. Any non-2xx status code is an error. Response headers are in // either *ListConnectivityTestsResponse.ServerResponse.Header or (if a // response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) Do(opts ...googleapi.CallOption) (*ListConnectivityTestsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ListConnectivityTestsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Lists all Connectivity Tests owned by a project.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests", // "httpMethod": "GET", // "id": "networkmanagement.projects.locations.global.connectivityTests.list", // "parameterOrder": [ // "parent" // ], // "parameters": { // "filter": { // "description": "Lists the `ConnectivityTests` that match the filter expression. A filter\nexpression filters the resources listed in the response. The expression\nmust be of the form `\u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e` where operators: `\u003c`, `\u003e`,\n`\u003c=`,\n`\u003e=`,\n`!=`, `=`, `:` are supported (colon `:` represents a HAS operator which is\nroughly synonymous with equality). \u003cfield\u003e can refer to a proto or JSON\nfield, or a synthetic field. Field names can be camelCase or snake_case.\n\nExamples:\n- Filter by name:\n name = \"projects/proj-1/connectivityTests/test-1\n\n- Filter by labels:\n - Resources that have a key called `foo`\n labels.foo:*\n - Resources that have a key called `foo` whose value is `bar`\n labels.foo = bar", // "location": "query", // "type": "string" // }, // "orderBy": { // "description": "Field to use to sort the list.", // "location": "query", // "type": "string" // }, // "pageSize": { // "description": "Number of `ConnectivityTests` to return.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "Page token from an earlier query, as returned in `next_page_token`.", // "location": "query", // "type": "string" // }, // "parent": { // "description": "Required. 
The parent resource of the Connectivity Tests:\n `projects/{project_id}/locations/global`", // "location": "path", // "pattern": "^projects/[^/]+/locations/global$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+parent}/connectivityTests", // "response": { // "$ref": "ListConnectivityTestsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *ProjectsLocationsGlobalConnectivityTestsListCall) Pages(ctx context.Context, f func(*ListConnectivityTestsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "networkmanagement.projects.locations.global.connectivityTests.patch": type ProjectsLocationsGlobalConnectivityTestsPatchCall struct { s *Service name string connectivitytest *ConnectivityTest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Patch: Updates the configuration of an existing // `ConnectivityTest`. // After you update a test, the reachability analysis is performed as // part // of the long running operation, which completes when the analysis // completes. // The Reachability state in the test resource is updated with the new // result. // // If the endpoint specifications in `ConnectivityTest` are invalid // (for example, they contain non-existent resources in the network, or // the // user does not have read permissions to the network configurations // of // listed projects), then the reachability result returns a value // of // <code>UNKNOWN</code>. // // If the endpoint specifications in `ConnectivityTest` are incomplete, // the // reachability result returns a value of `AMBIGUOUS`. See the // documentation // in `ConnectivityTest` for for more details. func (r *ProjectsLocationsGlobalConnectivityTestsService) Patch(name string, connectivitytest *ConnectivityTest) *ProjectsLocationsGlobalConnectivityTestsPatchCall { c := &ProjectsLocationsGlobalConnectivityTestsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.connectivitytest = connectivitytest return c } // UpdateMask sets the optional parameter "updateMask": Required. Mask // of fields to update. At least one path must be supplied in // this field. func (c *ProjectsLocationsGlobalConnectivityTestsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsGlobalConnectivityTestsPatchCall { c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
func (c *ProjectsLocationsGlobalConnectivityTestsPatchCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalConnectivityTestsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.connectivitytest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalConnectivityTestsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Updates the configuration of an existing `ConnectivityTest`.\nAfter you update a test, the reachability analysis is performed as part\nof the long running operation, which completes when the analysis completes.\nThe Reachability state in the test resource is updated with the new result.\n\nIf the endpoint specifications in `ConnectivityTest` are invalid\n(for example, they contain non-existent resources in the network, or the\nuser does not have read permissions to the network configurations of\nlisted projects), then the reachability result returns a value of\n\u003ccode\u003eUNKNOWN\u003c/code\u003e.\n\nIf the endpoint specifications in `ConnectivityTest` are incomplete, the\nreachability result returns a value of `AMBIGUOUS`. 
See the documentation\nin `ConnectivityTest` for for more details.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests/{connectivityTestsId}", // "httpMethod": "PATCH", // "id": "networkmanagement.projects.locations.global.connectivityTests.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. Unique name of the resource using the form:\n `projects/{project_id}/tests/{test_id}`", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/connectivityTests/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { // "description": "Required. Mask of fields to update. At least one path must be supplied in\nthis field.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "request": { // "$ref": "ConnectivityTest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.connectivityTests.rerun": type ProjectsLocationsGlobalConnectivityTestsRerunCall struct { s *Service name string rerunconnectivitytestrequest *RerunConnectivityTestRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Rerun: Rerun an existing `ConnectivityTest`. // After the user triggers the rerun, the reachability analysis is // performed // as part of the long running operation, which completes when the // analysis // completes. // // Even though the test configuration remains the same, the // reachability // result may change due to underlying network configuration // changes. // // If the endpoint specifications in `ConnectivityTest` become invalid // (for // example, specified resources are deleted in the network, or you // lost // read permissions to the network configurations of listed projects), // then // the reachability result returns a value of `UNKNOWN`. func (r *ProjectsLocationsGlobalConnectivityTestsService) Rerun(name string, rerunconnectivitytestrequest *RerunConnectivityTestRequest) *ProjectsLocationsGlobalConnectivityTestsRerunCall { c := &ProjectsLocationsGlobalConnectivityTestsRerunCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.rerunconnectivitytestrequest = rerunconnectivitytestrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsRerunCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsRerunCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsRerunCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsRerunCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *ProjectsLocationsGlobalConnectivityTestsRerunCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsRerunCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.rerunconnectivitytestrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}:rerun") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.rerun" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalConnectivityTestsRerunCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Rerun an existing `ConnectivityTest`.\nAfter the user triggers the rerun, the reachability analysis is performed\nas part of the long running operation, which completes when the analysis\ncompletes.\n\nEven though the test configuration remains the same, the reachability\nresult may change due to underlying network configuration changes.\n\nIf the endpoint specifications in `ConnectivityTest` become invalid (for\nexample, specified resources are deleted in the network, or you lost\nread permissions to the network configurations of listed projects), then\nthe reachability result returns a value of `UNKNOWN`.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests/{connectivityTestsId}:rerun", // "httpMethod": "POST", // "id": "networkmanagement.projects.locations.global.connectivityTests.rerun", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. 
Connectivity Test resource name using the form:\n `projects/{project_id}/connectivityTests/{test_id}`", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/connectivityTests/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}:rerun", // "request": { // "$ref": "RerunConnectivityTestRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.connectivityTests.setIamPolicy": type ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall struct { s *Service resource string setiampolicyrequest *SetIamPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // SetIamPolicy: Sets the access control policy on the specified // resource. Replaces any // existing policy. // // Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and // PERMISSION_DENIED func (r *ProjectsLocationsGlobalConnectivityTestsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall { c := &ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.setiampolicyrequest = setiampolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+resource}:setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsLocationsGlobalConnectivityTestsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests/{connectivityTestsId}:setIamPolicy", // "httpMethod": "POST", // "id": "networkmanagement.projects.locations.global.connectivityTests.setIamPolicy", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/connectivityTests/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+resource}:setIamPolicy", // "request": { // "$ref": "SetIamPolicyRequest" // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.connectivityTests.testIamPermissions": type ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall struct { s *Service resource string testiampermissionsrequest *TestIamPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // TestIamPermissions: Returns permissions that a caller has on the // specified resource. // If the resource does not exist, this will return an empty set // of // permissions, not a NOT_FOUND error. // // Note: This operation is designed to be used for building // permission-aware // UIs and command-line tools, not for authorization checking. This // operation // may "fail open" without warning. func (r *ProjectsLocationsGlobalConnectivityTestsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall { c := &ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.testiampermissionsrequest = testiampermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+resource}:testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.connectivityTests.testIamPermissions" call. // Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either // *TestIamPermissionsResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalConnectivityTestsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/connectivityTests/{connectivityTestsId}:testIamPermissions", // "httpMethod": "POST", // "id": "networkmanagement.projects.locations.global.connectivityTests.testIamPermissions", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/connectivityTests/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+resource}:testIamPermissions", // "request": { // "$ref": "TestIamPermissionsRequest" // }, // "response": { // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.operations.cancel": type ProjectsLocationsGlobalOperationsCancelCall struct { s *Service name string canceloperationrequest *CancelOperationRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Cancel: Starts asynchronous cancellation on a long-running operation. // The server // makes a best effort to cancel the operation, but success is // not // guaranteed. If the server doesn't support this method, it // returns // `google.rpc.Code.UNIMPLEMENTED`. Clients can // use // Operations.GetOperation or // other methods to check whether the cancellation succeeded or whether // the // operation completed despite cancellation. On successful // cancellation, // the operation is not deleted; instead, it becomes an operation // with // an Operation.error value with a google.rpc.Status.code of // 1, // corresponding to `Code.CANCELLED`. func (r *ProjectsLocationsGlobalOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *ProjectsLocationsGlobalOperationsCancelCall { c := &ProjectsLocationsGlobalOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.canceloperationrequest = canceloperationrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsCancelCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalOperationsCancelCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsCancelCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *ProjectsLocationsGlobalOperationsCancelCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalOperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}:cancel") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.operations.cancel" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsLocationsGlobalOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Starts asynchronous cancellation on a long-running operation. The server\nmakes a best effort to cancel the operation, but success is not\nguaranteed. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation or\nother methods to check whether the cancellation succeeded or whether the\noperation completed despite cancellation. 
On successful cancellation,\nthe operation is not deleted; instead, it becomes an operation with\nan Operation.error value with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/operations/{operationsId}:cancel", // "httpMethod": "POST", // "id": "networkmanagement.projects.locations.global.operations.cancel", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The name of the operation resource to be cancelled.", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/operations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}:cancel", // "request": { // "$ref": "CancelOperationRequest" // }, // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.operations.delete": type ProjectsLocationsGlobalOperationsDeleteCall struct { s *Service name string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Deletes a long-running operation. This method indicates that // the client is // no longer interested in the operation result. It does not cancel // the // operation. If the server doesn't support this method, it // returns // `google.rpc.Code.UNIMPLEMENTED`. func (r *ProjectsLocationsGlobalOperationsService) Delete(name string) *ProjectsLocationsGlobalOperationsDeleteCall { c := &ProjectsLocationsGlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalOperationsDeleteCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.operations.delete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsLocationsGlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/operations/{operationsId}", // "httpMethod": "DELETE", // "id": "networkmanagement.projects.locations.global.operations.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The name of the operation resource to be deleted.", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/operations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.operations.get": type ProjectsLocationsGlobalOperationsGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Gets the latest state of a long-running operation. Clients can // use this // method to poll the operation result at intervals as recommended by // the API // service. func (r *ProjectsLocationsGlobalOperationsService) Get(name string) *ProjectsLocationsGlobalOperationsGetCall { c := &ProjectsLocationsGlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsLocationsGlobalOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalOperationsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
func (c *ProjectsLocationsGlobalOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsLocationsGlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.operations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/operations/{operationsId}", // "httpMethod": "GET", // "id": "networkmanagement.projects.locations.global.operations.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The name of the operation resource.", // "location": "path", // "pattern": "^projects/[^/]+/locations/global/operations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "networkmanagement.projects.locations.global.operations.list": type ProjectsLocationsGlobalOperationsListCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists operations that match the specified filter in the // request. 
If the // server doesn't support this method, it returns // `UNIMPLEMENTED`. // // NOTE: the `name` binding allows API services to override the // binding // to use different resource name schemes, such as `users/*/operations`. // To // override the binding, API services can add a binding such // as // "/v1/{name=users/*}/operations" to their service configuration. // For backwards compatibility, the default name includes the // operations // collection id, however overriding users must ensure the name // binding // is the parent resource, without the operations collection id. func (r *ProjectsLocationsGlobalOperationsService) List(name string) *ProjectsLocationsGlobalOperationsListCall { c := &ProjectsLocationsGlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Filter sets the optional parameter "filter": The standard list // filter. func (c *ProjectsLocationsGlobalOperationsListCall) Filter(filter string) *ProjectsLocationsGlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The standard list // page size. func (c *ProjectsLocationsGlobalOperationsListCall) PageSize(pageSize int64) *ProjectsLocationsGlobalOperationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": The standard list // page token. func (c *ProjectsLocationsGlobalOperationsListCall) PageToken(pageToken string) *ProjectsLocationsGlobalOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsLocationsGlobalOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsLocationsGlobalOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsLocationsGlobalOperationsListCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *ProjectsLocationsGlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsLocationsGlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200312") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "networkmanagement.projects.locations.global.operations.list" call. // Exactly one of *ListOperationsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ListOperationsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsLocationsGlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ListOperationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. 
To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.", // "flatPath": "v1beta1/projects/{projectsId}/locations/global/operations", // "httpMethod": "GET", // "id": "networkmanagement.projects.locations.global.operations.list", // "parameterOrder": [ // "name" // ], // "parameters": { // "filter": { // "description": "The standard list filter.", // "location": "query", // "type": "string" // }, // "name": { // "description": "The name of the operation's parent resource.", // "location": "path", // "pattern": "^projects/[^/]+/locations/global$", // "required": true, // "type": "string" // }, // "pageSize": { // "description": "The standard list page size.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "The standard list page token.", // "location": "query", // "type": "string" // } // }, // "path": "v1beta1/{+name}/operations", // "response": { // "$ref": "ListOperationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *ProjectsLocationsGlobalOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } }
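// --- Illustrative usage sketch (editor's addition, not generated code) ---
// Patch and Rerun above return a long-running Operation, and the
// projects.locations.global.operations.get call can be polled until it
// finishes. This is only a sketch: the svc.Projects.Locations.Global.Operations
// accessor chain and the Done/Error fields on Operation are assumptions based
// on the usual layout of generated Google API clients, and it assumes the
// standard context/fmt/time imports.
//
//	func waitForOperation(ctx context.Context, svc *Service, opName string) error {
//		for {
//			op, err := svc.Projects.Locations.Global.Operations.Get(opName).Context(ctx).Do()
//			if err != nil {
//				return err
//			}
//			if op.Done { // assumed field
//				if op.Error != nil { // assumed field
//					return fmt.Errorf("operation %s failed: %v", opName, op.Error.Message)
//				}
//				return nil
//			}
//			time.Sleep(5 * time.Second) // fixed poll interval is fine for a sketch
//		}
//	}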
{ rs := &ProjectsLocationsService{s: s} rs.Global = NewProjectsLocationsGlobalService(s) return rs }
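// --- Illustrative usage sketch (editor's addition, not generated code) ---
// One way the connectivityTests.list call and its Pages helper shown above
// might be driven from application code. The NewService constructor, the
// import path, the Projects.Locations.Global.ConnectivityTests accessor chain,
// and the Resources field on ListConnectivityTestsResponse are assumptions
// based on the usual layout of generated Google API clients; adjust them to
// the actual generated package.
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		networkmanagement "google.golang.org/api/networkmanagement/v1beta1" // assumed import path
//	)
//
//	func listTests(projectID string) {
//		ctx := context.Background()
//		svc, err := networkmanagement.NewService(ctx) // assumed constructor
//		if err != nil {
//			log.Fatal(err)
//		}
//		parent := "projects/" + projectID + "/locations/global"
//		err = svc.Projects.Locations.Global.ConnectivityTests.List(parent).
//			PageSize(50).
//			Pages(ctx, func(page *networkmanagement.ListConnectivityTestsResponse) error {
//				for _, t := range page.Resources { // field name assumed
//					fmt.Println(t.Name)
//				}
//				return nil
//			})
//		if err != nil {
//			log.Fatal(err)
//		}
//	}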
iterators2.rs
// iterators2.rs // In this module, you'll learn some of the unique advantages that iterators can offer. // Step 1. Complete the `capitalize_first` function to pass the first two cases. // Step 2. Apply the `capitalize_first` function to a vector of strings. // Ensure that it returns a vector of strings as well. // Step 3. Apply the `capitalize_first` function again to a list. // Try to ensure it returns a single string. // As always, there are hints if you execute `rustlings hint iterators2`! pub fn capitalize_first(input: &str) -> String { let mut c = input.chars(); match c.next() { None => String::new(), Some(first) => first.to_uppercase().collect::<String>() + c.as_str(), } } #[cfg(test)] mod tests { use super::*; // Step 1. // Tests that verify your `capitalize_first` function implementation #[test] fn test_success() { assert_eq!(capitalize_first("hello"), "Hello"); } #[test] fn test_empty() { assert_eq!(capitalize_first(""), ""); } // Step 2. #[test] fn test_iterate_string_vec() { let words = vec!["hello", "world"]; let capitalized_words: Vec<String> = words.iter().map(|x| capitalize_first(x)).collect();// TODO assert_eq!(capitalized_words, ["Hello", "World"]); } #[test] fn test_iterate_into_string()
}
{ let words = vec!["hello", " ", "world"]; let capitalized_words = words.iter().map(|x| capitalize_first(x)).collect::<String>();// TODO assert_eq!(capitalized_words, "Hello World"); }
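// --- Illustrative sketch (editor's addition, not part of the exercise) ---
// The char-iterator handoff used by `capitalize_first` above, spelled out step
// by step. `to_uppercase` returns an iterator because some characters
// uppercase to more than one char (e.g. 'ß' becomes "SS"), which is why the
// result is collected into a String before the rest of the slice is appended.
fn capitalize_first_steps(input: &str) -> String {
    let mut chars = input.chars();
    match chars.next() {
        None => String::new(), // empty input stays empty
        Some(first) => {
            let rest = chars.as_str(); // remainder of the &str, no allocation
            let upper: String = first.to_uppercase().collect();
            upper + rest
        }
    }
}
// capitalize_first_steps("ßeta") == "SSeta"; capitalize_first_steps("") == ""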
embed.tsx
import Head from "next/head"; import { useState, useEffect } from "react"; import { library } from "@fortawesome/fontawesome-svg-core"; import { fab } from "@fortawesome/free-brands-svg-icons"; import { fas } from "@fortawesome/free-solid-svg-icons"; import { supabase } from "../../utils/initSupabase"; import dayjs from "dayjs"; import relativeTime from "dayjs/plugin/relativeTime"; import Link from "next/link"; import ImageFallback from "react-image-fallback"; dayjs.extend(relativeTime); library.add(fab, fas); import { useRouter } from "next/router"; export default function Embed({ profile }) { const router = useRouter(); const { username } = router.query; if (!username) return null; return ( <> <Head> <title>{profile.username}'s Embed | libby</title> <link rel="icon" type="image/png" href={profile.avatar}></link> <meta property="title" content={`${profile.username}'s Profile | libby`} /> <meta property="description" content={`${profile.bio}`} /> {/* window is undefined during server-side rendering, so fall back to the canonical host used below */} <meta property="url" content={`${typeof window !== "undefined" ? window.location.host : "libby.gg"}/${profile.username}`} /> <meta property="image" content={`${profile.avatar}`} /> </Head> <div className="profilecont fullscreen" style={{ backgroundImage: `url(${profile.background_url})` }} > <div className="avatarcont"> <img className="avatar center mb-10" src={profile.avatar} /> </div> <div className="info marginone"> <h1 className="username"> {profile.displayname ? profile.displayname : profile.username}{" "} <span className="handle">@{profile.username}</span> </h1>
<Link href={`https://libby.gg/${username}`}> <a target="_blank"> <button className="button embedbutton">View libby creator</button> </a> </Link> </div> <style dangerouslySetInnerHTML={{ __html: ` .navbar { display: none; } footer { display: none; } `, }} /> </div> </> ); } export async function getServerSideProps(context) { const { body, error } = await supabase .from("profiles") .select("*") .ilike("username", context.params.username) .single(); const posts = await supabase .from("vw_posts_with_user") .select() .ilike("username", context.params.username); if (!body) { return { notFound: true, }; } return { props: { profile: body, posts: posts, }, // will be passed to the page component as props }; }
<p></p> <p className="bio">{profile.bio}</p>
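// --- Illustrative sketch (editor's addition, not part of embed.tsx) ---
// The profile lookup in getServerSideProps above could be factored into a small
// typed helper. The Profile field list is an assumption inferred from the
// fields this component renders; the supabase-js v1 calls mirror the ones
// already used in getServerSideProps.
interface Profile {
  username: string;
  displayname?: string;
  bio?: string;
  avatar?: string;
  background_url?: string;
}

async function fetchProfileByUsername(username: string): Promise<Profile | null> {
  const { body, error } = await supabase
    .from("profiles")
    .select("*")
    .ilike("username", username)
    .single();
  if (error || !body) return null; // treat "no row" and query errors the same way
  return body as Profile;
}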
ramonifyARA.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- import requests import json from ndreg import * import ndio.ramon as ndramon import ndio.remote.neurodata as neurodata """ Here we show how to RAMONify Allen Reference Atlas data. First we download the annotation ontology from the Allen Brain Atlas API. It returns a JSON tree in which larger parent structures are divided into smaller children regions. For example the "corpus callosum" parent has children "corpus callosum, anterior forceps", "genu of corpus callosum", "corpus callosum, body", etc """ url = "http://api.brain-map.org/api/v2/structure_graph_download/1.json" jsonRaw = requests.get(url).content jsonDict = json.loads(jsonRaw) """ Next we collect the names and ids of all of the regions. Since our json data is a tree we can walk through it in a recursive manner. Thus starting from the root... """ root = jsonDict['msg'][0] """ ...we define a recursive function ... """ #leafList = [] def getChildrenNames(parent, childrenNames={}): #if len(parent['children']) == 0: # leafList.append(parent['id']) for childIndex in range(len(parent['children'])): child = parent['children'][childIndex] childrenNames[child['id']] = child['name'] childrenNames = getChildrenNames(child, childrenNames) return childrenNames """ ... and collect all of the region names in a dictionary with the "id" field as keys. """ regionDict = getChildrenNames(root) #print(leafList) #for key in regionDict.keys(): # print('{0}, "{1}"'.format(key, regionDict[key])) #print(regionDict) #sys.exit() """ Next we RAMONify the data """ token = "ara3_to_AutA" channel = "annotation_draft" nd = neurodata(hostname='synaptomes.neurodata.io/nd/') for regionId in regionDict.keys(): regionName = regionDict[regionId] kvpairs = {'name': regionName} ramonObj = ndramon.RAMONGeneric(id=regionId, resolution=0, kvpairs=kvpairs) try: nd.post_ramon(token, channel, ramonObj) print "Successfully posted ramon obj {0} for {1}".format(regionId, regionName) except: print "Failed to post ramon obj {0} for {1}".format(regionId, regionName)
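The docstring in ramonifyARA.py walks the Allen structure-graph JSON by recursing over each node's children array and recording id -> name pairs. For comparison, a hedged Rust sketch of the same traversal using serde_json (field names are assumed to match the API response used above; error handling is omitted):

use serde_json::Value;
use std::collections::HashMap;

// Recursively collect child region ids and names, mirroring
// getChildrenNames() in the Python script above.
fn get_children_names(parent: &Value, names: &mut HashMap<u64, String>) {
    if let Some(children) = parent.get("children").and_then(Value::as_array) {
        for child in children {
            if let (Some(id), Some(name)) = (
                child.get("id").and_then(Value::as_u64),
                child.get("name").and_then(Value::as_str),
            ) {
                names.insert(id, name.to_string());
            }
            get_children_names(child, names);
        }
    }
}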
sync_requirements.py
#!/usr/bin/env python # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Synchronizes, formats and prepares requirements to release(obtains and adds maximum allowed version). """ import collections import logging import re import sys import textwrap import requests LOG = logging.getLogger(__name__) if not LOG.handlers: LOG.addHandler(logging.StreamHandler()) LOG.setLevel(logging.INFO) GLOBAL_REQUIREMENTS_LOCATIONS = ( "https://raw.githubusercontent.com/openstack/requirements/master/", "http://opendev.org/openstack/requirements/raw/branch/master/" ) RALLY_REQUIREMENTS_FILES = ( "requirements.txt", "test-requirements.txt" ) DO_NOT_TOUCH_TAG = "[do-not-touch]" class Comment(object): def __init__(self, s=None, finished=False): self._comments = [] self.is_finished = finished if s: self.append(s) def finish_him(self): self.is_finished = True def append(self, s): self._comments.append(s[1:].strip()) def __str__(self): return textwrap.fill("\n".join(self._comments), width=80, initial_indent="# ", subsequent_indent="# ") _PYPI_CACHE = {} class PYPIPackage(object): # NOTE(andreykurilin): one license can have different labels. Let's use # unified variant. LICENSE_MAP = {"MIT license": "MIT", "MIT License": "MIT", "BSD License": "BSD", "Apache 2.0": "Apache License, Version 2.0"} def
(self, package_name): self.package_name = package_name self._pypi_info = None self._pypi_license = None @property def pypi_info(self): if self._pypi_info is None: if self.package_name in _PYPI_CACHE: self._pypi_info = _PYPI_CACHE[self.package_name] else: resp = requests.get("https://pypi.org/pypi/%s/json" % self.package_name) if resp.status_code != 200: print("An error occurred while checking '%s' package at " "pypi." % self.package_name) raise Exception(resp.text) self._pypi_info = resp.json() # let's cache it for the case when we need to sync requirements # and update upper constrains _PYPI_CACHE[self.package_name] = self._pypi_info return self._pypi_info @property def pypi_version(self): return self.pypi_info["info"]["version"] @property def pypi_license(self): if self._pypi_license is None: if self.pypi_info["info"]["license"]: self._pypi_license = self.pypi_info["info"]["license"] else: # try to parse classifiers prefix = "License :: OSI Approved :: " classifiers = [c[len(prefix):] for c in self.pypi_info["info"]["classifiers"] if c.startswith(prefix)] self._pypi_license = "/".join(classifiers) self._license = self.LICENSE_MAP.get(self._pypi_license, self._pypi_license) if self._pypi_license == "UNKNOWN": self._pypi_license = None return self._license def __eq__(self, other): return (isinstance(other, PYPIPackage) and self.package_name == other.package_name) class Requirement(PYPIPackage): RE_NAME = re.compile(r"[a-zA-Z0-9-._]+") RE_CONST_VERSION = re.compile(r"==[a-zA-Z0-9.]+") RE_MIN_VERSION = re.compile(r">=?[a-zA-Z0-9.]+") RE_MAX_VERSION = re.compile(r"<=?[a-zA-Z0-9.]+") RE_NE_VERSIONS = re.compile(r"!=[a-zA-Z0-9.]+") def __init__(self, package_name, version): super(Requirement, self).__init__(package_name) self.version = version self.do_not_touch = False def sync_max_version_with_pypy(self): if isinstance(self.version, dict) and not self.do_not_touch: self.version["max"] = "<=%s" % self.pypi_version @classmethod def parse_line(cls, line): match = cls.RE_NAME.match(line) if match: name = match.group() # remove name versions = line.replace(name, "") # remove comments versions = versions.split("#")[0] # remove python classifiers versions = versions.split(";")[0].strip() if not cls.RE_CONST_VERSION.match(versions): versions = versions.strip().split(",") min_version = None max_version = None ne_versions = [] for version in versions: if cls.RE_MIN_VERSION.match(version): if min_version: raise Exception("Found several min versions for " "%s package." % name) min_version = version elif cls.RE_MAX_VERSION.match(version): if max_version: raise Exception("Found several max versions for " "%s package." 
% name) max_version = version elif cls.RE_NE_VERSIONS.match(version): ne_versions.append(version) versions = {"min": min_version, "max": max_version, "ne": ne_versions} return cls(name, versions) def __str__(self): if isinstance(self.version, dict): version = [] min_equal_to_max = False if self.version["min"] and self.version["max"]: if ( self.version["min"].startswith(">=") and self.version["max"].startswith("<=") and self.version["min"][2:] == self.version["max"][2:] ): # min and max versions are equal there is no need to write # both of them min_equal_to_max = True version.append("==%s" % self.version["min"][2:]) if not min_equal_to_max and self.version["min"]: version.append(self.version["min"]) if not min_equal_to_max and self.version["ne"]: version.extend(self.version["ne"]) if not min_equal_to_max and self.version["max"]: version.append(self.version["max"]) version = ",".join(version) else: if self.do_not_touch: version = self.version else: # remove const version version = ">=%s" % self.version[2:] string = "%s%s" % (self.package_name, version) if self.pypi_license: # NOTE(andreykurilin): When I start implementation of this script, # python-keystoneclient dependency string took around ~45-55 # chars, so let's use this length as indent. Feel free to modify # it to lower or greater value. magic_number = 55 if len(string) < magic_number: indent = magic_number - len(string) else: indent = 2 string += " " * indent + "# " + self.pypi_license return string def __eq__(self, other): return (isinstance(other, self.__class__) and self.package_name == other.package_name) def __ne__(self, other): return not self.__eq__(other) class UpperConstraint(PYPIPackage): RE_LINE = re.compile( r"(?P<package_name>[a-zA-Z0-9-._]+)===(?P<version>[a-zA-Z0-9.]+)") def __init__(self, package_name, version=None): super(UpperConstraint, self).__init__(package_name) self._version = version def __str__(self): return "%s===%s" % (self.package_name, self.version) @property def version(self): if self._version is None: self._version = self.pypi_version return self._version @classmethod def parse_line(cls, line): match = cls.RE_LINE.match(line) if match: return cls(**match.groupdict()) def update(self, version): self._version = version def parse_data(raw_data, include_comments=True, dependency_cls=Requirement): # first elem is None to simplify checks of last elem in requirements requirements = [None] for line in raw_data.split("\n"): if line.startswith("#"): if not include_comments: continue if getattr(requirements[-1], "is_finished", True): requirements.append(Comment()) requirements[-1].append(line) elif line == "": # just empty line if isinstance(requirements[-1], Comment): requirements[-1].finish_him() requirements.append(Comment(finished=True)) else: if (isinstance(requirements[-1], Comment) and not requirements[-1].is_finished): requirements[-1].finish_him() # parse_line dep = dependency_cls.parse_line(line) if dep: if (isinstance(requirements[-1], Comment) and DO_NOT_TOUCH_TAG in str(requirements[-1])): dep.do_not_touch = True requirements.append(dep) for i in range(len(requirements) - 1, 0, -1): # remove empty lines at the end of file if isinstance(requirements[i], Comment): if str(requirements[i]) == "": requirements.pop(i) else: break return collections.OrderedDict( (v if isinstance(v, Comment) else v.package_name, v) for v in requirements if v) def _fetch_from_gr(filename): """Try to fetch data from OpenStack global-requirements repo""" for i in range(0, len(GLOBAL_REQUIREMENTS_LOCATIONS)): url = 
GLOBAL_REQUIREMENTS_LOCATIONS[i] + filename LOG.debug("Try to obtain %s from %s" % (filename, url)) try: return requests.get(url).text except requests.ConnectionError as e: LOG.exception(e) raise Exception("Unable to obtain %s" % filename) def _write_requirements(filename, requirements): """Saves requirements to file.""" if isinstance(requirements, dict): requirements = requirements.values() LOG.info("Saving requirements to %s." % filename) with open(filename, "w") as f: for entity in requirements: f.write(str(entity)) f.write("\n") def sync_requirements(): """Synchronizes Rally requirements with OpenStack global-requirements.""" LOG.info("Obtaining global-requirements of OpenStack...") raw_gr = _fetch_from_gr("global-requirements.txt") # NOTE(andreykurilin): global-requirements includes comments which can be # unrelated to Rally project, so let's just ignore them gr = parse_data(raw_gr, include_comments=False) for file_name in RALLY_REQUIREMENTS_FILES: LOG.debug("Processing '%s'." % file_name) with open(file_name) as f: requirements = parse_data(f.read()) for name, req in requirements.items(): if isinstance(req, Requirement) and not req.do_not_touch: if name in gr: req.version = gr[req.package_name].version else: # it not g-r requirements if isinstance(req.version, dict): req.version["max"] = None _write_requirements(file_name, requirements) def update_upper_constraints(): """Obtains latest version of packages and put them to upper-constraints.""" LOG.info("Obtaining upper-constrains from OpenStack...") raw_g_uc = _fetch_from_gr("upper-constraints.txt") # NOTE(andreykurilin): global OpenStack upper-constraints file includes # comments which can be unrelated to Rally project, so let's just ignore # them. global_uc = parse_data(raw_g_uc, include_comments=False, dependency_cls=UpperConstraint) with open("upper-constraints.txt") as f: our_uc = parse_data(f.read(), dependency_cls=UpperConstraint) with open("requirements.txt") as f: our_requirements = parse_data(f.read(), include_comments=False) for name, req in our_requirements.items(): if isinstance(req, Comment): continue if name not in our_uc: our_uc[name] = UpperConstraint(name) if name in global_uc: # we cannot use whatever we want versions in CI. OpenStack CI # ignores versions listed in requirements of # particular project and use versions from global u-c file. # It means that we need to suggest to use the same versions our_uc[name].update(global_uc[name].version) our_uc = sorted(our_uc.values(), key=lambda o: o.package_name.upper()) _write_requirements("upper-constraints.txt", our_uc) def main(): sync_requirements() update_upper_constraints() if __name__ == "__main__": sys.exit(main())
__init__
assist-state.service.ts
import { Injectable } from '@angular/core'; import { Subject, BehaviorSubject } from 'rxjs'; import { Route } from '@angular/compiler/src/core'; import { Router, NavigationStart } from '@angular/router'; import { filter } from 'rxjs/operators'; import { MspDataService } from 'app/services/msp-data.service'; import { AssistTransformService } from './assist-transform.service'; import { ApiSendService } from 'app/modules/assistance/services/api-send.service'; import { ROUTES_ASSIST } from '../models/assist-route-constants'; import devOnlyConsoleLog from 'app/_developmentHelpers/dev-only-console-log'; import { MspLogService } from 'app/services/log.service'; @Injectable({ providedIn: 'root' }) export class
{ finAssistApp = this.dataSvc.finAssistApp; touched: Subject<boolean> = new Subject<boolean>(); index: BehaviorSubject<number> = new BehaviorSubject(null); success$: BehaviorSubject<any> = new BehaviorSubject(null); failure$: BehaviorSubject<any> = new BehaviorSubject(null); submitted = false; // Do we need? response: any; constructor( private router: Router, public dataSvc: MspDataService, private xformSvc: AssistTransformService, private api: ApiSendService, private logService: MspLogService ) { this.router.events .pipe(filter(event => event instanceof NavigationStart)) .subscribe((obs: any) => { this.setIndex( obs.url ); }); } setAssistPages(arr: Route[]) { if ( !this.finAssistApp.pageStatus.length ) { const routeConst = Object.keys( ROUTES_ASSIST ).map( x => ROUTES_ASSIST[x] ); this.finAssistApp.pageStatus = arr .filter((itm: any) => !itm.redirectTo) .map((itm: any) => { const val = routeConst.find( x => x.path === itm.path ); return { index: val.index, path: val.path, fullpath: val.fullpath, isComplete: false, isValid: false, btnLabel: val.btnLabel ? val.btnLabel : '', btnDefaultColor: val.btnDefaultColor }; }); } } findIndex( url: string ): number { let idx = 0; if ( this.finAssistApp.pageStatus ) { const obj = this.finAssistApp.pageStatus.find( x => url.includes(x.path) ); if ( obj ) { idx = obj.index; } } return idx; } setIndex( path: string ) { const index = this.findIndex( path ); this.index.next( index ? index : 1 ); } setPageIncomplete( path: string ) { const obj = this.finAssistApp.pageStatus.find( x => path.includes(x.path) ); if ( obj ) { obj.isComplete = false; // Set future pages to not complete this.finAssistApp.pageStatus.map( x => { if ( obj.index < x.index && x.isComplete ) { x.isComplete = false; } }); } } setPageValid( path: string, valid: boolean ) { const obj = this.finAssistApp.pageStatus.find( x => path.includes(x.path) ); if ( obj ) { obj.isValid = valid; } } isPageComplete( path: string ): boolean { let complete = false; const obj = this.finAssistApp.pageStatus .find( x => path.includes(x.path) ); if ( obj ) { // Requirement to continue is the previous page is complete const prevIdx = obj.index - 1; complete = (prevIdx === 0 ? obj.isComplete : this.finAssistApp.pageStatus[prevIdx - 1].isComplete ); } return complete; } async submitApplication() { const token = this.finAssistApp.authorizationToken; const attachments = this.xformSvc.fileAttachments; const app = this.xformSvc.application; try { //await this.api.sendFiles(token, app.uuid, attachments); const call = await this.api.sendApp(app, token, app.uuid, attachments); const res = await call.toPromise(); this.response = res; const isSuccess = this.response.op_return_code === 'SUCCESS'; isSuccess ? (this.dataSvc.removeFinAssistApplication(), this.success$.next(res)) : this.failure$.next(res); return res; } catch (err) { devOnlyConsoleLog('Error: ', err); this.logService.log( { name: 'PA - Error in submitApplication', confirmationNumber: this.finAssistApp.referenceNumber, url: this.router.url }, 'PA - Error in submitApplication:' + err ); } } }
AssistStateService
compile.rs
// Copyright (c) Microsoft Corporation. // SPDX-License-Identifier: MIT use sexp::{atom_s, list, Sexp}; use std::borrow::Cow; use std::collections::hash_map::Entry; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::convert::TryFrom; use crate::ast::{ Annotations, Argument, CascadeString, Declaration, Expression, FuncCall, PolicyFile, Statement, }; use crate::constants; use crate::error::{CascadeErrors, CompileError, ErrorItem, InternalError}; use crate::internal_rep::{ argument_to_typeinfo, argument_to_typeinfo_vec, generate_sid_rules, type_slice_to_variant, Annotated, AnnotationInfo, ArgForValidation, Associated, BoundTypeInfo, ClassList, Context, FunctionArgument, FunctionInfo, FunctionMap, Sid, TypeInfo, TypeMap, ValidatedStatement, }; use codespan_reporting::files::SimpleFile; pub fn compile_rules_one_file<'a>( p: &'a PolicyFile, classlist: &'a ClassList<'a>, type_map: &'a TypeMap, func_map: &'a FunctionMap<'a>, ) -> Result<BTreeSet<ValidatedStatement<'a>>, CascadeErrors> { do_rules_pass( &p.policy.exprs, type_map, func_map, classlist, None, &p.file, ) } pub fn generate_sexp( type_map: &TypeMap, classlist: &ClassList, policy_rules: BTreeSet<ValidatedStatement>, func_map: &FunctionMap<'_>, ) -> Result<Vec<sexp::Sexp>, CascadeErrors> { let type_decl_list = organize_type_map(type_map)?; // TODO: The rest of compilation let cil_types = type_list_to_sexp(type_decl_list, type_map); let headers = generate_cil_headers(classlist); let cil_rules = rules_list_to_sexp(policy_rules); let cil_macros = func_map_to_sexp(func_map)?; let sid_statements = generate_sid_rules(generate_sids("kernel_sid", "security_sid", "unlabeled_sid")); let mut ret = headers; ret.extend(cil_types); ret.extend(cil_macros); ret.extend(cil_rules); ret.extend(sid_statements); Ok(ret) } // These are hardcoded, at least for now. // This sets up MLS, UBAC, and RBAC properties of the system. // Version 0.1 won't allow any language control of these properties, but that will come later. // Until we can actually set these things in the language, we need some sensible defaults to make // secilc happy. As we add the above listed security models, this should be refactored to set them // in accordance with the policy fn generate_cil_headers(classlist: &ClassList) -> Vec<sexp::Sexp> { let mut ret = classlist.generate_class_perm_cil(); ret.append(&mut vec![ list(&[atom_s("sensitivity"), atom_s("s0")]), list(&[atom_s("sensitivityorder"), list(&[atom_s("s0")])]), list(&[atom_s("user"), atom_s("system_u")]), list(&[atom_s("role"), atom_s("system_r")]), list(&[atom_s("role"), atom_s("object_r")]), list(&[atom_s("userrole"), atom_s("system_u"), atom_s("system_r")]), list(&[atom_s("userrole"), atom_s("system_u"), atom_s("object_r")]), list(&[ atom_s("userlevel"), atom_s("system_u"), list(&[atom_s("s0")]), ]), list(&[ atom_s("userrange"), atom_s("system_u"), list(&[list(&[atom_s("s0")]), list(&[atom_s("s0")])]), ]), ]); ret } // TODO: Refactor below nearly identical functions to eliminate redundant code pub fn extend_type_map(p: &PolicyFile, type_map: &mut TypeMap) -> Result<(), CascadeErrors> { // TODO: This only allows declarations at the top level. 
// Nested declarations are legal, but auto-associate with the parent, so they'll need special // handling when association is implemented let mut errors = CascadeErrors::new(); for e in &p.policy.exprs { let d = match e { Expression::Decl(d) => d, _ => continue, }; match d { Declaration::Type(t) => match TypeInfo::new(*t.clone(), &p.file) { Ok(new_type) => type_map.insert(t.name.to_string(), new_type), Err(e) => errors.append(e), }, Declaration::Func(_) => continue, }; } errors.into_result(()) } pub fn get_built_in_types_map() -> TypeMap { let mut built_in_types = TypeMap::new(); let list_coercions = constants::BUILT_IN_TYPES.iter().map(|t| *t == "perm"); for (built_in, list_coercion) in constants::BUILT_IN_TYPES.iter().zip(list_coercions) { let built_in = built_in.to_string(); built_in_types.insert( built_in.clone(), TypeInfo::make_built_in(built_in, list_coercion), ); } //Special handling for sids. These are temporary built in types that are handled differently let kernel_sid = TypeInfo { name: CascadeString::from("kernel_sid"), inherits: vec![CascadeString::from(constants::DOMAIN)], is_virtual: false, list_coercion: false, declaration_file: None, annotations: BTreeSet::new(), decl: None, bound_type: BoundTypeInfo::Unbound, }; let security_sid = TypeInfo { name: CascadeString::from("security_sid"), inherits: vec![CascadeString::from(constants::RESOURCE)], is_virtual: false, list_coercion: false, declaration_file: None, annotations: BTreeSet::new(), decl: None, bound_type: BoundTypeInfo::Unbound, }; let unlabeled_sid = TypeInfo { name: CascadeString::from("unlabeled_sid"), inherits: vec![CascadeString::from(constants::RESOURCE)], is_virtual: false, list_coercion: false, declaration_file: None, annotations: BTreeSet::new(), decl: None, bound_type: BoundTypeInfo::Unbound, }; for sid in [kernel_sid, security_sid, unlabeled_sid] { built_in_types.insert(sid.name.to_string(), sid); } built_in_types } pub fn get_global_bindings( p: &PolicyFile, types: &mut TypeMap, classlist: &mut ClassList, file: &SimpleFile<String, String>, ) -> Result<(), CascadeErrors> { for e in &p.policy.exprs { if let Expression::Stmt(Statement::LetBinding(l)) = e { let let_rvalue = ArgForValidation::from(&l.value); let (variant, bound_type) = match let_rvalue { ArgForValidation::List(v) => { let ti_vec = argument_to_typeinfo_vec(&v, types, classlist, None, file)?; let variant = type_slice_to_variant(&ti_vec, types)?; ( variant.name.as_ref(), BoundTypeInfo::List(v.iter().map(|s| s.to_string()).collect()), ) } a => { let ti = argument_to_typeinfo(&a, types, classlist, None, file)?; if ti.name.as_ref() == "perm" { ( "perm", match a { ArgForValidation::Var(s) => BoundTypeInfo::Single(s.to_string()), _ => return Err(InternalError {}.into()), }, ) } else { ( ti.name.as_ref(), BoundTypeInfo::Single(ti.name.to_string().clone()), ) } } }; if variant == "perm" { classlist.insert_perm_set(&l.name.to_string(), bound_type.get_contents_as_vec()) } else { let new_type = TypeInfo::new_bound_type( l.name.clone(), variant, file, bound_type, &l.annotations, )?; types.insert(l.name.to_string(), new_type); } } } Ok(()) } pub fn build_func_map<'a>( exprs: &'a [Expression], types: &'a TypeMap, parent_type: Option<&'a TypeInfo>, file: &'a SimpleFile<String, String>, ) -> Result<FunctionMap<'a>, CascadeErrors> { let mut decl_map = FunctionMap::new(); // TODO: This only allows declarations at the top level. 
for e in exprs { let d = match e { Expression::Decl(d) => d, _ => continue, }; match d { Declaration::Type(t) => { let type_being_parsed = match types.get(&t.name.to_string()) { Some(t) => t, None => return Err(ErrorItem::Internal(InternalError {}).into()), }; decl_map.extend(build_func_map( &t.expressions, types, Some(type_being_parsed), file, )?); } Declaration::Func(f) => { // FIXME: error out for duplicate entries decl_map.insert( f.get_cil_name(), FunctionInfo::new(&**f, types, parent_type, file)?, ); } }; } Ok(decl_map) } // Mutate hash map to set the validated body pub fn validate_functions<'a, 'b>( functions: &'a mut FunctionMap<'b>, types: &'b TypeMap, class_perms: &'b ClassList, functions_copy: &'b FunctionMap<'b>, ) -> Result<(), CascadeErrors> { let mut errors = CascadeErrors::new(); let mut classes_to_virtual_functions = BTreeMap::new(); for function in functions.values_mut() { match function.validate_body( functions_copy, types, class_perms, function.declaration_file, ) { Ok(_) => (), Err(e) => errors.append(e), } if function.is_virtual { if let Some(func_class) = function.class { classes_to_virtual_functions .entry(&func_class.name) .or_insert(BTreeSet::new()) .insert(&function.name); } } } // Validate that all required functions exist for setype in types.values() { for parent in &setype.inherits { for virtual_function_name in classes_to_virtual_functions .get(&parent) .unwrap_or(&BTreeSet::new()) { if !setype.defines_function(virtual_function_name, functions_copy) { errors.append(CascadeErrors::from(ErrorItem::make_compile_or_internal_error( &format!("{} does not define a function named {}", setype.name, virtual_function_name), setype.declaration_file.as_ref(), parent.get_range(), &format!("All types inheriting {} are required to implement {} because it is marked as virtual", parent, virtual_function_name)))) } } } } errors.into_result(()) } // If a type couldn't be organized, it is either a cycle or a non-existant parent somewhere // The claim that a type must have at least one parent is enforced by the parser // This function walks the tree from a given type and determines which of these cases we are in // Return a Vector of found errors. This Vector can be empty in internal calls, but should not be // when called from another function. 
fn find_cycles_or_bad_types( type_to_check: &TypeInfo, types: &TypeMap, visited_types: HashSet<&str>, ) -> Result<(), CascadeErrors> { let mut ret = CascadeErrors::new(); for p in &type_to_check.inherits { if visited_types.contains(p.as_ref()) || *p == type_to_check.name { // cycle return Err(CascadeErrors::from( ErrorItem::make_compile_or_internal_error( "Cycle detected", type_to_check.declaration_file.as_ref(), p.get_range(), "This type inherits itself", ), )); } let parent_ti = match types.get(p.as_ref()) { Some(t) => t, None => { return Err(CascadeErrors::from( ErrorItem::make_compile_or_internal_error( "Not a valid identifier", type_to_check.declaration_file.as_ref(), p.get_range(), "Expected a valid type", ), )); } }; let mut new_visited_types = visited_types.clone(); new_visited_types.insert(type_to_check.name.as_ref()); match find_cycles_or_bad_types(parent_ti, types, new_visited_types) { Ok(()) => (), Err(e) => ret.append(e), } } ret.into_result(()) } fn generate_type_no_parent_errors(missed_types: Vec<&TypeInfo>, types: &TypeMap) -> CascadeErrors { let mut ret = CascadeErrors::new(); for t in &missed_types { match find_cycles_or_bad_types(t, types, HashSet::new()) { Ok(()) => { ret.add_error(InternalError {}); return ret; } Err(e) => ret.append(e), } } // TODO: Deduplication ret } fn get_synthetic_resource_name( dom_info: &TypeInfo, associated_resource: &CascadeString, ) -> CascadeString { format!("{}-{}", dom_info.name, associated_resource).into() } fn create_synthetic_resource( types: &TypeMap, dom_info: &TypeInfo, associated_parent: Option<&TypeInfo>, class: &TypeInfo, class_string: &CascadeString, global_exprs: &mut HashSet<Expression>, ) -> Result<CascadeString, ErrorItem> { if !class.is_resource(types) { return Err(CompileError::new( "not a resource", dom_info .declaration_file .as_ref() .ok_or(ErrorItem::Internal(InternalError {}))?, class_string.get_range(), "This should be a resource, not a domain.", ) .into()); } // Creates a synthetic resource declaration. let mut dup_res_decl = class.decl.as_ref().ok_or(InternalError {})?.clone(); let res_name = get_synthetic_resource_name(dom_info, &class.name); dup_res_decl.name = res_name.clone(); // See TypeDecl::new() in parser.lalrpop for resource inheritance. let parent_name = match associated_parent { None => class.name.clone(), Some(parent) => get_synthetic_resource_name(parent, &class.name), }; dup_res_decl.inherits = vec![parent_name, constants::RESOURCE.into()]; // Virtual resources become concrete when associated to concrete types dup_res_decl.is_virtual = dup_res_decl.is_virtual && dom_info.is_virtual; let dup_res_is_virtual = dup_res_decl.is_virtual; dup_res_decl.annotations = Annotations::new(); dup_res_decl .expressions .iter_mut() .for_each(|e| e.set_class_name_if_decl(res_name.clone())); dup_res_decl.expressions = dup_res_decl .expressions .into_iter() // If dup_res_decl is concrete, do not inherit virtual functions .filter(|e| dup_res_is_virtual || !e.is_virtual_function()) .collect(); if !global_exprs.insert(Expression::Decl(Declaration::Type(Box::new(dup_res_decl)))) { return Err(InternalError {}.into()); } Ok(res_name) } fn interpret_associate( global_exprs: &mut HashSet<Expression>, local_exprs: &mut HashSet<Expression>, funcs: &FunctionMap<'_>, types: &TypeMap, associate: &Associated, associated_parent: Option<&TypeInfo>, dom_info: &TypeInfo, ) -> Result<(), CascadeErrors> { // Only allow a set of specific annotation names and strictly check their arguments. // TODO: Add tests to verify these checks. 
let mut errors = CascadeErrors::new(); let mut potential_resources: BTreeMap<_, _> = associate .resources .iter() .map(|r| (r.as_ref(), (r, false))) .collect(); // Finds the associated call. for func_info in funcs.values().filter(|f| f.is_associated_call) { if let Some(class) = func_info.class { if let Some((res, seen)) = potential_resources.get_mut(class.name.as_ref()) { *seen = if *seen { errors.add_error(ErrorItem::Compile(CompileError::new( "multiple @associated_call in the same resource", func_info.declaration_file, func_info.decl.name.get_range(), "Only one function in the same resource can be annotated with @associated_call.", ))); continue; } else { true }; let res_name = match create_synthetic_resource( types, dom_info, associated_parent, class, res, global_exprs, ) { Ok(n) => n, Err(e) => { errors.add_error(e); continue; } }; // Creates a synthetic call. let new_call = Expression::Stmt(Statement::Call(Box::new(FuncCall::new( Some(res_name), func_info.name.clone().into(), vec![Argument::Var("this".into())], )))); if !local_exprs.insert(new_call) { return Err(ErrorItem::Internal(InternalError {}).into()); } } } } for (_, (res, _)) in potential_resources.iter().filter(|(_, (_, seen))| !seen) { match types.get(res.as_ref()) { Some(class) => { match create_synthetic_resource( types, dom_info, associated_parent, class, res, global_exprs, ) { Ok(_) => {} Err(e) => errors.add_error(e), } } None => errors.add_error(CompileError::new( "unknown resource", dom_info .declaration_file .as_ref() .ok_or(ErrorItem::Internal(InternalError {}))?, res.get_range(), "didn't find this resource in the policy", )), } } errors.into_result(()) } // domain -> related expressions type AssociateExprs = HashMap<CascadeString, HashSet<Expression>>; #[derive(Clone)] struct InheritedAnnotation<'a> { annotation: &'a AnnotationInfo, parent: Option<&'a TypeInfo>, } fn interpret_inherited_annotations<'a, T>( global_exprs: &mut HashSet<Expression>, associate_exprs: &mut AssociateExprs, funcs: &FunctionMap<'_>, types: &TypeMap, dom_info: &'a TypeInfo, extra_annotations: T, ) -> Result<(), CascadeErrors> where T: Iterator<Item = InheritedAnnotation<'a>>, { let mut errors = CascadeErrors::new(); let local_exprs = match associate_exprs.entry(dom_info.name.clone()) { // Ignores already processed domains. Entry::Occupied(_) => return Ok(()), vacant => vacant.or_default(), }; for inherited in dom_info .annotations .iter() .map(|a| InheritedAnnotation { annotation: a, parent: None, }) .chain(extra_annotations) { if let AnnotationInfo::Associate(ref associate) = inherited.annotation { match interpret_associate( global_exprs, local_exprs, funcs, types, associate, inherited.parent, dom_info, ) { Ok(()) => {} Err(e) => errors.append(e), } } } errors.into_result(()) } fn inherit_annotations<'a>( global_exprs: &mut HashSet<Expression>, associate_exprs: &mut AssociateExprs, funcs: &FunctionMap<'_>, types: &'a TypeMap, dom_info: &'a TypeInfo, ) -> Result<Vec<InheritedAnnotation<'a>>, CascadeErrors> { let mut errors = CascadeErrors::new(); let inherited_annotations = { let mut ret = Vec::new(); for parent_name in &dom_info.inherits { let parent_ti = match types.get(parent_name.as_ref()) { Some(p) => p, // Ignores inheritance issues for now, see bad_type_error_test(). None => continue, }; ret.extend( match inherit_annotations(global_exprs, associate_exprs, funcs, types, parent_ti) { Ok(a) => a, Err(e) => { // Can generate duplicated errors because of nested calls. // TODO: Deduplicate errors and sort them by file and line. 
errors.append(e); continue; } }, ); } ret }; match interpret_inherited_annotations( global_exprs, associate_exprs, funcs, types, dom_info, inherited_annotations.iter().cloned(), ) { Ok(()) => {} Err(e) => errors.append(e), } errors.into_result_with(|| { dom_info .annotations .iter() .map(|a| InheritedAnnotation { annotation: a, parent: Some(dom_info), }) .chain(inherited_annotations.into_iter().map(|mut a| { a.parent = Some(dom_info); a })) .collect() }) } pub fn apply_associate_annotations<'a>( types: &'a TypeMap, funcs: &FunctionMap<'_>, ) -> Result<Vec<Expression>, CascadeErrors> { let mut errors = CascadeErrors::new(); // Makes sure that there is no cycle. organize_type_map(types)?; let mut associate_exprs = HashMap::new(); let mut global_exprs = HashSet::new(); for type_info in types.values() { match inherit_annotations( &mut global_exprs, &mut associate_exprs, funcs, types, type_info, ) { Ok(_) => {} Err(e) => errors.append(e), } } match associate_exprs .into_iter() .filter(|(_, v)| !v.is_empty()) .map(|(k, v)| { // TODO: Avoid cloning all expressions. let mut new_domain = types .get(&k.to_string()) .ok_or(ErrorItem::Internal(InternalError {}))? .decl .as_ref() .ok_or(ErrorItem::Internal(InternalError {}))? .clone(); new_domain.expressions = v.into_iter().collect(); Ok(Expression::Decl(Declaration::Type(Box::new(new_domain)))) }) .chain(global_exprs.into_iter().map(Ok)) .collect::<Result<_, CascadeErrors>>() { Ok(r) => errors.into_result(r), Err(e) => { errors.append(e); Err(errors) } } } // Temporary check for non-virtual inheritance // TODO: remove when adding support for non-virtual inheritance fn check_non_virtual_inheritance(types: &TypeMap) -> Result<(), CascadeErrors> { for t in types.values() { for parent in &t.inherits { if let Some(p) = types.get(parent.as_ref()) { if !p.is_virtual { return Err(ErrorItem::make_compile_or_internal_error( "Inheriting from a non-virtual type is not yet supported", t.declaration_file.as_ref(), parent.get_range(), "This type is not virtual", ) .into()); } } } } Ok(()) } // This function validates that the relationships in the map are valid, and organizes a Vector // of type declarations in a reasonable order to be output into CIL. // In order to be valid, the types must meet the following properties: // 1. All types have at least one parent // 2. All listed parents are themselves types (or "domain" or "resource") // 3. No cycles exist fn organize_type_map(types: &TypeMap) -> Result<Vec<&TypeInfo>, CascadeErrors> { let mut tmp_types: BTreeMap<&String, &TypeInfo> = types.iter().collect(); let mut out: Vec<&TypeInfo> = Vec::new(); // TODO: This should be allowed, but isn't yet supported. Remove this check once support for // non-virtual inheritance is added check_non_virtual_inheritance(types)?; while !tmp_types.is_empty() { let mut current_pass_types: Vec<&TypeInfo> = Vec::new(); for ti in tmp_types.values() { let mut wait = false; // TODO: Do we need to consider the case when inherits is empty? Theoretically it // should have always been populated with at least domain or resource by the parser. 
// Should probably return an internal error if that hasn't happened for key in &ti.inherits { if !out.iter().any(|&x| &x.name == key) { wait = true; continue; } } if !wait { // This means all the parents are previously listed current_pass_types.push(ti); } } if current_pass_types.is_empty() && !tmp_types.is_empty() { // We can't satify the parents for all types return Err(generate_type_no_parent_errors( tmp_types.values().copied().collect(), types, )); } for t in &current_pass_types { tmp_types.remove(&t.name.to_string()); } out.append(&mut current_pass_types); } Ok(out) } // Gather all the alias annotations for types and functions and return them so they can be stored // in the maps pub fn collect_aliases<'a, I, T>(aliasable_map: I) -> BTreeMap<String, String> where I: Iterator<Item = (&'a String, T)>, T: Annotated, { let mut aliases = BTreeMap::new(); for (k, v) in aliasable_map { for a in v.get_annotations() { if let AnnotationInfo::Alias(a) = a { aliases.insert(a.to_string(), k.clone()); } } } aliases } fn do_rules_pass<'a>( exprs: &'a [Expression], types: &'a TypeMap, funcs: &'a FunctionMap<'a>, class_perms: &ClassList<'a>, parent_type: Option<&'a TypeInfo>, file: &'a SimpleFile<String, String>, ) -> Result<BTreeSet<ValidatedStatement<'a>>, CascadeErrors> { let mut ret = BTreeSet::new(); let mut errors = CascadeErrors::new(); for e in exprs { match e { Expression::Stmt(s) => { let func_args = match parent_type { Some(t) => vec![FunctionArgument::new_this_argument(t)], None => Vec::new(), }; match ValidatedStatement::new( s, funcs, types, class_perms, &func_args, parent_type, file, ) { Ok(mut s) => ret.append(&mut s), Err(e) => errors.append(e), } } Expression::Decl(Declaration::Type(t)) => { let type_being_parsed = match types.get(&t.name.to_string()) { Some(t) => t, None => return Err(ErrorItem::Internal(InternalError {}).into()), }; match do_rules_pass( &t.expressions, types, funcs, class_perms, Some(type_being_parsed), file, ) { Ok(mut r) => ret.append(&mut r), Err(e) => errors.append(e), } } _ => {} } } errors.into_result(ret) } fn type_list_to_sexp(type_list: Vec<&TypeInfo>, type_map: &TypeMap) -> Vec<sexp::Sexp> { let mut ret = Vec::new(); for t in type_list { if let Some(s) = Option::<sexp::Sexp>::from(t) { ret.extend(get_rules_vec_for_type(t, s, type_map)); } } ret } fn get_rules_vec_for_type(ti: &TypeInfo, s: sexp::Sexp, type_map: &TypeMap) -> Vec<sexp::Sexp> { let mut ret = vec![s]; if !ti.is_virtual { let role_assoc = if ti.is_resource(type_map) { "object_r" } else { "system_r" }; ret.push(list(&[ atom_s("roletype"), atom_s(role_assoc), atom_s(ti.name.as_ref()), ])); } for i in &ti.inherits { ret.push(list(&[ atom_s("typeattributeset"), atom_s(i.as_ref()), list(&[atom_s(ti.name.as_ref())]), ])); } for a in &ti.annotations { if let AnnotationInfo::Alias(a) = a { ret.push(list(&[atom_s("typealias"), atom_s(a.as_ref())])); ret.push(list(&[ atom_s("typealiasactual"), atom_s(a.as_ref()), atom_s(ti.name.as_ref()), ])); } } ret } fn rules_list_to_sexp<'a, T>(rules: T) -> Vec<sexp::Sexp> where T: IntoIterator<Item = ValidatedStatement<'a>>, { rules.into_iter().map(|r| Sexp::from(&r)).collect() } fn generate_sids<'a>( kernel_sid: &'a str, security_sid: &'a str, unlabeled_sid: &'a str, ) -> Vec<Sid<'a>> { vec![ Sid::new( "kernel", Context::new(true, None, None, Cow::Borrowed(kernel_sid), None, None), ), Sid::new( "security", Context::new(false, None, None, Cow::Borrowed(security_sid), None, None), ), Sid::new( "unlabeled", Context::new(false, None, None, Cow::Borrowed(unlabeled_sid), 
None, None), ), ] } fn func_map_to_sexp(funcs: &FunctionMap<'_>) -> Result<Vec<sexp::Sexp>, CascadeErrors> { let mut ret = Vec::new(); let mut errors = CascadeErrors::new(); for f in funcs.values() { if f.is_virtual { continue; } match Sexp::try_from(f) { Ok(func_sexp) => { ret.push(func_sexp); for ann in &f.annotations { if let AnnotationInfo::Alias(a) = ann { ret.push(f.generate_synthetic_alias_call(a.as_ref())); } } } Err(e) => errors.add_error(e), } } errors.into_result(ret) } #[cfg(test)] mod tests { use super::*; use crate::ast::{CascadeString, Declaration, Expression, Policy, TypeDecl}; use crate::internal_rep::TypeInfo; #[test] fn extend_type_map_test() { let mut exprs = Vec::new(); exprs.push(Expression::Decl(Declaration::Type(Box::new( TypeDecl::new( CascadeString::from("foo"), vec![CascadeString::from(constants::DOMAIN)], Vec::new(), ), )))); let p = Policy::new(exprs); let pf = PolicyFile::new(p, SimpleFile::new(String::new(), String::new())); let mut types = get_built_in_types_map(); extend_type_map(&pf, &mut types).unwrap(); match types.get("foo") { Some(foo) => assert_eq!(foo.name, "foo"), None => panic!("Foo is not in hash map"), } match types.get(constants::DOMAIN) { Some(foo) => assert_eq!(foo.name, "domain"), None => panic!("Domain is not in hash map"), } } #[test] fn
() { let mut types = get_built_in_types_map(); let mut foo_type = TypeInfo::new( TypeDecl::new( CascadeString::from("foo"), vec![CascadeString::from(constants::DOMAIN)], Vec::new(), ), &SimpleFile::new(String::new(), String::new()), ) .unwrap(); foo_type.is_virtual = true; let mut bar_type = TypeInfo::new( TypeDecl::new( CascadeString::from("bar"), vec![ CascadeString::from(constants::DOMAIN), CascadeString::from("foo"), ], Vec::new(), ), &SimpleFile::new(String::new(), String::new()), ) .unwrap(); bar_type.is_virtual = true; let baz_type = TypeInfo::new( TypeDecl::new( CascadeString::from("baz"), vec![ CascadeString::from(constants::DOMAIN), CascadeString::from("foo"), CascadeString::from("bar"), ], Vec::new(), ), &SimpleFile::new(String::new(), String::new()), ) .unwrap(); types.insert("foo".to_string(), foo_type); types.insert("bar".to_string(), bar_type); types.insert("baz".to_string(), baz_type); let _type_vec = organize_type_map(&types).unwrap(); // TODO: reenable this. The built in sid types break the ordering assumptions here // Once they have been removed, the below checks should work again // Skip built in types //assert_eq!(type_vec[type_vec.len() - 3].name, "foo"); //assert_eq!(type_vec[type_vec.len() - 2].name, "bar"); //assert_eq!(type_vec[type_vec.len() - 1].name, "baz"); } }
organize_type_map_test
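organize_type_map in compile.rs orders type declarations by repeatedly emitting every type whose parents have all been emitted already, and treats a pass that makes no progress as a cycle or an unknown parent. A reduced sketch of that ordering loop; the Ty struct and the plain String error are stand-ins for TypeInfo and CascadeErrors, chosen here for illustration:

// `Ty` stands in for TypeInfo; errors are reduced to a String.
struct Ty {
    name: String,
    parents: Vec<String>,
}

fn organize(types: &[Ty]) -> Result<Vec<&Ty>, String> {
    let mut remaining: Vec<&Ty> = types.iter().collect();
    let mut out: Vec<&Ty> = Vec::new();
    while !remaining.is_empty() {
        // Split into types whose parents are all already emitted (ready)
        // and types that still have to wait.
        let (ready, waiting): (Vec<&Ty>, Vec<&Ty>) = remaining
            .into_iter()
            .partition(|t| t.parents.iter().all(|p| out.iter().any(|o| &o.name == p)));
        if ready.is_empty() {
            // No progress in this pass: a cycle or a non-existent parent.
            return Err("cycle detected or unknown parent".to_string());
        }
        out.extend(ready);
        remaining = waiting;
    }
    Ok(out)
}

fn main() {
    let types = vec![
        Ty { name: "domain".into(), parents: vec![] },
        Ty { name: "foo".into(), parents: vec!["domain".into()] },
        Ty { name: "bar".into(), parents: vec!["domain".into(), "foo".into()] },
    ];
    let ordered = organize(&types).unwrap();
    assert_eq!(ordered.last().unwrap().name, "bar");
}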
db.rs
//! Defines database & queries for macro expansion. use std::sync::Arc; use base_db::{salsa, SourceDatabase}; use limit::Limit; use mbe::{ExpandError, ExpandResult}; use parser::{FragmentKind, T}; use syntax::{ algo::diff, ast::{self, NameOwner}, AstNode, GreenNode, Parse, SyntaxNode, SyntaxToken, }; use crate::{ ast_id_map::AstIdMap, hygiene::HygieneFrame, input::process_macro_input, BuiltinAttrExpander, BuiltinDeriveExpander, BuiltinFnLikeExpander, HirFileId, HirFileIdRepr, MacroCallId, MacroCallKind, MacroCallLoc, MacroDefId, MacroDefKind, MacroFile, ProcMacroExpander, }; /// Total limit on the number of tokens produced by any macro invocation. /// /// If an invocation produces more tokens than this limit, it will not be stored in the database and /// an error will be emitted. /// /// Actual max for `analysis-stats .` at some point: 30672. static TOKEN_LIMIT: Limit = Limit::new(524_288); #[derive(Debug, Clone, Eq, PartialEq)] pub enum TokenExpander { /// Old-style `macro_rules`. MacroRules { mac: mbe::MacroRules, def_site_token_map: mbe::TokenMap }, /// AKA macros 2.0. MacroDef { mac: mbe::MacroDef, def_site_token_map: mbe::TokenMap }, /// Stuff like `line!` and `file!`. Builtin(BuiltinFnLikeExpander), /// `global_allocator` and such. BuiltinAttr(BuiltinAttrExpander), /// `derive(Copy)` and such. BuiltinDerive(BuiltinDeriveExpander), /// The thing we love the most here in rust-analyzer -- procedural macros. ProcMacro(ProcMacroExpander), } impl TokenExpander { fn expand( &self, db: &dyn AstDatabase, id: MacroCallId, tt: &tt::Subtree, ) -> mbe::ExpandResult<tt::Subtree> { match self { TokenExpander::MacroRules { mac, .. } => mac.expand(tt), TokenExpander::MacroDef { mac, .. } => mac.expand(tt), TokenExpander::Builtin(it) => it.expand(db, id, tt), // FIXME switch these to ExpandResult as well TokenExpander::BuiltinAttr(it) => it.expand(db, id, tt).into(), TokenExpander::BuiltinDerive(it) => it.expand(db, id, tt).into(), TokenExpander::ProcMacro(_) => { // We store the result in salsa db to prevent non-deterministic behavior in // some proc-macro implementation // See #4315 for details db.expand_proc_macro(id).into() } } } pub(crate) fn map_id_down(&self, id: tt::TokenId) -> tt::TokenId { match self { TokenExpander::MacroRules { mac, .. } => mac.map_id_down(id), TokenExpander::MacroDef { mac, .. } => mac.map_id_down(id), TokenExpander::Builtin(..) | TokenExpander::BuiltinAttr(..) | TokenExpander::BuiltinDerive(..) | TokenExpander::ProcMacro(..) => id, } } pub(crate) fn map_id_up(&self, id: tt::TokenId) -> (tt::TokenId, mbe::Origin)
} // FIXME: rename to ExpandDatabase #[salsa::query_group(AstDatabaseStorage)] pub trait AstDatabase: SourceDatabase { fn ast_id_map(&self, file_id: HirFileId) -> Arc<AstIdMap>; /// Main public API -- parses a hir file, not caring whether it's a real /// file or a macro expansion. #[salsa::transparent] fn parse_or_expand(&self, file_id: HirFileId) -> Option<SyntaxNode>; /// Implementation for the macro case. fn parse_macro_expansion( &self, macro_file: MacroFile, ) -> ExpandResult<Option<(Parse<SyntaxNode>, Arc<mbe::TokenMap>)>>; /// Macro ids. That's probably the tricksiest bit in rust-analyzer, and the /// reason why we use salsa at all. /// /// We encode macro definitions into ids of macro calls, this what allows us /// to be incremental. #[salsa::interned] fn intern_macro(&self, macro_call: MacroCallLoc) -> MacroCallId; /// Lowers syntactic macro call to a token tree representation. #[salsa::transparent] fn macro_arg(&self, id: MacroCallId) -> Option<Arc<(tt::Subtree, mbe::TokenMap)>>; /// Extracts syntax node, corresponding to a macro call. That's a firewall /// query, only typing in the macro call itself changes the returned /// subtree. fn macro_arg_text(&self, id: MacroCallId) -> Option<GreenNode>; /// Gets the expander for this macro. This compiles declarative macros, and /// just fetches procedural ones. fn macro_def(&self, id: MacroDefId) -> Option<Arc<TokenExpander>>; /// Expand macro call to a token tree. This query is LRUed (we keep 128 or so results in memory) fn macro_expand(&self, macro_call: MacroCallId) -> ExpandResult<Option<Arc<tt::Subtree>>>; /// Special case of the previous query for procedural macros. We can't LRU /// proc macros, since they are not deterministic in general, and /// non-determinism breaks salsa in a very, very, very bad way. @edwin0cheng /// heroically debugged this once! fn expand_proc_macro(&self, call: MacroCallId) -> Result<tt::Subtree, mbe::ExpandError>; /// Firewall query that returns the error from the `macro_expand` query. fn macro_expand_error(&self, macro_call: MacroCallId) -> Option<ExpandError>; fn hygiene_frame(&self, file_id: HirFileId) -> Arc<HygieneFrame>; } /// This expands the given macro call, but with different arguments. This is /// used for completion, where we want to see what 'would happen' if we insert a /// token. The `token_to_map` mapped down into the expansion, with the mapped /// token returned. pub fn expand_speculative( db: &dyn AstDatabase, actual_macro_call: MacroCallId, speculative_args: &ast::TokenTree, token_to_map: SyntaxToken, ) -> Option<(SyntaxNode, SyntaxToken)> { let (tt, tmap_1) = mbe::syntax_node_to_token_tree(speculative_args.syntax()); let range = token_to_map.text_range().checked_sub(speculative_args.syntax().text_range().start())?; let token_id = tmap_1.token_by_range(range)?; let macro_def = { let loc: MacroCallLoc = db.lookup_intern_macro(actual_macro_call); db.macro_def(loc.def)? 
}; let speculative_expansion = macro_def.expand(db, actual_macro_call, &tt); let fragment_kind = macro_fragment_kind(db, actual_macro_call); let (node, tmap_2) = mbe::token_tree_to_syntax_node(&speculative_expansion.value, fragment_kind).ok()?; let token_id = macro_def.map_id_down(token_id); let range = tmap_2.range_by_token(token_id, token_to_map.kind())?; let token = node.syntax_node().covering_element(range).into_token()?; Some((node.syntax_node(), token)) } fn ast_id_map(db: &dyn AstDatabase, file_id: HirFileId) -> Arc<AstIdMap> { let map = db.parse_or_expand(file_id).map(|it| AstIdMap::from_source(&it)).unwrap_or_default(); Arc::new(map) } fn parse_or_expand(db: &dyn AstDatabase, file_id: HirFileId) -> Option<SyntaxNode> { match file_id.0 { HirFileIdRepr::FileId(file_id) => Some(db.parse(file_id).tree().syntax().clone()), HirFileIdRepr::MacroFile(macro_file) => { db.parse_macro_expansion(macro_file).value.map(|(it, _)| it.syntax_node()) } } } fn parse_macro_expansion( db: &dyn AstDatabase, macro_file: MacroFile, ) -> ExpandResult<Option<(Parse<SyntaxNode>, Arc<mbe::TokenMap>)>> { let _p = profile::span("parse_macro_expansion"); let result = db.macro_expand(macro_file.macro_call_id); if let Some(err) = &result.err { // Note: // The final goal we would like to make all parse_macro success, // such that the following log will not call anyway. let loc: MacroCallLoc = db.lookup_intern_macro(macro_file.macro_call_id); let node = loc.kind.to_node(db); // collect parent information for warning log let parents = std::iter::successors(loc.kind.file_id().call_node(db), |it| it.file_id.call_node(db)) .map(|n| format!("{:#}", n.value)) .collect::<Vec<_>>() .join("\n"); log::warn!( "fail on macro_parse: (reason: {:?} macro_call: {:#}) parents: {}", err, node.value, parents ); } let tt = match result.value { Some(tt) => tt, None => return ExpandResult { value: None, err: result.err }, }; let fragment_kind = macro_fragment_kind(db, macro_file.macro_call_id); log::debug!("expanded = {}", tt.as_debug_string()); log::debug!("kind = {:?}", fragment_kind); let (parse, rev_token_map) = match mbe::token_tree_to_syntax_node(&tt, fragment_kind) { Ok(it) => it, Err(err) => { log::debug!( "failed to parse expanstion to {:?} = {}", fragment_kind, tt.as_debug_string() ); return ExpandResult::only_err(err); } }; match result.err { Some(err) => { // Safety check for recursive identity macro. 
let node = parse.syntax_node(); let file: HirFileId = macro_file.into(); let call_node = match file.call_node(db) { Some(it) => it, None => { return ExpandResult::only_err(err); } }; if is_self_replicating(&node, &call_node.value) { ExpandResult::only_err(err) } else { ExpandResult { value: Some((parse, Arc::new(rev_token_map))), err: Some(err) } } } None => { log::debug!("parse = {:?}", parse.syntax_node().kind()); ExpandResult { value: Some((parse, Arc::new(rev_token_map))), err: None } } } } fn macro_arg(db: &dyn AstDatabase, id: MacroCallId) -> Option<Arc<(tt::Subtree, mbe::TokenMap)>> { let arg = db.macro_arg_text(id)?; let (mut tt, tmap) = mbe::syntax_node_to_token_tree(&SyntaxNode::new_root(arg)); let loc: MacroCallLoc = db.lookup_intern_macro(id); if loc.def.is_proc_macro() { // proc macros expect their inputs without parentheses, MBEs expect it with them included tt.delimiter = None; } Some(Arc::new((tt, tmap))) } fn macro_arg_text(db: &dyn AstDatabase, id: MacroCallId) -> Option<GreenNode> { let loc = db.lookup_intern_macro(id); let arg = loc.kind.arg(db)?; let arg = process_macro_input(&loc.kind, arg); if matches!(loc.kind, MacroCallKind::FnLike { .. }) { let first = arg.first_child_or_token().map_or(T![.], |it| it.kind()); let last = arg.last_child_or_token().map_or(T![.], |it| it.kind()); let well_formed_tt = matches!((first, last), (T!['('], T![')']) | (T!['['], T![']']) | (T!['{'], T!['}'])); if !well_formed_tt { // Don't expand malformed (unbalanced) macro invocations. This is // less than ideal, but trying to expand unbalanced macro calls // sometimes produces pathological, deeply nested code which breaks // all kinds of things. // // Some day, we'll have explicit recursion counters for all // recursive things, at which point this code might be removed. cov_mark::hit!(issue9358_bad_macro_stack_overflow); return None; } } Some(arg.green().into()) } fn macro_def(db: &dyn AstDatabase, id: MacroDefId) -> Option<Arc<TokenExpander>> { match id.kind { MacroDefKind::Declarative(ast_id) => match ast_id.to_node(db) { ast::Macro::MacroRules(macro_rules) => { let arg = macro_rules.token_tree()?; let (tt, def_site_token_map) = mbe::syntax_node_to_token_tree(arg.syntax()); let mac = match mbe::MacroRules::parse(&tt) { Ok(it) => it, Err(err) => { let name = macro_rules.name().map(|n| n.to_string()).unwrap_or_default(); log::warn!("fail on macro_def parse ({}): {:?} {:#?}", name, err, tt); return None; } }; Some(Arc::new(TokenExpander::MacroRules { mac, def_site_token_map })) } ast::Macro::MacroDef(macro_def) => { let arg = macro_def.body()?; let (tt, def_site_token_map) = mbe::syntax_node_to_token_tree(arg.syntax()); let mac = match mbe::MacroDef::parse(&tt) { Ok(it) => it, Err(err) => { let name = macro_def.name().map(|n| n.to_string()).unwrap_or_default(); log::warn!("fail on macro_def parse ({}): {:?} {:#?}", name, err, tt); return None; } }; Some(Arc::new(TokenExpander::MacroDef { mac, def_site_token_map })) } }, MacroDefKind::BuiltIn(expander, _) => Some(Arc::new(TokenExpander::Builtin(expander))), MacroDefKind::BuiltInAttr(expander, _) => { Some(Arc::new(TokenExpander::BuiltinAttr(expander))) } MacroDefKind::BuiltInDerive(expander, _) => { Some(Arc::new(TokenExpander::BuiltinDerive(expander))) } MacroDefKind::BuiltInEager(..) => None, MacroDefKind::ProcMacro(expander, ..) 
=> Some(Arc::new(TokenExpander::ProcMacro(expander))), } } fn macro_expand(db: &dyn AstDatabase, id: MacroCallId) -> ExpandResult<Option<Arc<tt::Subtree>>> { let _p = profile::span("macro_expand"); let loc: MacroCallLoc = db.lookup_intern_macro(id); if let Some(eager) = &loc.eager { return ExpandResult { value: Some(eager.arg_or_expansion.clone()), // FIXME: There could be errors here! err: None, }; } let macro_arg = match db.macro_arg(id) { Some(it) => it, None => return ExpandResult::str_err("Fail to args in to tt::TokenTree".into()), }; let macro_rules = match db.macro_def(loc.def) { Some(it) => it, None => return ExpandResult::str_err("Fail to find macro definition".into()), }; let ExpandResult { value: tt, err } = macro_rules.expand(db, id, &macro_arg.0); // Set a hard limit for the expanded tt let count = tt.count(); // XXX: Make ExpandResult a real error and use .map_err instead? if TOKEN_LIMIT.check(count).is_err() { return ExpandResult::str_err(format!( "macro invocation exceeds token limit: produced {} tokens, limit is {}", count, TOKEN_LIMIT.inner(), )); } ExpandResult { value: Some(Arc::new(tt)), err } } fn macro_expand_error(db: &dyn AstDatabase, macro_call: MacroCallId) -> Option<ExpandError> { db.macro_expand(macro_call).err } fn expand_proc_macro( db: &dyn AstDatabase, id: MacroCallId, ) -> Result<tt::Subtree, mbe::ExpandError> { let loc: MacroCallLoc = db.lookup_intern_macro(id); let macro_arg = match db.macro_arg(id) { Some(it) => it, None => { return Err( tt::ExpansionError::Unknown("No arguments for proc-macro".to_string()).into() ) } }; let expander = match loc.def.kind { MacroDefKind::ProcMacro(expander, ..) => expander, _ => unreachable!(), }; let attr_arg = match &loc.kind { MacroCallKind::Attr { attr_args, .. } => Some(attr_args), _ => None, }; expander.expand(db, loc.krate, &macro_arg.0, attr_arg) } fn is_self_replicating(from: &SyntaxNode, to: &SyntaxNode) -> bool { if diff(from, to).is_empty() { return true; } if let Some(stmts) = ast::MacroStmts::cast(from.clone()) { if stmts.statements().any(|stmt| diff(stmt.syntax(), to).is_empty()) { return true; } if let Some(expr) = stmts.expr() { if diff(expr.syntax(), to).is_empty() { return true; } } } false } fn hygiene_frame(db: &dyn AstDatabase, file_id: HirFileId) -> Arc<HygieneFrame> { Arc::new(HygieneFrame::new(db, file_id)) } fn macro_fragment_kind(db: &dyn AstDatabase, id: MacroCallId) -> FragmentKind { let loc: MacroCallLoc = db.lookup_intern_macro(id); loc.kind.fragment_kind() }
{ match self { TokenExpander::MacroRules { mac, .. } => mac.map_id_up(id), TokenExpander::MacroDef { mac, .. } => mac.map_id_up(id), TokenExpander::Builtin(..) | TokenExpander::BuiltinAttr(..) | TokenExpander::BuiltinDerive(..) | TokenExpander::ProcMacro(..) => (id, mbe::Origin::Call), } }
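db.rs caps macro output via TOKEN_LIMIT and refuses to cache an expansion once the check fails. A self-contained sketch of that guard; the Limit type below is a stand-in written for illustration and is not the actual API of the limit crate:

// Stand-in for the limit crate's Limit, sized like TOKEN_LIMIT above.
struct Limit(usize);

impl Limit {
    const fn new(max: usize) -> Self {
        Limit(max)
    }
    fn check(&self, n: usize) -> Result<(), ()> {
        if n <= self.0 { Ok(()) } else { Err(()) }
    }
    fn inner(&self) -> usize {
        self.0
    }
}

static TOKEN_LIMIT: Limit = Limit::new(524_288);

// Mirrors the early return in macro_expand: an oversized expansion becomes
// an error string instead of being stored.
fn guard(token_count: usize) -> Result<(), String> {
    if TOKEN_LIMIT.check(token_count).is_err() {
        return Err(format!(
            "macro invocation exceeds token limit: produced {} tokens, limit is {}",
            token_count,
            TOKEN_LIMIT.inner(),
        ));
    }
    Ok(())
}

fn main() {
    assert!(guard(30_672).is_ok());
    assert!(guard(1_000_000).is_err());
}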
fqdn.go
// SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Cilium package cmd import ( "context" "errors" "fmt" "net" "os" "regexp" "strconv" "strings" "sync" "time" "github.com/go-openapi/runtime/middleware" "github.com/go-openapi/strfmt" "github.com/miekg/dns" "github.com/sirupsen/logrus" "github.com/cilium/cilium/api/v1/models" . "github.com/cilium/cilium/api/v1/server/restapi/policy" "github.com/cilium/cilium/pkg/api" "github.com/cilium/cilium/pkg/controller" "github.com/cilium/cilium/pkg/endpoint" "github.com/cilium/cilium/pkg/fqdn" "github.com/cilium/cilium/pkg/fqdn/dnsproxy" "github.com/cilium/cilium/pkg/fqdn/matchpattern" "github.com/cilium/cilium/pkg/identity" secIDCache "github.com/cilium/cilium/pkg/identity/cache" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/metrics" "github.com/cilium/cilium/pkg/option" policyApi "github.com/cilium/cilium/pkg/policy/api" "github.com/cilium/cilium/pkg/proxy" "github.com/cilium/cilium/pkg/proxy/accesslog" "github.com/cilium/cilium/pkg/proxy/logger" "github.com/cilium/cilium/pkg/u8proto" ) const ( upstream = "upstreamTime" processingTime = "processingTime" metricErrorTimeout = "timeout" metricErrorProxy = "proxyErr" metricErrorDenied = "denied" metricErrorAllow = "allow" dnsSourceLookup = "lookup" dnsSourceConnection = "connection" ) func identitiesForFQDNSelectorIPs(selectorsWithIPsToUpdate map[policyApi.FQDNSelector][]net.IP, identityAllocator secIDCache.IdentityAllocator) (map[policyApi.FQDNSelector][]*identity.Identity, map[string]*identity.Identity, error)
func (d *Daemon) updateSelectorCacheFQDNs(ctx context.Context, selectors map[policyApi.FQDNSelector][]*identity.Identity, selectorsWithoutIPs []policyApi.FQDNSelector) *sync.WaitGroup { // There may be nothing to update - in this case, we exit and do not need // to trigger policy updates for all endpoints. if len(selectors) == 0 && len(selectorsWithoutIPs) == 0 { return &sync.WaitGroup{} } notifyWg := &sync.WaitGroup{} // Update mapping of selector to set of IPs in selector cache. for selector, identitySlice := range selectors { log.WithFields(logrus.Fields{ "fqdnSelectorString": selector, "identitySlice": identitySlice}).Debug("updating FQDN selector") numIds := make([]identity.NumericIdentity, 0, len(identitySlice)) for _, numId := range identitySlice { // Nil check here? Hopefully not necessary... numIds = append(numIds, numId.ID) } d.policy.GetSelectorCache().UpdateFQDNSelector(selector, numIds, notifyWg) } if len(selectorsWithoutIPs) > 0 { // Selectors which no longer map to IPs (due to TTL expiry, cache being // cleared forcibly via CLI, etc.) still exist in the selector cache // since policy is imported which allows it, but the selector does // not map to any IPs anymore. log.WithFields(logrus.Fields{ "fqdnSelectors": selectorsWithoutIPs, }).Debug("removing all identities from FQDN selectors") d.policy.GetSelectorCache().RemoveIdentitiesFQDNSelectors(selectorsWithoutIPs, notifyWg) } return d.endpointManager.UpdatePolicyMaps(ctx, notifyWg) } // bootstrapFQDN initializes the toFQDNs related subsystems: dnsNameManager and the DNS proxy. // dnsNameManager will use the default resolver and, implicitly, the // default DNS cache. The proxy binds to all interfaces, and uses the // configured DNS proxy port (this may be 0 and so OS-assigned). func (d *Daemon) bootstrapFQDN(possibleEndpoints map[uint16]*endpoint.Endpoint, preCachePath string) (err error) { cfg := fqdn.Config{ MinTTL: option.Config.ToFQDNsMinTTL, Cache: fqdn.NewDNSCache(option.Config.ToFQDNsMinTTL), UpdateSelectors: d.updateSelectors, } // Disable cleanup tracking on the default DNS cache. This cache simply // tracks which api.FQDNSelector are present in policy which apply to // locally running endpoints. cfg.Cache.DisableCleanupTrack() rg := fqdn.NewNameManager(cfg) d.policy.GetSelectorCache().SetLocalIdentityNotifier(rg) d.dnsNameManager = rg // Controller to cleanup TTL expired entries from the DNS policies. // dns-garbage-collector-job runs the logic to remove stale or undesired // entries from the DNS caches. This is done for all per-EP DNSCache // instances (ep.DNSHistory) with evictions (whether due to TTL expiry or // overlimit eviction) cascaded into ep.DNSZombies. Data in DNSHistory and // DNSZombies is further collected into the global DNSCache instance. The // data there drives toFQDNs policy via NameManager and ToFQDNs selectors. // DNSCache entries expire data when the TTL elapses and when the entries for // a DNS name are above a limit. The data is then placed into // DNSZombieMappings instances. These rely on the CT GC loop to update // liveness for each to-delete IP. When an IP is not in-use it is finally // deleted from the global DNSCache. Until then, each of these IPs is // inserted into the global cache as a synthetic DNS lookup. 
dnsGCJobName := "dns-garbage-collector-job" dnsGCJobInterval := 1 * time.Minute controller.NewManager().UpdateController(dnsGCJobName, controller.ControllerParams{ RunInterval: dnsGCJobInterval, DoFunc: func(ctx context.Context) error { var ( GCStart = time.Now() namesToClean []string // activeConnections holds DNSName -> single IP entries that have been // marked active by the CT GC. Since we expire in this controller, we // give these entries 2 cycles of TTL to allow for timing mismatches // with the CT GC. activeConnectionsTTL = int(2 * dnsGCJobInterval.Seconds()) activeConnections = fqdn.NewDNSCache(activeConnectionsTTL) ) // Cleanup each endpoint cache, deferring deletions via DNSZombies. endpoints := d.endpointManager.GetEndpoints() for _, ep := range endpoints { namesToClean = append(namesToClean, ep.DNSHistory.GC(GCStart, ep.DNSZombies)...) alive, dead := ep.DNSZombies.GC() // Alive zombie need to be added to the global cache as name->IP // entries. // // NB: The following comment is _no longer true_ (see // DNSZombies.GC()). We keep it to maintain the original intention // of the code for future reference: // We accumulate the names into namesToClean to ensure that the // original full DNS lookup (name -> many IPs) is expired and // only the active connections (name->single IP) are re-added. // Note: Other DNS lookups may also use an active IP. This is // fine. // lookupTime := time.Now() for _, zombie := range alive { namesToClean = fqdn.KeepUniqueNames(append(namesToClean, zombie.Names...)) for _, name := range zombie.Names { activeConnections.Update(lookupTime, name, []net.IP{zombie.IP}, activeConnectionsTTL) } } // Dead entries can be deleted outright, without any replacement. // Entries here have been evicted from the DNS cache (via .GC due to // TTL expiration or overlimit) and are no longer active connections. for _, zombie := range dead { namesToClean = fqdn.KeepUniqueNames(append(namesToClean, zombie.Names...)) } } namesToClean = fqdn.KeepUniqueNames(namesToClean) if len(namesToClean) == 0 { return nil } // Collect DNS data into the global cache. This aggregates all endpoint // and existing connection data into one place for use elsewhere. // In the case where a lookup occurs in a race with .ReplaceFromCache the // result is consistent: // - If before, the ReplaceFromCache will use the new data when pulling // in from each EP cache. // - If after, the normal update process occurs after .ReplaceFromCache // releases its locks. caches := []*fqdn.DNSCache{activeConnections} for _, ep := range endpoints { caches = append(caches, ep.DNSHistory) } cfg.Cache.ReplaceFromCacheByNames(namesToClean, caches...) metrics.FQDNGarbageCollectorCleanedTotal.Add(float64(len(namesToClean))) _, err := d.dnsNameManager.ForceGenerateDNS(context.TODO(), namesToClean) namesCount := len(namesToClean) // Limit the amount of info level logging to some sane amount if namesCount > 20 { // namedsToClean is only used for logging after this so we can reslice it in place namesToClean = namesToClean[:20] } log.WithField(logfields.Controller, dnsGCJobName).Infof( "FQDN garbage collector work deleted %d name entries: %s", namesCount, strings.Join(namesToClean, ",")) return err }, Context: d.ctx, }) // Prefill the cache with the CLI provided pre-cache data. This allows various bridging arrangements during upgrades, or just ensure critical DNS mappings remain. 
if preCachePath != "" { log.WithField(logfields.Path, preCachePath).Info("Reading toFQDNs pre-cache data") precache, err := readPreCache(preCachePath) if err != nil { // FIXME: add a link to the "documented format" log.WithError(err).WithField(logfields.Path, preCachePath).Error("Cannot parse toFQDNs pre-cache data. Please ensure the file is JSON and follows the documented format") // We do not stop the agent here. It is safer to continue with best effort // than to enter crash backoffs when this file is broken. } else { d.dnsNameManager.GetDNSCache().UpdateFromCache(precache, nil) } } // Prefill the cache with DNS lookups from restored endpoints. This is needed // to maintain continuity of which IPs are allowed. The GC cascade logic // below mimics the logic found in the dns-garbage-collector controller. // Note: This is TTL aware, and expired data will not be used (e.g. when // restoring after a long delay). globalCache := d.dnsNameManager.GetDNSCache() now := time.Now() for _, possibleEP := range possibleEndpoints { // Upgrades from old ciliums have this nil if possibleEP.DNSHistory != nil { globalCache.UpdateFromCache(possibleEP.DNSHistory, []string{}) // GC any connections that have expired, but propagate it to the zombies // list. DNSCache.GC can handle a nil DNSZombies parameter. We use the // actual now time because we are checkpointing at restore time. possibleEP.DNSHistory.GC(now, possibleEP.DNSZombies) } if possibleEP.DNSZombies != nil { lookupTime := time.Now() alive, _ := possibleEP.DNSZombies.GC() for _, zombie := range alive { for _, name := range zombie.Names { globalCache.Update(lookupTime, name, []net.IP{zombie.IP}, int(2*dnsGCJobInterval.Seconds())) } } } } // Do not start the proxy in dry mode or if L7 proxy is disabled. // The proxy would not get any traffic in the dry mode anyway, and some of the socket // operations require privileges not available in all unit tests. if option.Config.DryMode || !option.Config.EnableL7Proxy { return nil } // Once we stop returning errors from StartDNSProxy this should live in // StartProxySupport port, err := proxy.GetProxyPort(proxy.DNSProxyName) if option.Config.ToFQDNsProxyPort != 0 { port = uint16(option.Config.ToFQDNsProxyPort) } else if port == 0 { // Try locate old DNS proxy port number from the datapath port = d.datapath.GetProxyPort(proxy.DNSProxyName) } if err != nil { return err } proxy.DefaultDNSProxy, err = dnsproxy.StartDNSProxy("", port, option.Config.ToFQDNsEnableDNSCompression, option.Config.DNSMaxIPsPerRestoredRule, d.lookupEPByIP, d.LookupSecIDByIP, d.lookupIPsBySecID, d.notifyOnDNSMsg) if err == nil { // Increase the ProxyPort reference count so that it will never get released. err = d.l7Proxy.SetProxyPort(proxy.DNSProxyName, proxy.ProxyTypeDNS, proxy.DefaultDNSProxy.GetBindPort(), false) if err == nil && port == proxy.DefaultDNSProxy.GetBindPort() { log.Infof("Reusing previous DNS proxy port: %d", port) } proxy.DefaultDNSProxy.SetRejectReply(option.Config.FQDNRejectResponse) // Restore old rules for _, possibleEP := range possibleEndpoints { // Upgrades from old ciliums have this nil if possibleEP.DNSRules != nil { proxy.DefaultDNSProxy.RestoreRules(possibleEP) } } } return err // filled by StartDNSProxy } // updateDNSDatapathRules updates the DNS proxy iptables rules. Must be // called after iptables has been initailized, and only after // successful bootstrapFQDN(). 
func (d *Daemon) updateDNSDatapathRules() error { return d.l7Proxy.AckProxyPort(proxy.DNSProxyName) } // updateSelectors propagates the mapping of FQDNSelector to identity, as well // as the set of FQDNSelectors which have no IPs which correspond to them // (usually due to TTL expiry), down to policy layer managed by this daemon. func (d *Daemon) updateSelectors(ctx context.Context, selectorWithIPsToUpdate map[policyApi.FQDNSelector][]net.IP, selectorsWithoutIPs []policyApi.FQDNSelector) (wg *sync.WaitGroup, newlyAllocatedIdentities map[string]*identity.Identity, err error) { // Convert set of selectors with IPs to update to set of selectors // with identities corresponding to said IPs. selectorsIdentities, newlyAllocatedIdentities, err := identitiesForFQDNSelectorIPs(selectorWithIPsToUpdate, d.identityAllocator) if err != nil { return &sync.WaitGroup{}, nil, err } // Update mapping in selector cache with new identities. return d.updateSelectorCacheFQDNs(ctx, selectorsIdentities, selectorsWithoutIPs), newlyAllocatedIdentities, nil } // lookupEPByIP returns the endpoint that this IP belongs to func (d *Daemon) lookupEPByIP(endpointIP net.IP) (endpoint *endpoint.Endpoint, err error) { e := d.endpointManager.LookupIP(endpointIP) if e == nil { return nil, fmt.Errorf("Cannot find endpoint with IP %s", endpointIP.String()) } return e, nil } func (d *Daemon) lookupIPsBySecID(nid identity.NumericIdentity) []string { return d.ipcache.LookupByIdentity(nid) } // NotifyOnDNSMsg handles DNS data in the daemon by emitting monitor // events, proxy metrics and storing DNS data in the DNS cache. This may // result in rule generation. // It will: // - Report a monitor error event and proxy metrics when the proxy sees an // error, and when it can't process something in this function // - Report the verdict in a monitor event and emit proxy metrics // - Insert the DNS data into the cache when msg is a DNS response and we // can lookup the endpoint related to it // epIPPort and serverAddr should match the original request, where epAddr is // the source for egress (the only case current). // serverID is the destination server security identity at the time of the DNS event. func (d *Daemon) notifyOnDNSMsg(lookupTime time.Time, ep *endpoint.Endpoint, epIPPort string, serverID identity.NumericIdentity, serverAddr string, msg *dns.Msg, protocol string, allowed bool, stat *dnsproxy.ProxyRequestContext) error { var protoID = u8proto.ProtoIDs[strings.ToLower(protocol)] var verdict accesslog.FlowVerdict var reason string metricError := metricErrorAllow stat.ProcessingTime.Start() endMetric := func() { stat.ProcessingTime.End(true) metrics.ProxyUpstreamTime.WithLabelValues(metrics.ErrorTimeout, metrics.L7DNS, upstream).Observe( stat.UpstreamTime.Total().Seconds()) metrics.ProxyUpstreamTime.WithLabelValues(metricError, metrics.L7DNS, processingTime).Observe( stat.ProcessingTime.Total().Seconds()) } switch { case stat.IsTimeout(): metricError = metricErrorTimeout endMetric() return nil case stat.Err != nil: metricError = metricErrorProxy verdict = accesslog.VerdictError reason = "Error: " + stat.Err.Error() case allowed: verdict = accesslog.VerdictForwarded reason = "Allowed by policy" case !allowed: metricError = metricErrorDenied verdict = accesslog.VerdictDenied reason = "Denied by policy" } if ep == nil { // This is a hard fail. 
We cannot proceed because record.Log requires a // non-nil ep, and we also don't want to insert this data into the // cache if we don't know that an endpoint asked for it (this is // asserted via ep != nil here and msg.Response && msg.Rcode == // dns.RcodeSuccess below). err := errors.New("DNS request cannot be associated with an existing endpoint") log.WithError(err).Error("cannot find matching endpoint") endMetric() return err } // We determine the direction based on the DNS packet. The observation // point is always Egress, however. var flowType accesslog.FlowType var addrInfo logger.AddressingInfo if msg.Response { flowType = accesslog.TypeResponse addrInfo.DstIPPort = epIPPort addrInfo.DstIdentity = ep.GetIdentity() addrInfo.SrcIPPort = serverAddr addrInfo.SrcIdentity = serverID } else { flowType = accesslog.TypeRequest addrInfo.SrcIPPort = epIPPort addrInfo.SrcIdentity = ep.GetIdentity() addrInfo.DstIPPort = serverAddr addrInfo.DstIdentity = serverID } qname, responseIPs, TTL, CNAMEs, rcode, recordTypes, qTypes, err := dnsproxy.ExtractMsgDetails(msg) if err != nil { // This error is ok because all these values are used for reporting, or filling in the cache. log.WithError(err).Error("cannot extract DNS message details") } var serverPort uint16 _, serverPortStr, err := net.SplitHostPort(serverAddr) if err != nil { log.WithError(err).Error("cannot extract destination IP from DNS request") } else { if serverPortUint64, err := strconv.ParseUint(serverPortStr, 10, 16); err != nil { log.WithError(err).WithField(logfields.Port, serverPortStr).Error("cannot parse destination port") } else { serverPort = uint16(serverPortUint64) } } ep.UpdateProxyStatistics(strings.ToUpper(protocol), serverPort, false, !msg.Response, verdict) record := logger.NewLogRecord(flowType, false, func(lr *logger.LogRecord) { lr.LogRecord.TransportProtocol = accesslog.TransportProtocol(protoID) }, logger.LogTags.Verdict(verdict, reason), logger.LogTags.Addressing(addrInfo), logger.LogTags.DNS(&accesslog.LogRecordDNS{ Query: qname, IPs: responseIPs, TTL: TTL, CNAMEs: CNAMEs, ObservationSource: accesslog.DNSSourceProxy, RCode: rcode, QTypes: qTypes, AnswerTypes: recordTypes, }), ) record.Log() if msg.Response && msg.Rcode == dns.RcodeSuccess && len(responseIPs) > 0 { // This must happen before the NameManager update below, to ensure that // this data is included in the serialized Endpoint object. // We also need to add to the cache before we purge any matching zombies // because they are locked separately and we want to keep the allowed IPs // consistent if a regeneration happens between the two steps. If an update // doesn't happen in the case, we play it safe and don't purge the zombie // in case of races. log.WithField(logfields.EndpointID, ep.ID).Debug("Recording DNS lookup in endpoint specific cache") if updated := ep.DNSHistory.Update(lookupTime, qname, responseIPs, int(TTL)); updated { ep.DNSZombies.ForceExpireByNameIP(lookupTime, qname, responseIPs...) 
ep.SyncEndpointHeaderFile() } log.WithFields(logrus.Fields{ "qname": qname, "ips": responseIPs, }).Debug("Updating DNS name in cache from response to to query") updateCtx, updateCancel := context.WithTimeout(context.TODO(), option.Config.FQDNProxyResponseMaxDelay) defer updateCancel() updateStart := time.Now() wg, newlyAllocatedIdentities, err := d.dnsNameManager.UpdateGenerateDNS(updateCtx, lookupTime, map[string]*fqdn.DNSIPRecords{ qname: { IPs: responseIPs, TTL: int(TTL), }}) if err != nil { log.WithError(err).Error("error updating internal DNS cache for rule generation") } updateComplete := make(chan struct{}) go func(wg *sync.WaitGroup, done chan struct{}) { wg.Wait() close(updateComplete) }(wg, updateComplete) select { case <-updateCtx.Done(): log.Error("Timed out waiting for datapath updates of FQDN IP information; returning response") case <-updateComplete: } log.WithFields(logrus.Fields{ logfields.Duration: time.Since(updateStart), logfields.EndpointID: ep.GetID(), "qname": qname, }).Debug("Waited for endpoints to regenerate due to a DNS response") // Add new identities to the ipcache after the wait for the policy updates above d.ipcache.UpsertGeneratedIdentities(newlyAllocatedIdentities) endMetric() } stat.ProcessingTime.End(true) return nil } type getFqdnCache struct { daemon *Daemon } func NewGetFqdnCacheHandler(d *Daemon) GetFqdnCacheHandler { return &getFqdnCache{daemon: d} } func (h *getFqdnCache) Handle(params GetFqdnCacheParams) middleware.Responder { // endpoints we want data from endpoints := h.daemon.endpointManager.GetEndpoints() CIDRStr := "" if params.Cidr != nil { CIDRStr = *params.Cidr } matchPatternStr := "" if params.Matchpattern != nil { matchPatternStr = *params.Matchpattern } lookups, err := extractDNSLookups(endpoints, CIDRStr, matchPatternStr) switch { case err != nil: return api.Error(GetFqdnCacheBadRequestCode, err) case len(lookups) == 0: return NewGetFqdnCacheIDNotFound() } return NewGetFqdnCacheOK().WithPayload(lookups) } type deleteFqdnCache struct { daemon *Daemon } func NewDeleteFqdnCacheHandler(d *Daemon) DeleteFqdnCacheHandler { return &deleteFqdnCache{daemon: d} } func (h *deleteFqdnCache) Handle(params DeleteFqdnCacheParams) middleware.Responder { // endpoints we want to modify endpoints := h.daemon.endpointManager.GetEndpoints() matchPatternStr := "" if params.Matchpattern != nil { matchPatternStr = *params.Matchpattern } namesToRegen, err := deleteDNSLookups( h.daemon.dnsNameManager.GetDNSCache(), endpoints, time.Now(), matchPatternStr) if err != nil { return api.Error(DeleteFqdnCacheBadRequestCode, err) } h.daemon.dnsNameManager.ForceGenerateDNS(context.TODO(), namesToRegen) return NewDeleteFqdnCacheOK() } type getFqdnCacheID struct { daemon *Daemon } func NewGetFqdnCacheIDHandler(d *Daemon) GetFqdnCacheIDHandler { return &getFqdnCacheID{daemon: d} } func (h *getFqdnCacheID) Handle(params GetFqdnCacheIDParams) middleware.Responder { var endpoints []*endpoint.Endpoint if params.ID != "" { ep, err := h.daemon.endpointManager.Lookup(params.ID) switch { case err != nil: return api.Error(GetFqdnCacheIDBadRequestCode, err) case ep == nil: return api.Error(GetFqdnCacheIDNotFoundCode, fmt.Errorf("Cannot find endpoint %s", params.ID)) default: endpoints = []*endpoint.Endpoint{ep} } } CIDRStr := "" if params.Cidr != nil { CIDRStr = *params.Cidr } matchPatternStr := "" if params.Matchpattern != nil { matchPatternStr = *params.Matchpattern } lookups, err := extractDNSLookups(endpoints, CIDRStr, matchPatternStr) switch { case err != nil: return 
api.Error(GetFqdnCacheBadRequestCode, err) case len(lookups) == 0: return NewGetFqdnCacheIDNotFound() } return NewGetFqdnCacheIDOK().WithPayload(lookups) } type getFqdnNamesHandler struct { daemon *Daemon } func NewGetFqdnNamesHandler(d *Daemon) GetFqdnNamesHandler { return &getFqdnNamesHandler{daemon: d} } func (h *getFqdnNamesHandler) Handle(params GetFqdnNamesParams) middleware.Responder { payload := h.daemon.dnsNameManager.GetModel() return NewGetFqdnNamesOK().WithPayload(payload) } // extractDNSLookups returns API models.DNSLookup copies of DNS data in each // endpoint's DNSHistory. These are filtered by CIDRStr and matchPatternStr if // they are non-empty. func extractDNSLookups(endpoints []*endpoint.Endpoint, CIDRStr, matchPatternStr string) (lookups []*models.DNSLookup, err error) { cidrMatcher := func(ip net.IP) bool { return true } if CIDRStr != "" { _, cidr, err := net.ParseCIDR(CIDRStr) if err != nil { return nil, err } cidrMatcher = func(ip net.IP) bool { return cidr.Contains(ip) } } nameMatcher := func(name string) bool { return true } if matchPatternStr != "" { matcher, err := matchpattern.Validate(matchpattern.Sanitize(matchPatternStr)) if err != nil { return nil, err } nameMatcher = func(name string) bool { return matcher.MatchString(name) } } for _, ep := range endpoints { for _, lookup := range ep.DNSHistory.Dump() { if !nameMatcher(lookup.Name) { continue } // The API model needs strings IPStrings := make([]string, 0, len(lookup.IPs)) // only proceed if any IP matches the cidr selector anIPMatches := false for _, ip := range lookup.IPs { anIPMatches = anIPMatches || cidrMatcher(ip) IPStrings = append(IPStrings, ip.String()) } if !anIPMatches { continue } lookups = append(lookups, &models.DNSLookup{ Fqdn: lookup.Name, Ips: IPStrings, LookupTime: strfmt.DateTime(lookup.LookupTime), TTL: int64(lookup.TTL), ExpirationTime: strfmt.DateTime(lookup.ExpirationTime), EndpointID: int64(ep.ID), Source: dnsSourceLookup, }) } for _, delete := range ep.DNSZombies.DumpAlive(cidrMatcher) { for _, name := range delete.Names { if !nameMatcher(name) { continue } lookups = append(lookups, &models.DNSLookup{ Fqdn: name, Ips: []string{delete.IP.String()}, LookupTime: strfmt.DateTime(delete.AliveAt), TTL: 0, ExpirationTime: strfmt.DateTime(delete.AliveAt), EndpointID: int64(ep.ID), Source: dnsSourceConnection, }) } } } return lookups, nil } func deleteDNSLookups(globalCache *fqdn.DNSCache, endpoints []*endpoint.Endpoint, expireLookupsBefore time.Time, matchPatternStr string) (namesToRegen []string, err error) { var nameMatcher *regexp.Regexp // nil matches all in our implementation if matchPatternStr != "" { nameMatcher, err = matchpattern.Validate(matchPatternStr) if err != nil { return nil, err } } // Clear any to-delete entries globally // Clear any to-delete entries in each endpoint, then update globally to // insert any entries that now should be in the global cache (because they // provide an IP at the latest expiration time). namesToRegen = append(namesToRegen, globalCache.ForceExpire(expireLookupsBefore, nameMatcher)...) for _, ep := range endpoints { namesToRegen = append(namesToRegen, ep.DNSHistory.ForceExpire(expireLookupsBefore, nameMatcher)...) globalCache.UpdateFromCache(ep.DNSHistory, nil) namesToRegen = append(namesToRegen, ep.DNSZombies.ForceExpire(expireLookupsBefore, nameMatcher, nil)...) activeConnections := fqdn.NewDNSCache(0) zombies, _ := ep.DNSZombies.GC() lookupTime := time.Now() for _, zombie := range zombies { namesToRegen = append(namesToRegen, zombie.Names...) 
for _, name := range zombie.Names { activeConnections.Update(lookupTime, name, []net.IP{zombie.IP}, 0) } } globalCache.UpdateFromCache(activeConnections, nil) } return namesToRegen, nil } // readPreCache returns a fqdn.DNSCache object created from the json data at // preCachePath func readPreCache(preCachePath string) (cache *fqdn.DNSCache, err error) { data, err := os.ReadFile(preCachePath) if err != nil { return nil, err } cache = fqdn.NewDNSCache(0) // no per-host limit here if err = cache.UnmarshalJSON(data); err != nil { return nil, err } return cache, nil }
{ var err error // Used to track identities which are allocated in calls to // AllocateCIDRs. If we for some reason cannot allocate new CIDRs, // we have to undo all of our changes and release the identities. // This is best effort, as releasing can fail as well. usedIdentities := make([]*identity.Identity, 0, len(selectorsWithIPsToUpdate)) selectorIdentitySliceMapping := make(map[policyApi.FQDNSelector][]*identity.Identity, len(selectorsWithIPsToUpdate)) newlyAllocatedIdentities := make(map[string]*identity.Identity) // Allocate identities for each IPNet and then map to selector // // The incoming IPs may already have had corresponding identities // allocated for them from a prior call to this function, even with the // exact same selector. In that case, this function will then allocate // new references to the same identities again! Ideally we would avoid // this, but at this layer we do not know which of the IPs already has // had a corresponding identity allocated to it via this selector code. // // One might be tempted to think that if the Identity shows up in // 'newlyAllocatedIdentities' that this is newly allocated by the // selector (hence this code is responsible for release), and that if // an Identity is *not* part of this slice then that means the selector // already allocated this Identity (hence this code is not responsible // for release). However, the Identity could have been previously // allocated by some other path like via regular CIDR policy. If that's // the case and we tried to use 'newlyAllocatedIdentities' to determine // when we are duplicating identity allocation from the same selector, // and then the user deleted the CIDR policy, then we could actually // end up cleaning up the last reference to that identity, even though // the selector referenced here is still using it. // // Therefore, for now we just let the duplicate allocations go through // here and then balance the dereferences over in the corresponding // SelectorCache.updateFQDNSelector() call where we have access both // to the existing set of allocated identities and the newly allocated // set here. This way we can ensure that each identity is referenced // exactly once from each selector that selects the identity. for selector, selectorIPs := range selectorsWithIPsToUpdate { log.WithFields(logrus.Fields{ "fqdnSelector": selector, "ips": selectorIPs, }).Debug("getting identities for IPs associated with FQDNSelector") var currentlyAllocatedIdentities []*identity.Identity if currentlyAllocatedIdentities, err = identityAllocator.AllocateCIDRsForIPs(selectorIPs, newlyAllocatedIdentities); err != nil { identityAllocator.ReleaseSlice(context.TODO(), nil, usedIdentities) log.WithError(err).WithField("prefixes", selectorIPs).Warn( "failed to allocate identities for IPs") return nil, nil, err } usedIdentities = append(usedIdentities, currentlyAllocatedIdentities...) selectorIdentitySliceMapping[selector] = currentlyAllocatedIdentities } return selectorIdentitySliceMapping, newlyAllocatedIdentities, nil }
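The bounded wait in notifyOnDNSMsg above — bridging wg.Wait into a select through a channel that is closed when the work finishes — is the pattern that caps how long a DNS response is held back while policy updates propagate. A minimal, self-contained sketch of that pattern (hypothetical names, not the daemon's actual code):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitWithTimeout blocks until wg completes or ctx expires; it reports
// whether the work finished before the deadline.
func waitWithTimeout(ctx context.Context, wg *sync.WaitGroup) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-ctx.Done():
		return false
	case <-done:
		return true
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(50 * time.Millisecond) // stand-in for an endpoint policy update
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println("completed before deadline:", waitWithTimeout(ctx, &wg))
}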
index.js
var inst = require("../index").getInstance(); module.exports = inst.use("cssbase-context");
CardSubsetTest.js
import CardSet from "../../../src/artifact/js/CardSet.js"; import CardSetType from "../../../src/artifact/js/CardSetType.js"; import CardSubset from "../../../src/artifact/js/CardSubset.js"; QUnit.module("CardSubset"); var CardSubsetTest = {}; QUnit.test("CardSubset properties The Hunt for Gollum", function(assert) { var cardSubset = CardSubset.SOM1_THE_HUNT_FOR_GOLLUM; var properties = CardSubset.properties[cardSubset]; assert.equal(properties.name, "The Hunt for Gollum"); assert.equal(properties.cardSetKey, CardSet.SHADOWS_OF_MIRKWOOD); assert.equal(properties.number, 1); assert.equal(properties.typeKey, CardSetType.ADVENTURE_PACK); assert.equal(properties.key, "som1TheHuntForGollum"); }); QUnit.test("keys and values", function(assert) { // Setup. // Run. var result = CardSubset.keys(); var ownPropertyNames = Object.getOwnPropertyNames(CardSubset); // Verify. ownPropertyNames.forEach(function(key) { var key2 = CardSubset[key]; if (key !== "properties" && typeof key2 === "string") { assert.ok(CardSubset.properties[key2], "Missing value for key = " + key); } }); result.forEach(function(value) { var p = ownPropertyNames.filter(function(key) { return CardSubset[key] === value; }); assert.equal(p.length, 1, "Missing key for value = " + value);
QUnit.test("CardSubset.keys()", function(assert) { // Run. var result = CardSubset.keys(); // Verify. assert.ok(result); var length = 30; assert.equal(result.length, length); var i = 0; assert.equal(result[i++], CardSubset.AA1_THE_WASTES_OF_ERIADOR); assert.equal(result[i++], CardSubset.AA2_ESCAPE_FROM_MOUNT_GRAM); assert.equal(result[i++], CardSubset.AA3_ACROSS_THE_ETTENMOORS); assert.equal(result[i++], CardSubset.AA4_THE_TREACHERY_OF_RHUDAUR); assert.equal(result[i++], CardSubset.AA5_THE_BATTLE_OF_CARN_DUM); assert.equal(result[i++], CardSubset.AA6_THE_DREAD_REALM); assert.equal(result[i++], CardSubset.ATS1_THE_STEWARDS_FEAR); assert.equal(result[i++], CardSubset.ATS2_THE_DRUADAN_FOREST); assert.equal(result[i++], CardSubset.ATS3_ENCOUNTER_AT_AMON_DIN); assert.equal(result[i++], CardSubset.ATS4_ASSAULT_ON_OSGILIATH); assert.equal(result[i++], CardSubset.ATS5_THE_BLOOD_OF_GONDOR); assert.equal(result[i++], CardSubset.ATS6_THE_MORGUL_VALE); assert.equal(result[i++], CardSubset.D1_THE_REDHORN_GATE); assert.equal(result[i++], CardSubset.D2_ROAD_TO_RIVENDELL); assert.equal(result[i++], CardSubset.D3_THE_WATCHER_IN_THE_WATER); assert.equal(result[i++], CardSubset.D4_THE_LONG_DARK); assert.equal(result[i++], CardSubset.D5_FOUNDATIONS_OF_STONE); assert.equal(result[i++], CardSubset.D6_SHADOW_AND_FLAME); assert.equal(result[i++], CardSubset.SOM1_THE_HUNT_FOR_GOLLUM); assert.equal(result[i++], CardSubset.SOM2_CONFLICT_AT_THE_CARROCK); assert.equal(result[i++], CardSubset.SOM3_A_JOURNEY_TO_RHOSGOBEL); assert.equal(result[i++], CardSubset.SOM4_THE_HILLS_OF_EMYN_MUIL); assert.equal(result[i++], CardSubset.SOM5_THE_DEAD_MARSHES); assert.equal(result[i++], CardSubset.SOM6_RETURN_TO_MIRKWOOD); assert.equal(result[i++], CardSubset.TRM1_THE_DUNLAND_TRAP); assert.equal(result[i++], CardSubset.TRM2_THE_THREE_TRIALS); assert.equal(result[i++], CardSubset.TRM3_TROUBLE_IN_THARBAD); assert.equal(result[i++], CardSubset.TRM4_THE_NIN_IN_EILPH); assert.equal(result[i++], CardSubset.TRM5_CELEBRIMBORS_SECRET); assert.equal(result[i++], CardSubset.TRM6_THE_ANTLERED_CROWN); }); export default CardSubsetTest;
}); });
buffer.rs
use crate::metal::RafxDeviceContextMetal; use crate::{RafxBufferDef, RafxMemoryUsage, RafxResourceType, RafxResult}; #[derive(Debug)] pub struct RafxBufferMetal { device_context: RafxDeviceContextMetal, buffer_def: RafxBufferDef, buffer: metal_rs::Buffer, } // for metal_rs::Buffer unsafe impl Send for RafxBufferMetal {} unsafe impl Sync for RafxBufferMetal {} impl RafxBufferMetal { pub fn buffer_def(&self) -> &RafxBufferDef { &self.buffer_def } pub fn metal_buffer(&self) -> &metal_rs::BufferRef { self.buffer.as_ref() } pub fn map_buffer(&self) -> RafxResult<*mut u8> { if self.buffer_def.memory_usage == RafxMemoryUsage::GpuOnly { return Err("Cannot map GPU-only buffer")?; } Ok(self.buffer.contents() as *mut u8) } pub fn unmap_buffer(&self) -> RafxResult<()> { // don't do anything, buffers are always mapped in metal Ok(()) } pub fn mapped_memory(&self) -> Option<*mut u8> { Some(self.buffer.contents() as *mut u8) } pub fn copy_to_host_visible_buffer<T: Copy>( &self, data: &[T], ) -> RafxResult<()> { // Cannot check size of data == buffer because buffer size might be rounded up self.copy_to_host_visible_buffer_with_offset(data, 0) } pub fn copy_to_host_visible_buffer_with_offset<T: Copy>( &self, data: &[T], buffer_byte_offset: u64, ) -> RafxResult<()> { let data_size_in_bytes = rafx_base::memory::slice_size_in_bytes(data) as u64; assert!(buffer_byte_offset + data_size_in_bytes <= self.buffer_def.size);
let required_alignment = std::mem::align_of::<T>(); // Buffers are always mapped, but calling map/unmap is essentially free and follows the same // codepath as other backends and end-users unsafe { let dst = self.map_buffer()?.add(buffer_byte_offset as usize); assert_eq!(((dst as usize) % required_alignment), 0); std::ptr::copy_nonoverlapping(src, dst, data_size_in_bytes as usize); } self.unmap_buffer()?; Ok(()) } pub fn new( device_context: &RafxDeviceContextMetal, buffer_def: &RafxBufferDef, ) -> RafxResult<Self> { let mut allocation_size = buffer_def.size; if buffer_def .resource_type .intersects(RafxResourceType::UNIFORM_BUFFER) { allocation_size = rafx_base::memory::round_size_up_to_alignment_u64( buffer_def.size, device_context .device_info() .min_uniform_buffer_offset_alignment as u64, ) } let buffer = device_context.device().new_buffer( allocation_size, buffer_def.memory_usage.mtl_resource_options(), ); Ok(RafxBufferMetal { device_context: device_context.clone(), buffer_def: buffer_def.clone(), buffer, }) } }
let src = data.as_ptr() as *const u8;
eth.rs
// Copyright Rivtower Technologies LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use cita_cloud_proto::blockchain::raw_transaction::Tx::{NormalTx, UtxoTx}; use cita_cloud_proto::blockchain::{RawTransaction, RawTransactions}; use cloud_util::common::get_tx_hash; use prost::Message; use status_code::StatusCode; use tiny_keccak::{Hasher, Keccak}; pub const SECP256K1_SIGNATURE_BYTES_LEN: usize = 65; pub const HASH_BYTES_LEN: usize = 32; pub const ADDR_BYTES_LEN: usize = 20; fn keccak_hash(input: &[u8]) -> [u8; HASH_BYTES_LEN] { let mut result = [0u8; HASH_BYTES_LEN]; let mut keccak = Keccak::v256(); keccak.update(input); keccak.finalize(&mut result); result } lazy_static::lazy_static! { pub static ref SECP256K1: secp256k1::Secp256k1<secp256k1::All> = secp256k1::Secp256k1::new(); } fn secp256k1_sign( privkey: &[u8], msg: &[u8], ) -> Result<[u8; SECP256K1_SIGNATURE_BYTES_LEN], StatusCode> { let context = &SECP256K1; let sec = secp256k1::SecretKey::from_slice(privkey).unwrap(); if let Ok(message) = secp256k1::Message::from_slice(msg) { let s = context.sign_recoverable(&message, &sec); let (rec_id, data) = s.serialize_compact(); let mut data_arr = [0; SECP256K1_SIGNATURE_BYTES_LEN]; // no need to check if s is low, it always is data_arr[0..SECP256K1_SIGNATURE_BYTES_LEN - 1] .copy_from_slice(&data[0..SECP256K1_SIGNATURE_BYTES_LEN - 1]); data_arr[SECP256K1_SIGNATURE_BYTES_LEN - 1] = rec_id.to_i32() as u8; Ok(data_arr) } else { Err(StatusCode::SignError) } } fn secp256k1_recover(signature: &[u8], message: &[u8]) -> Result<Vec<u8>, StatusCode> { let context = &SECP256K1; if let Ok(rid) = secp256k1::recovery::RecoveryId::from_i32(i32::from( signature[SECP256K1_SIGNATURE_BYTES_LEN - 1], )) { if let Ok(rsig) = secp256k1::recovery::RecoverableSignature::from_compact( &signature[0..SECP256K1_SIGNATURE_BYTES_LEN - 1], rid, ) { if let Ok(msg) = secp256k1::Message::from_slice(message) { if let Ok(publ) = context.recover(&msg, &rsig) { let serialized = publ.serialize_uncompressed(); return Ok(serialized[1..65].to_vec()); } } } } Err(StatusCode::SigCheckError) } pub fn hash_data(data: &[u8]) -> Vec<u8> { keccak_hash(data).to_vec() } pub fn verify_data_hash(data: &[u8], hash: &[u8]) -> Result<(), StatusCode> { if hash.len() != HASH_BYTES_LEN { Err(StatusCode::HashLenError) } else if hash == hash_data(data) { Ok(()) } else { Err(StatusCode::HashCheckError) } } pub fn sk2pk(sk: &[u8]) -> Vec<u8> { let context = &SECP256K1; let sec = secp256k1::SecretKey::from_slice(sk).unwrap(); let pub_key = secp256k1::key::PublicKey::from_secret_key(context, &sec); let serialized = pub_key.serialize_uncompressed(); serialized[1..].to_vec() } #[allow(dead_code)] pub fn sk2address(sk: &[u8]) -> Vec<u8> { let pk = sk2pk(sk); pk2address(&pk) } pub fn pk2address(pk: &[u8]) -> Vec<u8> { hash_data(pk)[HASH_BYTES_LEN - ADDR_BYTES_LEN..].to_vec() } pub fn sign_message(_pubkey: &[u8], privkey: &[u8], msg: &[u8]) -> Result<Vec<u8>, StatusCode> { Ok(secp256k1_sign(privkey, msg)?.to_vec()) } pub fn recover_signature(msg: &[u8], signature: 
&[u8]) -> Result<Vec<u8>, StatusCode> { if signature.len() != SECP256K1_SIGNATURE_BYTES_LEN { Err(StatusCode::SigLenError) } else { secp256k1_recover(signature, msg) } } pub fn check_transactions(raw_txs: &RawTransactions) -> StatusCode { use rayon::prelude::*; match tokio::task::block_in_place(|| { raw_txs .body .par_iter() .map(|raw_tx| { check_transaction(raw_tx).map_err(|status| { log::warn!( "check_raw_tx tx(0x{}) failed: {}", hex::encode(get_tx_hash(raw_tx).unwrap()), status ); status })?; Ok(()) }) .collect::<Result<(), StatusCode>>() }) { Ok(()) => StatusCode::Success, Err(status) => status, } } fn check_transaction(raw_tx: &RawTransaction) -> Result<(), StatusCode> { match raw_tx.tx.as_ref() { Some(NormalTx(normal_tx)) => { if normal_tx.witness.is_none() { return Err(StatusCode::NoneWitness); } let witness = normal_tx.witness.as_ref().unwrap(); let signature = &witness.signature; let sender = &witness.sender; let mut tx_bytes: Vec<u8> = Vec::new(); if let Some(tx) = &normal_tx.transaction { tx.encode(&mut tx_bytes).map_err(|_| { log::warn!("check_raw_tx: encode transaction failed"); StatusCode::EncodeError })?; } else { return Err(StatusCode::NoneTransaction); } let tx_hash = &normal_tx.transaction_hash; verify_data_hash(&tx_bytes, tx_hash)?; if &pk2address(&recover_signature(tx_hash, signature)?) == sender { Ok(()) } else { Err(StatusCode::SigCheckError) } } Some(UtxoTx(utxo_tx)) => { let witnesses = &utxo_tx.witnesses; // limit witnesses length is 1 if witnesses.len() != 1 { return Err(StatusCode::InvalidWitness); } let mut tx_bytes: Vec<u8> = Vec::new(); if let Some(tx) = utxo_tx.transaction.as_ref() { tx.encode(&mut tx_bytes).map_err(|_| { log::warn!("check_raw_tx: encode utxo failed"); StatusCode::EncodeError })?; } else { return Err(StatusCode::NoneUtxo); } let tx_hash = &utxo_tx.transaction_hash; verify_data_hash(&tx_bytes, tx_hash)?; for (_i, w) in witnesses.iter().enumerate() { let signature = &w.signature; let sender = &w.sender; if &pk2address(&recover_signature(tx_hash, signature)?) != sender { return Err(StatusCode::SigCheckError); }
} None => Err(StatusCode::NoneRawTx), } } #[cfg(test)] mod tests { use super::*; const SECP256K1_PUBKEY_BYTES_LEN: usize = 64; const SECP256K1_PRIVKEY_BYTES_LEN: usize = 32; fn secp256k1_gen_keypair() -> Result< ( [u8; SECP256K1_PUBKEY_BYTES_LEN], [u8; SECP256K1_PRIVKEY_BYTES_LEN], ), StatusCode, > { let context = &SECP256K1; let (sec_key, pub_key) = context.generate_keypair(&mut rand::thread_rng()); let serialized = pub_key.serialize_uncompressed(); let mut pub_key = [0u8; SECP256K1_PUBKEY_BYTES_LEN]; pub_key.copy_from_slice(&serialized[1..65]); let mut priv_key = [0u8; SECP256K1_PRIVKEY_BYTES_LEN]; priv_key.copy_from_slice(&sec_key[0..32]); Ok((pub_key, priv_key)) } fn generate_keypair() -> Result<(Vec<u8>, Vec<u8>), StatusCode> { let (pk, sk) = secp256k1_gen_keypair()?; Ok((pk.to_vec(), sk.to_vec())) } #[test] fn keccak_test() { let hash_empty: [u8; HASH_BYTES_LEN] = [ 0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70, ]; assert_eq!(keccak_hash(&[]), hash_empty); } #[test] fn test_data_hash() { let data = vec![1u8, 2, 3, 4, 5, 6, 7]; let hash = hash_data(&data); assert!(verify_data_hash(&data, &hash).is_ok()); } #[test] fn test_signature() { // message must be 32 bytes let data: [u8; HASH_BYTES_LEN] = [ 0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70, ]; let (pubkey, privkey) = generate_keypair().unwrap(); let signature = sign_message(&pubkey, &privkey, &data).unwrap(); assert_eq!(recover_signature(&data, &signature), Ok(pubkey)); } #[test] fn test_invalid_msg() { // invalid message means len is not 32 let invalid_msg: [u8; HASH_BYTES_LEN + 1] = [ 0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70, 0x70, ]; let (pubkey, privkey) = generate_keypair().unwrap(); assert_eq!( sign_message(&pubkey, &privkey, &invalid_msg), Err(StatusCode::SignError) ); // message must be 32 bytes let data: [u8; HASH_BYTES_LEN] = [ 0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70, ]; let (pubkey, privkey) = generate_keypair().unwrap(); let signature = sign_message(&pubkey, &privkey, &data).unwrap(); assert_eq!( recover_signature(&invalid_msg, &signature), Err(StatusCode::SigCheckError) ); } }
} Ok(())
spics0.rs
#[doc = "Reader of register SPICS0"] pub type R = crate::R<u32, super::SPICS0>; #[doc = "Writer for register SPICS0"] pub type W = crate::W<u32, super::SPICS0>; #[doc = "Register SPICS0 `reset()`'s with value 0x0b00"] impl crate::ResetValue for super::SPICS0 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x0b00 } } #[doc = "Reader of field `MCU_SEL`"] pub type MCU_SEL_R = crate::R<u8, u8>; #[doc = "Write proxy for field `MCU_SEL`"] pub struct MCU_SEL_W<'a> { w: &'a mut W, } impl<'a> MCU_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 12)) | (((value as u32) & 0x03) << 12); self.w } } #[doc = "Reader of field `FUN_DRV`"] pub type FUN_DRV_R = crate::R<u8, u8>; #[doc = "Write proxy for field `FUN_DRV`"] pub struct FUN_DRV_W<'a> { w: &'a mut W, } impl<'a> FUN_DRV_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 10)) | (((value as u32) & 0x03) << 10); self.w } } #[doc = "Reader of field `FUN_IE`"] pub type FUN_IE_R = crate::R<bool, bool>; #[doc = "Write proxy for field `FUN_IE`"] pub struct FUN_IE_W<'a> { w: &'a mut W, } impl<'a> FUN_IE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9); self.w } } #[doc = "Reader of field `FUN_PU`"] pub type FUN_PU_R = crate::R<bool, bool>; #[doc = "Write proxy for field `FUN_PU`"] pub struct FUN_PU_W<'a> { w: &'a mut W, } impl<'a> FUN_PU_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8); self.w } } #[doc = "Reader of field `FUN_PD`"] pub type FUN_PD_R = crate::R<bool, bool>; #[doc = "Write proxy for field `FUN_PD`"] pub struct FUN_PD_W<'a> { w: &'a mut W, } impl<'a> FUN_PD_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7); self.w } } #[doc = "Reader of field `SLP_DRV`"] pub type SLP_DRV_R = crate::R<u8, u8>; #[doc = "Write proxy for field `SLP_DRV`"] pub struct SLP_DRV_W<'a> { w: &'a mut W, } impl<'a> SLP_DRV_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 5)) | (((value as u32) & 0x03) << 5); self.w } } #[doc = "Reader of field `SLP_IE`"] pub type SLP_IE_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SLP_IE`"] pub struct SLP_IE_W<'a> { w: &'a mut W, } impl<'a> SLP_IE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc 
= r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn
(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } #[doc = "Reader of field `SLP_PU`"] pub type SLP_PU_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SLP_PU`"] pub struct SLP_PU_W<'a> { w: &'a mut W, } impl<'a> SLP_PU_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3); self.w } } #[doc = "Reader of field `SLP_PD`"] pub type SLP_PD_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SLP_PD`"] pub struct SLP_PD_W<'a> { w: &'a mut W, } impl<'a> SLP_PD_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Reader of field `SLP_SEL`"] pub type SLP_SEL_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SLP_SEL`"] pub struct SLP_SEL_W<'a> { w: &'a mut W, } impl<'a> SLP_SEL_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Reader of field `SLP_OE`"] pub type SLP_OE_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SLP_OE`"] pub struct SLP_OE_W<'a> { w: &'a mut W, } impl<'a> SLP_OE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } impl R { #[doc = "Bits 12:13 - configures IO_MUX function"] #[inline(always)] pub fn mcu_sel(&self) -> MCU_SEL_R { MCU_SEL_R::new(((self.bits >> 12) & 0x03) as u8) } #[doc = "Bits 10:11 - configures drive strength"] #[inline(always)] pub fn fun_drv(&self) -> FUN_DRV_R { FUN_DRV_R::new(((self.bits >> 10) & 0x03) as u8) } #[doc = "Bit 9 - configures input enable"] #[inline(always)] pub fn fun_ie(&self) -> FUN_IE_R { FUN_IE_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 8 - configures pull up"] #[inline(always)] pub fn fun_pu(&self) -> FUN_PU_R { FUN_PU_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 7 - configures pull down"] #[inline(always)] pub fn fun_pd(&self) -> FUN_PD_R { FUN_PD_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bits 5:6 - configures drive strength during sleep mode"] #[inline(always)] pub fn slp_drv(&self) -> SLP_DRV_R { SLP_DRV_R::new(((self.bits >> 5) & 0x03) as u8) } #[doc = "Bit 4 - configures input enable during sleep mode"] #[inline(always)] pub fn slp_ie(&self) -> SLP_IE_R { SLP_IE_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 
3 - configures pull up during sleep mode"] #[inline(always)] pub fn slp_pu(&self) -> SLP_PU_R { SLP_PU_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 2 - configures pull down during sleep mode"] #[inline(always)] pub fn slp_pd(&self) -> SLP_PD_R { SLP_PD_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 1 - configures sleep mode selection"] #[inline(always)] pub fn slp_sel(&self) -> SLP_SEL_R { SLP_SEL_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 0 - configures output enable during sleep mode"] #[inline(always)] pub fn slp_oe(&self) -> SLP_OE_R { SLP_OE_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bits 12:13 - configures IO_MUX function"] #[inline(always)] pub fn mcu_sel(&mut self) -> MCU_SEL_W { MCU_SEL_W { w: self } } #[doc = "Bits 10:11 - configures drive strength"] #[inline(always)] pub fn fun_drv(&mut self) -> FUN_DRV_W { FUN_DRV_W { w: self } } #[doc = "Bit 9 - configures input enable"] #[inline(always)] pub fn fun_ie(&mut self) -> FUN_IE_W { FUN_IE_W { w: self } } #[doc = "Bit 8 - configures pull up"] #[inline(always)] pub fn fun_pu(&mut self) -> FUN_PU_W { FUN_PU_W { w: self } } #[doc = "Bit 7 - configures pull down"] #[inline(always)] pub fn fun_pd(&mut self) -> FUN_PD_W { FUN_PD_W { w: self } } #[doc = "Bits 5:6 - configures drive strength during sleep mode"] #[inline(always)] pub fn slp_drv(&mut self) -> SLP_DRV_W { SLP_DRV_W { w: self } } #[doc = "Bit 4 - configures input enable during sleep mode"] #[inline(always)] pub fn slp_ie(&mut self) -> SLP_IE_W { SLP_IE_W { w: self } } #[doc = "Bit 3 - configures pull up during sleep mode"] #[inline(always)] pub fn slp_pu(&mut self) -> SLP_PU_W { SLP_PU_W { w: self } } #[doc = "Bit 2 - configures pull down during sleep mode"] #[inline(always)] pub fn slp_pd(&mut self) -> SLP_PD_W { SLP_PD_W { w: self } } #[doc = "Bit 1 - configures sleep mode selection"] #[inline(always)] pub fn slp_sel(&mut self) -> SLP_SEL_W { SLP_SEL_W { w: self } } #[doc = "Bit 0 - configures output enable during sleep mode"] #[inline(always)] pub fn slp_oe(&mut self) -> SLP_OE_W { SLP_OE_W { w: self } } }
bit
vk_fence_import_flags.rs
// Generated by `scripts/generate.js` use utils::vk_traits::*; /// Wrapper for [VkFenceImportFlags](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkFenceImportFlags.html). /// /// Use the macro `VkFenceImportFlags!` as an alternative method to create a structure. For example, these two snippets return the same value: /// ``` /// VkFenceImportFlags!(temporary) /// ``` /// ``` /// VkFenceImportFlags { /// temporary: true, /// } /// ``` #[derive(Debug, Clone)] pub struct VkFenceImportFlags { pub temporary: bool, } #[doc(hidden)] pub type RawVkFenceImportFlags = u32; impl VkWrappedType<RawVkFenceImportFlags> for VkFenceImportFlags { fn vk_to_raw(src: &VkFenceImportFlags, dst: &mut RawVkFenceImportFlags) { *dst = 0; if src.temporary { *dst |= 0x00000001; } } } impl VkRawType<VkFenceImportFlags> for RawVkFenceImportFlags { fn
(src: &RawVkFenceImportFlags) -> VkFenceImportFlags { VkFenceImportFlags { temporary: (src & 0x00000001) != 0, } } } impl Default for VkFenceImportFlags { fn default() -> VkFenceImportFlags { VkFenceImportFlags { temporary: false, } } } impl VkFenceImportFlags { /// Return a structure with all flags to `false`. pub fn none() -> Self { VkFenceImportFlags { temporary: false, } } /// Return a structure with all flags to `true`. pub fn all() -> Self { VkFenceImportFlags { temporary: true, } } /// Return the numerical bit flags corresponding to the structure (as described in the Vulkan specs). pub fn to_u32(&self) -> u32 { 0 + if self.temporary { 0x00000001 } else { 0 } } /// Create a structure corresponding to the specified numerical bit flags. pub fn from_u32(value: u32) -> Self { VkFenceImportFlags { temporary: value & 0x00000001 > 0, } } } #[doc(hidden)] #[macro_export] macro_rules! VkFenceImportFlags { ( $( $x:ident ),* ) => { VkFenceImportFlags { $($x: true,)* ..VkFenceImportFlags::none() } } }
vk_to_wrapped
QueryAnalizer.py
import numpy as np from model.indexer_v1 import Indexer class QueryAnalizer: def __init__(self, query, document_list, enable_stemming=True, filter_stopwords=True): self.__query = Indexer([query], enable_stemming=enable_stemming, filter_stopwords=filter_stopwords) self.__indexer = Indexer(document_list, enable_stemming=enable_stemming, filter_stopwords=filter_stopwords) self.result = None def
(self): if self.result is not None: return self.result result = {} for query_term, value in self.__query.words_index.items(): indexer_term = self.__indexer.words_index[query_term] tf_idf_query_term = self.__query.words_index[query_term]["idf"] * \ self.__query.words_index[query_term]["documents"][0]["tf"] tf_documents = list(map(lambda doc: doc["tf"], indexer_term["documents"])) dot_product = np.dot(tf_idf_query_term, tf_documents) result[query_term] = list(zip( list( map( lambda doc: doc["document"].text, indexer_term["documents"])) , list( map( lambda elem: elem / (np.linalg.norm(tf_idf_query_term) + np.linalg.norm(tf_documents)), dot_product )) )) self.result = result for key, elm in self.result.items(): self.result[key] = sorted(elm, key=lambda tup: tup[1], reverse=True) return self.result
cosine_similarity
Basics.py
# Write a function called "show_excitement" where the string # "I am super excited for this course!" is returned exactly # 5 times, where each sentence is separated by a single space. # Return the string with "return". # You can only have the string once in your code. # Don't just copy/paste it 5 times into a single variable! def show_excitement(): # Your code goes here!
print show_excitement()
here = '' for i in range(5): here += "I am super excited for this course! " return here[:-1]
tasker.py
#from mpi4py.futures import MPIPoolExecutor from mpi4py import MPI import argparse from parser import parse_input from merge_nodes import merge_field_nodes from merge_nodes import merge_analysis_nodes # based on https://github.com/jbornschein/mpi4py-examples/blob/master/09-task-pull.py #x0, x1, w = -2.0, +2.0, 640*2 #y0, y1, h = -1.5, +1.5, 480*2 #dx = (x1 - x0) / w #dy = (y1 - y0) / h # #c = complex(0, 0.65) # #def julia(x, y): # z = complex(x, y) # n = 255 # while abs(z) < 3 and n > 1: # z = z**2 + c # n -= 1 # return n # #def julia_line(k): # line = bytearray(w) # y = y1 - k * dy # for j in range(w): # x = x0 + j * dx # line[j] = julia(x, y) # return line def enum(*sequential, **named): """Handy way to fake an enumerated type in Python http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python """ enums = dict(zip(sequential, range(len(sequential))), **named) return type('Enum', (), enums) # Define MPI message tags tags = enum('READY', 'DONE', 'EXIT', 'START') class Tasker: def get_args(self):
def __init__(self): # Initializations and preliminaries self.comm = MPI.COMM_WORLD # get MPI communicator object self.size = self.comm.size # total number of processes self.rank = self.comm.rank # rank of this process self.status = MPI.Status() # get MPI status object # parse who is master self.master = False if self.rank == 0: self.master = True self.conf, self.fdir, args = self.get_args() if self.master: print("Tasker operating on dir:{} with {} workers".format(self.fdir, self.size)) ################################################## # task lists to be executed lap_tasks = [ 'test_member', 'test_other_member' ] sim_tasks = [ 'test_global' ] ################################################## # internal variables work_to_do = True # distribute tasks until this is false lap = 0 # keep track of lap we are processing task_index = 0 #keep track of lap task we are processing # task feeder def get_new_task(self): # cycle among laps if self.task_index >= len(self.lap_tasks): self.lap += self.conf.interval self.task_index = 0 task = { 'name': self.lap_tasks[self.task_index], 'args': self.lap, } self.task_index += 1 if self.lap >= 1000: self.work_to_do = False return task # task scheduling; master process executes this def schedule_tasks(self): #task_index = 0 num_workers = self.size - 1 closed_workers = 0 print("Master starting with %d workers" % num_workers) while closed_workers < num_workers: data = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=self.status) source = self.status.Get_source() tag = self.status.Get_tag() if tag == tags.READY: # Worker is ready, so send it a task #if task_index < len(tasks): if self.work_to_do: task = self.get_new_task() self.comm.send(task, dest=source, tag=tags.START) print("Sending task {} to worker {}".format(task['name'], source)) else: self.comm.send(None, dest=source, tag=tags.EXIT) elif tag == tags.DONE: results = data print("Got data from worker %d" % source) elif tag == tags.EXIT: print("Worker %d exited." % source) closed_workers += 1 print("Master finishing...") # worker that takes a task and works on it def work_on_tasks(self): # Worker processes execute code below name = MPI.Get_processor_name() print("I am a worker with rank %d on %s." % (self.rank, name)) while True: self.comm.send(None, dest=0, tag=tags.READY) task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=self.status) tag = self.status.Get_tag() if tag == tags.START: # Do the work here method_to_call = getattr(self, task['name']) result = method_to_call( task['args'] ) self.comm.send(result, dest=0, tag=tags.DONE) elif tag == tags.EXIT: break self.comm.send(None, dest=0, tag=tags.EXIT) def test_member(self, args): print("running test_member with args", args) return True def test_other_member(self, args): print("running test_other_member with args", args) return True def test_global(self, args): print("running test_global with args", args) return True def run(self): if self.master: self.schedule_tasks() else: self.work_on_tasks() if __name__ == '__main__': tasker = Tasker() tasker.run() #with MPIPoolExecutor() as executor: # image = executor.map(julia_line, range(h)) # with open('julia.pgm', 'wb') as f: # f.write(b'P5 %d %d %d\n' % (w, h, 255)) # for line in image: # f.write(line) #executor = MPIPoolExecutor() #future = executor.submit(pow, 321, 1234) #print(future.result())
conf, fdir, args = parse_input() return conf, fdir, args
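The record above captures the classic MPI "task pull" pattern: each worker announces itself with READY, the master replies either with a START message carrying a task or with EXIT, workers report results with DONE, and finally acknowledge EXIT. The sketch below is a condensed, self-contained version of that handshake, not the Tasker class itself; it assumes mpi4py is installed and that the script is launched under MPI (e.g. `mpiexec -n 4 python pull.py`), and the squaring "work" is a stand-in.

```python
# Condensed sketch of the READY/START/DONE/EXIT handshake shown in the record
# above (not the Tasker class itself). Assumes mpi4py is installed and the
# script is started under MPI, e.g. `mpiexec -n 4 python pull.py`.
from mpi4py import MPI

READY, DONE, EXIT, START = range(4)   # same tag order as enum('READY', 'DONE', 'EXIT', 'START')

comm = MPI.COMM_WORLD
status = MPI.Status()

if comm.rank == 0:
    # Master: hand out tasks until none remain, then tell workers to exit.
    tasks = list(range(10))           # stand-in work items
    results, closed = [], 0
    while closed < comm.size - 1:
        data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source, tag = status.Get_source(), status.Get_tag()
        if tag == READY:
            if tasks:
                comm.send(tasks.pop(), dest=source, tag=START)
            else:
                comm.send(None, dest=source, tag=EXIT)
        elif tag == DONE:
            results.append(data)
        elif tag == EXIT:
            closed += 1
    print("collected", len(results), "results")
else:
    # Worker: keep asking for work, do it, report the result.
    while True:
        comm.send(None, dest=0, tag=READY)
        task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if status.Get_tag() == EXIT:
            break
        comm.send(task * task, dest=0, tag=DONE)
    comm.send(None, dest=0, tag=EXIT)
```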
type.go
// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package metrics import ( "math/rand" "reflect" ) // Type describes the type of value stored in a Datum. type Type int const ( // Int indicates this metric is an integer metric type. Int Type = iota // Float indicates this metric is a floating-point metric type. Float // String indicates this metric contains printable string values. String // Buckets indicates this metric is a histogram metric type. Buckets endType // end of enumeration for testing ) func (t Type) String() string { switch t { case Int: return "Int" case Float: return "Float" case String: return "String" case Buckets: return "Buckets" } return "?"
// Generate implements the quick.Generator interface for Type. func (Type) Generate(rand *rand.Rand, size int) reflect.Value { return reflect.ValueOf(Type(rand.Intn(int(endType)))) }
}
menu_info.go
package menu import ( "gopkg.in/chanxuehong/wechat.v2/mp/core" ) // API for getting the custom menu configuration. func GetMenuInfo(clt *core.
fo MenuInfo, isMenuOpen bool, err error) { const incompleteURL = "https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info?access_token=" var result struct { core.Error IsMenuOpen int `json:"is_menu_open"` MenuInfo MenuInfo `json:"selfmenu_info"` } if err = clt.GetJSON(incompleteURL, &result); err != nil { return } if result.ErrCode != core.ErrCodeOK { err = &result.Error return } info = result.MenuInfo if result.IsMenuOpen != 0 { isMenuOpen = true } return } type MenuInfo struct { Buttons []ButtonEx `json:"button,omitempty"` } type ButtonEx struct { Type string `json:"type,omitempty"` Name string `json:"name,omitempty"` Key string `json:"key,omitempty"` URL string `json:"url,omitempty"` MediaId string `json:"media_id,omitempty"` Value string `json:"value,omitempty"` NewsInfo struct { Articles []Article `json:"list,omitempty"` } `json:"news_info"` SubButton struct { Buttons []ButtonEx `json:"list,omitempty"` } `json:"sub_button"` } type Article struct { Title string `json:"title,omitempty"` // Title of the news message Author string `json:"author,omitempty"` // Author Digest string `json:"digest,omitempty"` // Digest (summary) ShowCover int `json:"show_cover"` // Whether to show the cover image: 0 = no, 1 = yes CoverURL string `json:"cover_url,omitempty"` // URL of the cover image ContentURL string `json:"content_url,omitempty"` // URL of the article body SourceURL string `json:"source_url,omitempty"` // URL of the original article; if empty there is no "view original" link }
Client) (in
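A detail worth noticing in these rows is that the held-out middle is not aligned to line or token boundaries: in the type.go record it is nothing but the closing brace of String(), and in the menu_info.go record above it even splits identifiers (the prefix ends with `core.`, the middle is `Client) (in`, and the suffix resumes with `fo MenuInfo, ...`). Reassembly therefore has to be plain string concatenation with nothing inserted between the fields, as the small check below illustrates; the string literals are copied from this record, and the dict wrapper is only for illustration.

```python
# Plain concatenation of prefix + middle + suffix; adding any separator would
# corrupt identifiers, because middles may start and end in the middle of a token.
record = {
    "prefix_tail": "func GetMenuInfo(clt *core.",
    "middle": "Client) (in",
    "suffix_head": "fo MenuInfo, isMenuOpen bool, err error) {",
}

signature = record["prefix_tail"] + record["middle"] + record["suffix_head"]
assert signature == "func GetMenuInfo(clt *core.Client) (info MenuInfo, isMenuOpen bool, err error) {"
print(signature)
```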
query.go
package cli import ( "fmt" "github.com/Sifchain/sifnode/x/dispensation/types" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/context" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/spf13/cobra" ) func GetQueryCmd(queryRoute string, cdc *codec.Codec) *cobra.Command { // Group dispensation queries under a subcommand dispensationQueryCmd := &cobra.Command{ Use: types.ModuleName, Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), DisableFlagParsing: true, SuggestionsMinimumDistance: 2, RunE: client.ValidateCmd, } dispensationQueryCmd.AddCommand(flags.GetCommands( GetCmdDistributions(queryRoute, cdc), GetCmdDistributionRecordForRecipient(queryRoute, cdc), GetCmdDistributionRecordForDistNameAll(queryRoute, cdc), GetCmdDistributionRecordForDistNamePending(queryRoute, cdc), GetCmdDistributionRecordForDistNameCompleted(queryRoute, cdc), GetCmdClaimsByType(queryRoute, cdc), )...) return dispensationQueryCmd } //GetCmdDistributions returns a list of all distributions ever created func
(queryRoute string, cdc *codec.Codec) *cobra.Command { return &cobra.Command{ Use: "distributions-all", Short: "get a list of all distributions ", Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { cliCtx := context.NewCLIContext().WithCodec(cdc) route := fmt.Sprintf("custom/%s/%s", queryRoute, types.QueryAllDistributions) res, height, err := cliCtx.QueryWithData(route, nil) if err != nil { return err } var dr types.Distributions cdc.MustUnmarshalJSON(res, &dr) out := types.NewDistributionsResponse(dr, height) return cliCtx.PrintOutput(out) }, } } // GetCmdDistributionRecordForRecipient returns the completed and pending records for the recipient address func GetCmdDistributionRecordForRecipient(queryRoute string, cdc *codec.Codec) *cobra.Command { return &cobra.Command{ Use: "records-by-addr [recipient address]", Short: "get a list of all distribution records ", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { cliCtx := context.NewCLIContext().WithCodec(cdc) address := args[0] recipientAddress, err := sdk.AccAddressFromBech32(address) if err != nil { return err } params := types.NewQueryRecordsByRecipientAddr(recipientAddress) bz, err := cliCtx.Codec.MarshalJSON(params) if err != nil { return err } route := fmt.Sprintf("custom/%s/%s", queryRoute, types.QueryRecordsByRecipient) res, height, err := cliCtx.QueryWithData(route, bz) if err != nil { return err } var drs types.DistributionRecords cdc.MustUnmarshalJSON(res, &drs) out := types.NewDistributionRecordsResponse(drs, height) return cliCtx.PrintOutput(out) }, } } //GetCmdDistributionRecordForDistNameAll returns all records for a given distribution name func GetCmdDistributionRecordForDistNameAll(queryRoute string, cdc *codec.Codec) *cobra.Command { return &cobra.Command{ Use: "records-by-name-all [distribution name]", Short: "get a list of all distribution records ", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { cliCtx := context.NewCLIContext().WithCodec(cdc) name := args[0] params := types.NewQueryRecordsByDistributionName(name, 3) bz, err := cliCtx.Codec.MarshalJSON(params) if err != nil { return err } route := fmt.Sprintf("custom/%s/%s", queryRoute, types.QueryRecordsByDistrName) res, height, err := cliCtx.QueryWithData(route, bz) if err != nil { return err } var drs types.DistributionRecords cdc.MustUnmarshalJSON(res, &drs) out := types.NewDistributionRecordsResponse(drs, height) return cliCtx.PrintOutput(out) }, } } //GetCmdDistributionRecordForDistNamePending returns all pending records for a given distribution name func GetCmdDistributionRecordForDistNamePending(queryRoute string, cdc *codec.Codec) *cobra.Command { return &cobra.Command{ Use: "records-by-name-pending [distribution name]", Short: "get a list of all distribution records ", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { cliCtx := context.NewCLIContext().WithCodec(cdc) name := args[0] params := types.NewQueryRecordsByDistributionName(name, types.Pending) bz, err := cliCtx.Codec.MarshalJSON(params) if err != nil { return err } route := fmt.Sprintf("custom/%s/%s", queryRoute, types.QueryRecordsByDistrName) res, height, err := cliCtx.QueryWithData(route, bz) if err != nil { return err } var drs types.DistributionRecords cdc.MustUnmarshalJSON(res, &drs) out := types.NewDistributionRecordsResponse(drs, height) return cliCtx.PrintOutput(out) }, } } //GetCmdDistributionRecordForDistNamePending returns all completed records for a given distribution 
name func GetCmdDistributionRecordForDistNameCompleted(queryRoute string, cdc *codec.Codec) *cobra.Command { return &cobra.Command{ Use: "records-by-name-completed [distribution name]", Short: "get a list of all distribution records ", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { cliCtx := context.NewCLIContext().WithCodec(cdc) name := args[0] params := types.NewQueryRecordsByDistributionName(name, types.Completed) bz, err := cliCtx.Codec.MarshalJSON(params) if err != nil { return err } route := fmt.Sprintf("custom/%s/%s", queryRoute, types.QueryRecordsByDistrName) res, height, err := cliCtx.QueryWithData(route, bz) if err != nil { return err } var drs types.DistributionRecords cdc.MustUnmarshalJSON(res, &drs) out := types.NewDistributionRecordsResponse(drs, height) return cliCtx.PrintOutput(out) }, } } func GetCmdClaimsByType(queryRoute string, cdc *codec.Codec) *cobra.Command { return &cobra.Command{ Use: "claims-by-type [ClaimType]", Short: "get a list of all claims for mentioned type", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { cliCtx := context.NewCLIContext().WithCodec(cdc) claimType, ok := types.IsValidClaim(args[0]) if !ok { return fmt.Errorf("invalid Claim Type %s: Types supported [LiquidityMining/ValidatorSubsidy]", args[0]) } params := types.NewQueryUserClaims(claimType) bz, err := cliCtx.Codec.MarshalJSON(params) if err != nil { return err } route := fmt.Sprintf("custom/%s/%s", queryRoute, types.QueryClaimsByType) res, height, err := cliCtx.QueryWithData(route, bz) if err != nil { return err } var claims []types.UserClaim cdc.MustUnmarshalJSON(res, &claims) out := types.NewClaimsResponse(claims, height) return cliCtx.PrintOutput(out) }, } }
GetCmdDistributions
rpc_service-remote.go
// Autogenerated by Thrift Compiler (0.9.2) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING package main import ( "demo/rpc" "flag" "fmt" "git.apache.org/thrift.git/lib/go/thrift" "math" "net" "net/url" "os" "strconv" "strings" ) func Usage() { fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") flag.PrintDefaults() fmt.Fprintln(os.Stderr, "\nFunctions:") fmt.Fprintln(os.Stderr, " funCall(ArgStruct argStruct, byte argByte, i16 argI16, i32 argI32, i64 argI64, double argDouble, string argString, paramMapStrStr, paramMapI32Str, paramSetStr, paramSetI64, paramListStr, bool argBool)") fmt.Fprintln(os.Stderr) os.Exit(0) } func
() { flag.Usage = Usage var host string var port int var protocol string var urlString string var framed bool var useHttp bool var parsedUrl url.URL var trans thrift.TTransport _ = strconv.Atoi _ = math.Abs flag.Usage = Usage flag.StringVar(&host, "h", "localhost", "Specify host and port") flag.IntVar(&port, "p", 9090, "Specify port") flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") flag.StringVar(&urlString, "u", "", "Specify the url") flag.BoolVar(&framed, "framed", false, "Use framed transport") flag.BoolVar(&useHttp, "http", false, "Use http") flag.Parse() if len(urlString) > 0 { parsedUrl, err := url.Parse(urlString) if err != nil { fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) flag.Usage() } host = parsedUrl.Host useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" } else if useHttp { _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) if err != nil { fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) flag.Usage() } } cmd := flag.Arg(0) var err error if useHttp { trans, err = thrift.NewTHttpClient(parsedUrl.String()) } else { portStr := fmt.Sprint(port) if strings.Contains(host, ":") { host, portStr, err = net.SplitHostPort(host) if err != nil { fmt.Fprintln(os.Stderr, "error with host:", err) os.Exit(1) } } trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) if err != nil { fmt.Fprintln(os.Stderr, "error resolving address:", err) os.Exit(1) } if framed { trans = thrift.NewTFramedTransport(trans) } } if err != nil { fmt.Fprintln(os.Stderr, "Error creating transport", err) os.Exit(1) } defer trans.Close() var protocolFactory thrift.TProtocolFactory switch protocol { case "compact": protocolFactory = thrift.NewTCompactProtocolFactory() break case "simplejson": protocolFactory = thrift.NewTSimpleJSONProtocolFactory() break case "json": protocolFactory = thrift.NewTJSONProtocolFactory() break case "binary", "": protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() break default: fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) Usage() os.Exit(1) } client := rpc.NewRpcServiceClientFactory(trans, protocolFactory) if err := trans.Open(); err != nil { fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) os.Exit(1) } switch cmd { case "funCall": if flag.NArg()-1 != 13 { fmt.Fprintln(os.Stderr, "FunCall requires 13 args") flag.Usage() } arg12 := flag.Arg(1) mbTrans13 := thrift.NewTMemoryBufferLen(len(arg12)) defer mbTrans13.Close() _, err14 := mbTrans13.WriteString(arg12) if err14 != nil { Usage() return } factory15 := thrift.NewTSimpleJSONProtocolFactory() jsProt16 := factory15.GetProtocol(mbTrans13) argvalue0 := rpc.NewArgStruct() err17 := argvalue0.Read(jsProt16) if err17 != nil { Usage() return } value0 := argvalue0 tmp1, err18 := (strconv.Atoi(flag.Arg(2))) if err18 != nil { Usage() return } argvalue1 := byte(tmp1) value1 := argvalue1 tmp2, err19 := (strconv.Atoi(flag.Arg(3))) if err19 != nil { Usage() return } argvalue2 := byte(tmp2) value2 := argvalue2 tmp3, err20 := (strconv.Atoi(flag.Arg(4))) if err20 != nil { Usage() return } argvalue3 := int32(tmp3) value3 := argvalue3 argvalue4, err21 := (strconv.ParseInt(flag.Arg(5), 10, 64)) if err21 != nil { Usage() return } value4 := argvalue4 argvalue5, err22 := (strconv.ParseFloat(flag.Arg(6), 64)) if err22 != nil { Usage() return } value5 := argvalue5 argvalue6 := flag.Arg(7) value6 := argvalue6 arg24 := flag.Arg(8) mbTrans25 := thrift.NewTMemoryBufferLen(len(arg24)) defer mbTrans25.Close() _, err26 := 
mbTrans25.WriteString(arg24) if err26 != nil { Usage() return } factory27 := thrift.NewTSimpleJSONProtocolFactory() jsProt28 := factory27.GetProtocol(mbTrans25) containerStruct7 := rpc.NewFunCallArgs() err29 := containerStruct7.ReadField8(jsProt28) if err29 != nil { Usage() return } argvalue7 := containerStruct7.ParamMapStrStr value7 := argvalue7 arg30 := flag.Arg(9) mbTrans31 := thrift.NewTMemoryBufferLen(len(arg30)) defer mbTrans31.Close() _, err32 := mbTrans31.WriteString(arg30) if err32 != nil { Usage() return } factory33 := thrift.NewTSimpleJSONProtocolFactory() jsProt34 := factory33.GetProtocol(mbTrans31) containerStruct8 := rpc.NewFunCallArgs() err35 := containerStruct8.ReadField9(jsProt34) if err35 != nil { Usage() return } argvalue8 := containerStruct8.ParamMapI32Str value8 := argvalue8 arg36 := flag.Arg(10) mbTrans37 := thrift.NewTMemoryBufferLen(len(arg36)) defer mbTrans37.Close() _, err38 := mbTrans37.WriteString(arg36) if err38 != nil { Usage() return } factory39 := thrift.NewTSimpleJSONProtocolFactory() jsProt40 := factory39.GetProtocol(mbTrans37) containerStruct9 := rpc.NewFunCallArgs() err41 := containerStruct9.ReadField10(jsProt40) if err41 != nil { Usage() return } argvalue9 := containerStruct9.ParamSetStr value9 := argvalue9 arg42 := flag.Arg(11) mbTrans43 := thrift.NewTMemoryBufferLen(len(arg42)) defer mbTrans43.Close() _, err44 := mbTrans43.WriteString(arg42) if err44 != nil { Usage() return } factory45 := thrift.NewTSimpleJSONProtocolFactory() jsProt46 := factory45.GetProtocol(mbTrans43) containerStruct10 := rpc.NewFunCallArgs() err47 := containerStruct10.ReadField11(jsProt46) if err47 != nil { Usage() return } argvalue10 := containerStruct10.ParamSetI64 value10 := argvalue10 arg48 := flag.Arg(12) mbTrans49 := thrift.NewTMemoryBufferLen(len(arg48)) defer mbTrans49.Close() _, err50 := mbTrans49.WriteString(arg48) if err50 != nil { Usage() return } factory51 := thrift.NewTSimpleJSONProtocolFactory() jsProt52 := factory51.GetProtocol(mbTrans49) containerStruct11 := rpc.NewFunCallArgs() err53 := containerStruct11.ReadField12(jsProt52) if err53 != nil { Usage() return } argvalue11 := containerStruct11.ParamListStr value11 := argvalue11 argvalue12 := flag.Arg(13) == "true" value12 := argvalue12 fmt.Print(client.FunCall(value0, value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12)) fmt.Print("\n") break case "": Usage() break default: fmt.Fprintln(os.Stderr, "Invalid function ", cmd) } }
main
test_version_commit_name.py
# -*- coding: utf-8 -*- from django.test import TestCase from django_dynamic_fixture import get, new from readthedocs.builds.constants import ( BRANCH, LATEST, STABLE, TAG, EXTERNAL, ) from readthedocs.builds.models import Version from readthedocs.projects.constants import REPO_TYPE_GIT, REPO_TYPE_HG from readthedocs.projects.models import Project class VersionCommitNameTests(TestCase): def test_branch_name_unicode_non_ascii(self): unicode_name = b'abc_\xd1\x84_\xe2\x99\x98'.decode('utf-8') version = new(Version, identifier=unicode_name, type=BRANCH) self.assertEqual(version.identifier_friendly, unicode_name) def test_branch_name_made_friendly_when_sha(self): commit_hash = '3d92b728b7d7b842259ac2020c2fa389f13aff0d' version = new( Version, identifier=commit_hash, slug=STABLE, verbose_name=STABLE, type=TAG, ) # we shorten commit hashes to keep things readable self.assertEqual(version.identifier_friendly, '3d92b728') def test_branch_name(self): version = new( Version, identifier='release-2.5.x', slug='release-2.5.x', verbose_name='release-2.5.x', type=BRANCH, ) self.assertEqual(version.commit_name, 'release-2.5.x') def test_tag_name(self): version = new( Version, identifier='10f1b29a2bd2', slug='release-2.5.0', verbose_name='release-2.5.0', type=TAG, ) self.assertEqual(version.commit_name, 'release-2.5.0') def
(self): version = new( Version, identifier='origin/stable', slug=STABLE, verbose_name='stable', type=BRANCH, ) self.assertEqual(version.commit_name, 'stable') def test_stable_version_tag(self): version = new( Version, identifier='3d92b728b7d7b842259ac2020c2fa389f13aff0d', slug=STABLE, verbose_name=STABLE, type=TAG, ) self.assertEqual( version.commit_name, '3d92b728b7d7b842259ac2020c2fa389f13aff0d', ) def test_hg_latest_branch(self): hg_project = get(Project, repo_type=REPO_TYPE_HG) version = new( Version, identifier='default', slug=LATEST, verbose_name=LATEST, type=BRANCH, project=hg_project, ) self.assertEqual(version.commit_name, 'default') def test_git_latest_branch(self): git_project = get(Project, repo_type=REPO_TYPE_GIT) version = new( Version, project=git_project, identifier='origin/master', slug=LATEST, verbose_name=LATEST, type=BRANCH, ) self.assertEqual(version.commit_name, 'master') def test_external_version(self): identifier = 'ec26de721c3235aad62de7213c562f8c821' version = new( Version, identifier=identifier, slug='11', verbose_name='11', type=EXTERNAL, ) self.assertEqual(version.commit_name, identifier)
test_branch_with_name_stable
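The assertions in this record pin down a small but useful behaviour: full 40-character git SHAs are shortened to their first 8 characters for display, while branch- and tag-style identifiers are left untouched. The helper below is a sketch that reproduces what the tests assert; it is not the Read the Docs implementation, and the regex-based SHA check is an assumption made for the example.

```python
import re

def identifier_friendly(identifier: str) -> str:
    """Return a display-friendly identifier: full 40-char hex SHAs are
    shortened to 8 characters, anything else is returned unchanged."""
    if re.fullmatch(r"[0-9a-f]{40}", identifier):
        return identifier[:8]
    return identifier

# Mirrors the expectations in the tests above.
assert identifier_friendly("3d92b728b7d7b842259ac2020c2fa389f13aff0d") == "3d92b728"
assert identifier_friendly("release-2.5.x") == "release-2.5.x"
```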
Burn.tsx
import { demicrofy, formatLuna, formatLunaInput, formatUST, LUNA_INPUT_MAXIMUM_DECIMAL_POINTS, LUNA_INPUT_MAXIMUM_INTEGER_POINTS, } from '@anchor-protocol/notation'; import type { bLuna, Luna, uUST } from '@anchor-protocol/types'; import { useConnectedWallet, WalletReady, } from '@anchor-protocol/wallet-provider'; import { NativeSelect as MuiNativeSelect } from '@material-ui/core'; import { useOperation } from '@terra-dev/broadcastable-operation'; import { ActionButton } from '@terra-dev/neumorphism-ui/components/ActionButton'; import { IconSpan } from '@terra-dev/neumorphism-ui/components/IconSpan'; import { NumberMuiInput } from '@terra-dev/neumorphism-ui/components/NumberMuiInput'; import { SelectAndTextInputContainer } from '@terra-dev/neumorphism-ui/components/SelectAndTextInputContainer'; import { useBank } from 'base/contexts/bank'; import { useConstants } from 'base/contexts/contants'; import big, { Big } from 'big.js'; import { IconLineSeparator } from 'components/IconLineSeparator'; import { MessageBox } from 'components/MessageBox'; import { TransactionRenderer } from 'components/TransactionRenderer'; import { SwapListItem, TxFeeList, TxFeeListItem } from 'components/TxFeeList'; import { validateTxFee } from 'logics/validateTxFee'; import { pegRecovery } from 'pages/basset/logics/pegRecovery'; import { validateBurnAmount } from 'pages/basset/logics/validateBurnAmount'; import { useExchangeRate } from 'pages/basset/queries/exchangeRate'; import { burnOptions } from 'pages/basset/transactions/burnOptions'; import React, { ChangeEvent, useCallback, useMemo, useState } from 'react'; interface Item { label: string; value: string; } const assetCurrencies: Item[] = [{ label: 'Luna', value: 'luna' }]; const bAssetCurrencies: Item[] = [{ label: 'bLuna', value: 'bluna' }]; export function Burn() { // --------------------------------------------- // dependencies // --------------------------------------------- const connectedWallet = useConnectedWallet(); const { fixedGas } = useConstants(); const [burn, burnResult] = useOperation(burnOptions, {}); // --------------------------------------------- // states // --------------------------------------------- const [burnAmount, setBurnAmount] = useState<bLuna>('' as bLuna); const [getAmount, setGetAmount] = useState<Luna>('' as Luna); const [burnCurrency, setBurnCurrency] = useState<Item>( () => bAssetCurrencies[0], ); const [getCurrency, setGetCurrency] = useState<Item>( () => assetCurrencies[0], ); // --------------------------------------------- // queries // --------------------------------------------- const bank = useBank(); const { data: { exchangeRate, parameters }, } = useExchangeRate({ bAsset: getCurrency.value, }); // --------------------------------------------- // logics // --------------------------------------------- const pegRecoveryFee = useMemo(() => pegRecovery(exchangeRate, parameters), [ exchangeRate, parameters, ]); const invalidTxFee = useMemo( () => !!connectedWallet && validateTxFee(bank, fixedGas), [bank, fixedGas, connectedWallet], ); const invalidBurnAmount = useMemo( () => !!connectedWallet && validateBurnAmount(burnAmount, bank), [bank, burnAmount, connectedWallet], ); // --------------------------------------------- // callbacks // --------------------------------------------- const updateBurnCurrency = useCallback((nextBurnCurrencyValue: string) => { setBurnCurrency( bAssetCurrencies.find(({ value }) => nextBurnCurrencyValue === value) ?? 
bAssetCurrencies[0], ); }, []); const updateGetCurrency = useCallback((nextGetCurrencyValue: string) => { setGetCurrency( assetCurrencies.find(({ value }) => nextGetCurrencyValue === value) ?? assetCurrencies[0], ); }, []); const updateBurnAmount = useCallback( (nextBurnAmount: string) => { if (nextBurnAmount.trim().length === 0) { setGetAmount('' as Luna); setBurnAmount('' as bLuna); } else { const burnAmount: bLuna = nextBurnAmount as bLuna; const getAmount: Luna = formatLunaInput( big(burnAmount).mul(exchangeRate?.exchange_rate ?? 1) as Luna<Big>, ); setGetAmount(getAmount); setBurnAmount(burnAmount); } }, [exchangeRate?.exchange_rate], ); const updateGetAmount = useCallback( (nextGetAmount: string) => { if (nextGetAmount.trim().length === 0) { setBurnAmount('' as bLuna); setGetAmount('' as Luna); } else { const getAmount: Luna = nextGetAmount as Luna; const burnAmount: bLuna = formatLunaInput( big(getAmount).div(exchangeRate?.exchange_rate ?? 1) as bLuna<Big>, ); setBurnAmount(burnAmount); setGetAmount(getAmount); } }, [exchangeRate?.exchange_rate], ); const init = useCallback(() => { setGetAmount('' as Luna); setBurnAmount('' as bLuna); }, []); const proceed = useCallback( async (walletReady: WalletReady, burnAmount: bLuna) => { const broadcasted = await burn({ address: walletReady.walletAddress, amount: burnAmount, txFee: fixedGas.toString() as uUST, }); if (!broadcasted) { init(); } }, [burn, fixedGas, init], ); // --------------------------------------------- // presentation // --------------------------------------------- if ( burnResult?.status === 'in-progress' || burnResult?.status === 'done' || burnResult?.status === 'fault' ) { return <TransactionRenderer result={burnResult} onExit={init} />; } return ( <> {!!invalidTxFee && <MessageBox>{invalidTxFee}</MessageBox>} {pegRecoveryFee && ( <MessageBox level="info" hide={{ id: 'burn_peg', period: 1000 * 60 * 60 * 24 * 7 }} > When exchange rate is lower than threshold, <br /> protocol charges peg recovery fee for each Mint/Burn action. </MessageBox> )} <MessageBox level="info" hide={{ id: 'burn', period: 1000 * 60 * 60 * 24 * 7 }} > Default bLuna redemptions take at least 21 days to process. <br /> Slashing events during the 21 days may affect the final amount withdrawn. </MessageBox> {/* Burn (bAsset) */} <div className="burn-description"> <p>I want to burn</p> <p /> </div> <SelectAndTextInputContainer className="burn" gridColumns={[120, '1fr']} error={!!invalidBurnAmount} leftHelperText={invalidBurnAmount} rightHelperText={ !!connectedWallet && ( <span> Balance:{' '} <span style={{ textDecoration: 'underline', cursor: 'pointer' }} onClick={() => updateBurnAmount( formatLunaInput(demicrofy(bank.userBalances.ubLuna)), ) } > {formatLuna(demicrofy(bank.userBalances.ubLuna))}{' '} {burnCurrency.label} </span> </span> ) } > <MuiNativeSelect value={burnCurrency} onChange={({ target }) => updateBurnCurrency(target.value)} IconComponent={ bAssetCurrencies.length < 2 ? 
BlankComponent : undefined } disabled={bAssetCurrencies.length < 2} > {bAssetCurrencies.map(({ label, value }) => ( <option key={value} value={value}> {label} </option> ))} </MuiNativeSelect> <NumberMuiInput placeholder="0.00" error={!!invalidBurnAmount} value={burnAmount} maxIntegerPoinsts={LUNA_INPUT_MAXIMUM_INTEGER_POINTS} maxDecimalPoints={LUNA_INPUT_MAXIMUM_DECIMAL_POINTS} onChange={({ target }: ChangeEvent<HTMLInputElement>) => updateBurnAmount(target.value) } /> </SelectAndTextInputContainer> <IconLineSeparator /> {/* Get (Asset) */} <div className="gett-description"> <p>and get</p> <p /> </div> <SelectAndTextInputContainer className="gett" gridColumns={[120, '1fr']} error={!!invalidBurnAmount} > <MuiNativeSelect value={getCurrency} onChange={({ target }) => updateGetCurrency(target.value)} IconComponent={ assetCurrencies.length < 2 ? BlankComponent : undefined } disabled={assetCurrencies.length < 2} > {assetCurrencies.map(({ label, value }) => ( <option key={value} value={value}> {label} </option> ))} </MuiNativeSelect> <NumberMuiInput placeholder="0.00" error={!!invalidBurnAmount} value={getAmount} maxIntegerPoinsts={LUNA_INPUT_MAXIMUM_INTEGER_POINTS} maxDecimalPoints={LUNA_INPUT_MAXIMUM_DECIMAL_POINTS} onChange={({ target }: ChangeEvent<HTMLInputElement>) => updateGetAmount(target.value) } /> </SelectAndTextInputContainer> <TxFeeList className="receipt"> {exchangeRate && ( <SwapListItem label="Price" currencyA={burnCurrency.label} currencyB={getCurrency.label} exchangeRateAB={exchangeRate.exchange_rate} formatExchangeRate={(ratio) => formatLuna(ratio as Luna<Big>)} /> )} {!!pegRecoveryFee && getAmount.length > 0 && ( <TxFeeListItem label={<IconSpan>Peg Recovery Fee</IconSpan>}> {formatLuna(demicrofy(pegRecoveryFee(getAmount)))} Luna </TxFeeListItem> )} {burnAmount.length > 0 && ( <TxFeeListItem label={<IconSpan>Tx Fee</IconSpan>}> {formatUST(demicrofy(fixedGas))} UST
{/* Submit */} <ActionButton className="submit" disabled={ !connectedWallet || burnAmount.length === 0 || big(burnAmount).lte(0) || !!invalidTxFee || !!invalidBurnAmount } onClick={() => connectedWallet && proceed(connectedWallet, burnAmount)} > Burn </ActionButton> </> ); } function BlankComponent() { return <div />; }
</TxFeeListItem> )} </TxFeeList>
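The two `useCallback` handlers in this record keep the burn and receive fields in sync through a single exchange rate: typing a bLuna amount multiplies by the rate to fill the Luna field, and typing a Luna amount divides by it to fill the bLuna field (falling back to a rate of 1 while the query has not resolved). A tiny sketch of that invariant is below; the rate value is made up, and the real component works with decimal strings rather than floats.

```python
# Illustrative only: the exchange rate is hypothetical, and real amounts are
# handled as decimal strings in the component rather than floats.
exchange_rate = 1.05  # Luna received per bLuna burned (hypothetical)

def luna_from_bluna(burn_amount: float) -> float:
    return burn_amount * exchange_rate

def bluna_from_luna(get_amount: float) -> float:
    return get_amount / exchange_rate

# Round-tripping through both conversions returns the original amount.
assert abs(bluna_from_luna(luna_from_bluna(10.0)) - 10.0) < 1e-9
```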
index.js
import Grid from '@material-ui/core/Grid' import { ScaleLoader } from 'halogenium' import React from 'react'
<Grid container alignItems="center" direction="column" justify="center" style={{ minHeight: '100vh' }} > <ScaleLoader color="#2196f3" /> </Grid> ) export default Loading
const Loading = () => (
horizontal.go
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package podautoscaler import ( "context" "fmt" "math" "time" autoscalingv1 "k8s.io/api/autoscaling/v1" autoscalingv2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" autoscalinginformers "k8s.io/client-go/informers/autoscaling/v1" coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes/scheme" autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1" corelisters "k8s.io/client-go/listers/core/v1" scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/controller" metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" ) var ( scaleUpLimitFactor = 2.0 scaleUpLimitMinimum = 4.0 ) type timestampedRecommendation struct { recommendation int32 timestamp time.Time } type timestampedScaleEvent struct { replicaChange int32 // positive for scaleUp, negative for scaleDown timestamp time.Time outdated bool } // HorizontalController is responsible for the synchronizing HPA objects stored // in the system with the actual deployments/replication controllers they // control. type HorizontalController struct { scaleNamespacer scaleclient.ScalesGetter hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter mapper apimeta.RESTMapper replicaCalc *ReplicaCalculator eventRecorder record.EventRecorder downscaleStabilisationWindow time.Duration // hpaLister is able to list/get HPAs from the shared cache from the informer passed in to // NewHorizontalController. hpaLister autoscalinglisters.HorizontalPodAutoscalerLister hpaListerSynced cache.InformerSynced // podLister is able to list/get Pods from the shared cache from the informer passed in to // NewHorizontalController. podLister corelisters.PodLister podListerSynced cache.InformerSynced // Controllers that need to be synced queue workqueue.RateLimitingInterface // Latest unstabilized recommendations for each autoscaler. recommendations map[string][]timestampedRecommendation // Latest autoscaler events scaleUpEvents map[string][]timestampedScaleEvent scaleDownEvents map[string][]timestampedScaleEvent } // NewHorizontalController creates a new HorizontalController. 
func NewHorizontalController( evtNamespacer v1core.EventsGetter, scaleNamespacer scaleclient.ScalesGetter, hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter, mapper apimeta.RESTMapper, metricsClient metricsclient.MetricsClient, hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer, podInformer coreinformers.PodInformer, resyncPeriod time.Duration, downscaleStabilisationWindow time.Duration, tolerance float64, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration, ) *HorizontalController { broadcaster := record.NewBroadcaster() broadcaster.StartLogging(klog.Infof) broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"}) hpaController := &HorizontalController{ eventRecorder: recorder, scaleNamespacer: scaleNamespacer, hpaNamespacer: hpaNamespacer, downscaleStabilisationWindow: downscaleStabilisationWindow, queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"), mapper: mapper, recommendations: map[string][]timestampedRecommendation{}, scaleUpEvents: map[string][]timestampedScaleEvent{}, scaleDownEvents: map[string][]timestampedScaleEvent{}, } hpaInformer.Informer().AddEventHandlerWithResyncPeriod( cache.ResourceEventHandlerFuncs{ AddFunc: hpaController.enqueueHPA, UpdateFunc: hpaController.updateHPA, DeleteFunc: hpaController.deleteHPA, }, resyncPeriod, ) hpaController.hpaLister = hpaInformer.Lister() hpaController.hpaListerSynced = hpaInformer.Informer().HasSynced hpaController.podLister = podInformer.Lister() hpaController.podListerSynced = podInformer.Informer().HasSynced replicaCalc := NewReplicaCalculator( metricsClient, hpaController.podLister, tolerance, cpuInitializationPeriod, delayOfInitialReadinessStatus, ) hpaController.replicaCalc = replicaCalc return hpaController } // Run begins watching and syncing. func (a *HorizontalController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer a.queue.ShutDown() klog.Infof("Starting HPA controller") defer klog.Infof("Shutting down HPA controller") if !cache.WaitForNamedCacheSync("HPA", stopCh, a.hpaListerSynced, a.podListerSynced) { return } // start a single worker (we may wish to start more in the future) go wait.Until(a.worker, time.Second, stopCh) <-stopCh } // obj could be an *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item. func (a *HorizontalController) updateHPA(old, cur interface{}) { a.enqueueHPA(cur) } // obj could be an *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item. func (a *HorizontalController) enqueueHPA(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) return } // Requests are always added to queue with resyncPeriod delay. If there's already // request for the HPA in the queue then a new request is always dropped. Requests spend resync // interval in queue so HPAs are processed every resync interval. a.queue.AddRateLimited(key) } func (a *HorizontalController) deleteHPA(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) return } // TODO: could we leak if we fail to get the key? 
a.queue.Forget(key) } func (a *HorizontalController) worker() { for a.processNextWorkItem() { } klog.Infof("horizontal pod autoscaler controller worker shutting down") } func (a *HorizontalController) processNextWorkItem() bool { key, quit := a.queue.Get() if quit { return false } defer a.queue.Done(key) deleted, err := a.reconcileKey(key.(string)) if err != nil { utilruntime.HandleError(err) } // Add request processing HPA to queue with resyncPeriod delay. // Requests are always added to queue with resyncPeriod delay. If there's already request // for the HPA in the queue then a new request is always dropped. Requests spend resyncPeriod // in queue so HPAs are processed every resyncPeriod. // Request is added here just in case last resync didn't insert request into the queue. This // happens quite often because there is race condition between adding request after resyncPeriod // and removing them from queue. Request can be added by resync before previous request is // removed from queue. If we didn't add request here then in this case one request would be dropped // and HPA would processed after 2 x resyncPeriod. if !deleted { a.queue.AddRateLimited(key) } return true } // computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA, // returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of // all metrics computed. func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale, metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) { if scale.Status.Selector == "" { errMsg := "selector is required" a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg) setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector") return 0, "", nil, time.Time{}, fmt.Errorf(errMsg) } selector, err := labels.Parse(scale.Status.Selector) if err != nil { errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg) setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg) return 0, "", nil, time.Time{}, fmt.Errorf(errMsg) } specReplicas := scale.Spec.Replicas statusReplicas := scale.Status.Replicas statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs)) invalidMetricsCount := 0 var invalidMetricError error var invalidMetricCondition autoscalingv2.HorizontalPodAutoscalerCondition for i, metricSpec := range metricSpecs { replicaCountProposal, metricNameProposal, timestampProposal, condition, err := a.computeReplicasForMetric(hpa, metricSpec, specReplicas, statusReplicas, selector, &statuses[i]) if err != nil { if invalidMetricsCount <= 0 { invalidMetricCondition = condition invalidMetricError = err } invalidMetricsCount++ } if err == nil && (replicas == 0 || replicaCountProposal > replicas) { timestamp = timestampProposal replicas = replicaCountProposal metric = metricNameProposal } } // If all metrics are invalid return error and set condition on hpa based on first invalid metric. 
if invalidMetricsCount >= len(metricSpecs) { setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message) return 0, "", statuses, time.Time{}, fmt.Errorf("invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError) } setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric) return replicas, metric, statuses, timestamp, nil } // Computes the desired number of replicas for a specific hpa and metric specification, // returning the metric status and a proposed condition to be set on the HPA object. func (a *HorizontalController) computeReplicasForMetric(hpa *autoscalingv2.HorizontalPodAutoscaler, spec autoscalingv2.MetricSpec, specReplicas, statusReplicas int32, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, metricNameProposal string, timestampProposal time.Time, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) { switch spec.Type { case autoscalingv2.ObjectMetricSourceType: metricSelector, err := metav1.LabelSelectorAsSelector(spec.Object.Metric.Selector) if err != nil { condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err) return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err) } replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForObjectMetric(specReplicas, statusReplicas, spec, hpa, selector, status, metricSelector) if err != nil { return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err) } case autoscalingv2.PodsMetricSourceType: metricSelector, err := metav1.LabelSelectorAsSelector(spec.Pods.Metric.Selector) if err != nil { condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err) return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err) } replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForPodsMetric(specReplicas, spec, hpa, selector, status, metricSelector) if err != nil { return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err) } case autoscalingv2.ResourceMetricSourceType: replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForResourceMetric(specReplicas, spec, hpa, selector, status) if err != nil { return 0, "", time.Time{}, condition, err } case autoscalingv2.ExternalMetricSourceType: replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, spec, hpa, selector, status) if err != nil { return 0, "", time.Time{}, condition, err } default: errMsg := fmt.Sprintf("unknown metric source type %q", string(spec.Type)) err = fmt.Errorf(errMsg) condition := a.getUnableComputeReplicaCountCondition(hpa, "InvalidMetricSourceType", err) return 0, "", time.Time{}, condition, err } return replicaCountProposal, metricNameProposal, timestampProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error) { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return true, err } hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name) if errors.IsNotFound(err) { 
klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace) delete(a.recommendations, key) delete(a.scaleUpEvents, key) delete(a.scaleDownEvents, key) return true, nil } if err != nil { return false, err } return false, a.reconcileAutoscaler(hpa, key) } // computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType. func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicas int32, timestamp time.Time, metricName string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) { if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType { replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector) if err != nil { condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err) return 0, timestampProposal, "", condition, err } *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.ObjectMetricSourceType, Object: &autoscalingv2.ObjectMetricStatus{ DescribedObject: metricSpec.Object.DescribedObject, Metric: autoscalingv2.MetricIdentifier{ Name: metricSpec.Object.Metric.Name, Selector: metricSpec.Object.Metric.Selector, }, Current: autoscalingv2.MetricValueStatus{ Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI), }, }, } return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType { replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector) if err != nil { condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err) return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s object metric: %v", metricSpec.Object.Metric.Name, err) } *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.ObjectMetricSourceType, Object: &autoscalingv2.ObjectMetricStatus{ Metric: autoscalingv2.MetricIdentifier{ Name: metricSpec.Object.Metric.Name, Selector: metricSpec.Object.Metric.Selector, }, Current: autoscalingv2.MetricValueStatus{ AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI), }, }, } return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } errMsg := "invalid object metric source: neither a value target nor an average value target was set" err = fmt.Errorf(errMsg) condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err) return 0, time.Time{}, "", condition, err } // computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType. 
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) { replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector) if err != nil { condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err) return 0, timestampProposal, "", condition, err } *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.PodsMetricSourceType, Pods: &autoscalingv2.PodsMetricStatus{ Metric: autoscalingv2.MetricIdentifier{ Name: metricSpec.Pods.Metric.Name, Selector: metricSpec.Pods.Metric.Selector, }, Current: autoscalingv2.MetricValueStatus{ AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI), }, }, } return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } // computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType. func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) { if metricSpec.Resource.Target.AverageValue != nil { var rawProposal int64 replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(currentReplicas, metricSpec.Resource.Target.AverageValue.MilliValue(), metricSpec.Resource.Name, hpa.Namespace, selector) if err != nil { condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err) return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err) } metricNameProposal = fmt.Sprintf("%s resource", metricSpec.Resource.Name) *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.ResourceMetricSourceType, Resource: &autoscalingv2.ResourceMetricStatus{ Name: metricSpec.Resource.Name, Current: autoscalingv2.MetricValueStatus{ AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI), }, }, } return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } if metricSpec.Resource.Target.AverageUtilization == nil { errMsg := "invalid resource metric source: neither a utilization target nor a value target was set" err = fmt.Errorf(errMsg) condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err) return 0, time.Time{}, "", condition, fmt.Errorf(errMsg) } targetUtilization := *metricSpec.Resource.Target.AverageUtilization replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector) if err != nil { condition = a.getUnableComputeReplicaCountCondition(hpa, 
"FailedGetResourceMetric", err) return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err) } metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name) *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.ResourceMetricSourceType, Resource: &autoscalingv2.ResourceMetricStatus{ Name: metricSpec.Resource.Name, Current: autoscalingv2.MetricValueStatus{ AverageUtilization: &percentageProposal, AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI), }, }, } return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } // computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType. func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) { if metricSpec.External.Target.AverageValue != nil { replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector) if err != nil { condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err) return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err) } *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.ExternalMetricSourceType, External: &autoscalingv2.ExternalMetricStatus{ Metric: autoscalingv2.MetricIdentifier{ Name: metricSpec.External.Metric.Name, Selector: metricSpec.External.Metric.Selector, }, Current: autoscalingv2.MetricValueStatus{ AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI), }, }, } return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } if metricSpec.External.Target.Value != nil { replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector) if err != nil { condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err) return 0, time.Time{}, "", condition, fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err) } *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.ExternalMetricSourceType, External: &autoscalingv2.ExternalMetricStatus{ Metric: autoscalingv2.MetricIdentifier{ Name: metricSpec.External.Metric.Name, Selector: metricSpec.External.Metric.Selector, }, Current: autoscalingv2.MetricValueStatus{ Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI), }, }, } return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil } errMsg 
:= "invalid external metric source: neither a value target nor an average value target was set" err = fmt.Errorf(errMsg) condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err) return 0, time.Time{}, "", condition, fmt.Errorf(errMsg) } func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) { if a.recommendations[key] == nil { a.recommendations[key] = []timestampedRecommendation{{currentReplicas, time.Now()}} } } func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler, key string) error { // make a copy so that we never mutate the shared informer cache (conversion can mutate the object) hpav1 := hpav1Shared.DeepCopy() // then, convert to autoscaling/v2, which makes our lives easier when calculating metrics hpaRaw, err := unsafeConvertToVersionVia(hpav1, autoscalingv2.SchemeGroupVersion) if err != nil { a.eventRecorder.Event(hpav1, v1.EventTypeWarning, "FailedConvertHPA", err.Error()) return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err) } hpa := hpaRaw.(*autoscalingv2.HorizontalPodAutoscaler) hpaStatusOriginal := hpa.Status.DeepCopy() reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name) targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error()) setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err) a.updateStatusIfNeeded(hpaStatusOriginal, hpa) return fmt.Errorf("invalid API version in scale target reference: %v", err) } targetGK := schema.GroupKind{ Group: targetGV.Group, Kind: hpa.Spec.ScaleTargetRef.Kind, } mappings, err := a.mapper.RESTMappings(targetGK) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error()) setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err) a.updateStatusIfNeeded(hpaStatusOriginal, hpa) return fmt.Errorf("unable to determine resource for scale target reference: %v", err) } scale, targetGR, err := a.scaleForResourceMappings(hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error()) setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err) a.updateStatusIfNeeded(hpaStatusOriginal, hpa) return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err) } setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale") currentReplicas := scale.Spec.Replicas a.recordInitialRecommendation(currentReplicas, key) var ( metricStatuses []autoscalingv2.MetricStatus metricDesiredReplicas int32 metricName string ) desiredReplicas := int32(0) rescaleReason := "" var minReplicas int32 if hpa.Spec.MinReplicas != nil { minReplicas = *hpa.Spec.MinReplicas } else { // Default value minReplicas = 1 } rescale := true if scale.Spec.Replicas == 0 && minReplicas != 0 { // Autoscaling is disabled for this resource desiredReplicas = 0 rescale = false setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, 
"ScalingDisabled", "scaling is disabled since the replica count of the target is zero") } else if currentReplicas > hpa.Spec.MaxReplicas { rescaleReason = "Current number of replicas above Spec.MaxReplicas" desiredReplicas = hpa.Spec.MaxReplicas } else if currentReplicas < minReplicas { rescaleReason = "Current number of replicas below Spec.MinReplicas" desiredReplicas = minReplicas } else { var metricTimestamp time.Time metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(hpa, scale, hpa.Spec.Metrics) if err != nil { a.setCurrentReplicasInStatus(hpa, currentReplicas) if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil { utilruntime.HandleError(err) } a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedComputeMetricsReplicas", err.Error()) return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err) } klog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, metricTimestamp, reference) rescaleMetric := "" if metricDesiredReplicas > desiredReplicas { desiredReplicas = metricDesiredReplicas rescaleMetric = metricName } if desiredReplicas > currentReplicas { rescaleReason = fmt.Sprintf("%s above target", rescaleMetric) } if desiredReplicas < currentReplicas { rescaleReason = "All metrics below target" } if hpa.Spec.Behavior == nil { desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas, minReplicas) } else { desiredReplicas = a.normalizeDesiredReplicasWithBehaviors(hpa, key, currentReplicas, desiredReplicas, minReplicas) } rescale = desiredReplicas != currentReplicas } if rescale { scale.Spec.Replicas = desiredReplicas _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(context.TODO(), targetGR, scale, metav1.UpdateOptions{}) if err != nil { a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error()) setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err) a.setCurrentReplicasInStatus(hpa, currentReplicas) if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil { utilruntime.HandleError(err) } return fmt.Errorf("failed to rescale %s: %v", reference, err) } setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas) a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason) a.storeScaleEvent(hpa.Spec.Behavior, key, currentReplicas, desiredReplicas) klog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s", hpa.Name, currentReplicas, desiredReplicas, rescaleReason) } else { klog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime) desiredReplicas = currentReplicas } a.setStatus(hpa, currentReplicas, desiredReplicas, metricStatuses, rescale) return a.updateStatusIfNeeded(hpaStatusOriginal, hpa) } // stabilizeRecommendation: // - replaces old recommendation with the newest recommendation, // - returns max of recommendations that are not older than downscaleStabilisationWindow. 
func (a *HorizontalController) stabilizeRecommendation(key string, prenormalizedDesiredReplicas int32) int32 { maxRecommendation := prenormalizedDesiredReplicas foundOldSample := false oldSampleIndex := 0 cutoff := time.Now().Add(-a.downscaleStabilisationWindow) for i, rec := range a.recommendations[key] { if rec.timestamp.Before(cutoff) { foundOldSample = true oldSampleIndex = i } else if rec.recommendation > maxRecommendation { maxRecommendation = rec.recommendation } } if foundOldSample { a.recommendations[key][oldSampleIndex] = timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()} } else { a.recommendations[key] = append(a.recommendations[key], timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()}) } return maxRecommendation } // normalizeDesiredReplicas takes the metrics desired replicas value and normalizes it based on the appropriate conditions (i.e. < maxReplicas, > // minReplicas, etc...) func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas int32, prenormalizedDesiredReplicas int32, minReplicas int32) int32 { stabilizedRecommendation := a.stabilizeRecommendation(key, prenormalizedDesiredReplicas) if stabilizedRecommendation != prenormalizedDesiredReplicas { setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ScaleDownStabilized", "recent recommendations were higher than current one, applying the highest recent recommendation") } else { setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size") } desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas) if desiredReplicas == stabilizedRecommendation { setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason) } else { setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason) } return desiredReplicas } // NormalizationArg is used to pass all needed information between functions as one structure type NormalizationArg struct { Key string ScaleUpBehavior *autoscalingv2.HPAScalingRules ScaleDownBehavior *autoscalingv2.HPAScalingRules MinReplicas int32 MaxReplicas int32 CurrentReplicas int32 DesiredReplicas int32 } // normalizeDesiredReplicasWithBehaviors takes the metrics desired replicas value and normalizes it: // 1. Apply the basic conditions (i.e. < maxReplicas, > minReplicas, etc...) // 2. Apply the scale up/down limits from the hpaSpec.Behaviors (i.e. add no more than 4 pods) // 3. Apply the constraints period (i.e. add no more than 4 pods per minute) // 4. Apply the stabilization (i.e. 
add no more than 4 pods per minute, and pick the smallest recommendation during last 5 minutes) func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas, prenormalizedDesiredReplicas, minReplicas int32) int32 { a.maybeInitScaleDownStabilizationWindow(hpa) normalizationArg := NormalizationArg{ Key: key, ScaleUpBehavior: hpa.Spec.Behavior.ScaleUp, ScaleDownBehavior: hpa.Spec.Behavior.ScaleDown, MinReplicas: minReplicas, MaxReplicas: hpa.Spec.MaxReplicas, CurrentReplicas: currentReplicas, DesiredReplicas: prenormalizedDesiredReplicas} stabilizedRecommendation, reason, message := a.stabilizeRecommendationWithBehaviors(normalizationArg) normalizationArg.DesiredReplicas = stabilizedRecommendation if stabilizedRecommendation != prenormalizedDesiredReplicas { // "ScaleUpStabilized" || "ScaleDownStabilized" setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message) } else { setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size") } desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg) if desiredReplicas == stabilizedRecommendation { setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message) } else { setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message) } return desiredReplicas } func (a *HorizontalController) maybeInitScaleDownStabilizationWindow(hpa *autoscalingv2.HorizontalPodAutoscaler) { behavior := hpa.Spec.Behavior if behavior != nil && behavior.ScaleDown != nil && behavior.ScaleDown.StabilizationWindowSeconds == nil { stabilizationWindowSeconds := (int32)(a.downscaleStabilisationWindow.Seconds()) hpa.Spec.Behavior.ScaleDown.StabilizationWindowSeconds = &stabilizationWindowSeconds } } // getReplicasChangePerPeriod function find all the replica changes per period func getReplicasChangePerPeriod(periodSeconds int32, scaleEvents []timestampedScaleEvent) int32 { period := time.Second * time.Duration(periodSeconds) cutoff := time.Now().Add(-period) var replicas int32 for _, rec := range scaleEvents { if rec.timestamp.After(cutoff) { replicas += rec.replicaChange } } return replicas } func (a *HorizontalController) getUnableComputeReplicaCountCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, reason string, err error) (condition autoscalingv2.HorizontalPodAutoscalerCondition) { a.eventRecorder.Event(hpa, v1.EventTypeWarning, reason, err.Error()) return autoscalingv2.HorizontalPodAutoscalerCondition{ Type: autoscalingv2.ScalingActive, Status: v1.ConditionFalse, Reason: reason, Message: fmt.Sprintf("the HPA was unable to compute the replica count: %v", err), } } // storeScaleEvent stores (adds or replaces outdated) scale event. 
// outdated events to be replaced were marked as outdated in the `markScaleEventsOutdated` function func (a *HorizontalController) storeScaleEvent(behavior *autoscalingv2.HorizontalPodAutoscalerBehavior, key string, prevReplicas, newReplicas int32) { if behavior == nil { return // we should not store any event as they will not be used } var oldSampleIndex int var longestPolicyPeriod int32 foundOldSample := false if newReplicas > prevReplicas { longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleUp) markScaleEventsOutdated(a.scaleUpEvents[key], longestPolicyPeriod) replicaChange := newReplicas - prevReplicas for i, event := range a.scaleUpEvents[key] { if event.outdated { foundOldSample = true oldSampleIndex = i } } newEvent := timestampedScaleEvent{replicaChange, time.Now(), false} if foundOldSample { a.scaleUpEvents[key][oldSampleIndex] = newEvent } else { a.scaleUpEvents[key] = append(a.scaleUpEvents[key], newEvent) } } else { longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleDown) markScaleEventsOutdated(a.scaleDownEvents[key], longestPolicyPeriod) replicaChange := prevReplicas - newReplicas for i, event := range a.scaleDownEvents[key] { if event.outdated { foundOldSample = true oldSampleIndex = i } } newEvent := timestampedScaleEvent{replicaChange, time.Now(), false} if foundOldSample { a.scaleDownEvents[key][oldSampleIndex] = newEvent } else { a.scaleDownEvents[key] = append(a.scaleDownEvents[key], newEvent) } } } // stabilizeRecommendationWithBehaviors: // - replaces old recommendation with the newest recommendation, // - returns {max,min} of recommendations that are not older than constraints.Scale{Up,Down}.DelaySeconds func (a *HorizontalController) stabilizeRecommendationWithBehaviors(args NormalizationArg) (int32, string, string) { recommendation := args.DesiredReplicas foundOldSample := false oldSampleIndex := 0 var scaleDelaySeconds int32 var reason, message string var betterRecommendation func(int32, int32) int32 if args.DesiredReplicas >= args.CurrentReplicas { scaleDelaySeconds = *args.ScaleUpBehavior.StabilizationWindowSeconds betterRecommendation = min reason = "ScaleUpStabilized" message = "recent recommendations were lower than current one, applying the lowest recent recommendation" } else { scaleDelaySeconds = *args.ScaleDownBehavior.StabilizationWindowSeconds betterRecommendation = max reason = "ScaleDownStabilized" message = "recent recommendations were higher than current one, applying the highest recent recommendation" } maxDelaySeconds := max(*args.ScaleUpBehavior.StabilizationWindowSeconds, *args.ScaleDownBehavior.StabilizationWindowSeconds) obsoleteCutoff := time.Now().Add(-time.Second * time.Duration(maxDelaySeconds)) cutoff := time.Now().Add(-time.Second * time.Duration(scaleDelaySeconds)) for i, rec := range a.recommendations[args.Key] { if rec.timestamp.After(cutoff) { recommendation = betterRecommendation(rec.recommendation, recommendation) } if rec.timestamp.Before(obsoleteCutoff) { foundOldSample = true oldSampleIndex = i } } if foundOldSample { a.recommendations[args.Key][oldSampleIndex] = timestampedRecommendation{args.DesiredReplicas, time.Now()} } else { a.recommendations[args.Key] = append(a.recommendations[args.Key], timestampedRecommendation{args.DesiredReplicas, time.Now()}) } return recommendation, reason, message } // convertDesiredReplicasWithBehaviorRate performs the actual normalization, given the constraint rate // It doesn't consider the stabilizationWindow, it is done separately func (a *HorizontalController) 
convertDesiredReplicasWithBehaviorRate(args NormalizationArg) (int32, string, string) { var possibleLimitingReason, possibleLimitingMessage string if args.DesiredReplicas > args.CurrentReplicas { scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], args.ScaleUpBehavior) if scaleUpLimit < args.CurrentReplicas { // We shouldn't scale up further until the scaleUpEvents will be cleaned up scaleUpLimit = args.CurrentReplicas } maximumAllowedReplicas := args.MaxReplicas if maximumAllowedReplicas > scaleUpLimit { maximumAllowedReplicas = scaleUpLimit possibleLimitingReason = "ScaleUpLimit" possibleLimitingMessage = "the desired replica count is increasing faster than the maximum scale rate" } else { possibleLimitingReason = "TooManyReplicas" possibleLimitingMessage = "the desired replica count is more than the maximum replica count" } if args.DesiredReplicas > maximumAllowedReplicas { return maximumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage } } else if args.DesiredReplicas < args.CurrentReplicas { scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleDownEvents[args.Key], args.ScaleDownBehavior) if scaleDownLimit > args.CurrentReplicas { // We shouldn't scale down further until the scaleDownEvents will be cleaned up scaleDownLimit = args.CurrentReplicas } minimumAllowedReplicas := args.MinReplicas if minimumAllowedReplicas < scaleDownLimit { minimumAllowedReplicas = scaleDownLimit possibleLimitingReason = "ScaleDownLimit" possibleLimitingMessage = "the desired replica count is decreasing faster than the maximum scale rate" } else { possibleLimitingMessage = "the desired replica count is less than the minimum replica count" possibleLimitingReason = "TooFewReplicas" } if args.DesiredReplicas < minimumAllowedReplicas { return minimumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage } } return args.DesiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range" } // convertDesiredReplicas performs the actual normalization, without depending on `HorizontalController` or `HorizontalPodAutoscaler` func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinReplicas, hpaMaxReplicas int32) (int32, string, string) { var minimumAllowedReplicas int32 var maximumAllowedReplicas int32 var possibleLimitingCondition string var possibleLimitingReason string minimumAllowedReplicas = hpaMinReplicas // Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by // bogus CPU usage report from heapster/kubelet (like in issue #32304). 
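// Worked example (factor and minimum assumed; the constants scaleUpLimitFactor
// and scaleUpLimitMinimum are defined elsewhere in this file): with a factor of
// 2 and a minimum of 4, calculateScaleUpLimit below yields max(2*3, 4) = 6 for
// 3 current replicas, and max(2*1, 4) = 4 for a single replica, so small
// targets can still scale up meaningfully in one step.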
scaleUpLimit := calculateScaleUpLimit(currentReplicas) if hpaMaxReplicas > scaleUpLimit { maximumAllowedReplicas = scaleUpLimit possibleLimitingCondition = "ScaleUpLimit" possibleLimitingReason = "the desired replica count is increasing faster than the maximum scale rate" } else { maximumAllowedReplicas = hpaMaxReplicas possibleLimitingCondition = "TooManyReplicas" possibleLimitingReason = "the desired replica count is more than the maximum replica count" } if desiredReplicas < minimumAllowedReplicas { possibleLimitingCondition = "TooFewReplicas" possibleLimitingReason = "the desired replica count is less than the minimum replica count" return minimumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason } else if desiredReplicas > maximumAllowedReplicas { return maximumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason } return desiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range" } func calculateScaleUpLimit(currentReplicas int32) int32 { return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum)) } // markScaleEventsOutdated sets the 'outdated=true' flag for all scale events that are older than the longest policy period and can therefore be reused func markScaleEventsOutdated(scaleEvents []timestampedScaleEvent, longestPolicyPeriod int32) { period := time.Second * time.Duration(longestPolicyPeriod) cutoff := time.Now().Add(-period) for i, event := range scaleEvents { if event.timestamp.Before(cutoff) { // outdated scale events are marked for later reuse scaleEvents[i].outdated = true } } } func
(scalingRules *autoscalingv2.HPAScalingRules) int32 { var longestPolicyPeriod int32 for _, policy := range scalingRules.Policies { if policy.PeriodSeconds > longestPolicyPeriod { longestPolicyPeriod = policy.PeriodSeconds } } return longestPolicyPeriod } // calculateScaleUpLimitWithScalingRules returns the maximum number of pods that could be added for the given HPAScalingRules func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 { var result int32 var proposed int32 var selectPolicyFn func(int32, int32) int32 if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect { return currentReplicas // Scaling is disabled } else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect { selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value } else { selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change } for _, policy := range scalingRules.Policies { replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents) periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod if policy.Type == autoscalingv2.PodsScalingPolicy { proposed = int32(periodStartReplicas + policy.Value) } else if policy.Type == autoscalingv2.PercentScalingPolicy { // the proposal has to be rounded up because the proposed change might not increase the replica count causing the target to never scale up proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100))) } result = selectPolicyFn(result, proposed) } return result } // calculateScaleDownLimitWithBehavior returns the maximum number of pods that could be deleted for the given HPAScalingRules func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 { var result int32 = math.MaxInt32 var proposed int32 var selectPolicyFn func(int32, int32) int32 if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect { return currentReplicas // Scaling is disabled } else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect { selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value } else { selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change } for _, policy := range scalingRules.Policies { replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents) periodStartReplicas := currentReplicas + replicasDeletedInCurrentPeriod if policy.Type == autoscalingv2.PodsScalingPolicy { proposed = periodStartReplicas - policy.Value } else if policy.Type == autoscalingv2.PercentScalingPolicy { proposed = int32(float64(periodStartReplicas) * (1 - float64(policy.Value)/100)) } result = selectPolicyFn(result, proposed) } return result } // scaleForResourceMappings attempts to fetch the scale for the // resource with the given name and namespace, trying each RESTMapping // in turn until a working one is found. If none work, the first error // is returned. It returns both the scale, as well as the group-resource from // the working mapping. 
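// Illustrative note (target kind assumed): for a Deployment scale target the
// working mapping typically yields the GroupResource {Group: "apps",
// Resource: "deployments"}; the actual result depends on the RESTMappings
// passed in by the caller.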
func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) { var firstErr error for i, mapping := range mappings { targetGR := mapping.Resource.GroupResource() scale, err := a.scaleNamespacer.Scales(namespace).Get(context.TODO(), targetGR, name, metav1.GetOptions{}) if err == nil { return scale, targetGR, nil } // if this is the first error, remember it, // then go on and try other mappings until we find a good one if i == 0 { firstErr = err } } // make sure we handle an empty set of mappings if firstErr == nil { firstErr = fmt.Errorf("unrecognized resource") } return nil, schema.GroupResource{}, firstErr } // setCurrentReplicasInStatus sets the current replica count in the status of the HPA. func (a *HorizontalController) setCurrentReplicasInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32) { a.setStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false) } // setStatus recreates the status of the given HPA, updating the current and // desired replicas, as well as the metric statuses func (a *HorizontalController) setStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, metricStatuses []autoscalingv2.MetricStatus, rescale bool) { hpa.Status = autoscalingv2.HorizontalPodAutoscalerStatus{ CurrentReplicas: currentReplicas, DesiredReplicas: desiredReplicas, LastScaleTime: hpa.Status.LastScaleTime, CurrentMetrics: metricStatuses, Conditions: hpa.Status.Conditions, } if rescale { now := metav1.NewTime(time.Now()) hpa.Status.LastScaleTime = &now } } // updateStatusIfNeeded calls updateStatus only if the status of the new HPA is not the same as the old status func (a *HorizontalController) updateStatusIfNeeded(oldStatus *autoscalingv2.HorizontalPodAutoscalerStatus, newHPA *autoscalingv2.HorizontalPodAutoscaler) error { // skip a write if we wouldn't need to update if apiequality.Semantic.DeepEqual(oldStatus, &newHPA.Status) { return nil } return a.updateStatus(newHPA) } // updateStatus actually does the update request for the status of the given HPA func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAutoscaler) error { // convert back to autoscalingv1 hpaRaw, err := unsafeConvertToVersionVia(hpa, autoscalingv1.SchemeGroupVersion) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedConvertHPA", err.Error()) return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err) } hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler) _, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1, metav1.UpdateOptions{}) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error()) return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err) } klog.V(2).Infof("Successfully updated status for %s", hpa.Name) return nil } // unsafeConvertToVersionVia is like Scheme.UnsafeConvertToVersion, but it does so via an internal version first. // We use it since working with v2alpha1 is convenient here, but we want to use the v1 client (and // can't just use the internal version). Note that conversion mutates the object, so you need to deepcopy // *before* you call this if the input object came out of a shared cache. 
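// Sketch of the conversion path described above (no new behavior implied):
// external object (e.g. autoscaling/v2) -> __internal -> requested external
// version (e.g. autoscaling/v1); the scheme's registered conversion functions
// reconcile any differences between the two external versions.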
func unsafeConvertToVersionVia(obj runtime.Object, externalVersion schema.GroupVersion) (runtime.Object, error) { objInt, err := legacyscheme.Scheme.UnsafeConvertToVersion(obj, schema.GroupVersion{Group: externalVersion.Group, Version: runtime.APIVersionInternal}) if err != nil { return nil, fmt.Errorf("failed to convert the given object to the internal version: %v", err) } objExt, err := legacyscheme.Scheme.UnsafeConvertToVersion(objInt, externalVersion) if err != nil { return nil, fmt.Errorf("failed to convert the given object back to the external version: %v", err) } return objExt, err } // setCondition sets the specific condition type on the given HPA to the specified value with the given reason // and message. The message and args are treated like a format string. The condition will be added if it is // not present. func setCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) { hpa.Status.Conditions = setConditionInList(hpa.Status.Conditions, conditionType, status, reason, message, args...) } // setConditionInList sets the specific condition type on the given HPA to the specified value with the given // reason and message. The message and args are treated like a format string. The condition will be added if // it is not present. The new list will be returned. func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) []autoscalingv2.HorizontalPodAutoscalerCondition { resList := inputList var existingCond *autoscalingv2.HorizontalPodAutoscalerCondition for i, condition := range resList { if condition.Type == conditionType { // can't take a pointer to an iteration variable existingCond = &resList[i] break } } if existingCond == nil { resList = append(resList, autoscalingv2.HorizontalPodAutoscalerCondition{ Type: conditionType, }) existingCond = &resList[len(resList)-1] } if existingCond.Status != status { existingCond.LastTransitionTime = metav1.Now() } existingCond.Status = status existingCond.Reason = reason existingCond.Message = fmt.Sprintf(message, args...) return resList } func max(a, b int32) int32 { if a >= b { return a } return b } func min(a, b int32) int32 { if a <= b { return a } return b }
getLongestPolicyPeriod
admin.py
# Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Controllers for the admin view.""" from __future__ import absolute_import from __future__ import unicode_literals import io import logging import random from core import feconf from core import python_utils from core import utils from core.constants import constants from core.controllers import acl_decorators from core.controllers import base from core.controllers import domain_objects_validator as validation_method from core.domain import auth_services from core.domain import blog_services from core.domain import collection_services from core.domain import config_domain from core.domain import config_services from core.domain import email_manager from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services from core.domain import opportunity_services from core.domain import platform_feature_services as feature_services from core.domain import platform_parameter_domain as parameter_domain from core.domain import question_domain from core.domain import question_services from core.domain import recommendations_services from core.domain import rights_manager from core.domain import role_services from core.domain import search_services from core.domain import skill_domain from core.domain import skill_services from core.domain import state_domain from core.domain import stats_services from core.domain import story_domain from core.domain import story_services from core.domain import subtopic_page_domain from core.domain import subtopic_page_services from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services from core.domain import user_services from core.domain import wipeout_service class AdminPage(base.BaseHandler): """Admin page shown in the App Engine admin console.""" URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): """Handles GET requests.""" self.render_template('admin-page.mainpage.html') class AdminHandler(base.BaseHandler): """Handler for the admin page.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': {}, 'POST': { 'action': { 'schema': { 'type': 'basestring', 'choices': [ 'reload_exploration', 'reload_collection', 'generate_dummy_explorations', 'clear_search_index', 'generate_dummy_new_structures_data', 'generate_dummy_new_skill_data', 'save_config_properties', 'revert_config_property', 'upload_topic_similarities', 'regenerate_topic_related_opportunities', 'update_feature_flag_rules' ] }, # TODO(#13331): Remove default_value when it is confirmed that, # for clearing the search indices of exploration & collection # 'action' field must be provided in the payload. 
'default_value': None }, 'exploration_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'collection_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'num_dummy_exps_to_generate': { 'schema': { 'type': 'int' }, 'default_value': None }, 'num_dummy_exps_to_publish': { 'schema': { 'type': 'int' }, 'default_value': None }, 'new_config_property_values': { 'schema': { 'type': 'object_dict', 'validation_method': ( validation_method.validate_new_config_property_values) }, 'default_value': None }, 'config_property_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'data': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'topic_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'feature_name': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'commit_message': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'new_rules': { 'schema': { 'type': 'list', 'items': { 'type': 'object_dict', 'object_class': parameter_domain.PlatformParameterRule } }, 'default_value': None } } } @acl_decorators.can_access_admin_page def get(self): """Handles GET requests.""" demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys()) topic_summaries = topic_fetchers.get_all_topic_summaries() topic_summary_dicts = [ summary.to_dict() for summary in topic_summaries] feature_flag_dicts = feature_services.get_all_feature_flag_dicts() config_properties = config_domain.Registry.get_config_property_schemas() # Removes promo-bar related configs as promo-bar is handled by # release coordinators in /release-coordinator page. # Remove blog related configs as they will be handled by 'blog admins' # on blog admin page.
del config_properties['max_number_of_tags_assigned_to_blog_post'] del config_properties['list_of_default_tags_for_blog_post'] self.render_json({ 'config_properties': config_properties, 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.items()), 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.items()), 'demo_exploration_ids': demo_exploration_ids, 'updatable_roles': role_services.UPDATABLE_ROLES, 'viewable_roles': role_services.VIEWABLE_ROLES, 'human_readable_roles': role_services.HUMAN_READABLE_ROLES, 'role_to_actions': role_services.get_role_actions(), 'topic_summaries': topic_summary_dicts, 'feature_flags': feature_flag_dicts, }) @acl_decorators.can_access_admin_page def post(self): """Handles POST requests.""" action = self.normalized_payload.get('action') try: result = {} if action == 'reload_exploration': exploration_id = self.normalized_payload.get('exploration_id') self._reload_exploration(exploration_id) elif action == 'reload_collection': collection_id = self.normalized_payload.get('collection_id') self._reload_collection(collection_id) elif action == 'generate_dummy_explorations': num_dummy_exps_to_generate = self.normalized_payload.get( 'num_dummy_exps_to_generate') num_dummy_exps_to_publish = self.normalized_payload.get( 'num_dummy_exps_to_publish') if num_dummy_exps_to_generate < num_dummy_exps_to_publish: raise self.InvalidInputException( 'Generate count cannot be less than publish count') else: self._generate_dummy_explorations( num_dummy_exps_to_generate, num_dummy_exps_to_publish) elif action == 'clear_search_index': search_services.clear_collection_search_index() search_services.clear_exploration_search_index() elif action == 'generate_dummy_new_structures_data': self._load_dummy_new_structures_data() elif action == 'generate_dummy_new_skill_data': self._generate_dummy_skill_and_questions() elif action == 'save_config_properties': new_config_property_values = self.normalized_payload.get( 'new_config_property_values') logging.info( '[ADMIN] %s saved config property values: %s' % (self.user_id, new_config_property_values)) for (name, value) in new_config_property_values.items(): config_services.set_property(self.user_id, name, value) elif action == 'revert_config_property': config_property_id = self.normalized_payload.get( 'config_property_id') logging.info( '[ADMIN] %s reverted config property: %s' % (self.user_id, config_property_id)) config_services.revert_property( self.user_id, config_property_id) elif action == 'upload_topic_similarities': data = self.normalized_payload.get('data') recommendations_services.update_topic_similarities(data) elif action == 'regenerate_topic_related_opportunities': topic_id = self.normalized_payload.get('topic_id') opportunities_count = ( opportunity_services .regenerate_opportunities_related_to_topic( topic_id, delete_existing_opportunities=True)) result = { 'opportunities_count': opportunities_count } elif action == 'update_feature_flag_rules': feature_name = self.normalized_payload.get('feature_name') new_rule_dicts = self.normalized_payload.get('new_rules') commit_message = self.normalized_payload.get('commit_message') try: feature_services.update_feature_flag_rules( feature_name, self.user_id, commit_message, new_rule_dicts) except ( utils.ValidationError, feature_services.FeatureFlagNotFoundException) as e: raise self.InvalidInputException(e) logging.info( '[ADMIN] %s updated feature %s with new rules: ' '%s.' 
% (self.user_id, feature_name, new_rule_dicts)) self.render_json(result) except Exception as e: logging.exception('[ADMIN] %s', e) self.render_json({'error': python_utils.UNICODE(e)}) python_utils.reraise_exception() def _reload_exploration(self, exploration_id): """Reloads the exploration in dev_mode corresponding to the given exploration id. Args: exploration_id: str. The exploration id. Raises: Exception. Cannot reload an exploration in production. """ if constants.DEV_MODE: logging.info( '[ADMIN] %s reloaded exploration %s' % (self.user_id, exploration_id)) exp_services.load_demo(python_utils.UNICODE(exploration_id)) rights_manager.release_ownership_of_exploration( user_services.get_system_user(), python_utils.UNICODE(exploration_id)) else: raise Exception('Cannot reload an exploration in production.') def _create_dummy_question( self, question_id, question_content, linked_skill_ids): """Creates a dummy question object with the given question ID. Args: question_id: str. The ID of the question to be created. question_content: str. The question content. linked_skill_ids: list(str). The IDs of the skills to which the question is linked to. Returns: Question. The dummy question with given values. """ state = state_domain.State.create_default_state( 'ABC', is_initial_state=True) state.update_interaction_id('TextInput') state.update_interaction_customization_args({ 'placeholder': { 'value': { 'content_id': 'ca_placeholder_0', 'unicode_str': '' } }, 'rows': {'value': 1} }) state.update_next_content_id_index(1) state.update_linked_skill_id(None) state.update_content(state_domain.SubtitledHtml('1', question_content)) recorded_voiceovers = state_domain.RecordedVoiceovers({}) written_translations = state_domain.WrittenTranslations({}) recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0') recorded_voiceovers.add_content_id_for_voiceover('1') recorded_voiceovers.add_content_id_for_voiceover('default_outcome') written_translations.add_content_id_for_translation('ca_placeholder_0') written_translations.add_content_id_for_translation('1') written_translations.add_content_id_for_translation('default_outcome') state.update_recorded_voiceovers(recorded_voiceovers) state.update_written_translations(written_translations) solution = state_domain.Solution( 'TextInput', False, 'Solution', state_domain.SubtitledHtml( 'solution', '<p>This is a solution.</p>')) hints_list = [ state_domain.Hint( state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>') ) ] state.update_interaction_solution(solution) state.update_interaction_hints(hints_list) state.update_interaction_default_outcome( state_domain.Outcome( None, state_domain.SubtitledHtml( 'feedback_id', '<p>Dummy Feedback</p>'), True, [], None, None ) ) question = question_domain.Question( question_id, state, feconf.CURRENT_STATE_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, []) return question def _create_dummy_skill(self, skill_id, skill_description, explanation): """Creates a dummy skill object with the given values. Args: skill_id: str. The ID of the skill to be created. skill_description: str. The description of the skill. explanation: str. The review material for the skill. Returns: Skill. The dummy skill with given values. 
""" rubrics = [ skill_domain.Rubric( constants.SKILL_DIFFICULTIES[0], ['Explanation 1']), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[1], ['Explanation 2']), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])] skill = skill_domain.Skill.create_default_skill( skill_id, skill_description, rubrics) skill.update_explanation(state_domain.SubtitledHtml('1', explanation)) return skill def _load_dummy_new_structures_data(self): """Loads the database with two topics (one of which is empty), a story and three skills in the topic (two of them in a subtopic) and a question attached to each skill. Raises: Exception. Cannot load new structures data in production mode. Exception. User does not have enough rights to generate data. """ if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( 'User does not have enough rights to generate data.') topic_id_1 = topic_fetchers.get_new_topic_id() topic_id_2 = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() skill_id_1 = skill_services.get_new_skill_id() skill_id_2 = skill_services.get_new_skill_id() skill_id_3 = skill_services.get_new_skill_id() question_id_1 = question_services.get_new_question_id() question_id_2 = question_services.get_new_question_id() question_id_3 = question_services.get_new_question_id() skill_1 = self._create_dummy_skill( skill_id_1, 'Dummy Skill 1', '<p>Dummy Explanation 1</p>') skill_2 = self._create_dummy_skill( skill_id_2, 'Dummy Skill 2', '<p>Dummy Explanation 2</p>') skill_3 = self._create_dummy_skill( skill_id_3, 'Dummy Skill 3', '<p>Dummy Explanation 3</p>') question_1 = self._create_dummy_question( question_id_1, 'Question 1', [skill_id_1]) question_2 = self._create_dummy_question( question_id_2, 'Question 2', [skill_id_2]) question_3 = self._create_dummy_question( question_id_3, 'Question 3', [skill_id_3]) question_services.add_question(self.user_id, question_1) question_services.add_question(self.user_id, question_2) question_services.add_question(self.user_id, question_3) question_services.create_new_question_skill_link( self.user_id, question_id_1, skill_id_1, 0.3) question_services.create_new_question_skill_link( self.user_id, question_id_2, skill_id_2, 0.5) question_services.create_new_question_skill_link( self.user_id, question_id_3, skill_id_3, 0.7) topic_1 = topic_domain.Topic.create_default_topic( topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description') topic_2 = topic_domain.Topic.create_default_topic( topic_id_2, 'Empty Topic', 'empty-topic', 'description') topic_1.add_canonical_story(story_id) topic_1.add_uncategorized_skill_id(skill_id_1) topic_1.add_uncategorized_skill_id(skill_id_2) topic_1.add_uncategorized_skill_id(skill_id_3) topic_1.add_subtopic(1, 'Dummy Subtopic Title') topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2) topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3) subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, topic_id_1)) # These explorations were chosen since they pass the validations # for published stories. 
self._reload_exploration('15') self._reload_exploration('25') self._reload_exploration('13') exp_services.update_exploration( self.user_id, '15', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') exp_services.update_exploration( self.user_id, '25', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') exp_services.update_exploration( self.user_id, '13', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') story = story_domain.Story.create_default_story( story_id, 'Help Jaime win the Arcade', 'Description', topic_id_1, 'help-jamie-win-arcade') story_node_dicts = [{ 'exp_id': '15', 'title': 'What are the place values?', 'description': 'Jaime learns the place value of each digit ' + 'in a big number.' }, { 'exp_id': '25', 'title': 'Finding the value of a number', 'description': 'Jaime understands the value of his ' + 'arcade score.' }, { 'exp_id': '13', 'title': 'Comparing Numbers', 'description': 'Jaime learns if a number is smaller or ' + 'greater than another number.' }] def generate_dummy_story_nodes(node_id, exp_id, title, description): """Generates and connects sequential story nodes. Args: node_id: int. The node id. exp_id: str. The exploration id. title: str. The title of the story node. description: str. The description of the story node. """ story.add_node( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), title) story.update_node_description( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), description) story.update_node_exploration_id( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), exp_id) if node_id != len(story_node_dicts): story.update_node_destination_node_ids( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), ['%s%d' % (story_domain.NODE_ID_PREFIX, node_id + 1)]) exp_services.update_exploration( self.user_id, exp_id, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'category', 'new_value': 'Astronomy' })], 'Change category') for i, story_node_dict in enumerate(story_node_dicts): generate_dummy_story_nodes(i + 1, **story_node_dict) skill_services.save_new_skill(self.user_id, skill_1) skill_services.save_new_skill(self.user_id, skill_2) skill_services.save_new_skill(self.user_id, skill_3) story_services.save_new_story(self.user_id, story) topic_services.save_new_topic(self.user_id, topic_1) topic_services.save_new_topic(self.user_id, topic_2) subtopic_page_services.save_subtopic_page( self.user_id, subtopic_page, 'Added subtopic', [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, 'title': 'Dummy Subtopic Title' })] ) # Generates translation opportunities for the Contributor Dashboard. exp_ids_in_story = story.story_contents.get_all_linked_exp_ids() opportunity_services.add_new_exploration_opportunities( story_id, exp_ids_in_story) topic_services.publish_story(topic_id_1, story_id, self.user_id) else: raise Exception('Cannot load new structures data in production.') def _generate_dummy_skill_and_questions(self): """Generate and loads the database with a skill and 15 questions linked to the skill. Raises: Exception. Cannot load new structures data in production mode. Exception. 
User does not have enough rights to generate data. """ if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( 'User does not have enough rights to generate data.') skill_id = skill_services.get_new_skill_id() skill_name = 'Dummy Skill %s' % python_utils.UNICODE( random.getrandbits(32)) skill = self._create_dummy_skill( skill_id, skill_name, '<p>Dummy Explanation 1</p>') skill_services.save_new_skill(self.user_id, skill) for i in range(15): question_id = question_services.get_new_question_id() question_name = 'Question number %s %s' % ( python_utils.UNICODE(i), skill_name) question = self._create_dummy_question( question_id, question_name, [skill_id]) question_services.add_question(self.user_id, question) question_difficulty = list( constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values()) random_difficulty = random.choice(question_difficulty) question_services.create_new_question_skill_link( self.user_id, question_id, skill_id, random_difficulty) else: raise Exception('Cannot generate dummy skills in production.') def _reload_collection(self, collection_id): """Reloads the collection in dev_mode corresponding to the given collection id. Args: collection_id: str. The collection id. Raises: Exception. Cannot reload a collection in production. """ if constants.DEV_MODE: logging.info( '[ADMIN] %s reloaded collection %s' % (self.user_id, collection_id)) collection_services.load_demo(collection_id) rights_manager.release_ownership_of_collection( user_services.get_system_user(), collection_id) else: raise Exception('Cannot reload a collection in production.') def _generate_dummy_explorations( self, num_dummy_exps_to_generate, num_dummy_exps_to_publish): """Generates and publishes the given number of dummy explorations. Args: num_dummy_exps_to_generate: int. Count of dummy explorations to be generated. num_dummy_exps_to_publish: int. Count of explorations to be published. Raises: Exception. Environment is not DEVMODE. """ if constants.DEV_MODE: logging.info( '[ADMIN] %s generated %s number of dummy explorations' % (self.user_id, num_dummy_exps_to_generate)) possible_titles = ['Hulk Neuroscience', 'Quantum Starks', 'Wonder Anatomy', 'Elvish, language of "Lord of the Rings', 'The Science of Superheroes'] exploration_ids_to_publish = [] for i in range(num_dummy_exps_to_generate): title = random.choice(possible_titles) category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES) new_exploration_id = exp_fetchers.get_new_exploration_id() exploration = exp_domain.Exploration.create_default_exploration( new_exploration_id, title=title, category=category, objective='Dummy Objective') exp_services.save_new_exploration(self.user_id, exploration) if i <= num_dummy_exps_to_publish - 1: exploration_ids_to_publish.append(new_exploration_id) rights_manager.publish_exploration( self.user, new_exploration_id) exp_services.index_explorations_given_ids( exploration_ids_to_publish) else: raise Exception('Cannot generate dummy explorations in production.') class AdminRoleHandler(base.BaseHandler): """Handler for roles tab of admin page. 
Used to view and update roles.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'filter_criterion': { 'schema': { 'type': 'basestring', 'choices': [ feconf.USER_FILTER_CRITERION_ROLE, feconf.USER_FILTER_CRITERION_USERNAME ] } }, 'role': { 'schema': { 'type': 'basestring', 'choices': role_services.VIEWABLE_ROLES }, 'default_value': None }, 'username': { 'schema': { 'type': 'basestring' }, 'default_value': None } }, 'PUT': { 'role': { 'schema': { 'type': 'basestring', 'choices': feconf.ALLOWED_USER_ROLES } }, 'username': { 'schema': { 'type': 'basestring' } } }, 'DELETE': { 'role': { 'schema': { 'type': 'basestring', 'choices': feconf.ALLOWED_USER_ROLES } }, 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def get(self): filter_criterion = self.normalized_request.get( 'filter_criterion') if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE: role = self.normalized_request.get( feconf.USER_FILTER_CRITERION_ROLE) role_services.log_role_query( self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE, role=role) self.render_json({ 'usernames': user_services.get_usernames_by_role(role) }) elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME: username = self.normalized_request.get( feconf.USER_FILTER_CRITERION_USERNAME) user_id = user_services.get_user_id_from_username(username) role_services.log_role_query( self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME, username=username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') user_settings = user_services.get_user_settings(user_id) user_roles = user_settings.roles managed_topic_ids = [] if feconf.ROLE_ID_TOPIC_MANAGER in user_roles: managed_topic_ids = [ rights.id for rights in topic_fetchers.get_topic_rights_with_user(user_id)] user_roles_dict = { 'roles': user_roles, 'managed_topic_ids': managed_topic_ids, 'banned': user_settings.banned } self.render_json(user_roles_dict) @acl_decorators.can_access_admin_page def put(self): username = self.payload.get('username') role = self.payload.get('role') user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: raise self.InvalidInputException( 'User with given username does not exist.') if role == feconf.ROLE_ID_TOPIC_MANAGER: # The Topic manager role assignment is handled via # TopicManagerRoleHandler. 
raise self.InvalidInputException( 'Unsupported role for this handler.') user_services.add_user_role(user_settings.user_id, role) self.render_json({}) @acl_decorators.can_access_admin_page def delete(self): username = self.request.get('username') role = self.request.get('role') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') if role == feconf.ROLE_ID_TOPIC_MANAGER: topic_services.deassign_user_from_all_topics(self.user, user_id) user_services.remove_user_role(user_id, role) self.render_json({}) class TopicManagerRoleHandler(base.BaseHandler): """Handler to assign or deassigning manager to a topic.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { 'schema': { 'type': 'basestring' } }, 'action': { 'schema': { 'type': 'basestring', 'choices': ['assign', 'deassign'] } }, 'topic_id': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def put(self): username = self.normalized_payload.get('username') action = self.normalized_payload.get('action') topic_id = self.normalized_payload.get('topic_id') user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: raise self.InvalidInputException( 'User with given username does not exist.') user_id = user_settings.user_id if action == 'assign': if not feconf.ROLE_ID_TOPIC_MANAGER in user_settings.roles: user_services.add_user_role( user_id, feconf.ROLE_ID_TOPIC_MANAGER) topic_manager = user_services.get_user_actions_info(user_id) topic_services.assign_role( user_services.get_system_user(), topic_manager, topic_domain.ROLE_MANAGER, topic_id) elif action == 'deassign': topic_services.deassign_manager_role_from_topic( user_services.get_system_user(), user_id, topic_id) if not topic_fetchers.get_topic_rights_with_user(user_id): user_services.remove_user_role( user_id, feconf.ROLE_ID_TOPIC_MANAGER) self.render_json({}) class BannedUsersHandler(base.BaseHandler): """Handler to ban and unban users.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { 'schema': { 'type': 'basestring' } } }, 'DELETE': { 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def put(self): username = self.normalized_payload.get('username') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') topic_services.deassign_user_from_all_topics(self.user, user_id) user_services.mark_user_banned(user_id) self.render_json({}) @acl_decorators.can_access_admin_page def delete(self): username = self.normalized_request.get('username') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') user_services.unmark_user_banned(user_id) self.render_json({}) class AdminSuperAdminPrivilegesHandler(base.BaseHandler): """Handler for granting a user super admin privileges.""" PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { 'schema': { 'type': 'basestring' } } }, 'DELETE': { 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def put(self): if 
self.email != feconf.ADMIN_EMAIL_ADDRESS: raise self.UnauthorizedUserException( 'Only the default system admin can manage super admins') username = self.normalized_payload.get('username') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException('No such user exists') auth_services.grant_super_admin_privileges(user_id) self.render_json(self.values) @acl_decorators.can_access_admin_page def delete(self): if self.email != feconf.ADMIN_EMAIL_ADDRESS: raise self.UnauthorizedUserException( 'Only the default system admin can manage super admins') username = self.normalized_request.get('username') user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: raise self.InvalidInputException('No such user exists') if user_settings.email == feconf.ADMIN_EMAIL_ADDRESS: raise self.InvalidInputException( 'Cannot revoke privileges from the default super admin account') auth_services.revoke_super_admin_privileges(user_settings.user_id) self.render_json(self.values) class AdminTopicsCsvFileDownloader(base.BaseHandler): """Retrieves topic similarity data for download.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): topic_similarities = ( recommendations_services.get_topic_similarities_as_csv() ) # Downloadable file accepts only bytes, so we need to encode # topic_similarities to bytes. self.render_downloadable_file( io.BytesIO(topic_similarities.encode('utf-8')), 'topic_similarities.csv', 'text/csv' ) class DataExtractionQueryHandler(base.BaseHandler): """Handler for data extraction query.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'exp_id': { 'schema': { 'type': 'basestring' } }, 'exp_version': { 'schema': { 'type': 'int' } }, 'state_name': { 'schema': { 'type': 'basestring' } }, 'num_answers': { 'schema': { 'type': 'int' } } } } @acl_decorators.can_access_admin_page def get(self): exp_id = self.normalized_request.get('exp_id') exp_version = self.normalized_request.get('exp_version') exploration = exp_fetchers.get_exploration_by_id( exp_id, strict=False, version=exp_version) if exploration is None: raise self.InvalidInputException( 'Entity for exploration with id %s and version %s not found.' % (exp_id, exp_version)) state_name = self.normalized_request.get('state_name') num_answers = self.normalized_request.get('num_answers') if state_name not in exploration.states: raise self.InvalidInputException( 'Exploration \'%s\' does not have \'%s\' state.' 
% (exp_id, state_name)) state_answers = stats_services.get_state_answers( exp_id, exp_version, state_name) extracted_answers = state_answers.get_submitted_answer_dict_list() if num_answers > 0: extracted_answers = extracted_answers[:num_answers] response = { 'data': extracted_answers } self.render_json(response) class SendDummyMailToAdminHandler(base.BaseHandler): """This function handles sending test emails.""" URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'POST': {}} @acl_decorators.can_access_admin_page def post(self): username = self.username if feconf.CAN_SEND_EMAILS: email_manager.send_dummy_mail_to_admin(username) self.render_json({}) else: raise self.InvalidInputException('This app cannot send emails.') class UpdateUsernameHandler(base.BaseHandler): """Handler for renaming usernames.""" URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'old_username': { 'schema': { 'type': 'basestring' } }, 'new_username': { 'schema': { 'type': 'basestring', 'validators': [{ 'id': 'has_length_at_most', 'max_value': constants.MAX_USERNAME_LENGTH }] } } } } @acl_decorators.can_access_admin_page def put(self): old_username = self.normalized_payload.get('old_username') new_username = self.normalized_payload.get('new_username') user_id = user_services.get_user_id_from_username(old_username) if user_id is None: raise self.InvalidInputException( 'Invalid username: %s' % old_username) if user_services.is_username_taken(new_username): raise self.InvalidInputException('Username already taken.') user_services.set_username(user_id, new_username) user_services.log_username_change( self.user_id, old_username, new_username) self.render_json({}) class NumberOfDeletionRequestsHandler(base.BaseHandler): """Handler for getting the number of pending deletion requests via admin page. """ GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): self.render_json({ 'number_of_pending_deletion_models': ( wipeout_service.get_number_of_pending_deletion_requests()) }) class VerifyUserModelsDeletedHandler(base.BaseHandler): """Handler for getting whether any models exist for specific user ID.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'user_id': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def get(self): user_id = self.normalized_request.get('user_id') user_is_deleted = wipeout_service.verify_user_deleted( user_id, include_delete_at_end_models=True) self.render_json({'related_models_exist': not user_is_deleted}) class DeleteUserHandler(base.BaseHandler): """Handler for deleting a user with specific ID.""" URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'DELETE': { 'user_id': { 'schema': { 'type': 'basestring' } }, 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_delete_any_user def delete(self): user_id = self.normalized_request.get('user_id') username = self.normalized_request.get('username') user_id_from_username = ( user_services.get_user_id_from_username(username)) if user_id_from_username is None: raise self.InvalidInputException( 'The username doesn\'t belong to any user' ) if user_id_from_username != user_id: raise self.InvalidInputException( 'The user ID retrieved from the username and ' 'the user ID provided by admin differ.' 
) wipeout_service.pre_delete_user(user_id) self.render_json({'success': True}) class UpdateBlogPostHandler(base.BaseHandler): """Handler for changing author ids and published on date in blog posts.""" URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'blog_post_id': { 'schema': { 'type': 'basestring' } }, 'author_username': { 'schema': { 'type': 'basestring', 'validators': [{ 'id': 'has_length_at_most', 'max_value': constants.MAX_USERNAME_LENGTH }] } }, 'published_on': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def
(self): blog_post_id = self.normalized_payload.get('blog_post_id') author_username = self.normalized_payload.get('author_username') published_on = self.normalized_payload.get('published_on') author_id = user_services.get_user_id_from_username(author_username) if author_id is None: raise self.InvalidInputException( 'Invalid username: %s' % author_username) user_actions = user_services.get_user_actions_info(author_id).actions if role_services.ACTION_ACCESS_BLOG_DASHBOARD not in user_actions: raise self.InvalidInputException( 'User does not have enough rights to be blog post author.') blog_post = ( blog_services.get_blog_post_by_id(blog_post_id, strict=False)) if blog_post is None: raise self.PageNotFoundException( Exception( 'The blog post with the given id or url doesn\'t exist.')) blog_services.update_blog_models_author_and_published_on_date( blog_post_id, author_id, published_on) self.render_json({})
put
find_major.py
#!/usr/bin/env python ''' Input: An array of integers.
''' from __future__ import print_function from collections import Counter given_array = [2,2,3,7,5,7,7,7,4,7,2,7,4,5,6,7,7,8,6,7,7,8,10,12,29,30,19,10,7,7,7,7,7,7,7,7,7] def find_major(array): counted = Counter(array) return counted.most_common(1)[0][0] print(find_major(given_array))
Output: The single integer that occurs most often.
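For reference, here is a minimal standalone sketch of the Counter.most_common() call that find_major relies on; the sample list is made up for illustration:

from collections import Counter

# most_common(1) returns a list holding the single (value, count) pair with
# the highest count, so [0][0] picks out just the value itself.
sample = [2, 7, 7, 3, 7, 5]
counted = Counter(sample)
print(counted.most_common(1))        # [(7, 3)]
print(counted.most_common(1)[0][0])  # 7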
parts.go
package dub import ( "io" "mime/multipart" "os" "path/filepath" ) type Part interface { Build(*multipart.Writer) error } type FieldPart struct { param, value string } func (f *FieldPart) Build(w *multipart.Writer) error { return w.WriteField(f.param, f.value) } func NewFieldPart(param, value string) *FieldPart { return &FieldPart{param: param, value: value} } type FilePart struct { param, filepath string } func (f *FilePart) Build(w *multipart.Writer) (err error) { var file *os.File var part io.Writer if file, err = os.Open(f.filepath); err != nil { return } defer file.Close() if part, err = w.CreateFormFile(f.param, filepath.Base(f.filepath)); err != nil { return } _, err = io.Copy(part, file) return } func
(param, path string) *FilePart { return &FilePart{param: param, filepath: path} } type StreamPart struct { param, filename string data io.Reader } func (s *StreamPart) Build(w *multipart.Writer) (err error) { var part io.Writer if c, ok := s.data.(io.Closer); ok { defer c.Close() } if part, err = w.CreateFormFile(s.param, s.filename); err != nil { return } _, err = io.Copy(part, s.data) return } func NewStreamPart(param, filename string, data io.Reader) *StreamPart { return &StreamPart{param: param, filename: filename, data: data} }
NewFilePart
plugin.py
# -*- encoding: utf-8 -*- from __future__ import division, print_function, unicode_literals import objc from GlyphsApp import * from GlyphsApp.plugins import * if int(Glyphs.versionNumber) == 3: from GlyphsApp import GSMouseOverButton, GSScriptingHandler from AppKit import ( NSButton, NSMiniControlSize, NSShadowlessSquareBezelStyle, NSCircularBezelStyle, NSLayoutConstraint, NSLayoutAttributeHeight, NSLayoutAttributeWidth, NSLayoutAttributeTop, NSLayoutAttributeLeading, NSLayoutAttributeTrailing, NSLayoutAttributeBottom, NSLayoutRelationEqual, NSLineBreakByTruncatingTail, NSLayoutConstraintOrientationHorizontal ) import re import io import os try: scriptsPath = ( GSGlyphsInfo.applicationSupportPath() + "/Scripts" ) # Glyphs 3 except: scriptsPath = ( GSGlyphsInfo.applicationSupportFolder() + "/Scripts" ) # Glyphs 2 button_height = 14 button_gap = 4 defaultsName = "com.ViktorRubenko.FastScripts.button_scripts" notificationName = "com.ViktorRubenko.FastScripts.reload" def newButton(frame, title, action, target): new_button = NSButton.alloc().initWithFrame_(frame) new_button.setBezelStyle_(NSShadowlessSquareBezelStyle) new_button.setControlSize_(NSMiniControlSize) new_button.setTitle_(title) new_button.setAction_(action) new_button.setTarget_(target) new_button.setTranslatesAutoresizingMaskIntoConstraints_(False) constraint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( new_button, NSLayoutAttributeHeight, NSLayoutRelationEqual, None, 0, 1.0, button_height, ) new_button.addConstraint_(constraint) new_button.setContentCompressionResistancePriority_forOrientation_(100, NSLayoutConstraintOrientationHorizontal) return new_button def removeButton(frame, imageName, action, target): if int(Glyphs.versionNumber) == 2: new_button = NSButton.alloc().initWithFrame_(frame) else: new_button = GSMouseOverButton.alloc().initWithFrame_(frame) new_button.setBezelStyle_(NSCircularBezelStyle) new_button.setBordered_(False) new_button.setImage_(NSImage.imageNamed_(imageName)) new_button.setControlSize_(NSMiniControlSize) new_button.setTitle_("") new_button.setAction_(action) new_button.setTarget_(target) new_button.setTranslatesAutoresizingMaskIntoConstraints_(False) constraint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( new_button, NSLayoutAttributeHeight, NSLayoutRelationEqual, None, 0, 1.0, 18, ) new_button.addConstraint_(constraint) constraint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( new_button, NSLayoutAttributeWidth, NSLayoutRelationEqual, None, 0, 1.0, 18, ) new_button.addConstraint_(constraint) return new_button class FastScripts(PalettePlugin): @objc.python_method def settings(self): self.name = Glyphs.localize({"en": "FastScripts"}) self.button_scripts = [] self.dialog = NSView.alloc().initWithFrame_(NSMakeRect(0, 0, 150, 100)) self.dialog.setTranslatesAutoresizingMaskIntoConstraints_(False) self.heightConstraint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( self.dialog, NSLayoutAttributeHeight, NSLayoutRelationEqual, None, 0, 1.0, 0, ) self.dialog.addConstraint_(self.heightConstraint) self.buttonContainer = NSView.alloc().initWithFrame_( NSMakeRect(0, 15, 150, 85) ) self.buttonContainer.setTranslatesAutoresizingMaskIntoConstraints_( False ) self.dialog.addSubview_(self.buttonContainer) constaint = 
NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( self.dialog, NSLayoutAttributeTop, NSLayoutRelationEqual, self.buttonContainer, NSLayoutAttributeTop, 1.0, 0, ) self.dialog.addConstraint_(constaint) constaint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( self.dialog, NSLayoutAttributeLeading, NSLayoutRelationEqual, self.buttonContainer, NSLayoutAttributeLeading, 1.0, 0, ) self.dialog.addConstraint_(constaint) constaint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( self.dialog, NSLayoutAttributeTrailing, NSLayoutRelationEqual, self.buttonContainer, NSLayoutAttributeTrailing, 1.0, 0, ) self.dialog.addConstraint_(constaint) constaint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( self.dialog, NSLayoutAttributeBottom, NSLayoutRelationEqual, self.buttonContainer, NSLayoutAttributeBottom, 1.0, 15, ) self.dialog.addConstraint_(constaint) self.add_button = removeButton( NSMakeRect(8, 0, 18, 18), "NSAddTemplate", self.addScript_, self, ) self.dialog.addSubview_(self.add_button) self.setupButtons_() NSNotificationCenter.defaultCenter().addObserver_selector_name_object_( self, self.setupButtons_, notificationName, None ) def __del__(self): NSNotificationCenter.defaultCenter().removeObserver_name_object_( self, notificationName, None ) def
(self, notification=None): self.load_data() button_start = 0 quantity = len(self.button_scripts) width, height = 160, quantity * (button_height + button_gap) self.heightConstraint.setConstant_(height + 15) if quantity == 0: return self.buttonContainer.setSubviews_([]) for button_script in self.button_scripts: script_button = newButton( NSMakeRect( 8, height - button_start - button_height, width - 26, button_height, ), "_", self.runScriptCallback_, self, ) self.init_button(script_button, button_script) script_button.setLineBreakMode_(NSLineBreakByTruncatingTail) self.buttonContainer.addSubview_(script_button) remove_button = removeButton( NSMakeRect(width - 16, height - button_start - 17, 18, 18), "NSRemoveTemplate", self.removeScriptCallback_, self, ) remove_button.setRepresentedObject_(button_script) self.buttonContainer.addSubview_(remove_button) constaint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( script_button, NSLayoutAttributeLeading, NSLayoutRelationEqual, self.buttonContainer, NSLayoutAttributeLeading, 1.0, 8, ) self.buttonContainer.addConstraint_(constaint) constaint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( script_button, NSLayoutAttributeTrailing, NSLayoutRelationEqual, remove_button, NSLayoutAttributeLeading, 1.0, -2, ) self.buttonContainer.addConstraint_(constaint) constaint = NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_( remove_button, NSLayoutAttributeTrailing, NSLayoutRelationEqual, self.buttonContainer, NSLayoutAttributeTrailing, 1.0, -8, ) self.buttonContainer.addConstraint_(constaint) button_start += button_height + button_gap self.dialog.invalidateIntrinsicContentSize() @objc.python_method def load_data(self): if Glyphs.defaults[defaultsName]: self.button_scripts = list( sp for sp in Glyphs.defaults[defaultsName] if os.path.exists(sp) ) @objc.python_method def save_data(self): Glyphs.defaults[defaultsName] = self.button_scripts @objc.python_method def dataHasChanged(self): self.save_data() NSNotificationCenter.defaultCenter().postNotificationName_object_( notificationName, None ) def runScriptCallback_(self, button): if int(Glyphs.versionNumber) == 3: scriptPath = button.representedObject() scriptHandler = GSScriptingHandler.alloc() scriptHandler.runMacroFile_(scriptPath) else: code = button.representedObject() exec(code, globals()) def removeScriptCallback_(self, button): self.button_scripts.remove(button.representedObject()) self.dataHasChanged() def addScript_(self, sender): try: filepaths = GetOpenFile( path=scriptsPath, filetypes=["py"], allowsMultipleSelection=True, ) except: import traceback print(traceback.format_exc()) if not filepaths or len(filepaths) == 0: return self.button_scripts.extend(filepaths) self.dataHasChanged() @objc.python_method def init_button(self, button, script_path): with io.open(script_path, "r", encoding="utf-8") as f: code = f.read() menu_title = re.findall( r"^#\s*MenuTitle:\s*(.*)", code, flags=re.IGNORECASE ) if not menu_title: return if int(Glyphs.versionNumber) == 2: code = code.splitlines() main_code = False for line_index, line in enumerate(code): if line.startswith("#") and "utf" in line: code[line_index] = "" continue if "__main__" in line: code[line_index] = "" main_code = True continue if main_code: if line.startswith("\t"): rep = "\t" else: rep = " " code[line_index] = line.replace(rep, "", 1) code = "\n".join(code) button.setRepresentedObject_(code) else: 
button.setRepresentedObject_(script_path) menu_title = menu_title[0] button.setTitle_(menu_title)
setupButtons_
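The init_button method above extracts a script's menu title from a leading "# MenuTitle:" comment. A minimal standalone sketch of that extraction follows; the sample header string is invented for illustration:

import re

# Hypothetical one-line script header, purely for illustration.
header = "# MenuTitle: Align Selected Nodes"

# Same pattern the plugin uses: '#', optional whitespace, 'MenuTitle:'
# (case-insensitive), then capture the rest of the line.
titles = re.findall(r"^#\s*MenuTitle:\s*(.*)", header, flags=re.IGNORECASE)
print(titles[0] if titles else "(no MenuTitle found)")  # Align Selected Nodes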
models.rs
use super::schema::Users; #[derive(Queryable)] pub struct User { pub id: i32, pub name: String, pub email: String, pub age: i32, } #[derive(Insertable)] #[table_name="Users"] pub struct
<'a> { pub name: &'a str, pub email: &'a str, pub age: &'a i32, }
NewUser
sortByWeather.py
#clothes by weather
import random


def pickTop(clothesList):
    return random.choice(clothesList[0])


def pickBottoms(clothesList):
    return random.choice(clothesList[1])


#sorts clothes into weather type and returns a list of clothes of the correct weather
def sortWeather(clothesList, weather):
    #eventually combine the two loops
    #change to switch later
    #go through tops: keep only the tops tagged for this weather
    #(building a new filtered list avoids popping from the list while iterating over it)
    clothesList[0] = [top for top in clothesList[0] if top[2] == weather]
    #go through bottoms
    clothesList[1] = [bottom for bottom in clothesList[1] if bottom[2] == weather]
    return clothesList


#Asks user for their weather choice
def requestWeather(clothesList):
weather = input("Is the weather hot or cold?\n") clothesList = sortWeather(clothesList, weather) finalChoice = [] finalChoice.append(pickTop(clothesList)) finalChoice.append(pickBottoms(clothesList)) return finalChoice
container_manager_linux.go
// +build linux /* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cm import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "path" "strconv" "sync" "time" "github.com/blang/semver" "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/kubelet/cadvisor" cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/util" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" "k8s.io/kubernetes/pkg/util/wait" ) const ( // The percent of the machine memory capacity. The value is used to calculate // docker memory resource container's hardlimit to workaround docker memory // leakage issue. Please see kubernetes/issues/9881 for more detail. DockerMemoryLimitThresholdPercent = 70 // The minimum memory limit allocated to docker container: 150Mi MinDockerMemoryLimit = 150 * 1024 * 1024 dockerProcessName = "docker" dockerPidFile = "/var/run/docker.pid" containerdProcessName = "docker-containerd" containerdPidFile = "/run/docker/libcontainerd/docker-containerd.pid" ) var ( // The docker version in which containerd was introduced. containerdVersion = semver.MustParse("1.11.0") ) // A non-user container tracked by the Kubelet. type systemContainer struct { // Absolute name of the container. name string // CPU limit in millicores. cpuMillicores int64 // Function that ensures the state of the container. // m is the cgroup manager for the specified container. ensureStateFunc func(m *fs.Manager) error // Manager for the cgroups of the external container. manager *fs.Manager } func newSystemCgroups(containerName string) *systemContainer { return &systemContainer{ name: containerName, manager: createManager(containerName), } } type containerManagerImpl struct { sync.RWMutex cadvisorInterface cadvisor.Interface mountUtil mount.Interface NodeConfig status Status // External containers being managed. systemContainers []*systemContainer qosContainers QOSContainersInfo periodicTasks []func() // holds all the mounted cgroup subsystems subsystems *CgroupSubsystems nodeInfo *api.Node } type features struct { cpuHardcapping bool } var _ ContainerManager = &containerManagerImpl{} // checks if the required cgroups subsystems are mounted. // As of now, only 'cpu' and 'memory' are required. // cpu quota is a soft requirement. 
func validateSystemRequirements(mountUtil mount.Interface) (features, error) { const ( cgroupMountType = "cgroup" localErr = "system validation failed" ) var ( cpuMountPoint string f features ) mountPoints, err := mountUtil.List() if err != nil { return f, fmt.Errorf("%s - %v", localErr, err) } expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory") for _, mountPoint := range mountPoints { if mountPoint.Type == cgroupMountType { for _, opt := range mountPoint.Opts { if expectedCgroups.Has(opt) { expectedCgroups.Delete(opt) } if opt == "cpu" { cpuMountPoint = mountPoint.Path } } } } if expectedCgroups.Len() > 0 { return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, expectedCgroups.List()) } // Check if cpu quota is available. // CPU cgroup is required and so it expected to be mounted at this point. periodExists, err := util.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us")) if err != nil { glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err) } quotaExists, err := util.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us")) if err != nil { glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err) } if quotaExists && periodExists { f.cpuHardcapping = true } return f, nil } // TODO(vmarmol): Add limits to the system containers. // Takes the absolute name of the specified containers. // Empty container name disables use of the specified container. func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool) (ContainerManager, error) { subsystems, err := GetCgroupSubsystems() if err != nil { return nil, fmt.Errorf("failed to get mounted cgroup subsystems: %v", err) } // Check whether swap is enabled. The Kubelet does not support running with swap enabled. cmd := exec.Command("cat", "/proc/swaps") stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } var buf []string scanner := bufio.NewScanner(stdout) for scanner.Scan() { // Splits on newlines by default buf = append(buf, scanner.Text()) } if err := cmd.Wait(); err != nil { // Clean up return nil, err } // TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled. // Running with swap enabled should be considered an error, but in order to maintain legacy // behavior we have to require an opt-in to this error for a period of time. // If there is more than one line (table headers) in /proc/swaps, swap is enabled and we should error out. if len(buf) > 1 { if failSwapOn { return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! /proc/swaps contained: %v", buf) } glog.Warningf("Running with swap on is not supported, please disable swap! " + "This will be a fatal error by default starting in K8s v1.6! " + "In the meantime, you can opt-in to making this a fatal error by enabling --experimental-fail-swap-on.") } // Check if Cgroup-root actually exists on the node if nodeConfig.CgroupsPerQOS { // this does default to / when enabled, but this tests against regressions. if nodeConfig.CgroupRoot == "" { return nil, fmt.Errorf("invalid configuration: experimental-cgroups-per-qos was specified and cgroup-root was not specified. 
To enable the QoS cgroup hierarchy you need to specify a valid cgroup-root") } // we need to check that the cgroup root actually exists for each subsystem // of note, we always use the cgroupfs driver when performing this check since // the input is provided in that format. // this is important because we do not want any name conversion to occur. cgroupManager := NewCgroupManager(subsystems, "cgroupfs") if !cgroupManager.Exists(CgroupName(nodeConfig.CgroupRoot)) { return nil, fmt.Errorf("invalid configuration: cgroup-root doesn't exist: %v", err) } } return &containerManagerImpl{ cadvisorInterface: cadvisorInterface, mountUtil: mountUtil, NodeConfig: nodeConfig, subsystems: subsystems, }, nil } // NewPodContainerManager is a factory method returns a PodContainerManager object // If qosCgroups are enabled then it returns the general pod container manager // otherwise it returns a no-op manager which essentially does nothing func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager { if cm.NodeConfig.CgroupsPerQOS { return &podContainerManagerImpl{ qosContainersInfo: cm.qosContainers, nodeInfo: cm.nodeInfo, subsystems: cm.subsystems, cgroupManager: NewCgroupManager(cm.subsystems, cm.NodeConfig.CgroupDriver), } } return &podContainerManagerNoop{ cgroupRoot: CgroupName(cm.NodeConfig.CgroupRoot), } } // Create a cgroup container manager. func
(containerName string) *fs.Manager { allowAllDevices := true return &fs.Manager{ Cgroups: &configs.Cgroup{ Parent: "/", Name: containerName, Resources: &configs.Resources{ AllowAllDevices: &allowAllDevices, }, }, } } type KernelTunableBehavior string const ( KernelTunableWarn KernelTunableBehavior = "warn" KernelTunableError KernelTunableBehavior = "error" KernelTunableModify KernelTunableBehavior = "modify" ) // InitQOS creates the top level qos cgroup containers // We create top level QoS containers for only Burstable and Best Effort // and not Guaranteed QoS class. All guaranteed pods are nested under the // RootContainer by default. InitQOS is called only once during kubelet bootstrapping. func InitQOS(cgroupDriver, rootContainer string, subsystems *CgroupSubsystems) (QOSContainersInfo, error) { cm := NewCgroupManager(subsystems, cgroupDriver) // Top level for Qos containers are created only for Burstable // and Best Effort classes qosClasses := [2]qos.QOSClass{qos.Burstable, qos.BestEffort} // Create containers for both qos classes for _, qosClass := range qosClasses { // get the container's absolute name absoluteContainerName := CgroupName(path.Join(rootContainer, string(qosClass))) // containerConfig object stores the cgroup specifications containerConfig := &CgroupConfig{ Name: absoluteContainerName, ResourceParameters: &ResourceConfig{}, } // check if it exists if !cm.Exists(absoluteContainerName) { if err := cm.Create(containerConfig); err != nil { return QOSContainersInfo{}, fmt.Errorf("failed to create top level %v QOS cgroup : %v", qosClass, err) } } } // Store the top level qos container names qosContainersInfo := QOSContainersInfo{ Guaranteed: rootContainer, Burstable: path.Join(rootContainer, string(qos.Burstable)), BestEffort: path.Join(rootContainer, string(qos.BestEffort)), } return qosContainersInfo, nil } // setupKernelTunables validates kernel tunable flags are set as expected // depending upon the specified option, it will either warn, error, or modify the kernel tunable flags func setupKernelTunables(option KernelTunableBehavior) error { desiredState := map[string]int{ utilsysctl.VmOvercommitMemory: utilsysctl.VmOvercommitMemoryAlways, utilsysctl.VmPanicOnOOM: utilsysctl.VmPanicOnOOMInvokeOOMKiller, utilsysctl.KernelPanic: utilsysctl.KernelPanicRebootTimeout, utilsysctl.KernelPanicOnOops: utilsysctl.KernelPanicOnOopsAlways, } sysctl := utilsysctl.New() errList := []error{} for flag, expectedValue := range desiredState { val, err := sysctl.GetSysctl(flag) if err != nil { errList = append(errList, err) continue } if val == expectedValue { continue } switch option { case KernelTunableError: errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)) case KernelTunableWarn: glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) case KernelTunableModify: glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) err = sysctl.SetSysctl(flag, expectedValue) if err != nil { errList = append(errList, err) } } } return utilerrors.NewAggregate(errList) } func (cm *containerManagerImpl) setupNode() error { f, err := validateSystemRequirements(cm.mountUtil) if err != nil { return err } if !f.cpuHardcapping { cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported") } b := KernelTunableModify if cm.GetNodeConfig().ProtectKernelDefaults { b = KernelTunableError } if err := setupKernelTunables(b); err 
!= nil { return err } // Setup top level qos containers only if CgroupsPerQOS flag is specified as true if cm.NodeConfig.CgroupsPerQOS { qosContainersInfo, err := InitQOS(cm.NodeConfig.CgroupDriver, cm.NodeConfig.CgroupRoot, cm.subsystems) if err != nil { return fmt.Errorf("failed to initialise top level QOS containers: %v", err) } cm.qosContainers = qosContainersInfo } systemContainers := []*systemContainer{} if cm.ContainerRuntime == "docker" { dockerVersion := getDockerVersion(cm.cadvisorInterface) if cm.EnableCRI { // If kubelet uses CRI, dockershim will manage the cgroups and oom // score for the docker processes. // In the future, NodeSpec should mandate the cgroup that the // runtime processes need to be in. For now, we still check the // cgroup for docker periodically, so that kubelet can recognize // the cgroup for docker and serve stats for the runtime. // TODO(#27097): Fix this after NodeSpec is clearly defined. cm.periodicTasks = append(cm.periodicTasks, func() { glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration") cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile) if err != nil { glog.Error(err) return } glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont) cm.Lock() defer cm.Unlock() cm.RuntimeCgroupsName = cont }) } else if cm.RuntimeCgroupsName != "" { cont := newSystemCgroups(cm.RuntimeCgroupsName) var capacity = api.ResourceList{} if info, err := cm.cadvisorInterface.MachineInfo(); err == nil { capacity = cadvisor.CapacityFromMachineInfo(info) } memoryLimit := (int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100)) if memoryLimit < MinDockerMemoryLimit { glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit) memoryLimit = MinDockerMemoryLimit } glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit) allowAllDevices := true dockerContainer := &fs.Manager{ Cgroups: &configs.Cgroup{ Parent: "/", Name: cm.RuntimeCgroupsName, Resources: &configs.Resources{ Memory: memoryLimit, MemorySwap: -1, AllowAllDevices: &allowAllDevices, }, }, } cont.ensureStateFunc = func(manager *fs.Manager) error { return EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, dockerContainer) } systemContainers = append(systemContainers, cont) } else { cm.periodicTasks = append(cm.periodicTasks, func() { glog.V(10).Infof("Adding docker daemon periodic tasks") if err := EnsureDockerInContainer(dockerVersion, qos.DockerOOMScoreAdj, nil); err != nil { glog.Error(err) return } cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile) if err != nil { glog.Error(err) return } glog.V(2).Infof("Discovered runtime cgroups name: %s", cont) cm.Lock() defer cm.Unlock() cm.RuntimeCgroupsName = cont }) } } if cm.SystemCgroupsName != "" { if cm.SystemCgroupsName == "/" { return fmt.Errorf("system container cannot be root (\"/\")") } cont := newSystemCgroups(cm.SystemCgroupsName) cont.ensureStateFunc = func(manager *fs.Manager) error { return ensureSystemCgroups("/", manager) } systemContainers = append(systemContainers, cont) } if cm.KubeletCgroupsName != "" { cont := newSystemCgroups(cm.KubeletCgroupsName) allowAllDevices := true manager := fs.Manager{ Cgroups: &configs.Cgroup{ Parent: "/", Name: cm.KubeletCgroupsName, Resources: &configs.Resources{ AllowAllDevices: &allowAllDevices, }, }, } cont.ensureStateFunc = func(_ *fs.Manager) error { return 
ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, &manager) } systemContainers = append(systemContainers, cont) } else { cm.periodicTasks = append(cm.periodicTasks, func() { if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil { glog.Error(err) return } cont, err := getContainer(os.Getpid()) if err != nil { glog.Errorf("failed to find cgroups of kubelet - %v", err) return } cm.Lock() defer cm.Unlock() cm.KubeletCgroupsName = cont }) } cm.systemContainers = systemContainers return nil } func getContainerNameForProcess(name, pidFile string) (string, error) { pids, err := getPidsForProcess(name, pidFile) if err != nil { return "", fmt.Errorf("failed to detect process id for %q - %v", name, err) } if len(pids) == 0 { return "", nil } cont, err := getContainer(pids[0]) if err != nil { return "", err } return cont, nil } func (cm *containerManagerImpl) GetNodeConfig() NodeConfig { cm.RLock() defer cm.RUnlock() return cm.NodeConfig } func (cm *containerManagerImpl) GetMountedSubsystems() *CgroupSubsystems { return cm.subsystems } func (cm *containerManagerImpl) GetQOSContainersInfo() QOSContainersInfo { return cm.qosContainers } func (cm *containerManagerImpl) Status() Status { cm.RLock() defer cm.RUnlock() return cm.status } func (cm *containerManagerImpl) Start(node *api.Node) error { // cache the node Info including resource capacity and // allocatable of the node cm.nodeInfo = node // Setup the node if err := cm.setupNode(); err != nil { return err } // Don't run a background thread if there are no ensureStateFuncs. hasEnsureStateFuncs := false for _, cont := range cm.systemContainers { if cont.ensureStateFunc != nil { hasEnsureStateFuncs = true break } } if hasEnsureStateFuncs { // Run ensure state functions every minute. go wait.Until(func() { for _, cont := range cm.systemContainers { if cont.ensureStateFunc != nil { if err := cont.ensureStateFunc(cont.manager); err != nil { glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err) } } } }, time.Minute, wait.NeverStop) } if len(cm.periodicTasks) > 0 { go wait.Until(func() { for _, task := range cm.periodicTasks { if task != nil { task() } } }, 5*time.Minute, wait.NeverStop) } return nil } func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList { cpuLimit := int64(0) // Sum up resources of all external containers. for _, cont := range cm.systemContainers { cpuLimit += cont.cpuMillicores } return api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity( cpuLimit, resource.DecimalSI), } } func isProcessRunningInHost(pid int) (bool, error) { // Get init pid namespace. 
initPidNs, err := os.Readlink("/proc/1/ns/pid") if err != nil { return false, fmt.Errorf("failed to find pid namespace of init process") } glog.V(10).Infof("init pid ns is %q", initPidNs) processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid)) if err != nil { return false, fmt.Errorf("failed to find pid namespace of process %q", pid) } glog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs) return initPidNs == processPidNs, nil } func getPidFromPidFile(pidFile string) (int, error) { file, err := os.Open(pidFile) if err != nil { return 0, fmt.Errorf("error opening pid file %s: %v", pidFile, err) } defer file.Close() data, err := ioutil.ReadAll(file) if err != nil { return 0, fmt.Errorf("error reading pid file %s: %v", pidFile, err) } pid, err := strconv.Atoi(string(data)) if err != nil { return 0, fmt.Errorf("error parsing %s as a number: %v", string(data), err) } return pid, nil } func getPidsForProcess(name, pidFile string) ([]int, error) { if len(pidFile) > 0 { if pid, err := getPidFromPidFile(pidFile); err == nil { return []int{pid}, nil } else { // log the error and fall back to pidof runtime.HandleError(err) } } return procfs.PidOf(name) } // Ensures that the Docker daemon is in the desired container. // Temporarily export the function to be used by dockershim. // TODO(yujuhong): Move this function to dockershim once kubelet migrates to // dockershim as the default. func EnsureDockerInContainer(dockerVersion semver.Version, oomScoreAdj int, manager *fs.Manager) error { type process struct{ name, file string } dockerProcs := []process{{dockerProcessName, dockerPidFile}} if dockerVersion.GTE(containerdVersion) { dockerProcs = append(dockerProcs, process{containerdProcessName, containerdPidFile}) } var errs []error for _, proc := range dockerProcs { pids, err := getPidsForProcess(proc.name, proc.file) if err != nil { errs = append(errs, fmt.Errorf("failed to get pids for %q: %v", proc.name, err)) continue } // Move if the pid is not already in the desired container. for _, pid := range pids { if err := ensureProcessInContainerWithOOMScore(pid, oomScoreAdj, manager); err != nil { errs = append(errs, fmt.Errorf("errors moving %q pid: %v", proc.name, err)) } } } return utilerrors.NewAggregate(errs) } func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs.Manager) error { if runningInHost, err := isProcessRunningInHost(pid); err != nil { // Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context. return err } else if !runningInHost { // Process is running inside a container. Don't touch that. 
glog.V(2).Infof("pid %d is not running in the host namespaces", pid) return nil } var errs []error if manager != nil { cont, err := getContainer(pid) if err != nil { errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err)) } if cont != manager.Cgroups.Name { err = manager.Apply(pid) if err != nil { errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q: %v", pid, cont, manager.Cgroups.Name, err)) } } } // Also apply oom-score-adj to processes oomAdjuster := oom.NewOOMAdjuster() glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid) if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil { glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err) errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err)) } return utilerrors.NewAggregate(errs) } // getContainer returns the cgroup associated with the specified pid. // It enforces a unified hierarchy for memory and cpu cgroups. // On systemd environments, it uses the name=systemd cgroup for the specified pid. func getContainer(pid int) (string, error) { cgs, err := cgroups.ParseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid)) if err != nil { return "", err } cpu, found := cgs["cpu"] if !found { return "", cgroups.NewNotFoundError("cpu") } memory, found := cgs["memory"] if !found { return "", cgroups.NewNotFoundError("memory") } // since we use this container for accounting, we need to ensure its a unified hierarchy. if cpu != memory { return "", fmt.Errorf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s", cpu, memory) } // on systemd, every pid is in a unified cgroup hierarchy (name=systemd as seen in systemd-cgls) // cpu and memory accounting is off by default, users may choose to enable it per unit or globally. // users could enable CPU and memory accounting globally via /etc/systemd/system.conf (DefaultCPUAccounting=true DefaultMemoryAccounting=true). // users could also enable CPU and memory accounting per unit via CPUAccounting=true and MemoryAccounting=true // we only warn if accounting is not enabled for CPU or memory so as to not break local development flows where kubelet is launched in a terminal. // for example, the cgroup for the user session will be something like /user.slice/user-X.slice/session-X.scope, but the cpu and memory // cgroup will be the closest ancestor where accounting is performed (most likely /) on systems that launch docker containers. // as a result, on those systems, you will not get cpu or memory accounting statistics for kubelet. // in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally). if systemd, found := cgs["name=systemd"]; found { if systemd != cpu { glog.Warningf("CPUAccounting not enabled for pid: %d", pid) } if systemd != memory { glog.Warningf("MemoryAccounting not enabled for pid: %d", pid) } return systemd, nil } return cpu, nil } // Ensures the system container is created and all non-kernel threads and process 1 // without a container are moved to it. // // The reason of leaving kernel threads at root cgroup is that we don't want to tie the // execution of these threads with to-be defined /system quota and create priority inversions. // func ensureSystemCgroups(rootCgroupPath string, manager *fs.Manager) error { // Move non-kernel PIDs to the system container. 
attemptsRemaining := 10 var errs []error for attemptsRemaining >= 0 { // Only keep errors on latest attempt. errs = []error{} attemptsRemaining-- allPids, err := cmutil.GetPids(rootCgroupPath) if err != nil { errs = append(errs, fmt.Errorf("failed to list PIDs for root: %v", err)) continue } // Remove kernel pids and other protected PIDs (pid 1, PIDs already in system & kubelet containers) pids := make([]int, 0, len(allPids)) for _, pid := range allPids { if pid == 1 || isKernelPid(pid) { continue } pids = append(pids, pid) } glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids)) // Check if we have moved all the non-kernel PIDs. if len(pids) == 0 { break } glog.Infof("Moving non-kernel processes: %v", pids) for _, pid := range pids { err := manager.Apply(pid) if err != nil { errs = append(errs, fmt.Errorf("failed to move PID %d into the system container %q: %v", pid, manager.Cgroups.Name, err)) } } } if attemptsRemaining < 0 { errs = append(errs, fmt.Errorf("ran out of attempts to create system containers %q", manager.Cgroups.Name)) } return utilerrors.NewAggregate(errs) } // Determines whether the specified PID is a kernel PID. func isKernelPid(pid int) bool { // Kernel threads have no associated executable. _, err := os.Readlink(fmt.Sprintf("/proc/%d/exe", pid)) return err != nil } // Helper for getting the docker version. func getDockerVersion(cadvisor cadvisor.Interface) semver.Version { var fallback semver.Version // Fallback to zero-value by default. versions, err := cadvisor.VersionInfo() if err != nil { glog.Errorf("Error requesting cAdvisor VersionInfo: %v", err) return fallback } dockerVersion, err := semver.Parse(versions.DockerVersion) if err != nil { glog.Errorf("Error parsing docker version %q: %v", versions.DockerVersion, err) return fallback } return dockerVersion }
createManager
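The isKernelPid helper above leans on the fact that kernel threads have no associated executable, so /proc/<pid>/exe cannot be resolved. Below is a quick Python sketch of the same check, offered only as an illustration of the technique (Linux-specific, and a readlink failure can also mean insufficient permissions, the same caveat the Go version carries):

import os

def looks_like_kernel_thread(pid):
    # Kernel threads have no backing executable, so resolving the
    # /proc/<pid>/exe symlink fails; the OSError is the signal.
    try:
        os.readlink("/proc/%d/exe" % pid)
        return False
    except OSError:
        return True

# Example (Linux only): PID 2 is conventionally kthreadd, a kernel thread.
# print(looks_like_kernel_thread(2))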
dashboard_id_test.go
package validate // NOTE: this file is generated via 'go:generate' - manual changes will be overwritten import "testing" func TestDashboardID(t *testing.T)
{ cases := []struct { Input string Valid bool }{ { // empty Input: "", Valid: false, }, { // missing SubscriptionId Input: "/", Valid: false, }, { // missing value for SubscriptionId Input: "/subscriptions/", Valid: false, }, { // missing ResourceGroup Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", Valid: false, }, { // missing value for ResourceGroup Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", Valid: false, }, { // missing Name Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Portal/", Valid: false, }, { // missing value for Name Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Portal/dashboards/", Valid: false, }, { // valid Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/group1/providers/Microsoft.Portal/dashboards/dashboard1", Valid: true, }, { // upper-cased Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/GROUP1/PROVIDERS/MICROSOFT.PORTAL/DASHBOARDS/DASHBOARD1", Valid: false, }, } for _, tc := range cases { t.Logf("[DEBUG] Testing Value %s", tc.Input) _, errors := DashboardID(tc.Input, "test") valid := len(errors) == 0 if tc.Valid != valid { t.Fatalf("Expected %t but got %t", tc.Valid, valid) } } }
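The table of cases above pins down the resource-ID shape the validator must accept and reject. Here is a quick Python sketch of an equivalent check; the regex is an assumption inferred from those cases, not the actual DashboardID implementation under test:

import re

# Assumed pattern inferred from the test cases; the real validator may be stricter.
DASHBOARD_ID = re.compile(
    r"^/subscriptions/[^/]+"
    r"/resourceGroups/[^/]+"
    r"/providers/Microsoft\.Portal/dashboards/[^/]+$"
)

valid = ("/subscriptions/12345678-1234-9876-4563-123456789012"
         "/resourceGroups/group1/providers/Microsoft.Portal/dashboards/dashboard1")
print(bool(DASHBOARD_ID.match(valid)))          # True
print(bool(DASHBOARD_ID.match(valid.upper())))  # False: segment casing matters
print(bool(DASHBOARD_ID.match("/")))            # False: missing everything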
local_node.go
// Copyright 2019 Authors of Cilium // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package helpers import ( "context" "fmt" "io" "os" "os/exec" "strings" "sync" "time" ginkgoext "github.com/cilium/cilium/test/ginkgo-ext" "github.com/sirupsen/logrus" ) var ( //LocalExecutorLogs is a buffer where all commands sent over ssh are saved. LocalExecutorLogs = ginkgoext.NewWriter(new(Buffer)) ) // Executor executes commands type Executor interface { CloseSSHClient() Exec(cmd string, options ...ExecOptions) *CmdRes ExecContext(ctx context.Context, cmd string, options ...ExecOptions) *CmdRes ExecContextShort(ctx context.Context, cmd string, options ...ExecOptions) *CmdRes ExecInBackground(ctx context.Context, cmd string, options ...ExecOptions) *CmdRes ExecMiddle(cmd string, options ...ExecOptions) *CmdRes ExecShort(cmd string, options ...ExecOptions) *CmdRes ExecWithSudo(cmd string, options ...ExecOptions) *CmdRes ExecuteContext(ctx context.Context, cmd string, stdout io.Writer, stderr io.Writer) error String() string BasePath() string setBasePath() Logger() *logrus.Entry } // LocalExecutor executes commands, implements Executor interface type LocalExecutor struct { env []string logger *logrus.Entry basePath string } // CreateLocalExecutor returns a local executor func
(env []string) *LocalExecutor { return &LocalExecutor{env: env} } // Logger returns logger for executor func (s *LocalExecutor) Logger() *logrus.Entry { return s.logger } func (s *LocalExecutor) String() string { return fmt.Sprintf("environment: %s", s.env) } // CloseSSHClient is a no-op func (s *LocalExecutor) CloseSSHClient() { return } func (s *LocalExecutor) setBasePath() { wd, err := os.Getwd() if err != nil { ginkgoext.Fail(fmt.Sprintf("Cannot set base path: %s", err.Error()), 1) return } s.basePath = wd } func (s LocalExecutor) getLocalCmd(ctx context.Context, command string, stdout io.Writer, stderr io.Writer) *exec.Cmd { com := "bash" args := []string{"-c", command} cmd := exec.CommandContext(ctx, com, args...) if stdout == nil { stdout = os.Stdout } if stderr == nil { stderr = os.Stderr } fmt.Fprintln(LocalExecutorLogs, cmd) cmd.Stdin = os.Stdin cmd.Stdout = stdout cmd.Stderr = stderr cmd.Env = s.env return cmd } // ExecuteContext executes the given `cmd` and writes the cmd's stdout and // stderr into the given io.Writers. // Returns an error if context Deadline() is reached or if there was an error // executing the command. func (s *LocalExecutor) ExecuteContext(ctx context.Context, command string, stdout io.Writer, stderr io.Writer) error { cmd := s.getLocalCmd(ctx, command, stdout, stderr) return cmd.Run() } // ExecWithSudo returns the result of executing the provided cmd via SSH using // sudo. func (s *LocalExecutor) ExecWithSudo(cmd string, options ...ExecOptions) *CmdRes { command := fmt.Sprintf("sudo %s", cmd) return s.Exec(command, options...) } // Exec returns the results of executing the provided cmd via SSH. func (s *LocalExecutor) Exec(cmd string, options ...ExecOptions) *CmdRes { // Bound all command executions to be at most the timeout used by the CI // so that commands do not block forever. ctx, cancel := context.WithTimeout(context.Background(), HelperTimeout) defer cancel() return s.ExecContext(ctx, cmd, options...) } // ExecShort runs command with the provided options. It will take up to // ShortCommandTimeout seconds to run the command before it times out. func (s *LocalExecutor) ExecShort(cmd string, options ...ExecOptions) *CmdRes { ctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout) defer cancel() return s.ExecContext(ctx, cmd, options...) } // ExecMiddle runs command with the provided options. It will take up to // MidCommandTimeout seconds to run the command before it times out. func (s *LocalExecutor) ExecMiddle(cmd string, options ...ExecOptions) *CmdRes { ctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout) defer cancel() return s.ExecContext(ctx, cmd, options...) } // ExecContextShort is a wrapper around ExecContext which creates a child // context with a timeout of ShortCommandTimeout. func (s *LocalExecutor) ExecContextShort(ctx context.Context, cmd string, options ...ExecOptions) *CmdRes { shortCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout) defer cancel() return s.ExecContext(shortCtx, cmd, options...) } // ExecContext returns the results of executing the provided cmd via SSH. 
func (s *LocalExecutor) ExecContext(ctx context.Context, cmd string, options ...ExecOptions) *CmdRes { var ops ExecOptions if len(options) > 0 { ops = options[0] } log.Debugf("running command: %s", cmd) stdout := new(Buffer) stderr := new(Buffer) start := time.Now() err := s.ExecuteContext(ctx, cmd, stdout, stderr) res := CmdRes{ cmd: cmd, stdout: stdout, stderr: stderr, success: true, // this may be toggled when err != nil below duration: time.Since(start), } if err != nil { if exitError, ok := err.(*exec.ExitError); ok { res.exitcode = exitError.ExitCode() } res.success = false log.WithError(err).Errorf("Error executing command '%s'", cmd) res.err = err } res.SendToLog(ops.SkipLog) return &res } // ExecInBackground returns the results of running cmd in the specified // context. The command will be executed in the background until context.Context // is canceled or the command has finish its execution. func (s *LocalExecutor) ExecInBackground(ctx context.Context, cmd string, options ...ExecOptions) *CmdRes { if ctx == nil { panic("no context provided") } var ops ExecOptions if len(options) > 0 { ops = options[0] } fmt.Fprintln(LocalExecutorLogs, cmd) stdout := new(Buffer) stderr := new(Buffer) command := s.getLocalCmd(ctx, cmd, stdout, stderr) var wg sync.WaitGroup res := &CmdRes{ cmd: cmd, stdout: stdout, stderr: stderr, success: true, wg: &wg, } res.wg.Add(1) go func(cmd *exec.Cmd, res *CmdRes) { defer res.wg.Done() start := time.Now() err := cmd.Run() res.duration = time.Since(start) if err != nil { if exitError, ok := err.(*exec.ExitError); ok { res.exitcode = exitError.ExitCode() } res.success = false log.WithError(err).Errorf("Error executing command '%s'", strings.Join(append([]string{cmd.Path}, cmd.Args...), " ")) res.err = err } res.SendToLog(ops.SkipLog) }(command, res) return res } func (s *LocalExecutor) BasePath() string { return s.basePath }
CreateLocalExecutor
lib.rs
// BEGIN - Embark standard lints v5 for Rust 1.55+ // do not change or add/remove here, but one can add exceptions after this section // for more info see: <https://github.com/EmbarkStudios/rust-ecosystem/issues/59> #![deny(unsafe_code)] #![warn( clippy::all, clippy::await_holding_lock, clippy::char_lit_as_u8, clippy::checked_conversions, clippy::dbg_macro, clippy::debug_assert_with_mut_call, clippy::doc_markdown, clippy::empty_enum, clippy::enum_glob_use, clippy::exit, clippy::expl_impl_clone_on_copy, clippy::explicit_deref_methods, clippy::explicit_into_iter_loop, clippy::fallible_impl_from, clippy::filter_map_next, clippy::flat_map_option, clippy::float_cmp_const, clippy::fn_params_excessive_bools, clippy::from_iter_instead_of_collect, clippy::if_let_mutex, clippy::implicit_clone, clippy::imprecise_flops, clippy::inefficient_to_string, clippy::invalid_upcast_comparisons, clippy::large_digit_groups, clippy::large_stack_arrays, clippy::large_types_passed_by_value, clippy::let_unit_value, clippy::linkedlist, clippy::lossy_float_literal, clippy::macro_use_imports, clippy::manual_ok_or, clippy::map_err_ignore, clippy::map_flatten, clippy::map_unwrap_or, clippy::match_on_vec_items, clippy::match_same_arms, clippy::match_wild_err_arm, clippy::match_wildcard_for_single_variants, clippy::mem_forget, clippy::mismatched_target_os, clippy::missing_enforced_import_renames, clippy::mut_mut, clippy::mutex_integer, clippy::needless_borrow, clippy::needless_continue, clippy::needless_for_each, clippy::option_option, clippy::path_buf_push_overwrite, clippy::ptr_as_ptr, clippy::rc_mutex, clippy::ref_option_ref, clippy::rest_pat_in_fully_bound_structs, clippy::same_functions_in_if_condition, clippy::semicolon_if_nothing_returned, clippy::single_match_else, clippy::string_add_assign, clippy::string_add, clippy::string_lit_as_bytes, clippy::string_to_string, clippy::todo, clippy::trait_duplication_in_bounds, clippy::unimplemented, clippy::unnested_or_patterns, clippy::unused_self, clippy::useless_transmute, clippy::verbose_file_reads, clippy::zero_sized_map_values, future_incompatible, nonstandard_style, rust_2018_idioms )] // END - Embark standard lints v0.5 for Rust 1.55+ // crate-specific exceptions: #![allow(clippy::single_match_else)] use anyhow::Error; use std::{ convert::From, fmt, path::{Path, PathBuf}, sync::Arc, }; use tracing::warn; pub use url::Url; pub mod backends; pub mod cargo; mod fetch; pub(crate) mod git; pub mod mirror; pub mod sync; pub mod util; pub type HttpClient = reqwest::blocking::Client; pub use cargo::{read_cargo_config, Registry, Source}; #[derive(Eq, Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct Krate { pub name: String, pub version: String, // We just treat versions as opaque strings pub source: Source, } // impl tracing::Value for Krate { // fn record(&self, key: &tracing::field::Field, visitor: &mut dyn tracing::field::Visit) { // visitor.record_debug(key, self) // } // } impl Ord for Krate { fn cmp(&self, b: &Self) -> std::cmp::Ordering { self.source.cmp(&b.source) } } impl PartialOrd for Krate { fn partial_cmp(&self, b: &Self) -> Option<std::cmp::Ordering> { self.source.partial_cmp(&b.source) } } impl PartialEq for Krate { fn eq(&self, b: &Self) -> bool { self.source.eq(&b.source) } } impl PartialEq<Registry> for Krate { fn eq(&self, b: &Registry) -> bool { match &self.source { Source::Git { .. } => false, Source::Registry { registry, .. } => b.eq(registry), } } } impl Krate { pub fn cloud_id(&self) -> CloudId<'_> { CloudId { inner: self } } pub fn
(&self) -> LocalId<'_> { LocalId { inner: self } } } impl fmt::Display for Krate { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let typ = match &self.source { Source::Git { .. } => "git", Source::Registry { .. } => "registry", }; write!(f, "{}-{}({})", self.name, self.version, typ) } } pub struct LocalId<'a> { inner: &'a Krate, } impl<'a> fmt::Display for LocalId<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &self.inner.source { Source::Git { ident, .. } => write!(f, "{}", &ident), Source::Registry { .. } => { write!(f, "{}-{}.crate", self.inner.name, self.inner.version) } } } } pub struct CloudId<'a> { inner: &'a Krate, } impl<'a> fmt::Display for CloudId<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &self.inner.source { Source::Git { ident, rev, .. } => write!(f, "{}-{}", ident, rev), Source::Registry { chksum, .. } => write!(f, "{}", chksum), } } } #[allow(dead_code)] pub struct GcsLocation<'a> { bucket: &'a str, prefix: &'a str, } #[allow(dead_code)] pub struct S3Location<'a> { pub bucket: &'a str, pub region: &'a str, pub host: &'a str, pub prefix: &'a str, } pub struct FilesystemLocation<'a> { pub path: &'a Path, } pub struct BlobLocation<'a> { pub prefix: &'a str, pub container: &'a str, } pub enum CloudLocation<'a> { Gcs(GcsLocation<'a>), S3(S3Location<'a>), Fs(FilesystemLocation<'a>), Blob(BlobLocation<'a>), } pub type Storage = Arc<dyn Backend + Sync + Send>; pub struct Ctx { pub client: HttpClient, pub backend: Storage, pub krates: Vec<Krate>, pub registries: Vec<Arc<Registry>>, pub root_dir: PathBuf, } impl Ctx { pub fn new( root_dir: Option<PathBuf>, backend: Storage, krates: Vec<Krate>, registries: Vec<Arc<Registry>>, ) -> Result<Self, Error> { Ok(Self { client: HttpClient::builder().build()?, backend, krates, registries, root_dir: root_dir.unwrap_or_else(|| PathBuf::from(".")), }) } /// Create the registry and git directories as they are the root of multiple other ones pub fn prep_sync_dirs(&self) -> Result<(), Error> { std::fs::create_dir_all(self.root_dir.join("registry"))?; std::fs::create_dir_all(self.root_dir.join("git"))?; Ok(()) } pub fn registry_sets(&self) -> Vec<mirror::RegistrySet> { self.registries .iter() .map(|registry| { // Gather the names of all of the crates sourced in the registry so we // can add .cache entries let krates = self .krates .iter() .filter_map(|krate| { if krate == registry.as_ref() { Some(krate.name.clone()) } else { None } }) .collect(); mirror::RegistrySet { registry: registry.clone(), krates, } }) .collect() } } impl fmt::Debug for Ctx { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "krates: {}", self.krates.len()) } } pub type Timestamp = time::OffsetDateTime; pub trait Backend: fmt::Debug { fn fetch(&self, krate: &Krate) -> Result<bytes::Bytes, Error>; fn upload(&self, source: bytes::Bytes, krate: &Krate) -> Result<usize, Error>; fn list(&self) -> Result<Vec<String>, Error>; fn updated(&self, krate: &Krate) -> Result<Option<Timestamp>, Error>; fn set_prefix(&mut self, prefix: &str); }
local_id
SingleProduct.js
import React from 'react'; import { connect } from 'react-redux'; import ReviewForm from './ReviewForm'; import ListReviews from './ListReviews'; import { getSingleProductThunk } from '../store/singleProduct'; import { Button, Input, Label, Modal, Header, Image, Container, Segment, Grid } from 'semantic-ui-react'; import { addToCartThunk, setCartIdThunk } from '../store/cart'; import { getAllProductsThunk } from '../store/allProducts'; import { NavLink } from 'react-router-dom'; class
extends React.Component { constructor(props) { super(props); this.state = { quantity: 1 }; this.handleChange = this.handleChange.bind(this); } async componentDidMount() { this.props.getProduct(this.props.match.params.id); await this.props.setCartId(this.props.user.id); } handleChange(evt) { this.setState({ [evt.target.name]: evt.target.value }); } async addProduct(product) { await this.props.quickAdd({ quantity: this.state.quantity, unitPrice: product.price, productId: product.id, orderId: this.props.cart.id, productName: product.name, imageUrl: product.imageUrl }); } render() { const { product } = this.props; const oldReviews = product.reviews; const newReviews = this.props.reviews; const categories = product.categories; return ( <Container> <Segment> <Grid columns="two" stackable divided> <Grid.Column> <Image src={product.imageUrl} /> </Grid.Column> <Grid.Column> <h1>{product.name}</h1> {this.props.user.isAdmin && ( <NavLink to={`/products/${product.id}/edit`}> <Button color="blue">EDIT PRODUCT</Button> </NavLink> )} <h3>{`Price: $${product.price}`}</h3> <h3>{product.description}</h3> <Input onChange={this.handleChange} name="quantity" type="number" value={this.state.quantity} placeholder="Enter quantity" min="0" step="1" />{' '} <Modal trigger={ <Button className="addToCart" color="teal" onClick={() => this.addProduct(product)} type="button" > Add To Cart </Button> } basic size="small" > <Header icon="shopping cart" content="Added to your cart!!" /> <Modal.Content> <p>very cool.</p> </Modal.Content> </Modal> <br /> <br /> {categories && categories.map(category => ( <div key={category.id}> This product belongs to these categories: <br /> <br /> <NavLink to={`/products?categoryTag=${category.Name}`} key={category.id} > <Label color="teal" tag size="large"> {category.name} </Label> </NavLink> </div> ))} </Grid.Column> </Grid> </Segment> <Segment> <ReviewForm className="reviewForm" productId={product.id} userName={`${this.props.user.firstName} ${ this.props.user.lastName }`} imageUrl={this.props.user.imageUrl} /> <ListReviews oldReviews={oldReviews} newReviews={newReviews} /> </Segment> </Container> ); } } const mapStateToProps = state => ({ product: state.singleProductReducer, user: state.user, cart: state.cartReducer, reviews: state.productReviewsReducer }); const mapDispatchToProps = dispatch => ({ getProduct: productId => dispatch(getSingleProductThunk(productId)), quickAdd: item => dispatch(addToCartThunk(item)), setCartId: userId => dispatch(setCartIdThunk(userId || '')), getCategoryProduct: categoryTag => dispatch(getAllProductsThunk(categoryTag)) }); export default connect(mapStateToProps, mapDispatchToProps)(SingleProduct);
SingleProduct
types.rs
//! Types for Bittrex API. use chrono::NaiveDateTime; use getset::{CopyGetters, Getters}; use serde::Deserialize; /// Market information structure. #[derive(Debug, Clone, Deserialize, CopyGetters, Getters)] #[serde(rename_all = "PascalCase")] pub struct MarketInfo { /// Currency of the market. #[get = "pub"] market_currency: String, /// Base currency of the market. #[get = "pub"] base_currency: String, /// Long name of the currency of the market. #[get = "pub"] market_currency_long: String, /// Long name of the base currency of the market. #[get = "pub"] base_currency_long: String, /// Minimum trade size. #[get_copy = "pub"] min_trade_size: f64, /// Market name. #[get = "pub"] market_name: String, /// Wether the market is active or not. #[get_copy = "pub"] is_active: bool, /// Creation date and time of the market. #[get_copy = "pub"] created: NaiveDateTime, /// Notice about the market. #[get = "pub"] notice: Option<String>, /// Wether the market is sponsored. #[get_copy = "pub"] is_sponsored: Option<bool>, /// The logo URL for the market. #[get = "pub"] logo_url: Option<String>, } /// Currency information structure. #[derive(Debug, Clone, Deserialize, Getters, CopyGetters)] #[serde(rename_all = "PascalCase")] pub struct CurrencyInfo { /// 3-letter currency code. #[get = "pub"] currency: String, /// Long currency name. #[get = "pub"] currency_long: String, /// Minimum number of confirmations to credit the account. #[get_copy = "pub"] min_confirmation: u32, /// Transaction fee. #[get_copy = "pub"] tx_fee: f32, /// Wether the currency is active or not. #[get_copy = "pub"] is_active: bool, /// Coin type string constant. #[get = "pub"] coin_type: String, /// Optional base address for the coin at Bittrex. #[get = "pub"] base_address: Option<String>, /// Optional notice about the currency. #[get = "pub"] notice: Option<String>, } /// Ticker information structure. #[derive(Debug, Copy, Clone, Deserialize, CopyGetters)] #[serde(rename_all = "PascalCase")] pub struct TickerInfo { /// Current bidding/buying price for the market. #[get_copy = "pub"] bid: f32, /// Current asking/selling price for the market. #[get_copy = "pub"] ask: f32, /// Last transaction price. #[get_copy = "pub"] last: f32, } /// Market summary structure #[derive(Debug, Clone, Deserialize, CopyGetters, Getters)] #[serde(rename_all = "PascalCase")] pub struct MarketSummary { /// Name of the market. #[get = "pub"] market_name: String, /// Highest transaction value in the last 24 hours for the market. #[get_copy = "pub"] high: Option<f32>, /// Lowest transaction value in the last 24 hours for the market. #[get_copy = "pub"] low: Option<f32>, /// Last transaction price. #[get_copy = "pub"] last: Option<f32>, /// Current bidding/buying price. #[get_copy = "pub"] bid: Option<f32>, /// Current asking/selling price. #[get_copy = "pub"] ask: Option<f32>, /// Volume of the market. #[get_copy = "pub"] volume: Option<f32>, /// Base volume of the market. #[get_copy = "pub"] base_volume: Option<f32>, /// Timestamp of the information. #[get_copy = "pub"] time_stamp: NaiveDateTime, /// Number of open buying orders. #[get_copy = "pub"] open_buy_orders: Option<u32>, /// Number of open selling orders. #[get_copy = "pub"] open_sell_orders: Option<u32>, /// Tthe price of the previous day. #[get_copy = "pub"] prev_day: Option<f32>, /// Market creation time. #[get_copy = "pub"] created: NaiveDateTime, /// Name to display for the market. #[get = "pub"] display_market_name: Option<String>, } /// Structure representing an order book. 
#[derive(Debug, Clone, Deserialize)] pub struct OrderBook { /// List of buying orders. buy: Box<[Order]>, /// List of selling orders. sell: Box<[Order]>, } impl OrderBook { /// Creates a new order book. pub(crate) fn new<B, S>(buy: B, sell: S) -> Self where B: Into<Box<[Order]>>, S: Into<Box<[Order]>>, { Self { buy: buy.into(), sell: sell.into(), } } /// Gets the list of buying orders. pub fn buy(&self) -> &[Order] { &self.buy } /// Gets the list of selling orders. pub fn sell(&self) -> &[Order]
} /// Structure representing an order. #[derive(Debug, Copy, Clone, Deserialize, CopyGetters)] #[serde(rename_all = "PascalCase")] pub struct Order { /// Quantity being ordered. #[get_copy = "pub"] quantity: f32, /// Rate/price of the order. #[get_copy = "pub"] rate: f32, } /// Structure representing currency balance information. #[derive(Debug, Clone, Deserialize, CopyGetters, Getters)] #[serde(rename_all = "PascalCase")] pub struct BalanceInfo { /// Currency code. #[get = "pub"] currency: String, /// Balance for the currency. #[get_copy = "pub"] balance: f32, /// Available balance for the currency. #[get_copy = "pub"] available: f32, /// Pending balance for the currency. #[get_copy = "pub"] pending: f32, /// Address of the currency for deposits. #[get = "pub"] crypto_address: Option<String>, /// Whether a withdrawal has been requested. #[get_copy = "pub"] requested: Option<bool>, /// UUID of the currency. #[get = "pub"] uuid: Option<String>, }
{ &self.sell }
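Since every struct above carries `#[serde(rename_all = "PascalCase")]`, the JSON these types deserialize from is keyed in PascalCase. A minimal sketch of that field mapping for `TickerInfo`, written in Python with an invented payload (the numbers and the exact Bittrex response shape are assumptions, not taken from the crate):

import json

# Hypothetical ticker payload; the PascalCase keys are what the serde
# attribute maps onto the snake_case fields bid/ask/last.
payload = '{"Bid": 0.0612, "Ask": 0.0614, "Last": 0.0613}'

ticker = json.loads(payload)
spread = ticker["Ask"] - ticker["Bid"]
print("bid/ask spread:", round(spread, 6))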
ffmpeg.go
// Copyright 2020 Spencer Small // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package video import ( "context" "fmt" "io" "io/ioutil" "log" "os" "os/exec" "time" ) type ffmpegExtractor struct { } // NewExtractor creates a new Extractor that uses ffmpeg as a backend func NewExtractor() Extractor
func (f *ffmpegExtractor) Clip(ctx context.Context, filename string, start time.Duration, end time.Duration) (io.ReadCloser, error) { if _, err := os.Stat(filename); err != nil { return nil, err } tmpFile, err := ioutil.TempFile(os.TempDir(), "ffmpeg-*.mp4") if err != nil { return nil, err } log.Println("Created temp file for transcoding:", tmpFile.Name()) dur := end - start cmd := exec.CommandContext(ctx, "ffmpeg", "-noaccurate_seek", "-ss", formatHHMMSS(start), "-i", filename, "-t", formatHHMMSS(dur), "-avoid_negative_ts", "make_zero", "-y", "-c", "copy", tmpFile.Name()) log.Println("Running command:", cmd) stderr, err := cmd.StderrPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } e, err := ioutil.ReadAll(stderr) if err != nil { return nil, err } if err := cmd.Wait(); err != nil { log.Println(string(e)) return nil, err } log.Printf("File %q finished", tmpFile.Name()) return &tmpFileAutoCleanup{tmpFile}, nil } func formatHHMMSS(d time.Duration) string { d = d.Round(time.Second) h := d / time.Hour d -= h * time.Hour m := d / time.Minute d -= m * time.Minute s := d / time.Second return fmt.Sprintf("%02d:%02d:%02d", h, m, s) } type tmpFileAutoCleanup struct { file *os.File } func (f *tmpFileAutoCleanup) Read(p []byte) (n int, err error) { return f.file.Read(p) } func (f *tmpFileAutoCleanup) Close() error { defer func() { if err := os.Remove(f.file.Name()); err != nil { log.Println("Error deleting file:", err) } else { log.Println("Deleted", f.file.Name()) } }() return f.file.Close() }
{ return &ffmpegExtractor{} }
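The clip above is a plain stream copy: a fast seek with -ss placed before -i, an output duration via -t, and -c copy so nothing is re-encoded. A rough Python sketch of the same invocation (file names and times are made up; it passes plain seconds, which ffmpeg accepts just as well as the HH:MM:SS strings the Go code builds):

import subprocess

def clip(src, dst, start_s, dur_s):
    # Mirror the flag order used above: seek before the input, bound the
    # output length, avoid negative timestamps, overwrite, stream copy.
    cmd = [
        "ffmpeg", "-noaccurate_seek",
        "-ss", str(start_s),
        "-i", src,
        "-t", str(dur_s),
        "-avoid_negative_ts", "make_zero",
        "-y", "-c", "copy",
        dst,
    ]
    subprocess.run(cmd, check=True, capture_output=True)

clip("input.mp4", "clip.mp4", 30, 15)  # hypothetical 15 s clip starting at 30 s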
supervisor.go
/* Copyright 2015 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package service import ( "context" "fmt" "sync" "github.com/gravitational/teleport" "github.com/gravitational/trace" "github.com/sirupsen/logrus" ) // Supervisor implements the simple service logic - registering // service functions and de-registering the service goroutines type Supervisor interface { // Register adds the service to the pool, if supervisor is in // the started state, the service will be started immediately // otherwise, it will be started after Start() has been called Register(srv Service) // RegisterFunc creates a service from function spec and registers // it within the system RegisterFunc(name string, fn Func) // RegisterCriticalFunc creates a critical service from function spec and registers // it within the system, if this service exits with error, // the process shuts down. RegisterCriticalFunc(name string, fn Func) // ServiceCount returns the number of registered and actively running // services ServiceCount() int // Start starts all unstarted services Start() error // Wait waits until all services exit Wait() error // Run starts and waits for the service to complete // it's a combination Start() and Wait() Run() error // Services returns list of running services Services() []string // BroadcastEvent generates event and broadcasts it to all // subscribed parties. BroadcastEvent(Event) // WaitForEvent waits for event to be broadcasted, if the event // was already broadcasted, eventC will receive current event immediately. WaitForEvent(ctx context.Context, name string, eventC chan Event) // RegisterEventMapping registers event mapping - // when the sequence in the event mapping triggers, the // outbound event will be generated. RegisterEventMapping(EventMapping) // ExitContext returns context that will be closed when // a hard TeleportExitEvent is broadcasted. ExitContext() context.Context // GracefulExitContext returns context that will be closed when // a graceful or hard TeleportExitEvent is broadcast. GracefulExitContext() context.Context // ReloadContext returns context that will be closed when // TeleportReloadEvent is broadcasted. ReloadContext() context.Context } // EventMapping maps a sequence of incoming // events and if triggered, generates an out event. type EventMapping struct { // In is the incoming event sequence. In []string // Out is the outbound event to generate. Out string } // String returns user-friendly representation of the mapping. func (e EventMapping) String() string { return fmt.Sprintf("EventMapping(in=%v, out=%v)", e.In, e.Out) } func (e EventMapping) matches(currentEvent string, m map[string]Event) bool { // existing events that have been fired should match for _, in := range e.In { if _, ok := m[in]; !ok { return false } } // current event that is firing should match one of the expected events for _, in := range e.In { if currentEvent == in { return true } } return false } // LocalSupervisor is a Teleport's implementation of the Supervisor interface. 
type LocalSupervisor struct { state int sync.Mutex wg *sync.WaitGroup services []Service events map[string]Event eventsC chan Event eventWaiters map[string][]*waiter closeContext context.Context signalClose context.CancelFunc // exitContext is closed when someone emits a hard Exit event exitContext context.Context signalExit context.CancelFunc // gracefulExitContext is closed when someone emits a graceful or hard Exit event gracefulExitContext context.Context signalGracefulExit context.CancelFunc reloadContext context.Context signalReload context.CancelFunc eventMappings []EventMapping id string // log specifies the logger log logrus.FieldLogger } // NewSupervisor returns new instance of initialized supervisor func NewSupervisor(id string, parentLog logrus.FieldLogger) Supervisor { ctx := context.TODO() closeContext, cancel := context.WithCancel(ctx) exitContext, signalExit := context.WithCancel(ctx) // graceful exit context is a subcontext of exit context since any work that terminates // in the event of graceful exit must also terminate in the event of an immediate exit. gracefulExitContext, signalGracefulExit := context.WithCancel(exitContext) reloadContext, signalReload := context.WithCancel(ctx) srv := &LocalSupervisor{ state: stateCreated, id: id, services: []Service{}, wg: &sync.WaitGroup{}, events: map[string]Event{}, eventsC: make(chan Event, 1024), eventWaiters: make(map[string][]*waiter), closeContext: closeContext, signalClose: cancel, exitContext: exitContext, signalExit: signalExit, gracefulExitContext: gracefulExitContext, signalGracefulExit: signalGracefulExit, reloadContext: reloadContext, signalReload: signalReload, log: parentLog.WithField(trace.Component, teleport.Component(teleport.ComponentProcess, id)), } go srv.fanOut() return srv } // Event is a special service event that can be generated // by various goroutines in the supervisor type Event struct { Name string Payload interface{} } func (e *Event) String() string { return e.Name } func (s *LocalSupervisor) Register(srv Service) { s.log.WithField("service", srv.Name()).Debug("Adding service to supervisor.") s.Lock() defer s.Unlock() s.services = append(s.services, srv) if s.state == stateStarted { s.serve(srv) } } // ServiceCount returns the number of registered and actively running services func (s *LocalSupervisor) ServiceCount() int { s.Lock() defer s.Unlock() return len(s.services) } // RegisterFunc creates a service from function spec and registers // it within the system func (s *LocalSupervisor) RegisterFunc(name string, fn Func) { s.Register(&LocalService{Function: fn, ServiceName: name}) } // RegisterCriticalFunc creates a critical service from function spec and registers // it within the system, if this service exits with error, // the process shuts down. func (s *LocalSupervisor) RegisterCriticalFunc(name string, fn Func) { s.Register(&LocalService{Function: fn, ServiceName: name, Critical: true}) } // RemoveService removes service from supervisor tracking list func (s *LocalSupervisor) RemoveService(srv Service) error { l := s.log.WithField("service", srv.Name()) s.Lock() defer s.Unlock() for i, el := range s.services { if el == srv { s.services = append(s.services[:i], s.services[i+1:]...) 
l.Debug("Service is completed and removed.") return nil } } l.Warning("Service is completed but not found.") return trace.NotFound("service %v is not found", srv) } // ExitEventPayload contains information about service // name, and service error if it exited with error type ExitEventPayload struct { // Service is the service that exited Service Service // Error is the error of the service exit Error error } func (s *LocalSupervisor) serve(srv Service) { s.wg.Add(1) go func() { defer s.wg.Done() defer s.RemoveService(srv) l := s.log.WithField("service", srv.Name()) l.Debug("Service has started.") err := srv.Serve() if err != nil { if err == ErrTeleportExited { l.Info("Teleport process has shut down.") } else { l.WithError(err).Warning("Teleport process has exited with error.") s.BroadcastEvent(Event{ Name: ServiceExitedWithErrorEvent, Payload: ExitEventPayload{Service: srv, Error: err}, }) } } }() } func (s *LocalSupervisor) Start() error { s.Lock() defer s.Unlock() s.state = stateStarted if len(s.services) == 0 { s.log.Warning("Supervisor has no services to run. Exiting.") return nil } for _, srv := range s.services { s.serve(srv) } return nil } func (s *LocalSupervisor) Services() []string { s.Lock() defer s.Unlock() out := make([]string, len(s.services)) for i, srv := range s.services { out[i] = srv.Name() } return out } func (s *LocalSupervisor) Wait() error { defer s.signalClose() s.wg.Wait() return nil } func (s *LocalSupervisor) Run() error { if err := s.Start(); err != nil { return trace.Wrap(err) } return s.Wait() } // ExitContext returns context that will be closed when // a hard TeleportExitEvent is broadcasted. func (s *LocalSupervisor) ExitContext() context.Context { return s.exitContext } // GracefulExitContext returns context that will be closed when // a hard or graceful TeleportExitEvent is broadcasted. func (s *LocalSupervisor) GracefulExitContext() context.Context { return s.gracefulExitContext } // ReloadContext returns context that will be closed when // TeleportReloadEvent is broadcasted. func (s *LocalSupervisor) ReloadContext() context.Context { return s.reloadContext } // BroadcastEvent generates event and broadcasts it to all // subscribed parties. func (s *LocalSupervisor) BroadcastEvent(event Event) { s.Lock() defer s.Unlock() switch event.Name { case TeleportExitEvent: // if exit event includes a context payload, it is a "graceful" exit, and // we need to hold off closing the supervisor's exit context until after // the graceful context has closed. If not, it is an immediate exit. if ctx, ok := event.Payload.(context.Context); ok { s.signalGracefulExit() go func() { select { case <-s.exitContext.Done(): case <-ctx.Done(): s.signalExit() } }() } else { s.signalExit() } case TeleportReloadEvent: s.signalReload() } sendEvent := func(e Event) { select { case s.eventsC <- e: case <-s.closeContext.Done(): } } s.events[event.Name] = event go sendEvent(event) // Log all events other than recovered events to prevent the logs from // being flooded. if event.String() != TeleportOKEvent { s.log.WithField("event", event.String()).Debug("Broadcasting event.") } for _, m := range s.eventMappings { if m.matches(event.Name, s.events) { mappedEvent := Event{Name: m.Out} s.events[mappedEvent.Name] = mappedEvent go sendEvent(mappedEvent) s.log.WithFields(logrus.Fields{ "in": event.String(), "out": m.String(), }).Debug("Broadcasting mapped event.")
} } // RegisterEventMapping registers event mapping - // when the sequence in the event mapping triggers, the // outbound event will be generated. func (s *LocalSupervisor) RegisterEventMapping(m EventMapping) { s.Lock() defer s.Unlock() s.eventMappings = append(s.eventMappings, m) } // WaitForEvent waits for event to be broadcasted, if the event // was already broadcasted, eventC will receive current event immediately. func (s *LocalSupervisor) WaitForEvent(ctx context.Context, name string, eventC chan Event) { s.Lock() defer s.Unlock() waiter := &waiter{eventC: eventC, context: ctx} event, ok := s.events[name] if ok { go waiter.notify(event) return } s.eventWaiters[name] = append(s.eventWaiters[name], waiter) } func (s *LocalSupervisor) getWaiters(name string) []*waiter { s.Lock() defer s.Unlock() waiters := s.eventWaiters[name] out := make([]*waiter, len(waiters)) copy(out, waiters) return out } func (s *LocalSupervisor) fanOut() { for { select { case event := <-s.eventsC: waiters := s.getWaiters(event.Name) for _, waiter := range waiters { go waiter.notify(event) } case <-s.closeContext.Done(): return } } } type waiter struct { eventC chan Event context context.Context } func (w *waiter) notify(event Event) { select { case w.eventC <- event: case <-w.context.Done(): } } // Service is a running teleport service function type Service interface { // Serve starts the function Serve() error // String returns user-friendly description of service String() string // Name returns service name Name() string // IsCritical returns true if the service is critical // and program can't continue without it IsCritical() bool } // LocalService is a locally defined service type LocalService struct { // Function is a function to call Function Func // ServiceName is a service name ServiceName string // Critical is set to true // when the service is critical and program can't continue // without it Critical bool } // IsCritical returns true if the service is critical // and program can't continue without it func (l *LocalService) IsCritical() bool { return l.Critical } // Serve starts the function func (l *LocalService) Serve() error { return l.Function() } // String returns user-friendly service name func (l *LocalService) String() string { return l.ServiceName } // Name returns unique service name func (l *LocalService) Name() string { return l.ServiceName } // Func is a service function type Func func() error const ( stateCreated = iota stateStarted )
}
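EventMapping.matches only fires when every event named in In has already been recorded and the event currently being broadcast is itself one of those prerequisites. The same check, condensed into a few lines of Python (names are mine, not from the package):

def mapping_matches(current_event, seen_events, in_events):
    # every prerequisite event must have fired already...
    if any(name not in seen_events for name in in_events):
        return False
    # ...and the event firing right now must be one of the prerequisites
    return current_event in in_events

assert mapping_matches("b", {"a", "b"}, ["a", "b"])
assert not mapping_matches("c", {"a", "b", "c"}, ["a", "b"])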
test.js
// - -------------------------------------------------------------------- - // // - libs var z = require("../"); var fs = require("fs"); var cwd = __dirname + "/../"; var assert = require("assert"); // - -------------------------------------------------------------------- - // // - Zip describe("Zip",function() { it("zip sync",function() { var file = __dirname + "/test-sync.zip"; z.zip(cwd,file); assert.ok(fs.existsSync(file)); fs.unlinkSync(file); }); it("zip sync empty dir",function() { var dir = __dirname + "/empty"; var file = __dirname + "/test-sync-empty.zip"; z.zip(dir,file); assert.ok(fs.existsSync(file)); fs.unlinkSync(file);
it("zip async",function(done) { var file = __dirname + "/test-async.zip"; z.zip(cwd,file,function(error) { fs.exists(file,function(exists) { assert.ok(exists); fs.unlink(file,function(error) { done(error); }); }); }); }); it("unzip async",function(done) { var file = __dirname + "/test.zip"; z.unzip(file,__dirname,function(error) { var content = __dirname + "/test-zip-content"; fs.exists(content,function(exists) { assert.ok(exists); fs.unlink(content,function(error) { done(error); }); }) }); }); }); // - -------------------------------------------------------------------- - //
});
06-ptp_reject.py
""" ======================================================== 06. Remove epochs based on peak-to-peak (PTP) amplitudes ======================================================== Epochs containing peak-to-peak above the thresholds defined in the 'reject' parameter are removed from the data. This step will drop epochs containing non-biological artifacts but also epochs containing biological artifacts not sufficiently corrected by the ICA or the SSP processing. """ import itertools import logging from typing import Optional import mne from mne.utils import BunchConst from mne.parallel import parallel_func from mne_bids import BIDSPath import config from config import gen_log_kwargs, on_error, failsafe_run logger = logging.getLogger('mne-bids-pipeline') @failsafe_run(on_error=on_error, script_path=__file__) def
(*, cfg, subject, session=None): bids_path = BIDSPath(subject=subject, session=session, task=cfg.task, acquisition=cfg.acq, run=None, recording=cfg.rec, space=cfg.space, suffix='epo', extension='.fif', datatype=cfg.datatype, root=cfg.deriv_root, check=False) infile_processing = cfg.spatial_filter fname_in = bids_path.copy().update(processing=infile_processing) fname_out = bids_path.copy().update(processing='clean') msg = f'Input: {fname_in}, Output: {fname_out}' logger.info(**gen_log_kwargs(message=msg, subject=subject, session=session)) # Get rejection parameters and drop bad epochs epochs = mne.read_epochs(fname_in, preload=True) reject = config.get_reject(epochs=epochs) if cfg.ica_reject is not None: for ch_type, threshold in cfg.ica_reject.items(): if (ch_type in reject and threshold < reject[ch_type]): # This can only ever happen in case of # reject = 'autoreject_global' msg = (f'Adjusting PTP rejection threshold proposed by ' f'autoreject, as it is greater than ica_reject: ' f'{ch_type}: {reject[ch_type]} -> {threshold}') logger.info(**gen_log_kwargs(message=msg, subject=subject, session=session)) reject[ch_type] = threshold msg = f'Using PTP rejection thresholds: {reject}' logger.info(**gen_log_kwargs(message=msg, subject=subject, session=session)) n_epochs_before_reject = len(epochs) epochs.reject_tmin = cfg.reject_tmin epochs.reject_tmax = cfg.reject_tmax epochs.drop_bad(reject=reject) n_epochs_after_reject = len(epochs) if 0 < n_epochs_after_reject < 0.5 * n_epochs_before_reject: msg = ('More than 50% of all epochs rejected. Please check the ' 'rejection thresholds.') logger.warning(**gen_log_kwargs(message=msg, subject=subject, session=session)) elif n_epochs_after_reject == 0: raise RuntimeError('No epochs remaining after peak-to-peak-based ' 'rejection. Cannot continue.') msg = 'Saving cleaned, baseline-corrected epochs …' epochs.apply_baseline(cfg.baseline) epochs.save(fname_out, overwrite=True) def get_config( subject: Optional[str] = None, session: Optional[str] = None ) -> BunchConst: cfg = BunchConst( task=config.get_task(), datatype=config.get_datatype(), acq=config.acq, rec=config.rec, space=config.space, baseline=config.baseline, reject_tmin=config.reject_tmin, reject_tmax=config.reject_tmax, spatial_filter=config.spatial_filter, ica_reject=config.get_ica_reject(), deriv_root=config.get_deriv_root(), decim=config.decim ) return cfg def main(): """Run epochs.""" parallel, run_func, _ = parallel_func(drop_ptp, n_jobs=config.get_n_jobs()) logs = parallel( run_func(cfg=get_config(), subject=subject, session=session) for subject, session in itertools.product(config.get_subjects(), config.get_sessions()) ) config.save_logs(logs) if __name__ == '__main__': main()
drop_ptp
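The reject dict handed to epochs.drop_bad() simply maps channel types to peak-to-peak thresholds in SI units. A minimal standalone sketch, independent of the pipeline's config machinery (the file path and threshold values are illustrative only, not the pipeline's defaults):

import mne

# Illustrative PTP thresholds per channel type (V for EEG/EOG, T and T/m for MEG).
reject = dict(eeg=150e-6, eog=250e-6, grad=4000e-13, mag=4e-12)

epochs = mne.read_epochs("sub-01_task-rest_proc-ica_epo.fif", preload=True)  # hypothetical file
n_before = len(epochs)
epochs.drop_bad(reject=reject)
print(f"dropped {n_before - len(epochs)} of {n_before} epochs")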
map_phys_mem.rs
use std::process::Command;
#[test] fn check_boot_info() { run_test_binary("check_boot_info"); } #[test] fn access_phys_mem() { run_test_binary("access_phys_mem"); } fn run_test_binary(bin_name: &str) { let mut cmd = Command::new(env!("CARGO")); cmd.current_dir("tests/test_kernels/map_phys_mem"); cmd.arg("run"); cmd.arg("--bin").arg(bin_name); cmd.arg("--target").arg("x86_64-map_phys_mem.json"); cmd.arg("-Zbuild-std=core"); cmd.arg("-Zbuild-std-features=compiler-builtins-mem"); assert!(cmd.status().unwrap().success()); }
mounts_linux_test.go
package container // import "github.com/ellcrys/docker/integration/container" import ( "context" "fmt" "path/filepath" "testing" "github.com/ellcrys/docker/api/types" "github.com/ellcrys/docker/api/types/container" "github.com/ellcrys/docker/api/types/mount" "github.com/ellcrys/docker/api/types/network" "github.com/ellcrys/docker/client" "github.com/ellcrys/docker/internal/test/request" "github.com/ellcrys/docker/pkg/system" "github.com/gotestyourself/gotestyourself/assert" is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" "github.com/gotestyourself/gotestyourself/skip" ) func TestContainerNetworkMountsNoChown(t *testing.T) { // chown only applies to Linux bind mounted volumes; must be same host to verify skip.If(t, testEnv.DaemonInfo.OSType != "linux" || testEnv.IsRemoteDaemon()) defer setupTest(t)() ctx := context.Background() tmpDir := fs.NewDir(t, "network-file-mounts", fs.WithMode(0755), fs.WithFile("nwfile", "network file bind mount", fs.WithMode(0644))) defer tmpDir.Remove() tmpNWFileMount := tmpDir.Join("nwfile") config := container.Config{ Image: "busybox", } hostConfig := container.HostConfig{ Mounts: []mount.Mount{ { Type: "bind", Source: tmpNWFileMount, Target: "/etc/resolv.conf", }, { Type: "bind", Source: tmpNWFileMount, Target: "/etc/hostname", }, { Type: "bind", Source: tmpNWFileMount, Target: "/etc/hosts", }, }, } cli, err := client.NewEnvClient() assert.NilError(t, err) defer cli.Close() ctrCreate, err := cli.ContainerCreate(ctx, &config, &hostConfig, &network.NetworkingConfig{}, "") assert.NilError(t, err) // container will exit immediately because of no tty, but we only need the start sequence to test the condition err = cli.ContainerStart(ctx, ctrCreate.ID, types.ContainerStartOptions{}) assert.NilError(t, err) // Check that host-located bind mount network file did not change ownership when the container was started // Note: If the user specifies a mountpath from the host, we should not be // attempting to chown files outside the daemon's metadata directory // (represented by `daemon.repository` at init time). // This forces users who want to use user namespaces to handle the // ownership needs of any external files mounted as network files // (/etc/resolv.conf, /etc/hosts, /etc/hostname) separately from the // daemon. In all other volume/bind mount situations we have taken this // same line--we don't chown host file content. // See GitHub PR 34224 for details. statT, err := system.Stat(tmpNWFileMount) assert.NilError(t, err) assert.Check(t, is.Equal(uint32(0), statT.UID()), "bind mounted network file should not change ownership from root") } func
(t *testing.T) { skip.If(t, testEnv.DaemonInfo.OSType != "linux" || testEnv.IsRemoteDaemon()) t.Parallel() client := request.NewAPIClient(t) ctx := context.Background() info, err := client.Info(ctx) if err != nil { t.Fatal(err) } for _, test := range []struct { desc string propagation mount.Propagation expected mount.Propagation }{ { desc: "default", propagation: "", expected: mount.PropagationRSlave, }, { desc: "private", propagation: mount.PropagationPrivate, }, { desc: "rprivate", propagation: mount.PropagationRPrivate, }, { desc: "slave", propagation: mount.PropagationSlave, }, { desc: "rslave", propagation: mount.PropagationRSlave, expected: mount.PropagationRSlave, }, { desc: "shared", propagation: mount.PropagationShared, }, { desc: "rshared", propagation: mount.PropagationRShared, expected: mount.PropagationRShared, }, } { t.Run(test.desc, func(t *testing.T) { test := test t.Parallel() propagationSpec := fmt.Sprintf(":%s", test.propagation) if test.propagation == "" { propagationSpec = "" } bindSpecRoot := info.DockerRootDir + ":" + "/foo" + propagationSpec bindSpecSub := filepath.Join(info.DockerRootDir, "containers") + ":/foo" + propagationSpec for name, hc := range map[string]*container.HostConfig{ "bind root": {Binds: []string{bindSpecRoot}}, "bind subpath": {Binds: []string{bindSpecSub}}, "mount root": { Mounts: []mount.Mount{ { Type: mount.TypeBind, Source: info.DockerRootDir, Target: "/foo", BindOptions: &mount.BindOptions{Propagation: test.propagation}, }, }, }, "mount subpath": { Mounts: []mount.Mount{ { Type: mount.TypeBind, Source: filepath.Join(info.DockerRootDir, "containers"), Target: "/foo", BindOptions: &mount.BindOptions{Propagation: test.propagation}, }, }, }, } { t.Run(name, func(t *testing.T) { hc := hc t.Parallel() c, err := client.ContainerCreate(ctx, &container.Config{ Image: "busybox", Cmd: []string{"true"}, }, hc, nil, "") if err != nil { if test.expected != "" { t.Fatal(err) } // expected an error, so this is ok and should not continue return } if test.expected == "" { t.Fatal("expected create to fail") } defer func() { if err := client.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{Force: true}); err != nil { panic(err) } }() inspect, err := client.ContainerInspect(ctx, c.ID) if err != nil { t.Fatal(err) } if len(inspect.Mounts) != 1 { t.Fatalf("unexpected number of mounts: %+v", inspect.Mounts) } m := inspect.Mounts[0] if m.Propagation != test.expected { t.Fatalf("got unexpected propagation mode, expected %q, got: %v", test.expected, m.Propagation) } }) } }) } }
TestMountDaemonRoot
config.py
from configparser import SafeConfigParser import os def
(): config = SafeConfigParser() config_filename = "config_resource.conf" config_filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), config_filename) if os.path.exists(config_filepath) == False: config_filepath = os.path.join(os.getenv("HOME"), ".tests_conf", config_filename) config.read(config_filepath) return config
get_config
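How the returned parser is typically consumed, assuming a hypothetical [database] section in config_resource.conf (the section and option names here are invented for illustration):

# config_resource.conf might contain, for example:
#   [database]
#   host = localhost
#   port = 5432

config = get_config()
db_host = config.get("database", "host")
db_port = config.getint("database", "port", fallback=5432)
print(db_host, db_port)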
socket_debugger_generator.py
import sys from os.path import abspath, dirname, join from os import makedirs import datetime import jinja2 from shutil import copytree ROOT_PATH = abspath(dirname(__file__)) TEMPLATE_PATH = abspath(join(ROOT_PATH, 'templates')) # pair of template name and output file name TEMPLATE_DATA = [ ('socket_debugger.template', 'socket-debugger.html') ] # first argument is script path, following are passed args PROJECT_PATH = sys.argv[1] PROJECT_NAME = PROJECT_PATH.split('/')[-1] if PROJECT_PATH.__contains__('/') \ else PROJECT_PATH.split('\\')[-1] SOCKET_PORT = sys.argv[2] OUTPUT_PATH = abspath(join(PROJECT_PATH, 'vxproj', 'js')) def generate_viewx_socket_debugger(): """ Method that generates socket debugger HTML page for ViewX project instance. """ # Initialize template engine. jinja_env = jinja2.Environment( trim_blocks=True, lstrip_blocks=True, loader=jinja2.FileSystemLoader(TEMPLATE_PATH)) date = datetime.datetime.now().strftime('%d.%m.%Y. %H:%M:%S') for template_name, output_file_name in TEMPLATE_DATA: # Load preview template template = jinja_env.get_template(template_name)
rendered = template.render({ 'date': date, 'project_name': PROJECT_NAME, 'socket_port': SOCKET_PORT }) # create output directory if not already exists makedirs(OUTPUT_PATH, exist_ok=True) # Write rendered content to the file with open(join(OUTPUT_PATH, output_file_name), 'w') as output_file: output_file.write(rendered) return True if generate_viewx_socket_debugger(): print('success') else: print('error')
# render the template
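The generator only hands three names to render(), so the template itself can stay small. A sketch of the rendering step with an inline stand-in template (the real socket_debugger.template is not shown in this file, so its contents here are invented):

import jinja2

template = jinja2.Template(
    "<!-- generated {{ date }} -->\n"
    "<h1>{{ project_name }} socket debugger</h1>\n"
    "<script>var ws = new WebSocket('ws://localhost:{{ socket_port }}');</script>"
)
print(template.render(date="01.01.2020. 12:00:00",
                      project_name="demo-project",
                      socket_port=5678))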
Day11-Problem01.js
"use strict"; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); const readLines_1 = __importDefault(require("../readLines")); const path_1 = __importDefault(require("path")); let seats = []; let lastHash = ''; let rounds = 0; let size = { rows: 0, cols: 0 }; const getAdjacentNum = (row, col) => { let num = 0; const prevRow = row > 0 ? row - 1 : row; const nextRow = row === size.rows - 1 ? row : row + 1; const prevCol = col > 0 ? col - 1 : col; const nextCol = col === size.cols - 1 ? col : col + 1; for (let r = prevRow; r <= nextRow; r++) { for (let c = prevCol; c <= nextCol; c++) { if (r !== row || c !== col) { if (seats[r][c] === '#') { num++; } } } } return num; }; const nextRound = () => { let matrix = []; for (let r = 0; r < size.rows; r++) { const row = []; for (let c = 0; c < size.cols; c++) { const itemType = seats[r][c]; const adjacent = getAdjacentNum(r, c); if (itemType === 'L') { if (adjacent === 0) { // Becomes occupied row.push('#'); }
else if (itemType === '#') { if (adjacent >= 4) { row.push('L'); } else { row.push('#'); } } else { row.push('.'); } } matrix.push(row); } const hash = getHash(matrix); if (lastHash !== hash) { lastHash = hash; rounds++; seats = matrix; return true; } return false; }; const countOccupied = (matrix) => { return matrix.reduce((accum, item) => { const m = item.join(',').match(/#/gm); if (m) { accum += m.length; } return accum; }, 0); }; const getHash = (matrix) => { return matrix.reduce((accum, row) => { accum += row.join(',') + ','; return accum; }, ''); }; const start = async () => { const lines = await readLines_1.default(path_1.default.join(__dirname, 'input.txt')); size.rows = lines.filter((l) => l.trim() !== '').length; lines.forEach((line) => { const row = line.trim().split(''); seats.push(row); }); size.cols = lines[0].length; let working = true; while (working) { working = nextRound(); } return countOccupied(seats); }; exports.default = start;
else { row.push('L'); } }
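getAdjacentNum avoids per-cell bounds checks by clamping the scan window at the grid edges before looping. The same idea in a few lines of Python (the grid literal is a made-up 2x2 example):

def adjacent_occupied(grid, row, col):
    rows, cols = len(grid), len(grid[0])
    count = 0
    # clamp the 3x3 neighbourhood so it never leaves the grid
    for r in range(max(row - 1, 0), min(row + 1, rows - 1) + 1):
        for c in range(max(col - 1, 0), min(col + 1, cols - 1) + 1):
            if (r, c) != (row, col) and grid[r][c] == "#":
                count += 1
    return count

assert adjacent_occupied(["#L", "##"], 0, 0) == 2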
render_pass.rs
use std::borrow::Borrow; use std::ops::{Range, Deref, DerefMut}; use std::marker::PhantomData; use {buffer, pso}; use {Backend, IndexCount, InstanceCount, VertexCount, VertexOffset}; use queue::{Supports, Graphics}; use super::{ AttachmentClear, ClearValue, CommandBuffer, RawCommandBuffer, Shot, Level, Primary, Secondary, Submittable, Submit }; /// Specifies how commands for the following renderpasses will be recorded. pub enum SubpassContents { /// Contents of the subpass will be inline in the command buffer, /// NOT in secondary command buffers. Inline, /// Contents of the subpass will be in secondary command buffers, and /// the primary command buffer will only contain `execute_command()` calls /// until the subpass or render pass is complete. SecondaryBuffers, } /// This struct contains all methods for all commands submittable during a subpass. /// It is used to implement the identical portions of RenderPassInlineEncoder and SubpassCommandBuffer. /// /// Where methods are undocumented, they are identical to the methods on the `RawCommandBuffer` /// trait with the same names. pub struct RenderSubpassCommon<'a, B: Backend>(pub(crate) &'a mut B::CommandBuffer); impl<'a, B: Backend> RenderSubpassCommon<'a, B> { /// pub fn clear_attachments<T, U>(&mut self, clears: T, rects: U) where T: IntoIterator, T::Item: Borrow<AttachmentClear>, U: IntoIterator, U::Item: Borrow<pso::Rect>, { self.0.clear_attachments(clears, rects) } /// pub fn draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>) { self.0.draw(vertices, instances) } /// pub fn draw_indexed(&mut self, indices: Range<IndexCount>, base_vertex: VertexOffset, instances: Range<InstanceCount>) { self.0.draw_indexed(indices, base_vertex, instances) } /// pub fn draw_indirect(&mut self, buffer: &B::Buffer, offset: buffer::Offset, draw_count: u32, stride: u32) { self.0.draw_indirect(buffer, offset, draw_count, stride) } /// pub fn draw_indexed_indirect(&mut self, buffer: &B::Buffer, offset: buffer::Offset, draw_count: u32, stride: u32) { self.0.draw_indexed_indirect(buffer, offset, draw_count, stride) } /// pub fn bind_index_buffer(&mut self, ibv: buffer::IndexBufferView<B>) { self.0.bind_index_buffer(ibv) } /// pub fn bind_vertex_buffers(&mut self, vbs: pso::VertexBufferSet<B>) { self.0.bind_vertex_buffers(vbs); } /// pub fn bind_graphics_pipeline(&mut self, pipeline: &B::GraphicsPipeline) { self.0.bind_graphics_pipeline(pipeline) } /// pub fn bind_graphics_descriptor_sets<T>( &mut self, layout: &B::PipelineLayout, first_set: usize, sets: T, ) where T: IntoIterator, T::Item: Borrow<B::DescriptorSet>, { self.0.bind_graphics_descriptor_sets(layout, first_set, sets) } /// pub fn set_viewports<T>(&mut self, first_viewport: u32, viewports: T) where T: IntoIterator, T::Item: Borrow<pso::Viewport>, { self.0.set_viewports(first_viewport, viewports) } /// pub fn set_scissors<T>(&mut self, first_scissor: u32, scissors: T) where T: IntoIterator, T::Item: Borrow<pso::Rect>, { self.0.set_scissors(first_scissor, scissors) } /// pub fn set_stencil_reference(&mut self, front: pso::StencilValue, back: pso::StencilValue) { self.0.set_stencil_reference(front, back) } /// pub fn set_blend_constants(&mut self, cv: pso::ColorValue) { self.0.set_blend_constants(cv) } /// pub fn push_graphics_constants(&mut self, layout: &B::PipelineLayout, stages: pso::ShaderStageFlags, offset: u32, constants: &[u32]) { self.0.push_graphics_constants(layout, stages, offset, constants); } // TODO: set_line_width // TODO: set_depth_bounds // TODO: 
set_depth_bias // TODO: set_stencil_compare_mask // TODO: set_stencil_write_mask // TODO: pipeline barrier (postponed) // TODO: begin/end query } /// An object that records commands into a command buffer inline, that is, /// without secondary command buffers. pub struct RenderPassInlineEncoder<'a, B: Backend, L: Level>(pub(crate) Option<RenderSubpassCommon<'a, B>>, PhantomData<L>) where B::CommandBuffer: 'a; impl<'a, B: Backend, L: Level> RenderPassInlineEncoder<'a, B, L> { /// Creates a new `RenderPassInlineEncoder`, starting a new render /// pass in the given `CommandBuffer`. pub fn new<C, T, S: Shot>( cmd_buffer: &'a mut CommandBuffer<B, C, S, L>, render_pass: &B::RenderPass, frame_buffer: &B::Framebuffer, render_area: pso::Rect, clear_values: T, ) -> Self where C: Supports<Graphics>, T: IntoIterator, T::Item: Borrow<ClearValue>, { cmd_buffer.raw.begin_render_pass( render_pass, frame_buffer, render_area, clear_values, SubpassContents::Inline); RenderPassInlineEncoder(Some(RenderSubpassCommon(cmd_buffer.raw)), PhantomData) } /// Start the next subpass. pub fn next_subpass_inline(mut self) -> Self { self.0.as_mut().unwrap().0.next_subpass(SubpassContents::Inline); self } } impl<'a, B: Backend> RenderPassInlineEncoder<'a, B, Primary> { /// Begins recording a new subpass with secondary buffers. pub fn next_subpass_secondary(mut self) -> RenderPassSecondaryEncoder<'a, B> { let buffer = self.0.take().unwrap(); buffer.0.next_subpass(SubpassContents::SecondaryBuffers); RenderPassSecondaryEncoder(Some(buffer.0)) } } impl<'a, B: Backend, L: Level> Deref for RenderPassInlineEncoder<'a, B, L> { type Target = RenderSubpassCommon<'a, B>; fn deref(&self) -> &RenderSubpassCommon<'a, B> { self.0.as_ref().unwrap() } } impl<'a, B: Backend, L: Level> DerefMut for RenderPassInlineEncoder<'a, B, L> { fn deref_mut(&mut self) -> &mut RenderSubpassCommon<'a, B> { self.0.as_mut().unwrap() } } impl<'a, B: Backend, L: Level> Drop for RenderPassInlineEncoder<'a, B, L> { fn drop(&mut self) { if let Some(ref mut b) = self.0 { b.0.end_render_pass(); } } } /// An object that records commands into a command buffer where each command must /// be a call to execute a secondary command buffer. pub struct RenderPassSecondaryEncoder<'a, B: Backend>(pub(crate) Option<&'a mut B::CommandBuffer>) where B::CommandBuffer: 'a; impl<'a, B: Backend> RenderPassSecondaryEncoder<'a, B> { /// Wraps the given `CommandBuffer` in a `RenderPassSecondaryEncoder`, /// starting a new render pass where the actual commands are contained in /// secondary command buffers. pub fn new<C, T, S: Shot>( cmd_buffer: &'a mut CommandBuffer<B, C, S, Primary>, render_pass: &B::RenderPass, frame_buffer: &B::Framebuffer, render_area: pso::Rect, clear_values: T, ) -> Self where C: Supports<Graphics>, T: IntoIterator, T::Item: Borrow<ClearValue>, { cmd_buffer.raw.begin_render_pass( render_pass, frame_buffer, render_area, clear_values, SubpassContents::SecondaryBuffers ); RenderPassSecondaryEncoder(Some(cmd_buffer.raw)) } /// Executes the given commands as a secondary command buffer. pub fn execute_commands<I>(&mut self, submits: I) where I: IntoIterator, I::Item: Submittable<'a, B, Subpass, Secondary>, { let submits = submits.into_iter().collect::<Vec<_>>(); self.0.as_mut().unwrap().execute_commands(submits.into_iter().map(|submit| unsafe { submit.into_buffer() })); } /// Starts a new subpass with inline commands. 
pub fn next_subpass_inline(mut self) -> RenderPassInlineEncoder<'a, B, Primary> { let buffer = self.0.take().unwrap(); buffer.next_subpass(SubpassContents::Inline); RenderPassInlineEncoder(Some(RenderSubpassCommon(buffer)), PhantomData) } /// Starts a new subpass with secondary command buffers. pub fn next_subpass_secondary(mut self) -> Self { self.0.as_mut().unwrap().next_subpass(SubpassContents::SecondaryBuffers); self } } impl<'a, B: Backend> Drop for RenderPassSecondaryEncoder<'a, B> { fn drop(&mut self) { if let Some(ref mut b) = self.0 { b.end_render_pass(); } } } /// Capability used only for subpass command buffers' Submits. pub enum Subpass { } /// A secondary command buffer recorded entirely within a subpass. pub struct SubpassCommandBuffer<'a, B: Backend, S: Shot>(pub(crate) RenderSubpassCommon<'a, B>, pub(crate) PhantomData<S>); impl<'a, B: Backend, S: Shot> SubpassCommandBuffer<'a, B, S> { /// Wraps the given `CommandBuffer` in a `SubpassCommandBuffer`, starting /// to record a new subpass. pub unsafe fn new(raw: &mut B::CommandBuffer) -> SubpassCommandBuffer<B, S> { SubpassCommandBuffer(RenderSubpassCommon(raw), PhantomData) } /// Finish recording commands to the command buffer. /// /// The command buffer will be consumed and can't be modified further. /// The command pool must be reset to able to re-record commands. pub fn finish(self) -> Submit<B, Subpass, S, Secondary> { Submit::new((self.0).0.clone()) } } impl<'a, B: Backend, S: Shot> Deref for SubpassCommandBuffer<'a, B, S> { type Target = RenderSubpassCommon<'a, B>; fn deref(&self) -> &RenderSubpassCommon<'a, B> { &self.0 } } impl<'a, B: Backend, S: Shot> DerefMut for SubpassCommandBuffer<'a, B, S> { fn deref_mut(&mut self) -> &mut RenderSubpassCommon<'a, B> { &mut self.0 } } impl<'a, B: Backend, S: Shot> Drop for SubpassCommandBuffer<'a, B, S> { fn drop(&mut self)
}
{ (self.0).0.finish(); }
main.go
// Copyright 2015 The go-wtc Authors // This file is part of go-wtc. // // go-wtc is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // go-wtc is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with go-wtc. If not, see <http://www.gnu.org/licenses/>. // rlpdump is a pretty-printer for RLP data. package main import ( "bytes" "encoding/hex" "flag" "fmt" "io" "os" "strings" "github.com/wtc/go-wtc/rlp" ) var ( hexMode = flag.String("hex", "", "dump given hex data") noASCII = flag.Bool("noascii", false, "don't print ASCII strings readably") single = flag.Bool("single", false, "print only the first element, discard the rest") ) func init() { flag.Usage = func() { fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "[-noascii] [-hex <data>] [filename]") flag.PrintDefaults() fmt.Fprintln(os.Stderr, ` Dumps RLP data from the given file in readable form. If the filename is omitted, data is read from stdin.`) } } func main() { flag.Parse() var r io.Reader switch { case *hexMode != "": data, err := hex.DecodeString(*hexMode) if err != nil { die(err) } r = bytes.NewReader(data) case flag.NArg() == 0: r = os.Stdin case flag.NArg() == 1: fd, err := os.Open(flag.Arg(0)) if err != nil { die(err) } defer fd.Close() r = fd default: fmt.Fprintln(os.Stderr, "Error: too many arguments") flag.Usage() os.Exit(2) } s := rlp.NewStream(r, 0) for { if err := dump(s, 0); err != nil { if err != io.EOF { die(err) } break } fmt.Println() if *single { break } } } func dump(s *rlp.Stream, depth int) error { kind, size, err := s.Kind() if err != nil { return err } switch kind {
case rlp.Byte, rlp.String: str, err := s.Bytes() if err != nil { return err } if len(str) == 0 || !*noASCII && isASCII(str) { fmt.Printf("%s%q", ws(depth), str) } else { fmt.Printf("%s%x", ws(depth), str) } case rlp.List: s.List() defer s.ListEnd() if size == 0 { fmt.Print(ws(depth) + "[]") } else { fmt.Println(ws(depth) + "[") for i := 0; ; i++ { if i > 0 { fmt.Print(",\n") } if err := dump(s, depth+1); err == rlp.EOL { break } else if err != nil { return err } } fmt.Print(ws(depth) + "]") } } return nil } func isASCII(b []byte) bool { for _, c := range b { if c < 32 || c > 126 { return false } } return true } func ws(n int) string { return strings.Repeat(" ", n) } func die(args ...interface{}) { fmt.Fprintln(os.Stderr, args...) os.Exit(1) }
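As a point of comparison for the dumper's output, RLP can also be decoded with the Python pyrlp package; it returns nested lists of byte strings rather than pretty-printing them (a sketch; the example bytes are the standard encoding of the two-item list ["cat", "dog"]):

import rlp  # pip install rlp (pyrlp)

data = bytes.fromhex("c88363617483646f67")
print(rlp.decode(data))  # [b'cat', b'dog']; rlpdump would render the same data as a ["cat", "dog"] list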
weather__openweathermap__pyowm.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'ipetrash'
import pyowm owm = pyowm.OWM(API_KEY) observation = owm.weather_at_place(place) w = observation.get_weather() temperature = w.get_temperature('celsius')['temp'] status = w.get_status() print('Temperature: {} °C'.format(temperature)) print('Sky: {}'.format(status))
API_KEY = '87c7712a9b72646a269102230858837b' place = 'Магнитогорск' # pip install pyowm
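This snippet uses the pyowm 2.x call style (weather_at_place on the OWM object, get_weather/get_temperature/get_status accessors). If the installed pyowm is 3.x, the equivalent calls moved behind a weather manager; roughly, and assuming I am reading the 3.x API correctly:

import pyowm

owm = pyowm.OWM(API_KEY)
mgr = owm.weather_manager()
observation = mgr.weather_at_place(place)
w = observation.weather
print('Temperature: {} °C'.format(w.temperature('celsius')['temp']))
print('Sky: {}'.format(w.status))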
main.py
#!/usr/bin/env python3 # -*- encoding: utf-8 -*- ''' @File : main.py @Author : guoliang.wgl @version : 1.0 @Description: smart_fan example - a smart, temperature-controlled mini fan board.json - hardware resource configuration file ''' from fan import Fan from aht21b import AHT21B from driver import PWM, I2C import time import network # Wi-Fi interface module used by network.WLAN below from aliyunIoT import Device # the aliyunIoT component connects the device to the Alibaba Cloud IoT platform import json # flag indicating whether the IoT platform connection is up iot_connected = False wlan = None # device triple (credentials issued by the IoT platform) productKey = "your product key" deviceName = "your device name" deviceSecret = "your device secret" # IoT device instance device = None # Wi-Fi SSID and password settings wifiSsid = "enter your router name" wifiPassword = "enter your router password" # temperature thresholds (°C) for fan gears 1-3 gear1_temp = 22 gear2_temp = 27 gear3_temp = 32 FLAG_CUR_TEMP = "cur_temp" FLAG_GEAR1 = "gear1" FLAG_GEAR2 = "gear2" FLAG_GEAR3 = "gear3" cur_gear = 0 # wait for Wi-Fi to connect to the router def get_wifi_status(): global wlan wifi_connected = False wlan.active(True) # activate the interface wlan.scan() # scan for access points #print("start to connect ", wifiSsid) # connect to the specified router (SSID: wifiSsid, password: wifiPassword) wlan.connect(wifiSsid, wifiPassword) while True: wifi_connected = wlan.isconnected() # check whether Wi-Fi is connected to the router if wifi_connected: # leave the while loop once Wi-Fi is connected break else: time.sleep(0.5) print("wifi_connected:", wifi_connected) ifconfig = wlan.ifconfig() # get the interface's IP/netmask/gateway/DNS addresses print(ifconfig) time.sleep(0.5) # callback invoked when the connection to the IoT platform succeeds def on_connect(data): global iot_connected iot_connected = True # props event handler (called when the cloud pushes properties down to the device) def on_props(request): global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp try: props = eval(request['params']) if FLAG_GEAR1 in props.keys(): gear1_temp = props[FLAG_GEAR1] print('on_props: name is {},value is {}'.format( FLAG_GEAR1, gear1_temp)) elif FLAG_GEAR2 in props.keys(): gear2_temp = props[FLAG_GEAR2] print('on_props: name is {},value is {}'.format( FLAG_GEAR2, gear2_temp)) elif FLAG_GEAR3 in props.keys(): gear3_temp = props[FLAG_GEAR3] print('on_props: name is {},value is {}'.format( FLAG_GEAR3, gear3_temp)) post_default_value() except Exception as e: print(e) def post_props(data): global device if isinstance(data, dict): data = {'params': json.dumps(data)} ret = device.postProps(data) return ret def connect_lk(productKey, deviceName, deviceSecret): global device, iot_connected key_info = { 'region': 'cn-shanghai', 'productKey': productKey, 'deviceName': deviceName, 'deviceSecret': deviceSecret, 'keepaliveSec': 60 } # hand the device triple to the aliyunIoT component device = Device() # register the connection callback: on_connect is called once the platform connection succeeds device.on(Device.ON_CONNECT, on_connect) # register the property-control callback: on_props is called when the platform sends a property-set message device.on(Device.ON_PROPS, on_props) # start connecting to the Alibaba Cloud IoT platform device.connect(key_info) # wait until the device is connected to the IoT platform while True: if iot_connected: print('connected to the IoT platform') break else: print('sleep for 1 s') time.sleep(1) time.sleep(2) def post_default_value(): global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp value = {FLAG_GEAR1: gear1_temp} post_props(value) value = {FLAG_GEAR2: gear2_temp} post_props(value) value = {FLAG_GEAR3: gear3_temp} post_props(value) def upload_temp(temp): value = {FLAG_CUR_TEMP: temp} post_props(value) if __name__ == '__main__': wlan = network.WLAN(network.STA_IF) # create the WLAN object # replace with the product and device information issued by the IoT platform # global productKey, deviceName, deviceSecret ,on_request, on_play get_wifi_status() connect_lk(productKey, deviceName, deviceSecret) post_default_value() # initialize the PWM that drives the fan pwmObj = PWM() pwmObj.open("fan") fan = Fan(pwmObj) fan.control(0) # initialize the temperature sensor i2c = I2C() i2c.open('aht21b') aht = AHT21B(i2c) while True: temp = aht.getTemperature() print('cur temp is {}'.format(temp)) upload_temp(temp) if temp <= gear1_temp and cur_gear != 0: 
cur_gear = 0 fan.control(cur_gear) print('fan change to gear {}'.format(cur_gear)) elif temp > gear1_temp and temp <= gear2_temp and cur_gear != 1: cur_gear = 1 fan.control(cur_gear) print('fan change to gear {}'.format(cur_gear)) elif temp > gear2_temp and temp <= gear3_temp and cur_gear != 2: cur_gear = 2 fan.control(cur_gear) print('fan change to gear {}'.format(cur_gear)) elif temp > gear3_temp and cur_gear != 3: cur_gear = 3 fan.control(cur_gear) print('fan change to gear {}'.format(cur_gear))
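The gear selection in the main loop is a simple threshold ladder guarded by cur_gear so the fan is only re-driven on a change. Factoring just the ladder into a helper makes the mapping easy to check in isolation (a sketch using the same default thresholds):

def temp_to_gear(temp, gear1=22, gear2=27, gear3=32):
    # map a temperature reading onto fan gears 0-3 with the same boundaries as the loop above
    if temp <= gear1:
        return 0
    if temp <= gear2:
        return 1
    if temp <= gear3:
        return 2
    return 3

assert temp_to_gear(20) == 0
assert temp_to_gear(25) == 1
assert temp_to_gear(30) == 2
assert temp_to_gear(35) == 3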