Column            Type              Min      Max
id                int32             0        167k
repo              stringlengths     5        54
path              stringlengths     4        155
func_name         stringlengths     1        118
original_string   stringlengths     52       85.5k
language          stringclasses     1 value
code              stringlengths     52       85.5k
code_tokens       sequencelengths   21       1.41k
docstring         stringlengths     6        2.61k
docstring_tokens  sequencelengths   3        215
sha               stringlengths     40       40
url               stringlengths     85       252
161,600
keybase/client
go/stellar/remote/remote.go
Post
func Post(mctx libkb.MetaContext, clearBundle stellar1.Bundle) (err error) { defer mctx.TraceTimed("Stellar.Post", func() error { return err })() err = clearBundle.CheckInvariants() if err != nil { return err } pukGen, pukSeed, err := getLatestPuk(mctx.Ctx(), mctx.G()) if err != nil { return err } boxed, err := bundle.BoxAndEncode(&clearBundle, pukGen, pukSeed) if err != nil { return err } payload := make(libkb.JSONPayload) section := make(libkb.JSONPayload) section["encrypted_parent"] = boxed.EncParentB64 section["visible_parent"] = boxed.VisParentB64 section["version_parent"] = boxed.FormatVersionParent section["account_bundles"] = boxed.AcctBundles payload["stellar"] = section _, err = mctx.G().API.PostJSON(mctx, libkb.APIArg{ Endpoint: "stellar/acctbundle", SessionType: libkb.APISessionTypeREQUIRED, JSONPayload: payload, }) return err }
go
func Post(mctx libkb.MetaContext, clearBundle stellar1.Bundle) (err error) { defer mctx.TraceTimed("Stellar.Post", func() error { return err })() err = clearBundle.CheckInvariants() if err != nil { return err } pukGen, pukSeed, err := getLatestPuk(mctx.Ctx(), mctx.G()) if err != nil { return err } boxed, err := bundle.BoxAndEncode(&clearBundle, pukGen, pukSeed) if err != nil { return err } payload := make(libkb.JSONPayload) section := make(libkb.JSONPayload) section["encrypted_parent"] = boxed.EncParentB64 section["visible_parent"] = boxed.VisParentB64 section["version_parent"] = boxed.FormatVersionParent section["account_bundles"] = boxed.AcctBundles payload["stellar"] = section _, err = mctx.G().API.PostJSON(mctx, libkb.APIArg{ Endpoint: "stellar/acctbundle", SessionType: libkb.APISessionTypeREQUIRED, JSONPayload: payload, }) return err }
[ "func", "Post", "(", "mctx", "libkb", ".", "MetaContext", ",", "clearBundle", "stellar1", ".", "Bundle", ")", "(", "err", "error", ")", "{", "defer", "mctx", ".", "TraceTimed", "(", "\"", "\"", ",", "func", "(", ")", "error", "{", "return", "err", "}", ")", "(", ")", "\n\n", "err", "=", "clearBundle", ".", "CheckInvariants", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "pukGen", ",", "pukSeed", ",", "err", ":=", "getLatestPuk", "(", "mctx", ".", "Ctx", "(", ")", ",", "mctx", ".", "G", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "boxed", ",", "err", ":=", "bundle", ".", "BoxAndEncode", "(", "&", "clearBundle", ",", "pukGen", ",", "pukSeed", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "payload", ":=", "make", "(", "libkb", ".", "JSONPayload", ")", "\n", "section", ":=", "make", "(", "libkb", ".", "JSONPayload", ")", "\n", "section", "[", "\"", "\"", "]", "=", "boxed", ".", "EncParentB64", "\n", "section", "[", "\"", "\"", "]", "=", "boxed", ".", "VisParentB64", "\n", "section", "[", "\"", "\"", "]", "=", "boxed", ".", "FormatVersionParent", "\n", "section", "[", "\"", "\"", "]", "=", "boxed", ".", "AcctBundles", "\n", "payload", "[", "\"", "\"", "]", "=", "section", "\n", "_", ",", "err", "=", "mctx", ".", "G", "(", ")", ".", "API", ".", "PostJSON", "(", "mctx", ",", "libkb", ".", "APIArg", "{", "Endpoint", ":", "\"", "\"", ",", "SessionType", ":", "libkb", ".", "APISessionTypeREQUIRED", ",", "JSONPayload", ":", "payload", ",", "}", ")", "\n", "return", "err", "\n", "}" ]
// Post a bundle to the server.
[ "Post", "a", "bundle", "to", "the", "server", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/remote/remote.go#L146-L175
161,601
keybase/client
go/stellar/remote/remote.go
FetchAccountBundle
func FetchAccountBundle(mctx libkb.MetaContext, accountID stellar1.AccountID) (bundle *stellar1.Bundle, err error) { defer mctx.TraceTimed("Stellar.FetchAccountBundle", func() error { return err })() bundle, _, _, _, err = fetchBundleForAccount(mctx, &accountID) return bundle, err }
go
func FetchAccountBundle(mctx libkb.MetaContext, accountID stellar1.AccountID) (bundle *stellar1.Bundle, err error) { defer mctx.TraceTimed("Stellar.FetchAccountBundle", func() error { return err })() bundle, _, _, _, err = fetchBundleForAccount(mctx, &accountID) return bundle, err }
[ "func", "FetchAccountBundle", "(", "mctx", "libkb", ".", "MetaContext", ",", "accountID", "stellar1", ".", "AccountID", ")", "(", "bundle", "*", "stellar1", ".", "Bundle", ",", "err", "error", ")", "{", "defer", "mctx", ".", "TraceTimed", "(", "\"", "\"", ",", "func", "(", ")", "error", "{", "return", "err", "}", ")", "(", ")", "\n\n", "bundle", ",", "_", ",", "_", ",", "_", ",", "err", "=", "fetchBundleForAccount", "(", "mctx", ",", "&", "accountID", ")", "\n", "return", "bundle", ",", "err", "\n", "}" ]
// FetchAccountBundle gets a bundle from the server with all of the accounts in it, but it will only have the secrets for the specified accountID. This method will bubble up an error if it's called by a Desktop device for an account that is mobile only. If you don't need the secrets, use FetchSecretlessBundle instead.
[ "FetchAccountBundle", "gets", "a", "bundle", "from", "the", "server", "with", "all", "of", "the", "accounts", "in", "it", "but", "it", "will", "only", "have", "the", "secrets", "for", "the", "specified", "accountID", ".", "This", "method", "will", "bubble", "up", "an", "error", "if", "it", "s", "called", "by", "a", "Desktop", "device", "for", "an", "account", "that", "is", "mobile", "only", ".", "If", "you", "don", "t", "need", "the", "secrets", "use", "FetchSecretlessBundle", "instead", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/remote/remote.go#L217-L222
161,602
keybase/client
go/stellar/remote/remote.go
FetchBundleWithGens
func FetchBundleWithGens(mctx libkb.MetaContext) (b *stellar1.Bundle, pukGen keybase1.PerUserKeyGeneration, accountGens bundle.AccountPukGens, err error) { defer mctx.TraceTimed("Stellar.FetchBundleWithGens", func() error { return err })() b, _, pukGen, _, err = fetchBundleForAccount(mctx, nil) // this bundle no account secrets if err != nil { return nil, 0, bundle.AccountPukGens{}, err } accountGens = make(bundle.AccountPukGens) newAccBundles := make(map[stellar1.AccountID]stellar1.AccountBundle) for _, acct := range b.Accounts { singleBundle, _, _, singleAccountGens, err := fetchBundleForAccount(mctx, &acct.AccountID) if err != nil { // expected errors include SCStellarDeviceNotMobile, SCStellarMobileOnlyPurgatory mctx.Debug("unable to pull secrets for account %v which is not necessarily a problem %v", acct.AccountID, err) continue } accBundle := singleBundle.AccountBundles[acct.AccountID] newAccBundles[acct.AccountID] = accBundle accountGens[acct.AccountID] = singleAccountGens[acct.AccountID] } b.AccountBundles = newAccBundles err = b.CheckInvariants() if err != nil { return nil, 0, bundle.AccountPukGens{}, err } return b, pukGen, accountGens, nil }
go
func FetchBundleWithGens(mctx libkb.MetaContext) (b *stellar1.Bundle, pukGen keybase1.PerUserKeyGeneration, accountGens bundle.AccountPukGens, err error) { defer mctx.TraceTimed("Stellar.FetchBundleWithGens", func() error { return err })() b, _, pukGen, _, err = fetchBundleForAccount(mctx, nil) // this bundle no account secrets if err != nil { return nil, 0, bundle.AccountPukGens{}, err } accountGens = make(bundle.AccountPukGens) newAccBundles := make(map[stellar1.AccountID]stellar1.AccountBundle) for _, acct := range b.Accounts { singleBundle, _, _, singleAccountGens, err := fetchBundleForAccount(mctx, &acct.AccountID) if err != nil { // expected errors include SCStellarDeviceNotMobile, SCStellarMobileOnlyPurgatory mctx.Debug("unable to pull secrets for account %v which is not necessarily a problem %v", acct.AccountID, err) continue } accBundle := singleBundle.AccountBundles[acct.AccountID] newAccBundles[acct.AccountID] = accBundle accountGens[acct.AccountID] = singleAccountGens[acct.AccountID] } b.AccountBundles = newAccBundles err = b.CheckInvariants() if err != nil { return nil, 0, bundle.AccountPukGens{}, err } return b, pukGen, accountGens, nil }
[ "func", "FetchBundleWithGens", "(", "mctx", "libkb", ".", "MetaContext", ")", "(", "b", "*", "stellar1", ".", "Bundle", ",", "pukGen", "keybase1", ".", "PerUserKeyGeneration", ",", "accountGens", "bundle", ".", "AccountPukGens", ",", "err", "error", ")", "{", "defer", "mctx", ".", "TraceTimed", "(", "\"", "\"", ",", "func", "(", ")", "error", "{", "return", "err", "}", ")", "(", ")", "\n\n", "b", ",", "_", ",", "pukGen", ",", "_", ",", "err", "=", "fetchBundleForAccount", "(", "mctx", ",", "nil", ")", "// this bundle no account secrets", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "0", ",", "bundle", ".", "AccountPukGens", "{", "}", ",", "err", "\n", "}", "\n", "accountGens", "=", "make", "(", "bundle", ".", "AccountPukGens", ")", "\n", "newAccBundles", ":=", "make", "(", "map", "[", "stellar1", ".", "AccountID", "]", "stellar1", ".", "AccountBundle", ")", "\n", "for", "_", ",", "acct", ":=", "range", "b", ".", "Accounts", "{", "singleBundle", ",", "_", ",", "_", ",", "singleAccountGens", ",", "err", ":=", "fetchBundleForAccount", "(", "mctx", ",", "&", "acct", ".", "AccountID", ")", "\n", "if", "err", "!=", "nil", "{", "// expected errors include SCStellarDeviceNotMobile, SCStellarMobileOnlyPurgatory", "mctx", ".", "Debug", "(", "\"", "\"", ",", "acct", ".", "AccountID", ",", "err", ")", "\n", "continue", "\n", "}", "\n", "accBundle", ":=", "singleBundle", ".", "AccountBundles", "[", "acct", ".", "AccountID", "]", "\n", "newAccBundles", "[", "acct", ".", "AccountID", "]", "=", "accBundle", "\n", "accountGens", "[", "acct", ".", "AccountID", "]", "=", "singleAccountGens", "[", "acct", ".", "AccountID", "]", "\n", "}", "\n", "b", ".", "AccountBundles", "=", "newAccBundles", "\n", "err", "=", "b", ".", "CheckInvariants", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "0", ",", "bundle", ".", "AccountPukGens", "{", "}", ",", "err", "\n", "}", "\n\n", "return", "b", ",", "pukGen", ",", "accountGens", ",", "nil", "\n", "}" ]
// FetchBundleWithGens gets a bundle with all of the secrets in it to which this device has access, i.e. if there are no mobile-only accounts, then this bundle will have all of the secrets. Also returned is a map of accountID->pukGen. Entries are only in the map for accounts with secrets in the bundle. Inaccessible accounts will be in the visible part of the parent bundle but not in the AccountBundle secrets nor in the AccountPukGens map. FetchBundleWithGens is only for very specific usecases. FetchAccountBundle and FetchSecretlessBundle are the preferred ways to pull a bundle.
[ "FetchBundleWithGens", "gets", "a", "bundle", "with", "all", "of", "the", "secrets", "in", "it", "to", "which", "this", "device", "has", "access", "i", ".", "e", ".", "if", "there", "are", "no", "mobile", "-", "only", "accounts", "then", "this", "bundle", "will", "have", "all", "of", "the", "secrets", ".", "Also", "returned", "is", "a", "map", "of", "accountID", "-", ">", "pukGen", ".", "Entries", "are", "only", "in", "the", "map", "for", "accounts", "with", "secrets", "in", "the", "bundle", ".", "Inaccessible", "accounts", "will", "be", "in", "the", "visible", "part", "of", "the", "parent", "bundle", "but", "not", "in", "the", "AccountBundle", "secrets", "nor", "in", "the", "AccountPukGens", "map", ".", "FetchBundleWithGens", "is", "only", "for", "very", "specific", "usecases", ".", "FetchAccountBundle", "and", "FetchSecretlessBundle", "are", "the", "preferred", "ways", "to", "pull", "a", "bundle", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/remote/remote.go#L231-L258
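The two fetch variants above differ mainly in which secrets come back: FetchAccountBundle returns secrets for a single account, while FetchBundleWithGens returns secrets for every account this device can access plus per-account puk generations. The following is a minimal, hypothetical caller-side sketch, not part of any dataset row; the import paths and the example function name are assumptions, and only the signatures and fields shown in the rows above are relied on.

package example

import (
	"github.com/keybase/client/go/libkb"             // assumed import path
	"github.com/keybase/client/go/protocol/stellar1" // assumed import path
	"github.com/keybase/client/go/stellar/remote"    // assumed import path
)

// inspectBundles contrasts the two fetch calls documented above.
func inspectBundles(mctx libkb.MetaContext, acctID stellar1.AccountID) error {
	// Secrets for acctID only; errors on desktop if that account is mobile-only.
	b, err := remote.FetchAccountBundle(mctx, acctID)
	if err != nil {
		return err
	}
	mctx.Debug("fetched %d accounts; secrets only for %v", len(b.Accounts), acctID)

	// Secrets for every account this device can access, plus accountID->pukGen.
	full, pukGen, gens, err := remote.FetchBundleWithGens(mctx)
	if err != nil {
		return err
	}
	mctx.Debug("parent puk generation %v covers %d account secrets", pukGen, len(gens))
	_ = full
	return nil
}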
161,603
keybase/client
go/stellar/remote/remote.go
SetAccountMobileOnly
func SetAccountMobileOnly(ctx context.Context, g *libkb.GlobalContext, accountID stellar1.AccountID) error { mctx := libkb.NewMetaContext(ctx, g) b, err := FetchAccountBundle(mctx, accountID) if err != nil { return err } err = bundle.MakeMobileOnly(b, accountID) if err == bundle.ErrNoChangeNecessary { g.Log.CDebugf(ctx, "SetAccountMobileOnly account %s is already mobile-only", accountID) return nil } if err != nil { return err } nextBundle := bundle.AdvanceAccounts(*b, []stellar1.AccountID{accountID}) if err := Post(mctx, nextBundle); err != nil { mctx.Debug("SetAccountMobileOnly Post error: %s", err) return err } return nil }
go
func SetAccountMobileOnly(ctx context.Context, g *libkb.GlobalContext, accountID stellar1.AccountID) error { mctx := libkb.NewMetaContext(ctx, g) b, err := FetchAccountBundle(mctx, accountID) if err != nil { return err } err = bundle.MakeMobileOnly(b, accountID) if err == bundle.ErrNoChangeNecessary { g.Log.CDebugf(ctx, "SetAccountMobileOnly account %s is already mobile-only", accountID) return nil } if err != nil { return err } nextBundle := bundle.AdvanceAccounts(*b, []stellar1.AccountID{accountID}) if err := Post(mctx, nextBundle); err != nil { mctx.Debug("SetAccountMobileOnly Post error: %s", err) return err } return nil }
[ "func", "SetAccountMobileOnly", "(", "ctx", "context", ".", "Context", ",", "g", "*", "libkb", ".", "GlobalContext", ",", "accountID", "stellar1", ".", "AccountID", ")", "error", "{", "mctx", ":=", "libkb", ".", "NewMetaContext", "(", "ctx", ",", "g", ")", "\n", "b", ",", "err", ":=", "FetchAccountBundle", "(", "mctx", ",", "accountID", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "err", "=", "bundle", ".", "MakeMobileOnly", "(", "b", ",", "accountID", ")", "\n", "if", "err", "==", "bundle", ".", "ErrNoChangeNecessary", "{", "g", ".", "Log", ".", "CDebugf", "(", "ctx", ",", "\"", "\"", ",", "accountID", ")", "\n", "return", "nil", "\n", "}", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "nextBundle", ":=", "bundle", ".", "AdvanceAccounts", "(", "*", "b", ",", "[", "]", "stellar1", ".", "AccountID", "{", "accountID", "}", ")", "\n", "if", "err", ":=", "Post", "(", "mctx", ",", "nextBundle", ")", ";", "err", "!=", "nil", "{", "mctx", ".", "Debug", "(", "\"", "\"", ",", "err", ")", "\n", "return", "err", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// SetAccountMobileOnly will fetch the account bundle and flip the mobile-only switch, then send the new account bundle revision to the server.
[ "SetAccountMobileOnly", "will", "fetch", "the", "account", "bundle", "and", "flip", "the", "mobile", "-", "only", "switch", "then", "send", "the", "new", "account", "bundle", "revision", "to", "the", "server", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/remote/remote.go#L804-L825
161,604
keybase/client
go/kbfs/kbfstool/main.go
realMain
func realMain() (exitStatus int) { kbCtx := env.NewContext() kbfsParams := libkbfs.AddFlags(flag.CommandLine, kbCtx) flag.Parse() if *version { fmt.Printf("%s\n", libkbfs.VersionString()) return 0 } if len(flag.Args()) < 1 { fmt.Print(getUsageString(kbCtx)) return 1 } log := logger.New("") tempDir, err := ioutil.TempDir(kbCtx.GetDataDir(), "kbfstool") if err != nil { panic(err.Error()) } defer func() { rmErr := os.RemoveAll(tempDir) if rmErr != nil { fmt.Fprintf(os.Stderr, "Error cleaning storage dir %s: %+v\n", tempDir, rmErr) } }() // Turn these off, and use a temp dir for the storage root, to not // interfere with a running kbfs daemon. kbfsParams.EnableJournal = false kbfsParams.DiskCacheMode = libkbfs.DiskCacheModeOff kbfsParams.Mode = libkbfs.InitSingleOpString kbfsParams.StorageRoot = tempDir ctx := context.Background() config, err := libkbfs.Init(ctx, kbCtx, *kbfsParams, nil, nil, log) if err != nil { printError("kbfs", err) return 1 } defer libkbfs.Shutdown() // TODO: Make the logging level WARNING instead of INFO, or // figure out some other way to log the full folder-branch // name for kbfsfuse but not for kbfs. cmd := flag.Arg(0) args := flag.Args()[1:] switch cmd { case "stat": return stat(ctx, config, args) case "ls": return ls(ctx, config, args) case "mkdir": return mkdir(ctx, config, args) case "read": return read(ctx, config, args) case "write": return write(ctx, config, args) case "md": return mdMain(ctx, config, args) case "git": return gitMain(ctx, config, args) default: printError("kbfs", fmt.Errorf("unknown command %q", cmd)) return 1 } }
go
func realMain() (exitStatus int) { kbCtx := env.NewContext() kbfsParams := libkbfs.AddFlags(flag.CommandLine, kbCtx) flag.Parse() if *version { fmt.Printf("%s\n", libkbfs.VersionString()) return 0 } if len(flag.Args()) < 1 { fmt.Print(getUsageString(kbCtx)) return 1 } log := logger.New("") tempDir, err := ioutil.TempDir(kbCtx.GetDataDir(), "kbfstool") if err != nil { panic(err.Error()) } defer func() { rmErr := os.RemoveAll(tempDir) if rmErr != nil { fmt.Fprintf(os.Stderr, "Error cleaning storage dir %s: %+v\n", tempDir, rmErr) } }() // Turn these off, and use a temp dir for the storage root, to not // interfere with a running kbfs daemon. kbfsParams.EnableJournal = false kbfsParams.DiskCacheMode = libkbfs.DiskCacheModeOff kbfsParams.Mode = libkbfs.InitSingleOpString kbfsParams.StorageRoot = tempDir ctx := context.Background() config, err := libkbfs.Init(ctx, kbCtx, *kbfsParams, nil, nil, log) if err != nil { printError("kbfs", err) return 1 } defer libkbfs.Shutdown() // TODO: Make the logging level WARNING instead of INFO, or // figure out some other way to log the full folder-branch // name for kbfsfuse but not for kbfs. cmd := flag.Arg(0) args := flag.Args()[1:] switch cmd { case "stat": return stat(ctx, config, args) case "ls": return ls(ctx, config, args) case "mkdir": return mkdir(ctx, config, args) case "read": return read(ctx, config, args) case "write": return write(ctx, config, args) case "md": return mdMain(ctx, config, args) case "git": return gitMain(ctx, config, args) default: printError("kbfs", fmt.Errorf("unknown command %q", cmd)) return 1 } }
[ "func", "realMain", "(", ")", "(", "exitStatus", "int", ")", "{", "kbCtx", ":=", "env", ".", "NewContext", "(", ")", "\n", "kbfsParams", ":=", "libkbfs", ".", "AddFlags", "(", "flag", ".", "CommandLine", ",", "kbCtx", ")", "\n\n", "flag", ".", "Parse", "(", ")", "\n\n", "if", "*", "version", "{", "fmt", ".", "Printf", "(", "\"", "\\n", "\"", ",", "libkbfs", ".", "VersionString", "(", ")", ")", "\n", "return", "0", "\n", "}", "\n\n", "if", "len", "(", "flag", ".", "Args", "(", ")", ")", "<", "1", "{", "fmt", ".", "Print", "(", "getUsageString", "(", "kbCtx", ")", ")", "\n", "return", "1", "\n", "}", "\n\n", "log", ":=", "logger", ".", "New", "(", "\"", "\"", ")", "\n\n", "tempDir", ",", "err", ":=", "ioutil", ".", "TempDir", "(", "kbCtx", ".", "GetDataDir", "(", ")", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "panic", "(", "err", ".", "Error", "(", ")", ")", "\n", "}", "\n", "defer", "func", "(", ")", "{", "rmErr", ":=", "os", ".", "RemoveAll", "(", "tempDir", ")", "\n", "if", "rmErr", "!=", "nil", "{", "fmt", ".", "Fprintf", "(", "os", ".", "Stderr", ",", "\"", "\\n", "\"", ",", "tempDir", ",", "rmErr", ")", "\n", "}", "\n", "}", "(", ")", "\n\n", "// Turn these off, and use a temp dir for the storage root, to not", "// interfere with a running kbfs daemon.", "kbfsParams", ".", "EnableJournal", "=", "false", "\n", "kbfsParams", ".", "DiskCacheMode", "=", "libkbfs", ".", "DiskCacheModeOff", "\n", "kbfsParams", ".", "Mode", "=", "libkbfs", ".", "InitSingleOpString", "\n", "kbfsParams", ".", "StorageRoot", "=", "tempDir", "\n\n", "ctx", ":=", "context", ".", "Background", "(", ")", "\n", "config", ",", "err", ":=", "libkbfs", ".", "Init", "(", "ctx", ",", "kbCtx", ",", "*", "kbfsParams", ",", "nil", ",", "nil", ",", "log", ")", "\n", "if", "err", "!=", "nil", "{", "printError", "(", "\"", "\"", ",", "err", ")", "\n", "return", "1", "\n", "}", "\n\n", "defer", "libkbfs", ".", "Shutdown", "(", ")", "\n\n", "// TODO: Make the logging level WARNING instead of INFO, or", "// figure out some other way to log the full folder-branch", "// name for kbfsfuse but not for kbfs.", "cmd", ":=", "flag", ".", "Arg", "(", "0", ")", "\n", "args", ":=", "flag", ".", "Args", "(", ")", "[", "1", ":", "]", "\n\n", "switch", "cmd", "{", "case", "\"", "\"", ":", "return", "stat", "(", "ctx", ",", "config", ",", "args", ")", "\n", "case", "\"", "\"", ":", "return", "ls", "(", "ctx", ",", "config", ",", "args", ")", "\n", "case", "\"", "\"", ":", "return", "mkdir", "(", "ctx", ",", "config", ",", "args", ")", "\n", "case", "\"", "\"", ":", "return", "read", "(", "ctx", ",", "config", ",", "args", ")", "\n", "case", "\"", "\"", ":", "return", "write", "(", "ctx", ",", "config", ",", "args", ")", "\n", "case", "\"", "\"", ":", "return", "mdMain", "(", "ctx", ",", "config", ",", "args", ")", "\n", "case", "\"", "\"", ":", "return", "gitMain", "(", "ctx", ",", "config", ",", "args", ")", "\n", "default", ":", "printError", "(", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "cmd", ")", ")", "\n", "return", "1", "\n", "}", "\n", "}" ]
// Define this so deferred functions get executed before exit.
[ "Define", "this", "so", "deferred", "functions", "get", "executed", "before", "exit", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfstool/main.go#L58-L130
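The docstring above explains why the work lives in realMain rather than main: os.Exit skips deferred functions, so main stays a thin wrapper and all cleanup (temp-dir removal, Shutdown) runs before the process exits. A generic sketch of that pattern follows; the cleanup function here is illustrative and not quoted from kbfstool's main.go.

package main

import "os"

// realMain does the actual work and returns an exit status; anything it
// defers runs before the return, unlike a defer in main followed by os.Exit.
func realMain() int {
	defer cleanup()
	return 0
}

func cleanup() {}

// main only converts the status into a process exit code.
func main() {
	os.Exit(realMain())
}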
161,605
keybase/client
go/kbfs/data/path.go
TailName
func (p Path) TailName() string { return p.Path[len(p.Path)-1].Name }
go
func (p Path) TailName() string { return p.Path[len(p.Path)-1].Name }
[ "func", "(", "p", "Path", ")", "TailName", "(", ")", "string", "{", "return", "p", ".", "Path", "[", "len", "(", "p", ".", "Path", ")", "-", "1", "]", ".", "Name", "\n", "}" ]
// TailName returns the name of the final node in the Path. Must be called with a valid path.
[ "TailName", "returns", "the", "name", "of", "the", "final", "node", "in", "the", "Path", ".", "Must", "be", "called", "with", "a", "valid", "path", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L61-L63
161,606
keybase/client
go/kbfs/data/path.go
TailPointer
func (p Path) TailPointer() BlockPointer { return p.Path[len(p.Path)-1].BlockPointer }
go
func (p Path) TailPointer() BlockPointer { return p.Path[len(p.Path)-1].BlockPointer }
[ "func", "(", "p", "Path", ")", "TailPointer", "(", ")", "BlockPointer", "{", "return", "p", ".", "Path", "[", "len", "(", "p", ".", "Path", ")", "-", "1", "]", ".", "BlockPointer", "\n", "}" ]
// TailPointer returns the BlockPointer of the final node in the Path. Must be called with a valid path.
[ "TailPointer", "returns", "the", "BlockPointer", "of", "the", "final", "node", "in", "the", "Path", ".", "Must", "be", "called", "with", "a", "valid", "path", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L67-L69
161,607
keybase/client
go/kbfs/data/path.go
TailRef
func (p Path) TailRef() BlockRef { return p.Path[len(p.Path)-1].Ref() }
go
func (p Path) TailRef() BlockRef { return p.Path[len(p.Path)-1].Ref() }
[ "func", "(", "p", "Path", ")", "TailRef", "(", ")", "BlockRef", "{", "return", "p", ".", "Path", "[", "len", "(", "p", ".", "Path", ")", "-", "1", "]", ".", "Ref", "(", ")", "\n", "}" ]
// TailRef returns the BlockRef of the final node in the Path. Must be called with a valid path.
[ "TailRef", "returns", "the", "BlockRef", "of", "the", "final", "node", "in", "the", "Path", ".", "Must", "be", "called", "with", "a", "valid", "path", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L73-L75
161,608
keybase/client
go/kbfs/data/path.go
DebugString
func (p Path) DebugString() string { debugNames := make([]string, 0, len(p.Path)) for _, node := range p.Path { debugNames = append(debugNames, node.DebugString()) } return fmt.Sprintf("%s:%s", p.FolderBranch, strings.Join(debugNames, "/")) }
go
func (p Path) DebugString() string { debugNames := make([]string, 0, len(p.Path)) for _, node := range p.Path { debugNames = append(debugNames, node.DebugString()) } return fmt.Sprintf("%s:%s", p.FolderBranch, strings.Join(debugNames, "/")) }
[ "func", "(", "p", "Path", ")", "DebugString", "(", ")", "string", "{", "debugNames", ":=", "make", "(", "[", "]", "string", ",", "0", ",", "len", "(", "p", ".", "Path", ")", ")", "\n", "for", "_", ",", "node", ":=", "range", "p", ".", "Path", "{", "debugNames", "=", "append", "(", "debugNames", ",", "node", ".", "DebugString", "(", ")", ")", "\n", "}", "\n", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "p", ".", "FolderBranch", ",", "strings", ".", "Join", "(", "debugNames", ",", "\"", "\"", ")", ")", "\n", "}" ]
// DebugString returns a string representation of the path with all branch and pointer information.
[ "DebugString", "returns", "a", "string", "representation", "of", "the", "path", "with", "all", "branch", "and", "pointer", "information", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L79-L85
161,609
keybase/client
go/kbfs/data/path.go
String
func (p Path) String() string { names := make([]string, 0, len(p.Path)) for _, node := range p.Path { names = append(names, node.Name) } return strings.Join(names, "/") }
go
func (p Path) String() string { names := make([]string, 0, len(p.Path)) for _, node := range p.Path { names = append(names, node.Name) } return strings.Join(names, "/") }
[ "func", "(", "p", "Path", ")", "String", "(", ")", "string", "{", "names", ":=", "make", "(", "[", "]", "string", ",", "0", ",", "len", "(", "p", ".", "Path", ")", ")", "\n", "for", "_", ",", "node", ":=", "range", "p", ".", "Path", "{", "names", "=", "append", "(", "names", ",", "node", ".", "Name", ")", "\n", "}", "\n", "return", "strings", ".", "Join", "(", "names", ",", "\"", "\"", ")", "\n", "}" ]
// String implements the fmt.Stringer interface for Path.
[ "String", "implements", "the", "fmt", ".", "Stringer", "interface", "for", "Path", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L88-L94
161,610
keybase/client
go/kbfs/data/path.go
ParentPath
func (p Path) ParentPath() *Path { return &Path{p.FolderBranch, p.Path[:len(p.Path)-1]} }
go
func (p Path) ParentPath() *Path { return &Path{p.FolderBranch, p.Path[:len(p.Path)-1]} }
[ "func", "(", "p", "Path", ")", "ParentPath", "(", ")", "*", "Path", "{", "return", "&", "Path", "{", "p", ".", "FolderBranch", ",", "p", ".", "Path", "[", ":", "len", "(", "p", ".", "Path", ")", "-", "1", "]", "}", "\n", "}" ]
// ParentPath returns a new Path representing the parent subdirectory of this Path. Must be called with a valid path. Should not be called with a path of only a single node, as that would produce an invalid path.
[ "ParentPath", "returns", "a", "new", "Path", "representing", "the", "parent", "subdirectory", "of", "this", "Path", ".", "Must", "be", "called", "with", "a", "valid", "path", ".", "Should", "not", "be", "called", "with", "a", "path", "of", "only", "a", "single", "node", "as", "that", "would", "produce", "an", "invalid", "path", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L109-L111
161,611
keybase/client
go/kbfs/data/path.go
ChildPath
func (p Path) ChildPath(name string, ptr BlockPointer) Path { child := Path{ FolderBranch: p.FolderBranch, Path: make([]PathNode, len(p.Path), len(p.Path)+1), } copy(child.Path, p.Path) child.Path = append(child.Path, PathNode{Name: name, BlockPointer: ptr}) return child }
go
func (p Path) ChildPath(name string, ptr BlockPointer) Path { child := Path{ FolderBranch: p.FolderBranch, Path: make([]PathNode, len(p.Path), len(p.Path)+1), } copy(child.Path, p.Path) child.Path = append(child.Path, PathNode{Name: name, BlockPointer: ptr}) return child }
[ "func", "(", "p", "Path", ")", "ChildPath", "(", "name", "string", ",", "ptr", "BlockPointer", ")", "Path", "{", "child", ":=", "Path", "{", "FolderBranch", ":", "p", ".", "FolderBranch", ",", "Path", ":", "make", "(", "[", "]", "PathNode", ",", "len", "(", "p", ".", "Path", ")", ",", "len", "(", "p", ".", "Path", ")", "+", "1", ")", ",", "}", "\n", "copy", "(", "child", ".", "Path", ",", "p", ".", "Path", ")", "\n", "child", ".", "Path", "=", "append", "(", "child", ".", "Path", ",", "PathNode", "{", "Name", ":", "name", ",", "BlockPointer", ":", "ptr", "}", ")", "\n", "return", "child", "\n", "}" ]
// ChildPath returns a new Path with the addition of a new entry with the given name and BlockPointer.
[ "ChildPath", "returns", "a", "new", "Path", "with", "the", "addition", "of", "a", "new", "entry", "with", "the", "given", "name", "and", "BlockPointer", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L115-L123
161,612
keybase/client
go/kbfs/data/path.go
ChildPathNoPtr
func (p Path) ChildPathNoPtr(name string) Path { return p.ChildPath(name, BlockPointer{}) }
go
func (p Path) ChildPathNoPtr(name string) Path { return p.ChildPath(name, BlockPointer{}) }
[ "func", "(", "p", "Path", ")", "ChildPathNoPtr", "(", "name", "string", ")", "Path", "{", "return", "p", ".", "ChildPath", "(", "name", ",", "BlockPointer", "{", "}", ")", "\n", "}" ]
// ChildPathNoPtr returns a new Path with the addition of a new entry with the given name. That final PathNode will have no BlockPointer.
[ "ChildPathNoPtr", "returns", "a", "new", "Path", "with", "the", "addition", "of", "a", "new", "entry", "with", "the", "given", "name", ".", "That", "final", "PathNode", "will", "have", "no", "BlockPointer", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L127-L129
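Taken together, the Path helpers in the rows above build and inspect chains of nodes. The sketch below is hypothetical: the import path and the describePath function are assumptions, a Path value for an existing folder is presumed to be in hand, and the BlockPointer is zeroed purely for illustration.

package example

import (
	"fmt"

	"github.com/keybase/client/go/kbfs/data" // assumed import path
)

// describePath exercises the helpers documented in the rows above.
func describePath(p data.Path) {
	// ChildPath copies the node slice before appending, so p itself is not modified.
	child := p.ChildPath("notes.txt", data.BlockPointer{})
	fmt.Println(child.TailName())    // name of the final node: "notes.txt"
	fmt.Println(child.String())      // names joined with "/"
	fmt.Println(child.DebugString()) // folder-branch plus per-node pointer info

	_ = p.ChildPathNoPtr("tmp") // same, but the final node carries a zero BlockPointer

	// ParentPath drops the final node; per the docstring, only call it on paths
	// with more than one node.
	fmt.Println(child.ParentPath().TailName())
}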
161,613
keybase/client
go/kbfs/data/path.go
DebugString
func (n PathNode) DebugString() string { return fmt.Sprintf("%s(ptr=%s)", n.Name, n.BlockPointer) }
go
func (n PathNode) DebugString() string { return fmt.Sprintf("%s(ptr=%s)", n.Name, n.BlockPointer) }
[ "func", "(", "n", "PathNode", ")", "DebugString", "(", ")", "string", "{", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "n", ".", "Name", ",", "n", ".", "BlockPointer", ")", "\n", "}" ]
// DebugString returns a string representation of the node with all pointer information.
[ "DebugString", "returns", "a", "string", "representation", "of", "the", "node", "with", "all", "pointer", "information", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/path.go#L145-L147
161,614
keybase/client
go/libkb/locktab.go
AcquireOnName
func (t *LockTable) AcquireOnName(ctx context.Context, g VLogContext, s string) (ret *NamedLock) { g.GetVDebugLog().CLogf(ctx, VLog1, "+ LockTable.Lock(%s)", s) t.Lock() t.init() if ret = t.locks[s]; ret == nil { ret = &NamedLock{lctx: g, refs: 0, name: s, parent: t} t.locks[s] = ret } ret.incref() t.Unlock() ret.Lock() g.GetVDebugLog().CLogf(ctx, VLog1, "- LockTable.Lock(%s)", s) return ret }
go
func (t *LockTable) AcquireOnName(ctx context.Context, g VLogContext, s string) (ret *NamedLock) { g.GetVDebugLog().CLogf(ctx, VLog1, "+ LockTable.Lock(%s)", s) t.Lock() t.init() if ret = t.locks[s]; ret == nil { ret = &NamedLock{lctx: g, refs: 0, name: s, parent: t} t.locks[s] = ret } ret.incref() t.Unlock() ret.Lock() g.GetVDebugLog().CLogf(ctx, VLog1, "- LockTable.Lock(%s)", s) return ret }
[ "func", "(", "t", "*", "LockTable", ")", "AcquireOnName", "(", "ctx", "context", ".", "Context", ",", "g", "VLogContext", ",", "s", "string", ")", "(", "ret", "*", "NamedLock", ")", "{", "g", ".", "GetVDebugLog", "(", ")", ".", "CLogf", "(", "ctx", ",", "VLog1", ",", "\"", "\"", ",", "s", ")", "\n", "t", ".", "Lock", "(", ")", "\n", "t", ".", "init", "(", ")", "\n", "if", "ret", "=", "t", ".", "locks", "[", "s", "]", ";", "ret", "==", "nil", "{", "ret", "=", "&", "NamedLock", "{", "lctx", ":", "g", ",", "refs", ":", "0", ",", "name", ":", "s", ",", "parent", ":", "t", "}", "\n", "t", ".", "locks", "[", "s", "]", "=", "ret", "\n", "}", "\n", "ret", ".", "incref", "(", ")", "\n", "t", ".", "Unlock", "(", ")", "\n", "ret", ".", "Lock", "(", ")", "\n", "g", ".", "GetVDebugLog", "(", ")", ".", "CLogf", "(", "ctx", ",", "VLog1", ",", "\"", "\"", ",", "s", ")", "\n", "return", "ret", "\n", "}" ]
// AcquireOnName acquires s's lock. Never gives up.
[ "AcquireOnName", "acquires", "s", "s", "lock", ".", "Never", "gives", "up", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/locktab.go#L55-L68
161,615
keybase/client
go/kbfs/libfuse/file.go
Attr
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) { ctx = f.folder.fs.config.MaybeStartTrace( ctx, "File.Attr", f.node.GetBasename()) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Attr") defer func() { err = f.folder.processError(ctx, libkbfs.ReadMode, err) }() if reqID, ok := ctx.Value(CtxIDKey).(string); ok { if ei := f.eiCache.getAndDestroyIfMatches(reqID); ei != nil { return f.fillAttrWithMode(ctx, ei, a) } } // This fits in situation 1 as described in libkbfs/delayed_cancellation.go err = libcontext.EnableDelayedCancellationWithGracePeriod( ctx, f.folder.fs.config.DelayedCancellationGracePeriod()) if err != nil { return err } return f.attr(ctx, a) }
go
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) { ctx = f.folder.fs.config.MaybeStartTrace( ctx, "File.Attr", f.node.GetBasename()) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Attr") defer func() { err = f.folder.processError(ctx, libkbfs.ReadMode, err) }() if reqID, ok := ctx.Value(CtxIDKey).(string); ok { if ei := f.eiCache.getAndDestroyIfMatches(reqID); ei != nil { return f.fillAttrWithMode(ctx, ei, a) } } // This fits in situation 1 as described in libkbfs/delayed_cancellation.go err = libcontext.EnableDelayedCancellationWithGracePeriod( ctx, f.folder.fs.config.DelayedCancellationGracePeriod()) if err != nil { return err } return f.attr(ctx, a) }
[ "func", "(", "f", "*", "File", ")", "Attr", "(", "ctx", "context", ".", "Context", ",", "a", "*", "fuse", ".", "Attr", ")", "(", "err", "error", ")", "{", "ctx", "=", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeStartTrace", "(", "ctx", ",", "\"", "\"", ",", "f", ".", "node", ".", "GetBasename", "(", ")", ")", "\n", "defer", "func", "(", ")", "{", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeFinishTrace", "(", "ctx", ",", "err", ")", "}", "(", ")", "\n\n", "f", ".", "folder", ".", "fs", ".", "vlog", ".", "CLogf", "(", "ctx", ",", "libkb", ".", "VLog1", ",", "\"", "\"", ")", "\n", "defer", "func", "(", ")", "{", "err", "=", "f", ".", "folder", ".", "processError", "(", "ctx", ",", "libkbfs", ".", "ReadMode", ",", "err", ")", "}", "(", ")", "\n\n", "if", "reqID", ",", "ok", ":=", "ctx", ".", "Value", "(", "CtxIDKey", ")", ".", "(", "string", ")", ";", "ok", "{", "if", "ei", ":=", "f", ".", "eiCache", ".", "getAndDestroyIfMatches", "(", "reqID", ")", ";", "ei", "!=", "nil", "{", "return", "f", ".", "fillAttrWithMode", "(", "ctx", ",", "ei", ",", "a", ")", "\n", "}", "\n", "}", "\n\n", "// This fits in situation 1 as described in libkbfs/delayed_cancellation.go", "err", "=", "libcontext", ".", "EnableDelayedCancellationWithGracePeriod", "(", "ctx", ",", "f", ".", "folder", ".", "fs", ".", "config", ".", "DelayedCancellationGracePeriod", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "return", "f", ".", "attr", "(", "ctx", ",", "a", ")", "\n", "}" ]
// Attr implements the fs.Node interface for File.
[ "Attr", "implements", "the", "fs", ".", "Node", "interface", "for", "File", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/file.go#L89-L111
161,616
keybase/client
go/kbfs/libfuse/file.go
Fsync
func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) { ctx = f.folder.fs.config.MaybeStartTrace( ctx, "File.Fsync", f.node.GetBasename()) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Fsync") defer func() { err = f.folder.processError(ctx, libkbfs.WriteMode, err) }() // This fits in situation 1 as described in libkbfs/delayed_cancellation.go err = libcontext.EnableDelayedCancellationWithGracePeriod( ctx, f.folder.fs.config.DelayedCancellationGracePeriod()) if err != nil { return err } return f.sync(ctx) }
go
func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) { ctx = f.folder.fs.config.MaybeStartTrace( ctx, "File.Fsync", f.node.GetBasename()) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Fsync") defer func() { err = f.folder.processError(ctx, libkbfs.WriteMode, err) }() // This fits in situation 1 as described in libkbfs/delayed_cancellation.go err = libcontext.EnableDelayedCancellationWithGracePeriod( ctx, f.folder.fs.config.DelayedCancellationGracePeriod()) if err != nil { return err } return f.sync(ctx) }
[ "func", "(", "f", "*", "File", ")", "Fsync", "(", "ctx", "context", ".", "Context", ",", "req", "*", "fuse", ".", "FsyncRequest", ")", "(", "err", "error", ")", "{", "ctx", "=", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeStartTrace", "(", "ctx", ",", "\"", "\"", ",", "f", ".", "node", ".", "GetBasename", "(", ")", ")", "\n", "defer", "func", "(", ")", "{", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeFinishTrace", "(", "ctx", ",", "err", ")", "}", "(", ")", "\n\n", "f", ".", "folder", ".", "fs", ".", "vlog", ".", "CLogf", "(", "ctx", ",", "libkb", ".", "VLog1", ",", "\"", "\"", ")", "\n", "defer", "func", "(", ")", "{", "err", "=", "f", ".", "folder", ".", "processError", "(", "ctx", ",", "libkbfs", ".", "WriteMode", ",", "err", ")", "}", "(", ")", "\n\n", "// This fits in situation 1 as described in libkbfs/delayed_cancellation.go", "err", "=", "libcontext", ".", "EnableDelayedCancellationWithGracePeriod", "(", "ctx", ",", "f", ".", "folder", ".", "fs", ".", "config", ".", "DelayedCancellationGracePeriod", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "return", "f", ".", "sync", "(", "ctx", ")", "\n", "}" ]
// Fsync implements the fs.NodeFsyncer interface for File.
[ "Fsync", "implements", "the", "fs", ".", "NodeFsyncer", "interface", "for", "File", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/file.go#L194-L210
161,617
keybase/client
go/kbfs/libfuse/file.go
Read
func (f *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) { off := req.Offset sz := cap(resp.Data) ctx = f.folder.fs.config.MaybeStartTrace(ctx, "File.Read", fmt.Sprintf("%s off=%d sz=%d", f.node.GetBasename(), off, sz)) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Read off=%d sz=%d", off, sz) defer func() { err = f.folder.processError(ctx, libkbfs.ReadMode, err) }() n, err := f.folder.fs.config.KBFSOps().Read( ctx, f.node, resp.Data[:sz], off) if err != nil { return err } resp.Data = resp.Data[:n] return nil }
go
func (f *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) { off := req.Offset sz := cap(resp.Data) ctx = f.folder.fs.config.MaybeStartTrace(ctx, "File.Read", fmt.Sprintf("%s off=%d sz=%d", f.node.GetBasename(), off, sz)) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Read off=%d sz=%d", off, sz) defer func() { err = f.folder.processError(ctx, libkbfs.ReadMode, err) }() n, err := f.folder.fs.config.KBFSOps().Read( ctx, f.node, resp.Data[:sz], off) if err != nil { return err } resp.Data = resp.Data[:n] return nil }
[ "func", "(", "f", "*", "File", ")", "Read", "(", "ctx", "context", ".", "Context", ",", "req", "*", "fuse", ".", "ReadRequest", ",", "resp", "*", "fuse", ".", "ReadResponse", ")", "(", "err", "error", ")", "{", "off", ":=", "req", ".", "Offset", "\n", "sz", ":=", "cap", "(", "resp", ".", "Data", ")", "\n", "ctx", "=", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeStartTrace", "(", "ctx", ",", "\"", "\"", ",", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "f", ".", "node", ".", "GetBasename", "(", ")", ",", "off", ",", "sz", ")", ")", "\n", "defer", "func", "(", ")", "{", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeFinishTrace", "(", "ctx", ",", "err", ")", "}", "(", ")", "\n\n", "f", ".", "folder", ".", "fs", ".", "vlog", ".", "CLogf", "(", "ctx", ",", "libkb", ".", "VLog1", ",", "\"", "\"", ",", "off", ",", "sz", ")", "\n", "defer", "func", "(", ")", "{", "err", "=", "f", ".", "folder", ".", "processError", "(", "ctx", ",", "libkbfs", ".", "ReadMode", ",", "err", ")", "}", "(", ")", "\n\n", "n", ",", "err", ":=", "f", ".", "folder", ".", "fs", ".", "config", ".", "KBFSOps", "(", ")", ".", "Read", "(", "ctx", ",", "f", ".", "node", ",", "resp", ".", "Data", "[", ":", "sz", "]", ",", "off", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "resp", ".", "Data", "=", "resp", ".", "Data", "[", ":", "n", "]", "\n", "return", "nil", "\n", "}" ]
// Read implements the fs.HandleReader interface for File.
[ "Read", "implements", "the", "fs", ".", "HandleReader", "interface", "for", "File", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/file.go#L217-L235
161,618
keybase/client
go/kbfs/libfuse/file.go
Write
func (f *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) { sz := len(req.Data) ctx = f.folder.fs.config.MaybeStartTrace(ctx, "File.Write", fmt.Sprintf("%s sz=%d", f.node.GetBasename(), sz)) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Write sz=%d ", sz) defer func() { err = f.folder.processError(ctx, libkbfs.WriteMode, err) }() f.eiCache.destroy() if err := f.folder.fs.config.KBFSOps().Write( ctx, f.node, req.Data, req.Offset); err != nil { return err } resp.Size = len(req.Data) return nil }
go
func (f *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) { sz := len(req.Data) ctx = f.folder.fs.config.MaybeStartTrace(ctx, "File.Write", fmt.Sprintf("%s sz=%d", f.node.GetBasename(), sz)) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File Write sz=%d ", sz) defer func() { err = f.folder.processError(ctx, libkbfs.WriteMode, err) }() f.eiCache.destroy() if err := f.folder.fs.config.KBFSOps().Write( ctx, f.node, req.Data, req.Offset); err != nil { return err } resp.Size = len(req.Data) return nil }
[ "func", "(", "f", "*", "File", ")", "Write", "(", "ctx", "context", ".", "Context", ",", "req", "*", "fuse", ".", "WriteRequest", ",", "resp", "*", "fuse", ".", "WriteResponse", ")", "(", "err", "error", ")", "{", "sz", ":=", "len", "(", "req", ".", "Data", ")", "\n", "ctx", "=", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeStartTrace", "(", "ctx", ",", "\"", "\"", ",", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "f", ".", "node", ".", "GetBasename", "(", ")", ",", "sz", ")", ")", "\n", "defer", "func", "(", ")", "{", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeFinishTrace", "(", "ctx", ",", "err", ")", "}", "(", ")", "\n\n", "f", ".", "folder", ".", "fs", ".", "vlog", ".", "CLogf", "(", "ctx", ",", "libkb", ".", "VLog1", ",", "\"", "\"", ",", "sz", ")", "\n", "defer", "func", "(", ")", "{", "err", "=", "f", ".", "folder", ".", "processError", "(", "ctx", ",", "libkbfs", ".", "WriteMode", ",", "err", ")", "}", "(", ")", "\n\n", "f", ".", "eiCache", ".", "destroy", "(", ")", "\n", "if", "err", ":=", "f", ".", "folder", ".", "fs", ".", "config", ".", "KBFSOps", "(", ")", ".", "Write", "(", "ctx", ",", "f", ".", "node", ",", "req", ".", "Data", ",", "req", ".", "Offset", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "resp", ".", "Size", "=", "len", "(", "req", ".", "Data", ")", "\n", "return", "nil", "\n", "}" ]
// Write implements the fs.HandleWriter interface for File.
[ "Write", "implements", "the", "fs", ".", "HandleWriter", "interface", "for", "File", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/file.go#L240-L257
161,619
keybase/client
go/kbfs/libfuse/file.go
Setattr
func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) { valid := req.Valid ctx = f.folder.fs.config.MaybeStartTrace(ctx, "File.SetAttr", fmt.Sprintf("%s %s", f.node.GetBasename(), valid)) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File SetAttr %s", valid) defer func() { err = f.folder.processError(ctx, libkbfs.WriteMode, err) }() f.eiCache.destroy() if valid.Size() { if err := f.folder.fs.config.KBFSOps().Truncate( ctx, f.node, req.Size); err != nil { return err } valid &^= fuse.SetattrSize } if valid.Mode() { // Unix has 3 exec bits, KBFS has one; we follow the user-exec bit. exec := req.Mode&0100 != 0 err := f.folder.fs.config.KBFSOps().SetEx( ctx, f.node, exec) if err != nil { return err } valid &^= fuse.SetattrMode } if valid.Mtime() { err := f.folder.fs.config.KBFSOps().SetMtime( ctx, f.node, &req.Mtime) if err != nil { return err } valid &^= fuse.SetattrMtime | fuse.SetattrMtimeNow } if valid.Uid() || valid.Gid() { // You can't set the UID/GID on KBFS files, but we don't want // to return ENOSYS because that causes scary warnings on some // programs like mv. Instead ignore it, print a debug // message, and advertise this behavior on the // "understand_kbfs" doc online. f.folder.fs.vlog.CLogf( ctx, libkb.VLog1, "Ignoring unsupported attempt to set "+ "the UID/GID on a file") valid &^= fuse.SetattrUid | fuse.SetattrGid } // KBFS has no concept of persistent atime; explicitly don't handle it valid &^= fuse.SetattrAtime | fuse.SetattrAtimeNow // things we don't need to explicitly handle valid &^= fuse.SetattrLockOwner | fuse.SetattrHandle // KBFS has no concept of chflags(2); explicitly ignore those valid &^= fuse.SetattrFlags if valid != 0 { // don't let an unhandled operation slip by without error f.folder.fs.log.CInfof(ctx, "Setattr did not handle %v", valid) return fuse.ENOSYS } return f.attr(ctx, &resp.Attr) }
go
func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) { valid := req.Valid ctx = f.folder.fs.config.MaybeStartTrace(ctx, "File.SetAttr", fmt.Sprintf("%s %s", f.node.GetBasename(), valid)) defer func() { f.folder.fs.config.MaybeFinishTrace(ctx, err) }() f.folder.fs.vlog.CLogf(ctx, libkb.VLog1, "File SetAttr %s", valid) defer func() { err = f.folder.processError(ctx, libkbfs.WriteMode, err) }() f.eiCache.destroy() if valid.Size() { if err := f.folder.fs.config.KBFSOps().Truncate( ctx, f.node, req.Size); err != nil { return err } valid &^= fuse.SetattrSize } if valid.Mode() { // Unix has 3 exec bits, KBFS has one; we follow the user-exec bit. exec := req.Mode&0100 != 0 err := f.folder.fs.config.KBFSOps().SetEx( ctx, f.node, exec) if err != nil { return err } valid &^= fuse.SetattrMode } if valid.Mtime() { err := f.folder.fs.config.KBFSOps().SetMtime( ctx, f.node, &req.Mtime) if err != nil { return err } valid &^= fuse.SetattrMtime | fuse.SetattrMtimeNow } if valid.Uid() || valid.Gid() { // You can't set the UID/GID on KBFS files, but we don't want // to return ENOSYS because that causes scary warnings on some // programs like mv. Instead ignore it, print a debug // message, and advertise this behavior on the // "understand_kbfs" doc online. f.folder.fs.vlog.CLogf( ctx, libkb.VLog1, "Ignoring unsupported attempt to set "+ "the UID/GID on a file") valid &^= fuse.SetattrUid | fuse.SetattrGid } // KBFS has no concept of persistent atime; explicitly don't handle it valid &^= fuse.SetattrAtime | fuse.SetattrAtimeNow // things we don't need to explicitly handle valid &^= fuse.SetattrLockOwner | fuse.SetattrHandle // KBFS has no concept of chflags(2); explicitly ignore those valid &^= fuse.SetattrFlags if valid != 0 { // don't let an unhandled operation slip by without error f.folder.fs.log.CInfof(ctx, "Setattr did not handle %v", valid) return fuse.ENOSYS } return f.attr(ctx, &resp.Attr) }
[ "func", "(", "f", "*", "File", ")", "Setattr", "(", "ctx", "context", ".", "Context", ",", "req", "*", "fuse", ".", "SetattrRequest", ",", "resp", "*", "fuse", ".", "SetattrResponse", ")", "(", "err", "error", ")", "{", "valid", ":=", "req", ".", "Valid", "\n", "ctx", "=", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeStartTrace", "(", "ctx", ",", "\"", "\"", ",", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "f", ".", "node", ".", "GetBasename", "(", ")", ",", "valid", ")", ")", "\n", "defer", "func", "(", ")", "{", "f", ".", "folder", ".", "fs", ".", "config", ".", "MaybeFinishTrace", "(", "ctx", ",", "err", ")", "}", "(", ")", "\n\n", "f", ".", "folder", ".", "fs", ".", "vlog", ".", "CLogf", "(", "ctx", ",", "libkb", ".", "VLog1", ",", "\"", "\"", ",", "valid", ")", "\n", "defer", "func", "(", ")", "{", "err", "=", "f", ".", "folder", ".", "processError", "(", "ctx", ",", "libkbfs", ".", "WriteMode", ",", "err", ")", "}", "(", ")", "\n\n", "f", ".", "eiCache", ".", "destroy", "(", ")", "\n\n", "if", "valid", ".", "Size", "(", ")", "{", "if", "err", ":=", "f", ".", "folder", ".", "fs", ".", "config", ".", "KBFSOps", "(", ")", ".", "Truncate", "(", "ctx", ",", "f", ".", "node", ",", "req", ".", "Size", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "valid", "&^=", "fuse", ".", "SetattrSize", "\n", "}", "\n\n", "if", "valid", ".", "Mode", "(", ")", "{", "// Unix has 3 exec bits, KBFS has one; we follow the user-exec bit.", "exec", ":=", "req", ".", "Mode", "&", "0100", "!=", "0", "\n", "err", ":=", "f", ".", "folder", ".", "fs", ".", "config", ".", "KBFSOps", "(", ")", ".", "SetEx", "(", "ctx", ",", "f", ".", "node", ",", "exec", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "valid", "&^=", "fuse", ".", "SetattrMode", "\n", "}", "\n\n", "if", "valid", ".", "Mtime", "(", ")", "{", "err", ":=", "f", ".", "folder", ".", "fs", ".", "config", ".", "KBFSOps", "(", ")", ".", "SetMtime", "(", "ctx", ",", "f", ".", "node", ",", "&", "req", ".", "Mtime", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "valid", "&^=", "fuse", ".", "SetattrMtime", "|", "fuse", ".", "SetattrMtimeNow", "\n", "}", "\n\n", "if", "valid", ".", "Uid", "(", ")", "||", "valid", ".", "Gid", "(", ")", "{", "// You can't set the UID/GID on KBFS files, but we don't want", "// to return ENOSYS because that causes scary warnings on some", "// programs like mv. 
Instead ignore it, print a debug", "// message, and advertise this behavior on the", "// \"understand_kbfs\" doc online.", "f", ".", "folder", ".", "fs", ".", "vlog", ".", "CLogf", "(", "ctx", ",", "libkb", ".", "VLog1", ",", "\"", "\"", "+", "\"", "\"", ")", "\n", "valid", "&^=", "fuse", ".", "SetattrUid", "|", "fuse", ".", "SetattrGid", "\n", "}", "\n\n", "// KBFS has no concept of persistent atime; explicitly don't handle it", "valid", "&^=", "fuse", ".", "SetattrAtime", "|", "fuse", ".", "SetattrAtimeNow", "\n\n", "// things we don't need to explicitly handle", "valid", "&^=", "fuse", ".", "SetattrLockOwner", "|", "fuse", ".", "SetattrHandle", "\n\n", "// KBFS has no concept of chflags(2); explicitly ignore those", "valid", "&^=", "fuse", ".", "SetattrFlags", "\n\n", "if", "valid", "!=", "0", "{", "// don't let an unhandled operation slip by without error", "f", ".", "folder", ".", "fs", ".", "log", ".", "CInfof", "(", "ctx", ",", "\"", "\"", ",", "valid", ")", "\n", "return", "fuse", ".", "ENOSYS", "\n", "}", "\n\n", "return", "f", ".", "attr", "(", "ctx", ",", "&", "resp", ".", "Attr", ")", "\n", "}" ]
// Setattr implements the fs.NodeSetattrer interface for File.
[ "Setattr", "implements", "the", "fs", ".", "NodeSetattrer", "interface", "for", "File", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/file.go#L262-L330
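The docstrings for Attr, Fsync, Read, Write, and Setattr each name the bazil.org/fuse/fs interface the method satisfies. Compile-time assertions are a common way to make such claims explicit; the sketch below is hypothetical, and the libfuse import path and package name for the example file are assumptions.

package example

import (
	"bazil.org/fuse/fs"

	"github.com/keybase/client/go/kbfs/libfuse" // assumed import path
)

// Compile-time checks matching the interfaces named in the docstrings above;
// each line fails to build if *libfuse.File stops satisfying that interface.
var (
	_ fs.Node          = (*libfuse.File)(nil)
	_ fs.NodeFsyncer   = (*libfuse.File)(nil)
	_ fs.HandleReader  = (*libfuse.File)(nil)
	_ fs.HandleWriter  = (*libfuse.File)(nil)
	_ fs.NodeSetattrer = (*libfuse.File)(nil)
)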
161,620
keybase/client
go/service/sigs.go
NewSigsHandler
func NewSigsHandler(xp rpc.Transporter, g *libkb.GlobalContext) *SigsHandler { return &SigsHandler{ BaseHandler: NewBaseHandler(g, xp), Contextified: libkb.NewContextified(g), } }
go
func NewSigsHandler(xp rpc.Transporter, g *libkb.GlobalContext) *SigsHandler { return &SigsHandler{ BaseHandler: NewBaseHandler(g, xp), Contextified: libkb.NewContextified(g), } }
[ "func", "NewSigsHandler", "(", "xp", "rpc", ".", "Transporter", ",", "g", "*", "libkb", ".", "GlobalContext", ")", "*", "SigsHandler", "{", "return", "&", "SigsHandler", "{", "BaseHandler", ":", "NewBaseHandler", "(", "g", ",", "xp", ")", ",", "Contextified", ":", "libkb", ".", "NewContextified", "(", "g", ")", ",", "}", "\n", "}" ]
// NewSigsHandler creates a SigsHandler for the xp transport.
[ "NewSigsHandler", "creates", "a", "SigsHandler", "for", "the", "xp", "transport", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/service/sigs.go#L21-L26
161,621
keybase/client
go/kbfs/libkbfs/disk_md_cache.go
Get
func (cache *DiskMDCacheLocal) Get( ctx context.Context, tlfID tlf.ID) ( buf []byte, ver kbfsmd.MetadataVer, timestamp time.Time, err error) { cache.lock.RLock() defer cache.lock.RUnlock() err = cache.checkCacheLocked(ctx, "MD(Get)") if err != nil { return nil, -1, time.Time{}, err } if _, ok := cache.tlfsCached[tlfID]; !ok { cache.missMeter.Mark(1) return nil, -1, time.Time{}, errors.WithStack(ldberrors.ErrNotFound) } md, err := cache.getMetadataLocked(tlfID, metered) if err != nil { return nil, -1, time.Time{}, err } return md.Buf, md.Ver, md.Time, nil }
go
func (cache *DiskMDCacheLocal) Get( ctx context.Context, tlfID tlf.ID) ( buf []byte, ver kbfsmd.MetadataVer, timestamp time.Time, err error) { cache.lock.RLock() defer cache.lock.RUnlock() err = cache.checkCacheLocked(ctx, "MD(Get)") if err != nil { return nil, -1, time.Time{}, err } if _, ok := cache.tlfsCached[tlfID]; !ok { cache.missMeter.Mark(1) return nil, -1, time.Time{}, errors.WithStack(ldberrors.ErrNotFound) } md, err := cache.getMetadataLocked(tlfID, metered) if err != nil { return nil, -1, time.Time{}, err } return md.Buf, md.Ver, md.Time, nil }
[ "func", "(", "cache", "*", "DiskMDCacheLocal", ")", "Get", "(", "ctx", "context", ".", "Context", ",", "tlfID", "tlf", ".", "ID", ")", "(", "buf", "[", "]", "byte", ",", "ver", "kbfsmd", ".", "MetadataVer", ",", "timestamp", "time", ".", "Time", ",", "err", "error", ")", "{", "cache", ".", "lock", ".", "RLock", "(", ")", "\n", "defer", "cache", ".", "lock", ".", "RUnlock", "(", ")", "\n", "err", "=", "cache", ".", "checkCacheLocked", "(", "ctx", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "-", "1", ",", "time", ".", "Time", "{", "}", ",", "err", "\n", "}", "\n\n", "if", "_", ",", "ok", ":=", "cache", ".", "tlfsCached", "[", "tlfID", "]", ";", "!", "ok", "{", "cache", ".", "missMeter", ".", "Mark", "(", "1", ")", "\n", "return", "nil", ",", "-", "1", ",", "time", ".", "Time", "{", "}", ",", "errors", ".", "WithStack", "(", "ldberrors", ".", "ErrNotFound", ")", "\n", "}", "\n\n", "md", ",", "err", ":=", "cache", ".", "getMetadataLocked", "(", "tlfID", ",", "metered", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "-", "1", ",", "time", ".", "Time", "{", "}", ",", "err", "\n", "}", "\n", "return", "md", ".", "Buf", ",", "md", ".", "Ver", ",", "md", ".", "Time", ",", "nil", "\n", "}" ]
// Get implements the DiskMDCache interface for DiskMDCacheLocal.
[ "Get", "implements", "the", "DiskMDCache", "interface", "for", "DiskMDCacheLocal", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_md_cache.go#L301-L321
161,622
keybase/client
go/kbfs/libkbfs/disk_md_cache.go
Stage
func (cache *DiskMDCacheLocal) Stage( ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision, buf []byte, ver kbfsmd.MetadataVer, timestamp time.Time) error { cache.lock.Lock() defer cache.lock.Unlock() err := cache.checkCacheLocked(ctx, "MD(Stage)") if err != nil { return err } if cachedRev, ok := cache.tlfsCached[tlfID]; ok && cachedRev >= rev { // Ignore stages for older revisions return nil } md := diskMDBlock{ Buf: buf, Ver: ver, Time: timestamp, Revision: rev, } cache.tlfsStaged[tlfID] = append(cache.tlfsStaged[tlfID], md) return nil }
go
func (cache *DiskMDCacheLocal) Stage( ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision, buf []byte, ver kbfsmd.MetadataVer, timestamp time.Time) error { cache.lock.Lock() defer cache.lock.Unlock() err := cache.checkCacheLocked(ctx, "MD(Stage)") if err != nil { return err } if cachedRev, ok := cache.tlfsCached[tlfID]; ok && cachedRev >= rev { // Ignore stages for older revisions return nil } md := diskMDBlock{ Buf: buf, Ver: ver, Time: timestamp, Revision: rev, } cache.tlfsStaged[tlfID] = append(cache.tlfsStaged[tlfID], md) return nil }
[ "func", "(", "cache", "*", "DiskMDCacheLocal", ")", "Stage", "(", "ctx", "context", ".", "Context", ",", "tlfID", "tlf", ".", "ID", ",", "rev", "kbfsmd", ".", "Revision", ",", "buf", "[", "]", "byte", ",", "ver", "kbfsmd", ".", "MetadataVer", ",", "timestamp", "time", ".", "Time", ")", "error", "{", "cache", ".", "lock", ".", "Lock", "(", ")", "\n", "defer", "cache", ".", "lock", ".", "Unlock", "(", ")", "\n", "err", ":=", "cache", ".", "checkCacheLocked", "(", "ctx", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "if", "cachedRev", ",", "ok", ":=", "cache", ".", "tlfsCached", "[", "tlfID", "]", ";", "ok", "&&", "cachedRev", ">=", "rev", "{", "// Ignore stages for older revisions", "return", "nil", "\n", "}", "\n\n", "md", ":=", "diskMDBlock", "{", "Buf", ":", "buf", ",", "Ver", ":", "ver", ",", "Time", ":", "timestamp", ",", "Revision", ":", "rev", ",", "}", "\n\n", "cache", ".", "tlfsStaged", "[", "tlfID", "]", "=", "append", "(", "cache", ".", "tlfsStaged", "[", "tlfID", "]", ",", "md", ")", "\n", "return", "nil", "\n", "}" ]
// Stage implements the DiskMDCache interface for DiskMDCacheLocal.
[ "Stage", "implements", "the", "DiskMDCache", "interface", "for", "DiskMDCacheLocal", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_md_cache.go#L324-L348
161,623
keybase/client
go/kbfs/libkbfs/disk_md_cache.go
Commit
func (cache *DiskMDCacheLocal) Commit( ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) error { cache.lock.Lock() defer cache.lock.Unlock() err := cache.checkCacheLocked(ctx, "MD(Commit)") if err != nil { return err } stagedMDs := cache.tlfsStaged[tlfID] if len(stagedMDs) == 0 { // Nothing to do. return nil } newStagedMDs := make([]diskMDBlock, 0, len(stagedMDs)-1) foundMD := false // The staged MDs list is unordered, so iterate through the whole // thing to find what should remain after commiting `rev`. for _, md := range stagedMDs { if md.Revision > rev { newStagedMDs = append(newStagedMDs, md) continue } else if md.Revision < rev { continue } else if foundMD { // Duplicate. continue } foundMD = true encodedMetadata, err := cache.config.Codec().Encode(&md) if err != nil { return err } err = cache.headsDb.PutWithMeter( tlfID.Bytes(), encodedMetadata, cache.putMeter) if err != nil { return err } } if !foundMD { // Nothing to do. return nil } cache.tlfsCached[tlfID] = rev if len(newStagedMDs) == 0 { delete(cache.tlfsStaged, tlfID) } else { cache.tlfsStaged[tlfID] = newStagedMDs } return nil }
go
func (cache *DiskMDCacheLocal) Commit( ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) error { cache.lock.Lock() defer cache.lock.Unlock() err := cache.checkCacheLocked(ctx, "MD(Commit)") if err != nil { return err } stagedMDs := cache.tlfsStaged[tlfID] if len(stagedMDs) == 0 { // Nothing to do. return nil } newStagedMDs := make([]diskMDBlock, 0, len(stagedMDs)-1) foundMD := false // The staged MDs list is unordered, so iterate through the whole // thing to find what should remain after commiting `rev`. for _, md := range stagedMDs { if md.Revision > rev { newStagedMDs = append(newStagedMDs, md) continue } else if md.Revision < rev { continue } else if foundMD { // Duplicate. continue } foundMD = true encodedMetadata, err := cache.config.Codec().Encode(&md) if err != nil { return err } err = cache.headsDb.PutWithMeter( tlfID.Bytes(), encodedMetadata, cache.putMeter) if err != nil { return err } } if !foundMD { // Nothing to do. return nil } cache.tlfsCached[tlfID] = rev if len(newStagedMDs) == 0 { delete(cache.tlfsStaged, tlfID) } else { cache.tlfsStaged[tlfID] = newStagedMDs } return nil }
[ "func", "(", "cache", "*", "DiskMDCacheLocal", ")", "Commit", "(", "ctx", "context", ".", "Context", ",", "tlfID", "tlf", ".", "ID", ",", "rev", "kbfsmd", ".", "Revision", ")", "error", "{", "cache", ".", "lock", ".", "Lock", "(", ")", "\n", "defer", "cache", ".", "lock", ".", "Unlock", "(", ")", "\n", "err", ":=", "cache", ".", "checkCacheLocked", "(", "ctx", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "stagedMDs", ":=", "cache", ".", "tlfsStaged", "[", "tlfID", "]", "\n", "if", "len", "(", "stagedMDs", ")", "==", "0", "{", "// Nothing to do.", "return", "nil", "\n", "}", "\n", "newStagedMDs", ":=", "make", "(", "[", "]", "diskMDBlock", ",", "0", ",", "len", "(", "stagedMDs", ")", "-", "1", ")", "\n", "foundMD", ":=", "false", "\n", "// The staged MDs list is unordered, so iterate through the whole", "// thing to find what should remain after commiting `rev`.", "for", "_", ",", "md", ":=", "range", "stagedMDs", "{", "if", "md", ".", "Revision", ">", "rev", "{", "newStagedMDs", "=", "append", "(", "newStagedMDs", ",", "md", ")", "\n", "continue", "\n", "}", "else", "if", "md", ".", "Revision", "<", "rev", "{", "continue", "\n", "}", "else", "if", "foundMD", "{", "// Duplicate.", "continue", "\n", "}", "\n", "foundMD", "=", "true", "\n\n", "encodedMetadata", ",", "err", ":=", "cache", ".", "config", ".", "Codec", "(", ")", ".", "Encode", "(", "&", "md", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "err", "=", "cache", ".", "headsDb", ".", "PutWithMeter", "(", "tlfID", ".", "Bytes", "(", ")", ",", "encodedMetadata", ",", "cache", ".", "putMeter", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "if", "!", "foundMD", "{", "// Nothing to do.", "return", "nil", "\n", "}", "\n\n", "cache", ".", "tlfsCached", "[", "tlfID", "]", "=", "rev", "\n", "if", "len", "(", "newStagedMDs", ")", "==", "0", "{", "delete", "(", "cache", ".", "tlfsStaged", ",", "tlfID", ")", "\n", "}", "else", "{", "cache", ".", "tlfsStaged", "[", "tlfID", "]", "=", "newStagedMDs", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// Commit implements the DiskMDCache interface for DiskMDCacheLocal.
[ "Commit", "implements", "the", "DiskMDCache", "interface", "for", "DiskMDCacheLocal", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_md_cache.go#L351-L405
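Stage and Commit in the records above form a two-phase write: metadata is parked in a staging area and only promoted to "cached" once its revision is committed. The following is a toy, standard-library-only model of that lifecycle; the mdCache type, field names, and revision numbers are invented for illustration and are not the KBFS types.

package main

import "fmt"

type mdCache struct {
	staged map[string][]int // TLF -> staged revisions
	cached map[string]int   // TLF -> committed revision
}

func newMDCache() *mdCache {
	return &mdCache{staged: map[string][]int{}, cached: map[string]int{}}
}

func (c *mdCache) stage(tlf string, rev int) {
	if cur, ok := c.cached[tlf]; ok && cur >= rev {
		return // ignore stages for revisions at or below the committed one
	}
	c.staged[tlf] = append(c.staged[tlf], rev)
}

// commit promotes rev to the cached map and keeps only strictly newer
// staged revisions, mirroring the filtering loop in Commit above.
func (c *mdCache) commit(tlf string, rev int) {
	var remaining []int
	found := false
	for _, r := range c.staged[tlf] {
		switch {
		case r > rev:
			remaining = append(remaining, r)
		case r == rev:
			found = true
		}
	}
	if !found {
		return // nothing staged at this revision
	}
	c.cached[tlf] = rev
	if len(remaining) == 0 {
		delete(c.staged, tlf)
	} else {
		c.staged[tlf] = remaining
	}
}

func main() {
	c := newMDCache()
	c.stage("tlf1", 7)
	c.stage("tlf1", 8)
	c.commit("tlf1", 7)
	fmt.Println(c.cached["tlf1"], c.staged["tlf1"]) // 7 [8]
}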
161,624
keybase/client
go/kbfs/libkbfs/disk_md_cache.go
Unstage
func (cache *DiskMDCacheLocal) Unstage( ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) error { cache.lock.Lock() defer cache.lock.Unlock() err := cache.checkCacheLocked(ctx, "MD(Unstage)") if err != nil { return err } // Just remove the first one matching `rev`. stagedMDs := cache.tlfsStaged[tlfID] for i, md := range stagedMDs { if md.Revision == rev { if len(stagedMDs) == 1 { delete(cache.tlfsStaged, tlfID) } else { cache.tlfsStaged[tlfID] = append( stagedMDs[:i], stagedMDs[i+1:]...) } return nil } } return nil }
go
func (cache *DiskMDCacheLocal) Unstage( ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) error { cache.lock.Lock() defer cache.lock.Unlock() err := cache.checkCacheLocked(ctx, "MD(Unstage)") if err != nil { return err } // Just remove the first one matching `rev`. stagedMDs := cache.tlfsStaged[tlfID] for i, md := range stagedMDs { if md.Revision == rev { if len(stagedMDs) == 1 { delete(cache.tlfsStaged, tlfID) } else { cache.tlfsStaged[tlfID] = append( stagedMDs[:i], stagedMDs[i+1:]...) } return nil } } return nil }
[ "func", "(", "cache", "*", "DiskMDCacheLocal", ")", "Unstage", "(", "ctx", "context", ".", "Context", ",", "tlfID", "tlf", ".", "ID", ",", "rev", "kbfsmd", ".", "Revision", ")", "error", "{", "cache", ".", "lock", ".", "Lock", "(", ")", "\n", "defer", "cache", ".", "lock", ".", "Unlock", "(", ")", "\n", "err", ":=", "cache", ".", "checkCacheLocked", "(", "ctx", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Just remove the first one matching `rev`.", "stagedMDs", ":=", "cache", ".", "tlfsStaged", "[", "tlfID", "]", "\n", "for", "i", ",", "md", ":=", "range", "stagedMDs", "{", "if", "md", ".", "Revision", "==", "rev", "{", "if", "len", "(", "stagedMDs", ")", "==", "1", "{", "delete", "(", "cache", ".", "tlfsStaged", ",", "tlfID", ")", "\n", "}", "else", "{", "cache", ".", "tlfsStaged", "[", "tlfID", "]", "=", "append", "(", "stagedMDs", "[", ":", "i", "]", ",", "stagedMDs", "[", "i", "+", "1", ":", "]", "...", ")", "\n", "}", "\n", "return", "nil", "\n", "}", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// Unstage implements the DiskMDCache interface for DiskMDCacheLocal.
[ "Unstage", "implements", "the", "DiskMDCache", "interface", "for", "DiskMDCacheLocal", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_md_cache.go#L408-L432
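Unstage above removes a slice element in place with the append(s[:i], s[i+1:]...) idiom. A minimal standalone sketch of that idiom follows; the stagedMD type and revision values are made up for illustration.

package main

import "fmt"

type stagedMD struct {
	Revision int
}

// removeFirst drops the first element whose Revision equals rev.
// append(s[:i], s[i+1:]...) reuses the backing array, so callers must
// not keep aliases to the original slice contents.
func removeFirst(mds []stagedMD, rev int) []stagedMD {
	for i, md := range mds {
		if md.Revision == rev {
			return append(mds[:i], mds[i+1:]...)
		}
	}
	return mds
}

func main() {
	staged := []stagedMD{{1}, {2}, {3}}
	staged = removeFirst(staged, 2)
	fmt.Println(staged) // [{1} {3}]
}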
161,625
keybase/client
go/kbfs/libkbfs/disk_md_cache.go
Status
func (cache *DiskMDCacheLocal) Status(_ context.Context) DiskMDCacheStatus { select { case <-cache.startedCh: case <-cache.startErrCh: return DiskMDCacheStatus{StartState: DiskMDCacheStartStateFailed} default: return DiskMDCacheStatus{StartState: DiskMDCacheStartStateStarting} } cache.lock.RLock() defer cache.lock.RUnlock() numStaged := uint64(0) for _, mds := range cache.tlfsStaged { numStaged += uint64(len(mds)) } return DiskMDCacheStatus{ StartState: DiskMDCacheStartStateStarted, NumMDs: uint64(len(cache.tlfsCached)), NumStaged: numStaged, Hits: rateMeterToStatus(cache.hitMeter), Misses: rateMeterToStatus(cache.missMeter), Puts: rateMeterToStatus(cache.putMeter), } }
go
func (cache *DiskMDCacheLocal) Status(_ context.Context) DiskMDCacheStatus { select { case <-cache.startedCh: case <-cache.startErrCh: return DiskMDCacheStatus{StartState: DiskMDCacheStartStateFailed} default: return DiskMDCacheStatus{StartState: DiskMDCacheStartStateStarting} } cache.lock.RLock() defer cache.lock.RUnlock() numStaged := uint64(0) for _, mds := range cache.tlfsStaged { numStaged += uint64(len(mds)) } return DiskMDCacheStatus{ StartState: DiskMDCacheStartStateStarted, NumMDs: uint64(len(cache.tlfsCached)), NumStaged: numStaged, Hits: rateMeterToStatus(cache.hitMeter), Misses: rateMeterToStatus(cache.missMeter), Puts: rateMeterToStatus(cache.putMeter), } }
[ "func", "(", "cache", "*", "DiskMDCacheLocal", ")", "Status", "(", "_", "context", ".", "Context", ")", "DiskMDCacheStatus", "{", "select", "{", "case", "<-", "cache", ".", "startedCh", ":", "case", "<-", "cache", ".", "startErrCh", ":", "return", "DiskMDCacheStatus", "{", "StartState", ":", "DiskMDCacheStartStateFailed", "}", "\n", "default", ":", "return", "DiskMDCacheStatus", "{", "StartState", ":", "DiskMDCacheStartStateStarting", "}", "\n", "}", "\n\n", "cache", ".", "lock", ".", "RLock", "(", ")", "\n", "defer", "cache", ".", "lock", ".", "RUnlock", "(", ")", "\n", "numStaged", ":=", "uint64", "(", "0", ")", "\n", "for", "_", ",", "mds", ":=", "range", "cache", ".", "tlfsStaged", "{", "numStaged", "+=", "uint64", "(", "len", "(", "mds", ")", ")", "\n", "}", "\n\n", "return", "DiskMDCacheStatus", "{", "StartState", ":", "DiskMDCacheStartStateStarted", ",", "NumMDs", ":", "uint64", "(", "len", "(", "cache", ".", "tlfsCached", ")", ")", ",", "NumStaged", ":", "numStaged", ",", "Hits", ":", "rateMeterToStatus", "(", "cache", ".", "hitMeter", ")", ",", "Misses", ":", "rateMeterToStatus", "(", "cache", ".", "missMeter", ")", ",", "Puts", ":", "rateMeterToStatus", "(", "cache", ".", "putMeter", ")", ",", "}", "\n", "}" ]
// Status implements the DiskMDCache interface for DiskMDCacheLocal.
[ "Status", "implements", "the", "DiskMDCache", "interface", "for", "DiskMDCacheLocal", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_md_cache.go#L435-L459
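Status above distinguishes "starting", "started", and "failed" by pairing channel receives with a default case, which makes the check non-blocking. A small sketch of that pattern with plain channels; the names and states are illustrative, not the KBFS ones.

package main

import "fmt"

func startState(startedCh, startErrCh <-chan struct{}) string {
	select {
	case <-startedCh:
		return "started"
	case <-startErrCh:
		return "failed"
	default:
		// Neither channel is closed yet, so startup is still in flight.
		return "starting"
	}
}

func main() {
	started := make(chan struct{})
	failed := make(chan struct{})
	fmt.Println(startState(started, failed)) // starting
	close(started)
	fmt.Println(startState(started, failed)) // started
}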
161,626
keybase/client
go/kbfs/libkbfs/disk_md_cache.go
Shutdown
func (cache *DiskMDCacheLocal) Shutdown(ctx context.Context) { // Wait for the cache to either finish starting or error. select { case <-cache.startedCh: case <-cache.startErrCh: return } cache.lock.Lock() defer cache.lock.Unlock() // shutdownCh has to be checked under lock, otherwise we can race. select { case <-cache.shutdownCh: cache.log.CWarningf(ctx, "Shutdown called more than once") return default: } close(cache.shutdownCh) if cache.headsDb == nil { return } cache.closer() cache.headsDb = nil cache.hitMeter.Shutdown() cache.missMeter.Shutdown() cache.putMeter.Shutdown() }
go
func (cache *DiskMDCacheLocal) Shutdown(ctx context.Context) { // Wait for the cache to either finish starting or error. select { case <-cache.startedCh: case <-cache.startErrCh: return } cache.lock.Lock() defer cache.lock.Unlock() // shutdownCh has to be checked under lock, otherwise we can race. select { case <-cache.shutdownCh: cache.log.CWarningf(ctx, "Shutdown called more than once") return default: } close(cache.shutdownCh) if cache.headsDb == nil { return } cache.closer() cache.headsDb = nil cache.hitMeter.Shutdown() cache.missMeter.Shutdown() cache.putMeter.Shutdown() }
[ "func", "(", "cache", "*", "DiskMDCacheLocal", ")", "Shutdown", "(", "ctx", "context", ".", "Context", ")", "{", "// Wait for the cache to either finish starting or error.", "select", "{", "case", "<-", "cache", ".", "startedCh", ":", "case", "<-", "cache", ".", "startErrCh", ":", "return", "\n", "}", "\n", "cache", ".", "lock", ".", "Lock", "(", ")", "\n", "defer", "cache", ".", "lock", ".", "Unlock", "(", ")", "\n", "// shutdownCh has to be checked under lock, otherwise we can race.", "select", "{", "case", "<-", "cache", ".", "shutdownCh", ":", "cache", ".", "log", ".", "CWarningf", "(", "ctx", ",", "\"", "\"", ")", "\n", "return", "\n", "default", ":", "}", "\n", "close", "(", "cache", ".", "shutdownCh", ")", "\n", "if", "cache", ".", "headsDb", "==", "nil", "{", "return", "\n", "}", "\n", "cache", ".", "closer", "(", ")", "\n", "cache", ".", "headsDb", "=", "nil", "\n", "cache", ".", "hitMeter", ".", "Shutdown", "(", ")", "\n", "cache", ".", "missMeter", ".", "Shutdown", "(", ")", "\n", "cache", ".", "putMeter", ".", "Shutdown", "(", ")", "\n", "}" ]
// Shutdown implements the DiskMDCache interface for DiskMDCacheLocal.
[ "Shutdown", "implements", "the", "DiskMDCache", "interface", "for", "DiskMDCacheLocal", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_md_cache.go#L462-L487
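Shutdown above guards against double invocation by checking a shutdown channel under the lock before closing it. A compact standalone version of that idiom, with the logging and meters omitted and the service type invented for illustration:

package main

import (
	"fmt"
	"sync"
)

type service struct {
	mu         sync.Mutex
	shutdownCh chan struct{}
}

func newService() *service {
	return &service{shutdownCh: make(chan struct{})}
}

// Shutdown is safe to call more than once; only the first call does work.
func (s *service) Shutdown() {
	s.mu.Lock()
	defer s.mu.Unlock()
	select {
	case <-s.shutdownCh:
		fmt.Println("Shutdown called more than once")
		return
	default:
	}
	close(s.shutdownCh)
	fmt.Println("resources released")
}

func main() {
	s := newService()
	s.Shutdown() // resources released
	s.Shutdown() // Shutdown called more than once
}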
161,627
keybase/client
go/client/fork_server.go
GetExtraFlags
func GetExtraFlags() []cli.Flag { return []cli.Flag{ cli.BoolFlag{ Name: "auto-fork", Usage: "Enable auto-fork of background service.", }, cli.BoolFlag{ Name: "no-auto-fork, F", Usage: "Disable auto-fork of background service.", }, } }
go
func GetExtraFlags() []cli.Flag { return []cli.Flag{ cli.BoolFlag{ Name: "auto-fork", Usage: "Enable auto-fork of background service.", }, cli.BoolFlag{ Name: "no-auto-fork, F", Usage: "Disable auto-fork of background service.", }, } }
[ "func", "GetExtraFlags", "(", ")", "[", "]", "cli", ".", "Flag", "{", "return", "[", "]", "cli", ".", "Flag", "{", "cli", ".", "BoolFlag", "{", "Name", ":", "\"", "\"", ",", "Usage", ":", "\"", "\"", ",", "}", ",", "cli", ".", "BoolFlag", "{", "Name", ":", "\"", "\"", ",", "Usage", ":", "\"", "\"", ",", "}", ",", "}", "\n", "}" ]
// GetExtraFlags gets the extra fork-related flags for this platform
[ "GetExtraFlags", "gets", "the", "extra", "fork", "-", "related", "flags", "for", "this", "platform" ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/fork_server.go#L20-L31
161,628
keybase/client
go/client/fork_server.go
AutoForkServer
func AutoForkServer(g *libkb.GlobalContext, cl libkb.CommandLine) (bool, error) { return ForkServer(g, cl, keybase1.ForkType_AUTO) }
go
func AutoForkServer(g *libkb.GlobalContext, cl libkb.CommandLine) (bool, error) { return ForkServer(g, cl, keybase1.ForkType_AUTO) }
[ "func", "AutoForkServer", "(", "g", "*", "libkb", ".", "GlobalContext", ",", "cl", "libkb", ".", "CommandLine", ")", "(", "bool", ",", "error", ")", "{", "return", "ForkServer", "(", "g", ",", "cl", ",", "keybase1", ".", "ForkType_AUTO", ")", "\n", "}" ]
// AutoForkServer just forks the server and sets the autoFork flag to true
[ "AutoForkServer", "just", "forks", "the", "server", "and", "sets", "the", "autoFork", "flag", "to", "true" ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/fork_server.go#L34-L36
161,629
keybase/client
go/libkb/api.go
NewInternalAPIEngine
func NewInternalAPIEngine(g *GlobalContext) (*InternalAPIEngine, error) { cliConfig, err := genClientConfigForInternalAPI(g) if err != nil { return nil, err } i := &InternalAPIEngine{ BaseAPIEngine{ config: cliConfig, clients: make(map[int]*Client), Contextified: NewContextified(g), }, } return i, nil }
go
func NewInternalAPIEngine(g *GlobalContext) (*InternalAPIEngine, error) { cliConfig, err := genClientConfigForInternalAPI(g) if err != nil { return nil, err } i := &InternalAPIEngine{ BaseAPIEngine{ config: cliConfig, clients: make(map[int]*Client), Contextified: NewContextified(g), }, } return i, nil }
[ "func", "NewInternalAPIEngine", "(", "g", "*", "GlobalContext", ")", "(", "*", "InternalAPIEngine", ",", "error", ")", "{", "cliConfig", ",", "err", ":=", "genClientConfigForInternalAPI", "(", "g", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "i", ":=", "&", "InternalAPIEngine", "{", "BaseAPIEngine", "{", "config", ":", "cliConfig", ",", "clients", ":", "make", "(", "map", "[", "int", "]", "*", "Client", ")", ",", "Contextified", ":", "NewContextified", "(", "g", ")", ",", "}", ",", "}", "\n", "return", "i", ",", "nil", "\n", "}" ]
// NewInternalAPIEngine makes an API engine for internally querying the keybase // API server
[ "NewInternalAPIEngine", "makes", "an", "API", "engine", "for", "internally", "querying", "the", "keybase", "API", "server" ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/api.go#L63-L77
161,630
keybase/client
go/libkb/api.go
updateCriticalClockSkewWarning
func (a *InternalAPIEngine) updateCriticalClockSkewWarning(resp *http.Response) { g := a.G() g.oodiMu.RLock() criticalClockSkew := int64(computeCriticalClockSkew(a.G(), resp.Header.Get("Date"))) needUpdate := (criticalClockSkew != a.G().outOfDateInfo.CriticalClockSkew) g.oodiMu.RUnlock() if needUpdate { g.oodiMu.Lock() g.outOfDateInfo.CriticalClockSkew = criticalClockSkew g.oodiMu.Unlock() } }
go
func (a *InternalAPIEngine) updateCriticalClockSkewWarning(resp *http.Response) { g := a.G() g.oodiMu.RLock() criticalClockSkew := int64(computeCriticalClockSkew(a.G(), resp.Header.Get("Date"))) needUpdate := (criticalClockSkew != a.G().outOfDateInfo.CriticalClockSkew) g.oodiMu.RUnlock() if needUpdate { g.oodiMu.Lock() g.outOfDateInfo.CriticalClockSkew = criticalClockSkew g.oodiMu.Unlock() } }
[ "func", "(", "a", "*", "InternalAPIEngine", ")", "updateCriticalClockSkewWarning", "(", "resp", "*", "http", ".", "Response", ")", "{", "g", ":=", "a", ".", "G", "(", ")", "\n", "g", ".", "oodiMu", ".", "RLock", "(", ")", "\n", "criticalClockSkew", ":=", "int64", "(", "computeCriticalClockSkew", "(", "a", ".", "G", "(", ")", ",", "resp", ".", "Header", ".", "Get", "(", "\"", "\"", ")", ")", ")", "\n", "needUpdate", ":=", "(", "criticalClockSkew", "!=", "a", ".", "G", "(", ")", ".", "outOfDateInfo", ".", "CriticalClockSkew", ")", "\n", "g", ".", "oodiMu", ".", "RUnlock", "(", ")", "\n\n", "if", "needUpdate", "{", "g", ".", "oodiMu", ".", "Lock", "(", ")", "\n", "g", ".", "outOfDateInfo", ".", "CriticalClockSkew", "=", "criticalClockSkew", "\n", "g", ".", "oodiMu", ".", "Unlock", "(", ")", "\n", "}", "\n", "}" ]
// If the local clock is within a reasonable offset of the server's // clock, we'll get 0. Otherwise, we set the skew accordingly. Safe // to set this every time.
[ "If", "the", "local", "clock", "is", "within", "a", "reasonable", "offset", "of", "the", "server", "s", "clock", "we", "ll", "get", "0", ".", "Otherwise", "we", "set", "the", "skew", "accordingly", ".", "Safe", "to", "set", "this", "every", "time", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/api.go#L539-L552
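updateCriticalClockSkewWarning above reads under the cheaper RLock, decides whether anything changed, and only then takes the write lock. A minimal sketch of that check-then-upgrade pattern; the skewTracker type is invented and the clock-skew computation is faked.

package main

import (
	"fmt"
	"sync"
)

type skewTracker struct {
	mu   sync.RWMutex
	skew int64
}

// update takes the read lock first and only escalates to the write lock
// when the stored value actually needs to change. The value may change
// between RUnlock and Lock; for a warning flag, a slightly stale write
// is acceptable, which is why the pattern is safe here.
func (t *skewTracker) update(newSkew int64) {
	t.mu.RLock()
	needUpdate := newSkew != t.skew
	t.mu.RUnlock()

	if needUpdate {
		t.mu.Lock()
		t.skew = newSkew
		t.mu.Unlock()
	}
}

func main() {
	var t skewTracker
	t.update(0)    // no write lock needed
	t.update(5000) // takes the write lock
	fmt.Println(t.skew)
}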
161,631
keybase/client
go/libkb/api.go
GetDecode
func (a *InternalAPIEngine) GetDecode(m MetaContext, arg APIArg, v APIResponseWrapper) error { m = m.EnsureCtx().WithLogTag("API") return a.getDecode(m, arg, v) }
go
func (a *InternalAPIEngine) GetDecode(m MetaContext, arg APIArg, v APIResponseWrapper) error { m = m.EnsureCtx().WithLogTag("API") return a.getDecode(m, arg, v) }
[ "func", "(", "a", "*", "InternalAPIEngine", ")", "GetDecode", "(", "m", "MetaContext", ",", "arg", "APIArg", ",", "v", "APIResponseWrapper", ")", "error", "{", "m", "=", "m", ".", "EnsureCtx", "(", ")", ".", "WithLogTag", "(", "\"", "\"", ")", "\n", "return", "a", ".", "getDecode", "(", "m", ",", "arg", ",", "v", ")", "\n", "}" ]
// GetDecode performs a GET request and decodes the response via // JSON into the value pointed to by v.
[ "GetDecode", "performs", "a", "GET", "request", "and", "decodes", "the", "response", "via", "JSON", "into", "the", "value", "pointed", "to", "by", "v", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/api.go#L734-L737
161,632
keybase/client
go/libkb/api.go
PostJSON
func (a *InternalAPIEngine) PostJSON(m MetaContext, arg APIArg) (*APIRes, error) { return a.Post(m, arg) }
go
func (a *InternalAPIEngine) PostJSON(m MetaContext, arg APIArg) (*APIRes, error) { return a.Post(m, arg) }
[ "func", "(", "a", "*", "InternalAPIEngine", ")", "PostJSON", "(", "m", "MetaContext", ",", "arg", "APIArg", ")", "(", "*", "APIRes", ",", "error", ")", "{", "return", "a", ".", "Post", "(", "m", ",", "arg", ")", "\n", "}" ]
// PostJSON does _not_ actually enforce the use of JSON. // That is now determined by APIArg's fields.
[ "PostJSON", "does", "_not_", "actually", "enforce", "the", "use", "of", "JSON", ".", "That", "is", "now", "determined", "by", "APIArg", "s", "fields", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/api.go#L787-L789
161,633
keybase/client
go/kbfs/libkbfs/folder_update_prepper.go
addUnrefToFinalResOp
func addUnrefToFinalResOp(ops opsList, ptr data.BlockPointer, doNotUnref map[data.BlockPointer]bool) opsList { // Make sure the block ID we want to unref isn't in the "do not // unref" list -- it could mean that block has already been GC'd // by the merged branch. We can't compare pointers directly // because GC'd pointers contain no block context. for noUnref := range doNotUnref { if ptr.ID == noUnref.ID { return ops } } resOp, ok := ops[len(ops)-1].(*resolutionOp) if !ok { resOp = newResolutionOp() ops = append(ops, resOp) } resOp.AddUncommittedUnrefBlock(ptr) return ops }
go
func addUnrefToFinalResOp(ops opsList, ptr data.BlockPointer, doNotUnref map[data.BlockPointer]bool) opsList { // Make sure the block ID we want to unref isn't in the "do not // unref" list -- it could mean that block has already been GC'd // by the merged branch. We can't compare pointers directly // because GC'd pointers contain no block context. for noUnref := range doNotUnref { if ptr.ID == noUnref.ID { return ops } } resOp, ok := ops[len(ops)-1].(*resolutionOp) if !ok { resOp = newResolutionOp() ops = append(ops, resOp) } resOp.AddUncommittedUnrefBlock(ptr) return ops }
[ "func", "addUnrefToFinalResOp", "(", "ops", "opsList", ",", "ptr", "data", ".", "BlockPointer", ",", "doNotUnref", "map", "[", "data", ".", "BlockPointer", "]", "bool", ")", "opsList", "{", "// Make sure the block ID we want to unref isn't in the \"do not", "// unref\" list -- it could mean that block has already been GC'd", "// by the merged branch. We can't compare pointers directly", "// because GC'd pointers contain no block context.", "for", "noUnref", ":=", "range", "doNotUnref", "{", "if", "ptr", ".", "ID", "==", "noUnref", ".", "ID", "{", "return", "ops", "\n", "}", "\n", "}", "\n\n", "resOp", ",", "ok", ":=", "ops", "[", "len", "(", "ops", ")", "-", "1", "]", ".", "(", "*", "resolutionOp", ")", "\n", "if", "!", "ok", "{", "resOp", "=", "newResolutionOp", "(", ")", "\n", "ops", "=", "append", "(", "ops", ",", "resOp", ")", "\n", "}", "\n", "resOp", ".", "AddUncommittedUnrefBlock", "(", "ptr", ")", "\n", "return", "ops", "\n", "}" ]
// addUnrefToFinalResOp makes a resolutionOp at the end of opsList if // one doesn't exist yet, and then adds the given pointer as an unref // block to it.
[ "addUnrefToFinalResOp", "makes", "a", "resolutionOp", "at", "the", "end", "of", "opsList", "if", "one", "doesn", "t", "exist", "yet", "and", "then", "adds", "the", "given", "pointer", "as", "an", "unref", "block", "to", "it", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_update_prepper.go#L678-L697
161,634
keybase/client
go/kbfs/libkbfs/folder_update_prepper.go
cacheBlockInfos
func (fup *folderUpdatePrepper) cacheBlockInfos(infos []data.BlockInfo) { fup.cacheLock.Lock() defer fup.cacheLock.Unlock() if fup.cachedInfos == nil { fup.cachedInfos = make(map[data.BlockPointer]data.BlockInfo) } for _, info := range infos { fup.cachedInfos[info.BlockPointer] = info } }
go
func (fup *folderUpdatePrepper) cacheBlockInfos(infos []data.BlockInfo) { fup.cacheLock.Lock() defer fup.cacheLock.Unlock() if fup.cachedInfos == nil { fup.cachedInfos = make(map[data.BlockPointer]data.BlockInfo) } for _, info := range infos { fup.cachedInfos[info.BlockPointer] = info } }
[ "func", "(", "fup", "*", "folderUpdatePrepper", ")", "cacheBlockInfos", "(", "infos", "[", "]", "data", ".", "BlockInfo", ")", "{", "fup", ".", "cacheLock", ".", "Lock", "(", ")", "\n", "defer", "fup", ".", "cacheLock", ".", "Unlock", "(", ")", "\n", "if", "fup", ".", "cachedInfos", "==", "nil", "{", "fup", ".", "cachedInfos", "=", "make", "(", "map", "[", "data", ".", "BlockPointer", "]", "data", ".", "BlockInfo", ")", "\n", "}", "\n", "for", "_", ",", "info", ":=", "range", "infos", "{", "fup", ".", "cachedInfos", "[", "info", ".", "BlockPointer", "]", "=", "info", "\n", "}", "\n", "}" ]
// cacheBlockInfos stores the given block infos temporarily, until the // next prepUpdateForPaths completes, as an optimization.
[ "cacheBlockInfos", "stores", "the", "given", "block", "infos", "temporarily", "until", "the", "next", "prepUpdateForPaths", "completes", "as", "an", "optimization", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_update_prepper.go#L1563-L1572
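cacheBlockInfos above lazily allocates its map under the lock on first use, so the zero value of the struct needs no constructor. A tiny standalone sketch of that idiom with invented types:

package main

import (
	"fmt"
	"sync"
)

type infoCache struct {
	mu    sync.Mutex
	infos map[string]int
}

// add allocates the map the first time it is needed, keeping the zero
// value of infoCache usable without an explicit constructor.
func (c *infoCache) add(pairs map[string]int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.infos == nil {
		c.infos = make(map[string]int)
	}
	for k, v := range pairs {
		c.infos[k] = v
	}
}

func main() {
	var c infoCache
	c.add(map[string]int{"block-a": 1})
	fmt.Println(len(c.infos)) // 1
}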
161,635
keybase/client
go/chat/sender.go
getSupersederEphemeralMetadata
func (s *BlockingSender) getSupersederEphemeralMetadata(ctx context.Context, uid gregor1.UID, convID chat1.ConversationID, msg chat1.MessagePlaintext) (metadata *chat1.MsgEphemeralMetadata, err error) { if chat1.IsEphemeralNonSupersederType(msg.ClientHeader.MessageType) { // Leave whatever was previously set return msg.ClientHeader.EphemeralMetadata, nil } else if !chat1.IsEphemeralSupersederType(msg.ClientHeader.MessageType) { // clear out any defaults, this msg is a non-ephemeral type return nil, nil } supersededMsg, err := s.getMessage(ctx, uid, convID, msg.ClientHeader.Supersedes, false /* resolveSupersedes */) if err != nil { return nil, err } if supersededMsg.IsEphemeral() { metadata = supersededMsg.EphemeralMetadata() metadata.Lifetime = gregor1.ToDurationSec(supersededMsg.RemainingEphemeralLifetime(s.clock.Now())) } return metadata, nil }
go
func (s *BlockingSender) getSupersederEphemeralMetadata(ctx context.Context, uid gregor1.UID, convID chat1.ConversationID, msg chat1.MessagePlaintext) (metadata *chat1.MsgEphemeralMetadata, err error) { if chat1.IsEphemeralNonSupersederType(msg.ClientHeader.MessageType) { // Leave whatever was previously set return msg.ClientHeader.EphemeralMetadata, nil } else if !chat1.IsEphemeralSupersederType(msg.ClientHeader.MessageType) { // clear out any defaults, this msg is a non-ephemeral type return nil, nil } supersededMsg, err := s.getMessage(ctx, uid, convID, msg.ClientHeader.Supersedes, false /* resolveSupersedes */) if err != nil { return nil, err } if supersededMsg.IsEphemeral() { metadata = supersededMsg.EphemeralMetadata() metadata.Lifetime = gregor1.ToDurationSec(supersededMsg.RemainingEphemeralLifetime(s.clock.Now())) } return metadata, nil }
[ "func", "(", "s", "*", "BlockingSender", ")", "getSupersederEphemeralMetadata", "(", "ctx", "context", ".", "Context", ",", "uid", "gregor1", ".", "UID", ",", "convID", "chat1", ".", "ConversationID", ",", "msg", "chat1", ".", "MessagePlaintext", ")", "(", "metadata", "*", "chat1", ".", "MsgEphemeralMetadata", ",", "err", "error", ")", "{", "if", "chat1", ".", "IsEphemeralNonSupersederType", "(", "msg", ".", "ClientHeader", ".", "MessageType", ")", "{", "// Leave whatever was previously set", "return", "msg", ".", "ClientHeader", ".", "EphemeralMetadata", ",", "nil", "\n", "}", "else", "if", "!", "chat1", ".", "IsEphemeralSupersederType", "(", "msg", ".", "ClientHeader", ".", "MessageType", ")", "{", "// clear out any defaults, this msg is a non-ephemeral type", "return", "nil", ",", "nil", "\n", "}", "\n\n", "supersededMsg", ",", "err", ":=", "s", ".", "getMessage", "(", "ctx", ",", "uid", ",", "convID", ",", "msg", ".", "ClientHeader", ".", "Supersedes", ",", "false", "/* resolveSupersedes */", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "if", "supersededMsg", ".", "IsEphemeral", "(", ")", "{", "metadata", "=", "supersededMsg", ".", "EphemeralMetadata", "(", ")", "\n", "metadata", ".", "Lifetime", "=", "gregor1", ".", "ToDurationSec", "(", "supersededMsg", ".", "RemainingEphemeralLifetime", "(", "s", ".", "clock", ".", "Now", "(", ")", ")", ")", "\n", "}", "\n", "return", "metadata", ",", "nil", "\n", "}" ]
// If we are superseding an ephemeral message, we have to set the // ephemeralMetadata on this superseder message.
[ "If", "we", "are", "superseding", "an", "ephemeral", "message", "we", "have", "to", "set", "the", "ephemeralMetadata", "on", "this", "superseder", "message", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/sender.go#L367-L387
161,636
keybase/client
go/chat/sender.go
processReactionMessage
func (s *BlockingSender) processReactionMessage(ctx context.Context, uid gregor1.UID, convID chat1.ConversationID, msg chat1.MessagePlaintext) (clientHeader chat1.MessageClientHeader, body chat1.MessageBody, err error) { if msg.ClientHeader.MessageType != chat1.MessageType_REACTION { // nothing to do here return msg.ClientHeader, msg.MessageBody, nil } // We could either be posting a reaction or removing one that we already posted. supersededMsg, err := s.getMessage(ctx, uid, convID, msg.ClientHeader.Supersedes, true /* resolveSupersedes */) if err != nil { return clientHeader, body, err } found, reactionMsgID := supersededMsg.Reactions.HasReactionFromUser(msg.MessageBody.Reaction().Body, s.G().Env.GetUsername().String()) if found { msg.ClientHeader.Supersedes = reactionMsgID msg.ClientHeader.MessageType = chat1.MessageType_DELETE msg.ClientHeader.Deletes = []chat1.MessageID{reactionMsgID} msg.MessageBody = chat1.NewMessageBodyWithDelete(chat1.MessageDelete{ MessageIDs: []chat1.MessageID{reactionMsgID}, }) } else { // bookkeep the reaction used so we can keep track of the user's // popular reactions in the UI if err := storage.NewReacjiStore(s.G()).PutReacji(ctx, uid, msg.MessageBody.Reaction().Body); err != nil { s.Debug(ctx, "unable to put in ReacjiStore: %v", err) } } return msg.ClientHeader, msg.MessageBody, nil }
go
func (s *BlockingSender) processReactionMessage(ctx context.Context, uid gregor1.UID, convID chat1.ConversationID, msg chat1.MessagePlaintext) (clientHeader chat1.MessageClientHeader, body chat1.MessageBody, err error) { if msg.ClientHeader.MessageType != chat1.MessageType_REACTION { // nothing to do here return msg.ClientHeader, msg.MessageBody, nil } // We could either be posting a reaction or removing one that we already posted. supersededMsg, err := s.getMessage(ctx, uid, convID, msg.ClientHeader.Supersedes, true /* resolveSupersedes */) if err != nil { return clientHeader, body, err } found, reactionMsgID := supersededMsg.Reactions.HasReactionFromUser(msg.MessageBody.Reaction().Body, s.G().Env.GetUsername().String()) if found { msg.ClientHeader.Supersedes = reactionMsgID msg.ClientHeader.MessageType = chat1.MessageType_DELETE msg.ClientHeader.Deletes = []chat1.MessageID{reactionMsgID} msg.MessageBody = chat1.NewMessageBodyWithDelete(chat1.MessageDelete{ MessageIDs: []chat1.MessageID{reactionMsgID}, }) } else { // bookkeep the reaction used so we can keep track of the user's // popular reactions in the UI if err := storage.NewReacjiStore(s.G()).PutReacji(ctx, uid, msg.MessageBody.Reaction().Body); err != nil { s.Debug(ctx, "unable to put in ReacjiStore: %v", err) } } return msg.ClientHeader, msg.MessageBody, nil }
[ "func", "(", "s", "*", "BlockingSender", ")", "processReactionMessage", "(", "ctx", "context", ".", "Context", ",", "uid", "gregor1", ".", "UID", ",", "convID", "chat1", ".", "ConversationID", ",", "msg", "chat1", ".", "MessagePlaintext", ")", "(", "clientHeader", "chat1", ".", "MessageClientHeader", ",", "body", "chat1", ".", "MessageBody", ",", "err", "error", ")", "{", "if", "msg", ".", "ClientHeader", ".", "MessageType", "!=", "chat1", ".", "MessageType_REACTION", "{", "// nothing to do here", "return", "msg", ".", "ClientHeader", ",", "msg", ".", "MessageBody", ",", "nil", "\n", "}", "\n\n", "// We could either be posting a reaction or removing one that we already posted.", "supersededMsg", ",", "err", ":=", "s", ".", "getMessage", "(", "ctx", ",", "uid", ",", "convID", ",", "msg", ".", "ClientHeader", ".", "Supersedes", ",", "true", "/* resolveSupersedes */", ")", "\n", "if", "err", "!=", "nil", "{", "return", "clientHeader", ",", "body", ",", "err", "\n", "}", "\n", "found", ",", "reactionMsgID", ":=", "supersededMsg", ".", "Reactions", ".", "HasReactionFromUser", "(", "msg", ".", "MessageBody", ".", "Reaction", "(", ")", ".", "Body", ",", "s", ".", "G", "(", ")", ".", "Env", ".", "GetUsername", "(", ")", ".", "String", "(", ")", ")", "\n", "if", "found", "{", "msg", ".", "ClientHeader", ".", "Supersedes", "=", "reactionMsgID", "\n", "msg", ".", "ClientHeader", ".", "MessageType", "=", "chat1", ".", "MessageType_DELETE", "\n", "msg", ".", "ClientHeader", ".", "Deletes", "=", "[", "]", "chat1", ".", "MessageID", "{", "reactionMsgID", "}", "\n", "msg", ".", "MessageBody", "=", "chat1", ".", "NewMessageBodyWithDelete", "(", "chat1", ".", "MessageDelete", "{", "MessageIDs", ":", "[", "]", "chat1", ".", "MessageID", "{", "reactionMsgID", "}", ",", "}", ")", "\n", "}", "else", "{", "// bookkeep the reaction used so we can keep track of the user's", "// popular reactions in the UI", "if", "err", ":=", "storage", ".", "NewReacjiStore", "(", "s", ".", "G", "(", ")", ")", ".", "PutReacji", "(", "ctx", ",", "uid", ",", "msg", ".", "MessageBody", ".", "Reaction", "(", ")", ".", "Body", ")", ";", "err", "!=", "nil", "{", "s", ".", "Debug", "(", "ctx", ",", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "}", "\n\n", "return", "msg", ".", "ClientHeader", ",", "msg", ".", "MessageBody", ",", "nil", "\n", "}" ]
// processReactionMessage determines if we are trying to post a duplicate // chat1.MessageType_REACTION, which is considered a chat1.MessageType_DELETE // and updates the send appropriately.
[ "processReactionMessage", "determines", "if", "we", "are", "trying", "to", "post", "a", "duplicate", "chat1", ".", "MessageType_REACTION", "which", "is", "considered", "a", "chat1", ".", "MessageType_DELETE", "and", "updates", "the", "send", "appropriately", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/sender.go#L392-L421
161,637
keybase/client
go/chat/sender.go
deleteAssets
func (s *BlockingSender) deleteAssets(ctx context.Context, convID chat1.ConversationID, assets []chat1.Asset) error { // get s3 params from server params, err := s.getRi().GetS3Params(ctx, convID) if err != nil { s.G().Log.Warning("error getting s3 params: %s", err) return nil } if err := s.store.DeleteAssets(ctx, params, s, assets); err != nil { s.G().Log.Warning("error deleting assets: %s", err) // there's no way to get asset information after this point. // any assets not deleted will be stranded on s3. return nil } s.G().Log.Debug("deleted %d assets", len(assets)) return nil }
go
func (s *BlockingSender) deleteAssets(ctx context.Context, convID chat1.ConversationID, assets []chat1.Asset) error { // get s3 params from server params, err := s.getRi().GetS3Params(ctx, convID) if err != nil { s.G().Log.Warning("error getting s3 params: %s", err) return nil } if err := s.store.DeleteAssets(ctx, params, s, assets); err != nil { s.G().Log.Warning("error deleting assets: %s", err) // there's no way to get asset information after this point. // any assets not deleted will be stranded on s3. return nil } s.G().Log.Debug("deleted %d assets", len(assets)) return nil }
[ "func", "(", "s", "*", "BlockingSender", ")", "deleteAssets", "(", "ctx", "context", ".", "Context", ",", "convID", "chat1", ".", "ConversationID", ",", "assets", "[", "]", "chat1", ".", "Asset", ")", "error", "{", "// get s3 params from server", "params", ",", "err", ":=", "s", ".", "getRi", "(", ")", ".", "GetS3Params", "(", "ctx", ",", "convID", ")", "\n", "if", "err", "!=", "nil", "{", "s", ".", "G", "(", ")", ".", "Log", ".", "Warning", "(", "\"", "\"", ",", "err", ")", "\n", "return", "nil", "\n", "}", "\n\n", "if", "err", ":=", "s", ".", "store", ".", "DeleteAssets", "(", "ctx", ",", "params", ",", "s", ",", "assets", ")", ";", "err", "!=", "nil", "{", "s", ".", "G", "(", ")", ".", "Log", ".", "Warning", "(", "\"", "\"", ",", "err", ")", "\n\n", "// there's no way to get asset information after this point.", "// any assets not deleted will be stranded on s3.", "return", "nil", "\n", "}", "\n\n", "s", ".", "G", "(", ")", ".", "Log", ".", "Debug", "(", "\"", "\"", ",", "len", "(", "assets", ")", ")", "\n", "return", "nil", "\n", "}" ]
// deleteAssets deletes assets from s3. // Logs but does not return errors. Assets may be left undeleted.
[ "deleteAssets", "deletes", "assets", "from", "s3", ".", "Logs", "but", "does", "not", "return", "errors", ".", "Assets", "may", "be", "left", "undeleted", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/sender.go#L713-L732
161,638
keybase/client
go/chat/sender.go
cancelPendingDuplicateReactions
func (s *Deliverer) cancelPendingDuplicateReactions(ctx context.Context, obr chat1.OutboxRecord) (bool, error) { if obr.Msg.ClientHeader.MessageType != chat1.MessageType_REACTION { // nothing to do here return false, nil } // While holding the outbox lock, let's remove any duplicate reaction // messages and make sure we are in the outbox, otherwise someone else // canceled us. inOutbox := false numCanceled, err := s.outbox.CancelMessagesWithPredicate(ctx, func(o chat1.OutboxRecord) bool { if !o.ConvID.Eq(obr.ConvID) { return false } if o.Msg.ClientHeader.MessageType != chat1.MessageType_REACTION { return false } idEq := o.OutboxID.Eq(&obr.OutboxID) bodyEq := o.Msg.MessageBody.Reaction().Eq(obr.Msg.MessageBody.Reaction()) // Don't delete ourselves from the outbox, but we want to make sure we // are in here. inOutbox = inOutbox || idEq shouldCancel := bodyEq && !idEq if shouldCancel { s.Debug(ctx, "canceling outbox message convID: %v obid: %v", o.ConvID, o.OutboxID) } return shouldCancel }) if err != nil { return false, err } else if !inOutbox { // we were canceled previously, the jig is up return true, nil } else if numCanceled%2 == 1 { // Since we're just toggling the reaction on/off, we should abort here // and remove ourselves from the outbox since our message wouldn't // change the reaction state. err = s.outbox.RemoveMessage(ctx, obr.OutboxID) return true, err } return false, nil }
go
func (s *Deliverer) cancelPendingDuplicateReactions(ctx context.Context, obr chat1.OutboxRecord) (bool, error) { if obr.Msg.ClientHeader.MessageType != chat1.MessageType_REACTION { // nothing to do here return false, nil } // While holding the outbox lock, let's remove any duplicate reaction // messages and make sure we are in the outbox, otherwise someone else // canceled us. inOutbox := false numCanceled, err := s.outbox.CancelMessagesWithPredicate(ctx, func(o chat1.OutboxRecord) bool { if !o.ConvID.Eq(obr.ConvID) { return false } if o.Msg.ClientHeader.MessageType != chat1.MessageType_REACTION { return false } idEq := o.OutboxID.Eq(&obr.OutboxID) bodyEq := o.Msg.MessageBody.Reaction().Eq(obr.Msg.MessageBody.Reaction()) // Don't delete ourselves from the outbox, but we want to make sure we // are in here. inOutbox = inOutbox || idEq shouldCancel := bodyEq && !idEq if shouldCancel { s.Debug(ctx, "canceling outbox message convID: %v obid: %v", o.ConvID, o.OutboxID) } return shouldCancel }) if err != nil { return false, err } else if !inOutbox { // we were canceled previously, the jig is up return true, nil } else if numCanceled%2 == 1 { // Since we're just toggling the reaction on/off, we should abort here // and remove ourselves from the outbox since our message wouldn't // change the reaction state. err = s.outbox.RemoveMessage(ctx, obr.OutboxID) return true, err } return false, nil }
[ "func", "(", "s", "*", "Deliverer", ")", "cancelPendingDuplicateReactions", "(", "ctx", "context", ".", "Context", ",", "obr", "chat1", ".", "OutboxRecord", ")", "(", "bool", ",", "error", ")", "{", "if", "obr", ".", "Msg", ".", "ClientHeader", ".", "MessageType", "!=", "chat1", ".", "MessageType_REACTION", "{", "// nothing to do here", "return", "false", ",", "nil", "\n", "}", "\n", "// While holding the outbox lock, let's remove any duplicate reaction", "// messages and make sure we are in the outbox, otherwise someone else", "// canceled us.", "inOutbox", ":=", "false", "\n", "numCanceled", ",", "err", ":=", "s", ".", "outbox", ".", "CancelMessagesWithPredicate", "(", "ctx", ",", "func", "(", "o", "chat1", ".", "OutboxRecord", ")", "bool", "{", "if", "!", "o", ".", "ConvID", ".", "Eq", "(", "obr", ".", "ConvID", ")", "{", "return", "false", "\n", "}", "\n", "if", "o", ".", "Msg", ".", "ClientHeader", ".", "MessageType", "!=", "chat1", ".", "MessageType_REACTION", "{", "return", "false", "\n", "}", "\n\n", "idEq", ":=", "o", ".", "OutboxID", ".", "Eq", "(", "&", "obr", ".", "OutboxID", ")", "\n", "bodyEq", ":=", "o", ".", "Msg", ".", "MessageBody", ".", "Reaction", "(", ")", ".", "Eq", "(", "obr", ".", "Msg", ".", "MessageBody", ".", "Reaction", "(", ")", ")", "\n", "// Don't delete ourselves from the outbox, but we want to make sure we", "// are in here.", "inOutbox", "=", "inOutbox", "||", "idEq", "\n", "shouldCancel", ":=", "bodyEq", "&&", "!", "idEq", "\n", "if", "shouldCancel", "{", "s", ".", "Debug", "(", "ctx", ",", "\"", "\"", ",", "o", ".", "ConvID", ",", "o", ".", "OutboxID", ")", "\n", "}", "\n", "return", "shouldCancel", "\n", "}", ")", "\n\n", "if", "err", "!=", "nil", "{", "return", "false", ",", "err", "\n", "}", "else", "if", "!", "inOutbox", "{", "// we were canceled previously, the jig is up", "return", "true", ",", "nil", "\n", "}", "else", "if", "numCanceled", "%", "2", "==", "1", "{", "// Since we're just toggling the reaction on/off, we should abort here", "// and remove ourselves from the outbox since our message wouldn't", "// change the reaction state.", "err", "=", "s", ".", "outbox", ".", "RemoveMessage", "(", "ctx", ",", "obr", ".", "OutboxID", ")", "\n", "return", "true", ",", "err", "\n", "}", "\n", "return", "false", ",", "nil", "\n", "}" ]
// cancelPendingDuplicateReactions removes duplicate reactions in the outbox. // If we cancel an odd number of items we cancel ourselves since the current // reaction state is correct.
[ "cancelPendingDuplicateReactions", "removes", "duplicate", "reactions", "in", "the", "outbox", ".", "If", "we", "cancel", "an", "odd", "number", "of", "items", "we", "cancel", "ourselves", "since", "the", "current", "reaction", "state", "is", "correct", "." ]
b352622cd8cc94798cfacbcb56ada203c18e519e
https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/sender.go#L1403-L1445
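The parity check in cancelPendingDuplicateReactions works because each queued reaction toggles the state: cancelling an odd number of duplicates already flips it, so sending one more would only flip it back. A toy model of that reasoning; the outbox and reaction details are stand-ins, not the chat1 types.

package main

import "fmt"

// netToggleNeeded reports whether our own toggle still has an effect
// after cancelling numCanceled pending duplicates: with an odd count
// cancelled, adding one more toggle would be a net no-op.
func netToggleNeeded(numCanceled int) bool {
	return numCanceled%2 == 0
}

func main() {
	fmt.Println(netToggleNeeded(0)) // true: nothing pending, keep ours
	fmt.Println(netToggleNeeded(1)) // false: ours would undo itself
	fmt.Println(netToggleNeeded(2)) // true
}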
161,639
golang/groupcache
groupcache.go
GetGroup
func GetGroup(name string) *Group { mu.RLock() g := groups[name] mu.RUnlock() return g }
go
func GetGroup(name string) *Group { mu.RLock() g := groups[name] mu.RUnlock() return g }
[ "func", "GetGroup", "(", "name", "string", ")", "*", "Group", "{", "mu", ".", "RLock", "(", ")", "\n", "g", ":=", "groups", "[", "name", "]", "\n", "mu", ".", "RUnlock", "(", ")", "\n", "return", "g", "\n", "}" ]
// GetGroup returns the named group previously created with NewGroup, or // nil if there's no such group.
[ "GetGroup", "returns", "the", "named", "group", "previously", "created", "with", "NewGroup", "or", "nil", "if", "there", "s", "no", "such", "group", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/groupcache.go#L67-L72
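GetGroup above is a read from a package-level registry guarded by an RWMutex. A hedged usage sketch against the public groupcache API follows; the group name, cache size, and getter body are made up, and the Context-typed ctx matches the older API shown in these records.

package main

import (
	"fmt"

	"github.com/golang/groupcache"
)

func main() {
	// Create a group once, typically at startup.
	groupcache.NewGroup("users", 1<<20, groupcache.GetterFunc(
		func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
			return dest.SetString("value-for-" + key)
		}))

	// Elsewhere, look the group up by name; nil means it was never created.
	if g := groupcache.GetGroup("users"); g != nil {
		fmt.Println("found group:", g.Name())
	}
}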
161,640
golang/groupcache
groupcache.go
newGroup
func newGroup(name string, cacheBytes int64, getter Getter, peers PeerPicker) *Group { if getter == nil { panic("nil Getter") } mu.Lock() defer mu.Unlock() initPeerServerOnce.Do(callInitPeerServer) if _, dup := groups[name]; dup { panic("duplicate registration of group " + name) } g := &Group{ name: name, getter: getter, peers: peers, cacheBytes: cacheBytes, loadGroup: &singleflight.Group{}, } if fn := newGroupHook; fn != nil { fn(g) } groups[name] = g return g }
go
func newGroup(name string, cacheBytes int64, getter Getter, peers PeerPicker) *Group { if getter == nil { panic("nil Getter") } mu.Lock() defer mu.Unlock() initPeerServerOnce.Do(callInitPeerServer) if _, dup := groups[name]; dup { panic("duplicate registration of group " + name) } g := &Group{ name: name, getter: getter, peers: peers, cacheBytes: cacheBytes, loadGroup: &singleflight.Group{}, } if fn := newGroupHook; fn != nil { fn(g) } groups[name] = g return g }
[ "func", "newGroup", "(", "name", "string", ",", "cacheBytes", "int64", ",", "getter", "Getter", ",", "peers", "PeerPicker", ")", "*", "Group", "{", "if", "getter", "==", "nil", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "mu", ".", "Lock", "(", ")", "\n", "defer", "mu", ".", "Unlock", "(", ")", "\n", "initPeerServerOnce", ".", "Do", "(", "callInitPeerServer", ")", "\n", "if", "_", ",", "dup", ":=", "groups", "[", "name", "]", ";", "dup", "{", "panic", "(", "\"", "\"", "+", "name", ")", "\n", "}", "\n", "g", ":=", "&", "Group", "{", "name", ":", "name", ",", "getter", ":", "getter", ",", "peers", ":", "peers", ",", "cacheBytes", ":", "cacheBytes", ",", "loadGroup", ":", "&", "singleflight", ".", "Group", "{", "}", ",", "}", "\n", "if", "fn", ":=", "newGroupHook", ";", "fn", "!=", "nil", "{", "fn", "(", "g", ")", "\n", "}", "\n", "groups", "[", "name", "]", "=", "g", "\n", "return", "g", "\n", "}" ]
// If peers is nil, the peerPicker is called via a sync.Once to initialize it.
[ "If", "peers", "is", "nil", "the", "peerPicker", "is", "called", "via", "a", "sync", ".", "Once", "to", "initialize", "it", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/groupcache.go#L88-L110
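newGroup above combines two common registration idioms: one-time package setup via sync.Once and a panic on duplicate names. A generic, standard-library-only sketch of that combination (the registry contents are invented):

package main

import (
	"fmt"
	"sync"
)

var (
	mu       sync.Mutex
	initOnce sync.Once
	registry = map[string]int{}
)

func register(name string, v int) {
	mu.Lock()
	defer mu.Unlock()
	// Run package-wide setup exactly once, regardless of how many
	// registrations happen or in what order.
	initOnce.Do(func() { fmt.Println("one-time init") })
	if _, dup := registry[name]; dup {
		panic("duplicate registration of " + name)
	}
	registry[name] = v
}

func main() {
	register("a", 1)
	register("b", 2)
	fmt.Println(len(registry)) // 2
}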
161,641
golang/groupcache
groupcache.go
load
func (g *Group) load(ctx Context, key string, dest Sink) (value ByteView, destPopulated bool, err error) { g.Stats.Loads.Add(1) viewi, err := g.loadGroup.Do(key, func() (interface{}, error) { // Check the cache again because singleflight can only dedup calls // that overlap concurrently. It's possible for 2 concurrent // requests to miss the cache, resulting in 2 load() calls. An // unfortunate goroutine scheduling would result in this callback // being run twice, serially. If we don't check the cache again, // cache.nbytes would be incremented below even though there will // be only one entry for this key. // // Consider the following serialized event ordering for two // goroutines in which this callback gets called twice for hte // same key: // 1: Get("key") // 2: Get("key") // 1: lookupCache("key") // 2: lookupCache("key") // 1: load("key") // 2: load("key") // 1: loadGroup.Do("key", fn) // 1: fn() // 2: loadGroup.Do("key", fn) // 2: fn() if value, cacheHit := g.lookupCache(key); cacheHit { g.Stats.CacheHits.Add(1) return value, nil } g.Stats.LoadsDeduped.Add(1) var value ByteView var err error if peer, ok := g.peers.PickPeer(key); ok { value, err = g.getFromPeer(ctx, peer, key) if err == nil { g.Stats.PeerLoads.Add(1) return value, nil } g.Stats.PeerErrors.Add(1) // TODO(bradfitz): log the peer's error? keep // log of the past few for /groupcachez? It's // probably boring (normal task movement), so not // worth logging I imagine. } value, err = g.getLocally(ctx, key, dest) if err != nil { g.Stats.LocalLoadErrs.Add(1) return nil, err } g.Stats.LocalLoads.Add(1) destPopulated = true // only one caller of load gets this return value g.populateCache(key, value, &g.mainCache) return value, nil }) if err == nil { value = viewi.(ByteView) } return }
go
func (g *Group) load(ctx Context, key string, dest Sink) (value ByteView, destPopulated bool, err error) { g.Stats.Loads.Add(1) viewi, err := g.loadGroup.Do(key, func() (interface{}, error) { // Check the cache again because singleflight can only dedup calls // that overlap concurrently. It's possible for 2 concurrent // requests to miss the cache, resulting in 2 load() calls. An // unfortunate goroutine scheduling would result in this callback // being run twice, serially. If we don't check the cache again, // cache.nbytes would be incremented below even though there will // be only one entry for this key. // // Consider the following serialized event ordering for two // goroutines in which this callback gets called twice for hte // same key: // 1: Get("key") // 2: Get("key") // 1: lookupCache("key") // 2: lookupCache("key") // 1: load("key") // 2: load("key") // 1: loadGroup.Do("key", fn) // 1: fn() // 2: loadGroup.Do("key", fn) // 2: fn() if value, cacheHit := g.lookupCache(key); cacheHit { g.Stats.CacheHits.Add(1) return value, nil } g.Stats.LoadsDeduped.Add(1) var value ByteView var err error if peer, ok := g.peers.PickPeer(key); ok { value, err = g.getFromPeer(ctx, peer, key) if err == nil { g.Stats.PeerLoads.Add(1) return value, nil } g.Stats.PeerErrors.Add(1) // TODO(bradfitz): log the peer's error? keep // log of the past few for /groupcachez? It's // probably boring (normal task movement), so not // worth logging I imagine. } value, err = g.getLocally(ctx, key, dest) if err != nil { g.Stats.LocalLoadErrs.Add(1) return nil, err } g.Stats.LocalLoads.Add(1) destPopulated = true // only one caller of load gets this return value g.populateCache(key, value, &g.mainCache) return value, nil }) if err == nil { value = viewi.(ByteView) } return }
[ "func", "(", "g", "*", "Group", ")", "load", "(", "ctx", "Context", ",", "key", "string", ",", "dest", "Sink", ")", "(", "value", "ByteView", ",", "destPopulated", "bool", ",", "err", "error", ")", "{", "g", ".", "Stats", ".", "Loads", ".", "Add", "(", "1", ")", "\n", "viewi", ",", "err", ":=", "g", ".", "loadGroup", ".", "Do", "(", "key", ",", "func", "(", ")", "(", "interface", "{", "}", ",", "error", ")", "{", "// Check the cache again because singleflight can only dedup calls", "// that overlap concurrently. It's possible for 2 concurrent", "// requests to miss the cache, resulting in 2 load() calls. An", "// unfortunate goroutine scheduling would result in this callback", "// being run twice, serially. If we don't check the cache again,", "// cache.nbytes would be incremented below even though there will", "// be only one entry for this key.", "//", "// Consider the following serialized event ordering for two", "// goroutines in which this callback gets called twice for hte", "// same key:", "// 1: Get(\"key\")", "// 2: Get(\"key\")", "// 1: lookupCache(\"key\")", "// 2: lookupCache(\"key\")", "// 1: load(\"key\")", "// 2: load(\"key\")", "// 1: loadGroup.Do(\"key\", fn)", "// 1: fn()", "// 2: loadGroup.Do(\"key\", fn)", "// 2: fn()", "if", "value", ",", "cacheHit", ":=", "g", ".", "lookupCache", "(", "key", ")", ";", "cacheHit", "{", "g", ".", "Stats", ".", "CacheHits", ".", "Add", "(", "1", ")", "\n", "return", "value", ",", "nil", "\n", "}", "\n", "g", ".", "Stats", ".", "LoadsDeduped", ".", "Add", "(", "1", ")", "\n", "var", "value", "ByteView", "\n", "var", "err", "error", "\n", "if", "peer", ",", "ok", ":=", "g", ".", "peers", ".", "PickPeer", "(", "key", ")", ";", "ok", "{", "value", ",", "err", "=", "g", ".", "getFromPeer", "(", "ctx", ",", "peer", ",", "key", ")", "\n", "if", "err", "==", "nil", "{", "g", ".", "Stats", ".", "PeerLoads", ".", "Add", "(", "1", ")", "\n", "return", "value", ",", "nil", "\n", "}", "\n", "g", ".", "Stats", ".", "PeerErrors", ".", "Add", "(", "1", ")", "\n", "// TODO(bradfitz): log the peer's error? keep", "// log of the past few for /groupcachez? It's", "// probably boring (normal task movement), so not", "// worth logging I imagine.", "}", "\n", "value", ",", "err", "=", "g", ".", "getLocally", "(", "ctx", ",", "key", ",", "dest", ")", "\n", "if", "err", "!=", "nil", "{", "g", ".", "Stats", ".", "LocalLoadErrs", ".", "Add", "(", "1", ")", "\n", "return", "nil", ",", "err", "\n", "}", "\n", "g", ".", "Stats", ".", "LocalLoads", ".", "Add", "(", "1", ")", "\n", "destPopulated", "=", "true", "// only one caller of load gets this return value", "\n", "g", ".", "populateCache", "(", "key", ",", "value", ",", "&", "g", ".", "mainCache", ")", "\n", "return", "value", ",", "nil", "\n", "}", ")", "\n", "if", "err", "==", "nil", "{", "value", "=", "viewi", ".", "(", "ByteView", ")", "\n", "}", "\n", "return", "\n", "}" ]
// load loads key either by invoking the getter locally or by sending it to another machine.
[ "load", "loads", "key", "either", "by", "invoking", "the", "getter", "locally", "or", "by", "sending", "it", "to", "another", "machine", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/groupcache.go#L236-L293
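The deduplication in load is built on the singleflight package vendored into groupcache; the same behaviour is available today as golang.org/x/sync/singleflight. A minimal sketch of collapsing concurrent loads for one key; note, as the comment in load explains, that singleflight only dedups calls that overlap in time, which is why the count below is "usually" rather than "always" one.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var sf singleflight.Group
	var calls int32

	load := func() (interface{}, error) {
		atomic.AddInt32(&calls, 1)
		return "expensive value", nil
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Concurrent callers for the same key share one load() call.
			v, _, _ := sf.Do("key", load)
			_ = v
		}()
	}
	wg.Wait()
	fmt.Println("loads executed:", atomic.LoadInt32(&calls)) // usually 1
}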
161,642
golang/groupcache
groupcache.go
CacheStats
func (g *Group) CacheStats(which CacheType) CacheStats { switch which { case MainCache: return g.mainCache.stats() case HotCache: return g.hotCache.stats() default: return CacheStats{} } }
go
func (g *Group) CacheStats(which CacheType) CacheStats { switch which { case MainCache: return g.mainCache.stats() case HotCache: return g.hotCache.stats() default: return CacheStats{} } }
[ "func", "(", "g", "*", "Group", ")", "CacheStats", "(", "which", "CacheType", ")", "CacheStats", "{", "switch", "which", "{", "case", "MainCache", ":", "return", "g", ".", "mainCache", ".", "stats", "(", ")", "\n", "case", "HotCache", ":", "return", "g", ".", "hotCache", ".", "stats", "(", ")", "\n", "default", ":", "return", "CacheStats", "{", "}", "\n", "}", "\n", "}" ]
// CacheStats returns stats about the provided cache within the group.
[ "CacheStats", "returns", "stats", "about", "the", "provided", "cache", "within", "the", "group", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/groupcache.go#L375-L384
161,643
golang/groupcache
groupcache.go
Add
func (i *AtomicInt) Add(n int64) { atomic.AddInt64((*int64)(i), n) }
go
func (i *AtomicInt) Add(n int64) { atomic.AddInt64((*int64)(i), n) }
[ "func", "(", "i", "*", "AtomicInt", ")", "Add", "(", "n", "int64", ")", "{", "atomic", ".", "AddInt64", "(", "(", "*", "int64", ")", "(", "i", ")", ",", "n", ")", "\n", "}" ]
// Add atomically adds n to i.
[ "Add", "atomically", "adds", "n", "to", "i", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/groupcache.go#L471-L473
161,644
golang/groupcache
sinks.go
ByteViewSink
func ByteViewSink(dst *ByteView) Sink { if dst == nil { panic("nil dst") } return &byteViewSink{dst: dst} }
go
func ByteViewSink(dst *ByteView) Sink { if dst == nil { panic("nil dst") } return &byteViewSink{dst: dst} }
[ "func", "ByteViewSink", "(", "dst", "*", "ByteView", ")", "Sink", "{", "if", "dst", "==", "nil", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "return", "&", "byteViewSink", "{", "dst", ":", "dst", "}", "\n", "}" ]
// ByteViewSink returns a Sink that populates a ByteView.
[ "ByteViewSink", "returns", "a", "Sink", "that", "populates", "a", "ByteView", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/sinks.go#L106-L111
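ByteViewSink pairs naturally with Group.Get: the sink captures the value as a ByteView, whose accessors appear in the byteview.go records below. A small usage sketch, assuming the API at the commit referenced above (where Context is an empty interface); the group name and getter are made up for illustration:

package main

import (
	"fmt"

	"github.com/golang/groupcache"
)

func main() {
	// Hypothetical group: the getter runs only on cache misses.
	g := groupcache.NewGroup("greetings", 1<<20, groupcache.GetterFunc(
		func(_ groupcache.Context, key string, dest groupcache.Sink) error {
			return dest.SetString("hello " + key)
		}))

	var view groupcache.ByteView
	if err := g.Get(nil, "gopher", groupcache.ByteViewSink(&view)); err != nil {
		panic(err)
	}
	// ByteView accessors from the records that follow: Len, String, ByteSlice.
	fmt.Println(view.Len(), view.String())
}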
161,645
golang/groupcache
peers.go
RegisterPeerPicker
func RegisterPeerPicker(fn func() PeerPicker) { if portPicker != nil { panic("RegisterPeerPicker called more than once") } portPicker = func(_ string) PeerPicker { return fn() } }
go
func RegisterPeerPicker(fn func() PeerPicker) { if portPicker != nil { panic("RegisterPeerPicker called more than once") } portPicker = func(_ string) PeerPicker { return fn() } }
[ "func", "RegisterPeerPicker", "(", "fn", "func", "(", ")", "PeerPicker", ")", "{", "if", "portPicker", "!=", "nil", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "portPicker", "=", "func", "(", "_", "string", ")", "PeerPicker", "{", "return", "fn", "(", ")", "}", "\n", "}" ]
// RegisterPeerPicker registers the peer initialization function. // It is called once, when the first group is created. // Either RegisterPeerPicker or RegisterPerGroupPeerPicker should be // called exactly once, but not both.
[ "RegisterPeerPicker", "registers", "the", "peer", "initialization", "function", ".", "It", "is", "called", "once", "when", "the", "first", "group", "is", "created", ".", "Either", "RegisterPeerPicker", "or", "RegisterPerGroupPeerPicker", "should", "be", "called", "exactly", "once", "but", "not", "both", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/peers.go#L57-L62
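Application code rarely calls RegisterPeerPicker directly; constructing an HTTP peer pool is the usual way the hook gets set, since the pool registers itself as the process-wide PeerPicker. A hedged sketch with placeholder addresses, assuming the HTTPPool API at this commit:

package main

import (
	"log"
	"net/http"

	"github.com/golang/groupcache"
)

func main() {
	// NewHTTPPool registers the pool as the peer picker; doing this (or
	// calling RegisterPeerPicker) twice panics, so register exactly once.
	pool := groupcache.NewHTTPPool("http://127.0.0.1:8080")
	pool.Set("http://127.0.0.1:8080", "http://127.0.0.1:8081") // placeholder peers

	// The pool serves peer-to-peer groupcache requests over HTTP.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", pool))
}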
161,646
golang/groupcache
peers.go
RegisterPerGroupPeerPicker
func RegisterPerGroupPeerPicker(fn func(groupName string) PeerPicker) { if portPicker != nil { panic("RegisterPeerPicker called more than once") } portPicker = fn }
go
func RegisterPerGroupPeerPicker(fn func(groupName string) PeerPicker) { if portPicker != nil { panic("RegisterPeerPicker called more than once") } portPicker = fn }
[ "func", "RegisterPerGroupPeerPicker", "(", "fn", "func", "(", "groupName", "string", ")", "PeerPicker", ")", "{", "if", "portPicker", "!=", "nil", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "portPicker", "=", "fn", "\n", "}" ]
// RegisterPerGroupPeerPicker registers the peer initialization function, // which takes the groupName, to be used in choosing a PeerPicker. // It is called once, when the first group is created. // Either RegisterPeerPicker or RegisterPerGroupPeerPicker should be // called exactly once, but not both.
[ "RegisterPerGroupPeerPicker", "registers", "the", "peer", "initialization", "function", "which", "takes", "the", "groupName", "to", "be", "used", "in", "choosing", "a", "PeerPicker", ".", "It", "is", "called", "once", "when", "the", "first", "group", "is", "created", ".", "Either", "RegisterPeerPicker", "or", "RegisterPerGroupPeerPicker", "should", "be", "called", "exactly", "once", "but", "not", "both", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/peers.go#L69-L74
161,647
golang/groupcache
byteview.go
Len
func (v ByteView) Len() int { if v.b != nil { return len(v.b) } return len(v.s) }
go
func (v ByteView) Len() int { if v.b != nil { return len(v.b) } return len(v.s) }
[ "func", "(", "v", "ByteView", ")", "Len", "(", ")", "int", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "len", "(", "v", ".", "b", ")", "\n", "}", "\n", "return", "len", "(", "v", ".", "s", ")", "\n", "}" ]
// Len returns the view's length.
[ "Len", "returns", "the", "view", "s", "length", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L39-L44
161,648
golang/groupcache
byteview.go
ByteSlice
func (v ByteView) ByteSlice() []byte { if v.b != nil { return cloneBytes(v.b) } return []byte(v.s) }
go
func (v ByteView) ByteSlice() []byte { if v.b != nil { return cloneBytes(v.b) } return []byte(v.s) }
[ "func", "(", "v", "ByteView", ")", "ByteSlice", "(", ")", "[", "]", "byte", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "cloneBytes", "(", "v", ".", "b", ")", "\n", "}", "\n", "return", "[", "]", "byte", "(", "v", ".", "s", ")", "\n", "}" ]
// ByteSlice returns a copy of the data as a byte slice.
[ "ByteSlice", "returns", "a", "copy", "of", "the", "data", "as", "a", "byte", "slice", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L47-L52
161,649
golang/groupcache
byteview.go
String
func (v ByteView) String() string { if v.b != nil { return string(v.b) } return v.s }
go
func (v ByteView) String() string { if v.b != nil { return string(v.b) } return v.s }
[ "func", "(", "v", "ByteView", ")", "String", "(", ")", "string", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "string", "(", "v", ".", "b", ")", "\n", "}", "\n", "return", "v", ".", "s", "\n", "}" ]
// String returns the data as a string, making a copy if necessary.
[ "String", "returns", "the", "data", "as", "a", "string", "making", "a", "copy", "if", "necessary", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L55-L60
161,650
golang/groupcache
byteview.go
At
func (v ByteView) At(i int) byte { if v.b != nil { return v.b[i] } return v.s[i] }
go
func (v ByteView) At(i int) byte { if v.b != nil { return v.b[i] } return v.s[i] }
[ "func", "(", "v", "ByteView", ")", "At", "(", "i", "int", ")", "byte", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "v", ".", "b", "[", "i", "]", "\n", "}", "\n", "return", "v", ".", "s", "[", "i", "]", "\n", "}" ]
// At returns the byte at index i.
[ "At", "returns", "the", "byte", "at", "index", "i", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L63-L68
161,651
golang/groupcache
byteview.go
Slice
func (v ByteView) Slice(from, to int) ByteView { if v.b != nil { return ByteView{b: v.b[from:to]} } return ByteView{s: v.s[from:to]} }
go
func (v ByteView) Slice(from, to int) ByteView { if v.b != nil { return ByteView{b: v.b[from:to]} } return ByteView{s: v.s[from:to]} }
[ "func", "(", "v", "ByteView", ")", "Slice", "(", "from", ",", "to", "int", ")", "ByteView", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "ByteView", "{", "b", ":", "v", ".", "b", "[", "from", ":", "to", "]", "}", "\n", "}", "\n", "return", "ByteView", "{", "s", ":", "v", ".", "s", "[", "from", ":", "to", "]", "}", "\n", "}" ]
// Slice slices the view between the provided from and to indices.
[ "Slice", "slices", "the", "view", "between", "the", "provided", "from", "and", "to", "indices", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L71-L76
161,652
golang/groupcache
byteview.go
SliceFrom
func (v ByteView) SliceFrom(from int) ByteView { if v.b != nil { return ByteView{b: v.b[from:]} } return ByteView{s: v.s[from:]} }
go
func (v ByteView) SliceFrom(from int) ByteView { if v.b != nil { return ByteView{b: v.b[from:]} } return ByteView{s: v.s[from:]} }
[ "func", "(", "v", "ByteView", ")", "SliceFrom", "(", "from", "int", ")", "ByteView", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "ByteView", "{", "b", ":", "v", ".", "b", "[", "from", ":", "]", "}", "\n", "}", "\n", "return", "ByteView", "{", "s", ":", "v", ".", "s", "[", "from", ":", "]", "}", "\n", "}" ]
// SliceFrom slices the view from the provided index until the end.
[ "SliceFrom", "slices", "the", "view", "from", "the", "provided", "index", "until", "the", "end", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L79-L84
161,653
golang/groupcache
byteview.go
Copy
func (v ByteView) Copy(dest []byte) int { if v.b != nil { return copy(dest, v.b) } return copy(dest, v.s) }
go
func (v ByteView) Copy(dest []byte) int { if v.b != nil { return copy(dest, v.b) } return copy(dest, v.s) }
[ "func", "(", "v", "ByteView", ")", "Copy", "(", "dest", "[", "]", "byte", ")", "int", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "copy", "(", "dest", ",", "v", ".", "b", ")", "\n", "}", "\n", "return", "copy", "(", "dest", ",", "v", ".", "s", ")", "\n", "}" ]
// Copy copies b into dest and returns the number of bytes copied.
[ "Copy", "copies", "b", "into", "dest", "and", "returns", "the", "number", "of", "bytes", "copied", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L87-L92
161,654
golang/groupcache
byteview.go
Equal
func (v ByteView) Equal(b2 ByteView) bool { if b2.b == nil { return v.EqualString(b2.s) } return v.EqualBytes(b2.b) }
go
func (v ByteView) Equal(b2 ByteView) bool { if b2.b == nil { return v.EqualString(b2.s) } return v.EqualBytes(b2.b) }
[ "func", "(", "v", "ByteView", ")", "Equal", "(", "b2", "ByteView", ")", "bool", "{", "if", "b2", ".", "b", "==", "nil", "{", "return", "v", ".", "EqualString", "(", "b2", ".", "s", ")", "\n", "}", "\n", "return", "v", ".", "EqualBytes", "(", "b2", ".", "b", ")", "\n", "}" ]
// Equal returns whether the bytes in b are the same as the bytes in // b2.
[ "Equal", "returns", "whether", "the", "bytes", "in", "b", "are", "the", "same", "as", "the", "bytes", "in", "b2", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L96-L101
161,655
golang/groupcache
byteview.go
EqualString
func (v ByteView) EqualString(s string) bool { if v.b == nil { return v.s == s } l := v.Len() if len(s) != l { return false } for i, bi := range v.b { if bi != s[i] { return false } } return true }
go
func (v ByteView) EqualString(s string) bool { if v.b == nil { return v.s == s } l := v.Len() if len(s) != l { return false } for i, bi := range v.b { if bi != s[i] { return false } } return true }
[ "func", "(", "v", "ByteView", ")", "EqualString", "(", "s", "string", ")", "bool", "{", "if", "v", ".", "b", "==", "nil", "{", "return", "v", ".", "s", "==", "s", "\n", "}", "\n", "l", ":=", "v", ".", "Len", "(", ")", "\n", "if", "len", "(", "s", ")", "!=", "l", "{", "return", "false", "\n", "}", "\n", "for", "i", ",", "bi", ":=", "range", "v", ".", "b", "{", "if", "bi", "!=", "s", "[", "i", "]", "{", "return", "false", "\n", "}", "\n", "}", "\n", "return", "true", "\n", "}" ]
// EqualString returns whether the bytes in b are the same as the bytes // in s.
[ "EqualString", "returns", "whether", "the", "bytes", "in", "b", "are", "the", "same", "as", "the", "bytes", "in", "s", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L105-L119
161,656
golang/groupcache
byteview.go
EqualBytes
func (v ByteView) EqualBytes(b2 []byte) bool { if v.b != nil { return bytes.Equal(v.b, b2) } l := v.Len() if len(b2) != l { return false } for i, bi := range b2 { if bi != v.s[i] { return false } } return true }
go
func (v ByteView) EqualBytes(b2 []byte) bool { if v.b != nil { return bytes.Equal(v.b, b2) } l := v.Len() if len(b2) != l { return false } for i, bi := range b2 { if bi != v.s[i] { return false } } return true }
[ "func", "(", "v", "ByteView", ")", "EqualBytes", "(", "b2", "[", "]", "byte", ")", "bool", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "bytes", ".", "Equal", "(", "v", ".", "b", ",", "b2", ")", "\n", "}", "\n", "l", ":=", "v", ".", "Len", "(", ")", "\n", "if", "len", "(", "b2", ")", "!=", "l", "{", "return", "false", "\n", "}", "\n", "for", "i", ",", "bi", ":=", "range", "b2", "{", "if", "bi", "!=", "v", ".", "s", "[", "i", "]", "{", "return", "false", "\n", "}", "\n", "}", "\n", "return", "true", "\n", "}" ]
// EqualBytes returns whether the bytes in b are the same as the bytes // in b2.
[ "EqualBytes", "returns", "whether", "the", "bytes", "in", "b", "are", "the", "same", "as", "the", "bytes", "in", "b2", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L123-L137
161,657
golang/groupcache
byteview.go
Reader
func (v ByteView) Reader() io.ReadSeeker { if v.b != nil { return bytes.NewReader(v.b) } return strings.NewReader(v.s) }
go
func (v ByteView) Reader() io.ReadSeeker { if v.b != nil { return bytes.NewReader(v.b) } return strings.NewReader(v.s) }
[ "func", "(", "v", "ByteView", ")", "Reader", "(", ")", "io", ".", "ReadSeeker", "{", "if", "v", ".", "b", "!=", "nil", "{", "return", "bytes", ".", "NewReader", "(", "v", ".", "b", ")", "\n", "}", "\n", "return", "strings", ".", "NewReader", "(", "v", ".", "s", ")", "\n", "}" ]
// Reader returns an io.ReadSeeker for the bytes in v.
[ "Reader", "returns", "an", "io", ".", "ReadSeeker", "for", "the", "bytes", "in", "v", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L140-L145
161,658
golang/groupcache
byteview.go
ReadAt
func (v ByteView) ReadAt(p []byte, off int64) (n int, err error) { if off < 0 { return 0, errors.New("view: invalid offset") } if off >= int64(v.Len()) { return 0, io.EOF } n = v.SliceFrom(int(off)).Copy(p) if n < len(p) { err = io.EOF } return }
go
func (v ByteView) ReadAt(p []byte, off int64) (n int, err error) { if off < 0 { return 0, errors.New("view: invalid offset") } if off >= int64(v.Len()) { return 0, io.EOF } n = v.SliceFrom(int(off)).Copy(p) if n < len(p) { err = io.EOF } return }
[ "func", "(", "v", "ByteView", ")", "ReadAt", "(", "p", "[", "]", "byte", ",", "off", "int64", ")", "(", "n", "int", ",", "err", "error", ")", "{", "if", "off", "<", "0", "{", "return", "0", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "off", ">=", "int64", "(", "v", ".", "Len", "(", ")", ")", "{", "return", "0", ",", "io", ".", "EOF", "\n", "}", "\n", "n", "=", "v", ".", "SliceFrom", "(", "int", "(", "off", ")", ")", ".", "Copy", "(", "p", ")", "\n", "if", "n", "<", "len", "(", "p", ")", "{", "err", "=", "io", ".", "EOF", "\n", "}", "\n", "return", "\n", "}" ]
// ReadAt implements io.ReaderAt on the bytes in v.
[ "ReadAt", "implements", "io", ".", "ReaderAt", "on", "the", "bytes", "in", "v", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L148-L160
161,659
golang/groupcache
byteview.go
WriteTo
func (v ByteView) WriteTo(w io.Writer) (n int64, err error) { var m int if v.b != nil { m, err = w.Write(v.b) } else { m, err = io.WriteString(w, v.s) } if err == nil && m < v.Len() { err = io.ErrShortWrite } n = int64(m) return }
go
func (v ByteView) WriteTo(w io.Writer) (n int64, err error) { var m int if v.b != nil { m, err = w.Write(v.b) } else { m, err = io.WriteString(w, v.s) } if err == nil && m < v.Len() { err = io.ErrShortWrite } n = int64(m) return }
[ "func", "(", "v", "ByteView", ")", "WriteTo", "(", "w", "io", ".", "Writer", ")", "(", "n", "int64", ",", "err", "error", ")", "{", "var", "m", "int", "\n", "if", "v", ".", "b", "!=", "nil", "{", "m", ",", "err", "=", "w", ".", "Write", "(", "v", ".", "b", ")", "\n", "}", "else", "{", "m", ",", "err", "=", "io", ".", "WriteString", "(", "w", ",", "v", ".", "s", ")", "\n", "}", "\n", "if", "err", "==", "nil", "&&", "m", "<", "v", ".", "Len", "(", ")", "{", "err", "=", "io", ".", "ErrShortWrite", "\n", "}", "\n", "n", "=", "int64", "(", "m", ")", "\n", "return", "\n", "}" ]
// WriteTo implements io.WriterTo on the bytes in v.
[ "WriteTo", "implements", "io", ".", "WriterTo", "on", "the", "bytes", "in", "v", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/byteview.go#L163-L175
161,660
golang/groupcache
consistenthash/consistenthash.go
Add
func (m *Map) Add(keys ...string) { for _, key := range keys { for i := 0; i < m.replicas; i++ { hash := int(m.hash([]byte(strconv.Itoa(i) + key))) m.keys = append(m.keys, hash) m.hashMap[hash] = key } } sort.Ints(m.keys) }
go
func (m *Map) Add(keys ...string) { for _, key := range keys { for i := 0; i < m.replicas; i++ { hash := int(m.hash([]byte(strconv.Itoa(i) + key))) m.keys = append(m.keys, hash) m.hashMap[hash] = key } } sort.Ints(m.keys) }
[ "func", "(", "m", "*", "Map", ")", "Add", "(", "keys", "...", "string", ")", "{", "for", "_", ",", "key", ":=", "range", "keys", "{", "for", "i", ":=", "0", ";", "i", "<", "m", ".", "replicas", ";", "i", "++", "{", "hash", ":=", "int", "(", "m", ".", "hash", "(", "[", "]", "byte", "(", "strconv", ".", "Itoa", "(", "i", ")", "+", "key", ")", ")", ")", "\n", "m", ".", "keys", "=", "append", "(", "m", ".", "keys", ",", "hash", ")", "\n", "m", ".", "hashMap", "[", "hash", "]", "=", "key", "\n", "}", "\n", "}", "\n", "sort", ".", "Ints", "(", "m", ".", "keys", ")", "\n", "}" ]
// Adds some keys to the hash.
[ "Adds", "some", "keys", "to", "the", "hash", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/consistenthash/consistenthash.go#L53-L62
161,661
golang/groupcache
consistenthash/consistenthash.go
Get
func (m *Map) Get(key string) string { if m.IsEmpty() { return "" } hash := int(m.hash([]byte(key))) // Binary search for appropriate replica. idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash }) // Means we have cycled back to the first replica. if idx == len(m.keys) { idx = 0 } return m.hashMap[m.keys[idx]] }
go
func (m *Map) Get(key string) string { if m.IsEmpty() { return "" } hash := int(m.hash([]byte(key))) // Binary search for appropriate replica. idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash }) // Means we have cycled back to the first replica. if idx == len(m.keys) { idx = 0 } return m.hashMap[m.keys[idx]] }
[ "func", "(", "m", "*", "Map", ")", "Get", "(", "key", "string", ")", "string", "{", "if", "m", ".", "IsEmpty", "(", ")", "{", "return", "\"", "\"", "\n", "}", "\n\n", "hash", ":=", "int", "(", "m", ".", "hash", "(", "[", "]", "byte", "(", "key", ")", ")", ")", "\n\n", "// Binary search for appropriate replica.", "idx", ":=", "sort", ".", "Search", "(", "len", "(", "m", ".", "keys", ")", ",", "func", "(", "i", "int", ")", "bool", "{", "return", "m", ".", "keys", "[", "i", "]", ">=", "hash", "}", ")", "\n\n", "// Means we have cycled back to the first replica.", "if", "idx", "==", "len", "(", "m", ".", "keys", ")", "{", "idx", "=", "0", "\n", "}", "\n\n", "return", "m", ".", "hashMap", "[", "m", ".", "keys", "[", "idx", "]", "]", "\n", "}" ]
// Gets the closest item in the hash to the provided key.
[ "Gets", "the", "closest", "item", "in", "the", "hash", "to", "the", "provided", "key", "." ]
5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b
https://github.com/golang/groupcache/blob/5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b/consistenthash/consistenthash.go#L65-L81
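The two consistenthash records above (Add and Get) read best side by side with a small driver. A sketch; the replica count and peer names are arbitrary, and passing nil to New falls back to the package's default crc32 hash:

package main

import (
	"fmt"

	"github.com/golang/groupcache/consistenthash"
)

func main() {
	ring := consistenthash.New(3, nil) // 3 virtual nodes per key, default crc32 hash
	ring.Add("peer-a", "peer-b", "peer-c")

	for _, key := range []string{"user:1", "user:2", "user:3"} {
		// Get binary-searches the sorted virtual-node hashes for the first
		// entry >= hash(key), wrapping to index 0 past the end.
		fmt.Println(key, "->", ring.Get(key))
	}
}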
161,662
argoproj/argo-cd
util/rbac/rbac.go
Enforce
func (e *Enforcer) Enforce(rvals ...interface{}) bool { return enforce(e.Enforcer, e.defaultRole, e.claimsEnforcerFunc, rvals...) }
go
func (e *Enforcer) Enforce(rvals ...interface{}) bool { return enforce(e.Enforcer, e.defaultRole, e.claimsEnforcerFunc, rvals...) }
[ "func", "(", "e", "*", "Enforcer", ")", "Enforce", "(", "rvals", "...", "interface", "{", "}", ")", "bool", "{", "return", "enforce", "(", "e", ".", "Enforcer", ",", "e", ".", "defaultRole", ",", "e", ".", "claimsEnforcerFunc", ",", "rvals", "...", ")", "\n", "}" ]
// Enforce is a wrapper around casbin.Enforce to additionally enforce a default role and a custom // claims function
[ "Enforce", "is", "a", "wrapper", "around", "casbin", ".", "Enforce", "to", "additionally", "enforce", "a", "default", "role", "and", "a", "custom", "claims", "function" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L86-L88
161,663
argoproj/argo-cd
util/rbac/rbac.go
EnforceErr
func (e *Enforcer) EnforceErr(rvals ...interface{}) error { if !e.Enforce(rvals...) { errMsg := "permission denied" if len(rvals) > 0 { rvalsStrs := make([]string, len(rvals)-1) for i, rval := range rvals[1:] { rvalsStrs[i] = fmt.Sprintf("%s", rval) } errMsg = fmt.Sprintf("%s: %s", errMsg, strings.Join(rvalsStrs, ", ")) } return status.Error(codes.PermissionDenied, errMsg) } return nil }
go
func (e *Enforcer) EnforceErr(rvals ...interface{}) error { if !e.Enforce(rvals...) { errMsg := "permission denied" if len(rvals) > 0 { rvalsStrs := make([]string, len(rvals)-1) for i, rval := range rvals[1:] { rvalsStrs[i] = fmt.Sprintf("%s", rval) } errMsg = fmt.Sprintf("%s: %s", errMsg, strings.Join(rvalsStrs, ", ")) } return status.Error(codes.PermissionDenied, errMsg) } return nil }
[ "func", "(", "e", "*", "Enforcer", ")", "EnforceErr", "(", "rvals", "...", "interface", "{", "}", ")", "error", "{", "if", "!", "e", ".", "Enforce", "(", "rvals", "...", ")", "{", "errMsg", ":=", "\"", "\"", "\n", "if", "len", "(", "rvals", ")", ">", "0", "{", "rvalsStrs", ":=", "make", "(", "[", "]", "string", ",", "len", "(", "rvals", ")", "-", "1", ")", "\n", "for", "i", ",", "rval", ":=", "range", "rvals", "[", "1", ":", "]", "{", "rvalsStrs", "[", "i", "]", "=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "rval", ")", "\n", "}", "\n", "errMsg", "=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "errMsg", ",", "strings", ".", "Join", "(", "rvalsStrs", ",", "\"", "\"", ")", ")", "\n", "}", "\n", "return", "status", ".", "Error", "(", "codes", ".", "PermissionDenied", ",", "errMsg", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// EnforceErr is a convenience helper to wrap a failed enforcement with a detailed error about the request
[ "EnforceErr", "is", "a", "convenience", "helper", "to", "wrap", "a", "failed", "enforcement", "with", "a", "detailed", "error", "about", "the", "request" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L91-L104
161,664
argoproj/argo-cd
util/rbac/rbac.go
EnforceRuntimePolicy
func (e *Enforcer) EnforceRuntimePolicy(policy string, rvals ...interface{}) bool { var enf *casbin.Enforcer var err error if policy == "" { enf = e.Enforcer } else { enf, err = casbin.NewEnforcerSafe(newBuiltInModel(), newAdapter(e.adapter.builtinPolicy, e.adapter.userDefinedPolicy, policy)) if err != nil { log.Warnf("invalid runtime policy: %s", policy) enf = e.Enforcer } } return enforce(enf, e.defaultRole, e.claimsEnforcerFunc, rvals...) }
go
func (e *Enforcer) EnforceRuntimePolicy(policy string, rvals ...interface{}) bool { var enf *casbin.Enforcer var err error if policy == "" { enf = e.Enforcer } else { enf, err = casbin.NewEnforcerSafe(newBuiltInModel(), newAdapter(e.adapter.builtinPolicy, e.adapter.userDefinedPolicy, policy)) if err != nil { log.Warnf("invalid runtime policy: %s", policy) enf = e.Enforcer } } return enforce(enf, e.defaultRole, e.claimsEnforcerFunc, rvals...) }
[ "func", "(", "e", "*", "Enforcer", ")", "EnforceRuntimePolicy", "(", "policy", "string", ",", "rvals", "...", "interface", "{", "}", ")", "bool", "{", "var", "enf", "*", "casbin", ".", "Enforcer", "\n", "var", "err", "error", "\n", "if", "policy", "==", "\"", "\"", "{", "enf", "=", "e", ".", "Enforcer", "\n", "}", "else", "{", "enf", ",", "err", "=", "casbin", ".", "NewEnforcerSafe", "(", "newBuiltInModel", "(", ")", ",", "newAdapter", "(", "e", ".", "adapter", ".", "builtinPolicy", ",", "e", ".", "adapter", ".", "userDefinedPolicy", ",", "policy", ")", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Warnf", "(", "\"", "\"", ",", "policy", ")", "\n", "enf", "=", "e", ".", "Enforcer", "\n", "}", "\n", "}", "\n", "return", "enforce", "(", "enf", ",", "e", ".", "defaultRole", ",", "e", ".", "claimsEnforcerFunc", ",", "rvals", "...", ")", "\n", "}" ]
// EnforceRuntimePolicy enforces a policy defined at run-time which augments the built-in and // user-defined policy. This allows any explicit denies of the built-in, and user-defined policies // to override the run-time policy. Runs normal enforcement if run-time policy is empty.
[ "EnforceRuntimePolicy", "enforces", "a", "policy", "defined", "at", "run", "-", "time", "which", "augments", "the", "built", "-", "in", "and", "user", "-", "defined", "policy", ".", "This", "allows", "any", "explicit", "denies", "of", "the", "built", "-", "in", "and", "user", "-", "defined", "policies", "to", "override", "the", "run", "-", "time", "policy", ".", "Runs", "normal", "enforcement", "if", "run", "-", "time", "policy", "is", "empty", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L109-L122
161,665
argoproj/argo-cd
util/rbac/rbac.go
enforce
func enforce(enf *casbin.Enforcer, defaultRole string, claimsEnforcerFunc ClaimsEnforcerFunc, rvals ...interface{}) bool { // check the default role if defaultRole != "" && len(rvals) >= 2 { if enf.Enforce(append([]interface{}{defaultRole}, rvals[1:]...)...) { return true } } if len(rvals) == 0 { return false } // check if subject is jwt.Claims vs. a normal subject string and run custom claims // enforcement func (if set) sub := rvals[0] switch s := sub.(type) { case string: // noop case jwt.Claims: if claimsEnforcerFunc != nil && claimsEnforcerFunc(s, rvals...) { return true } rvals = append([]interface{}{""}, rvals[1:]...) default: rvals = append([]interface{}{""}, rvals[1:]...) } return enf.Enforce(rvals...) }
go
func enforce(enf *casbin.Enforcer, defaultRole string, claimsEnforcerFunc ClaimsEnforcerFunc, rvals ...interface{}) bool { // check the default role if defaultRole != "" && len(rvals) >= 2 { if enf.Enforce(append([]interface{}{defaultRole}, rvals[1:]...)...) { return true } } if len(rvals) == 0 { return false } // check if subject is jwt.Claims vs. a normal subject string and run custom claims // enforcement func (if set) sub := rvals[0] switch s := sub.(type) { case string: // noop case jwt.Claims: if claimsEnforcerFunc != nil && claimsEnforcerFunc(s, rvals...) { return true } rvals = append([]interface{}{""}, rvals[1:]...) default: rvals = append([]interface{}{""}, rvals[1:]...) } return enf.Enforce(rvals...) }
[ "func", "enforce", "(", "enf", "*", "casbin", ".", "Enforcer", ",", "defaultRole", "string", ",", "claimsEnforcerFunc", "ClaimsEnforcerFunc", ",", "rvals", "...", "interface", "{", "}", ")", "bool", "{", "// check the default role", "if", "defaultRole", "!=", "\"", "\"", "&&", "len", "(", "rvals", ")", ">=", "2", "{", "if", "enf", ".", "Enforce", "(", "append", "(", "[", "]", "interface", "{", "}", "{", "defaultRole", "}", ",", "rvals", "[", "1", ":", "]", "...", ")", "...", ")", "{", "return", "true", "\n", "}", "\n", "}", "\n", "if", "len", "(", "rvals", ")", "==", "0", "{", "return", "false", "\n", "}", "\n", "// check if subject is jwt.Claims vs. a normal subject string and run custom claims", "// enforcement func (if set)", "sub", ":=", "rvals", "[", "0", "]", "\n", "switch", "s", ":=", "sub", ".", "(", "type", ")", "{", "case", "string", ":", "// noop", "case", "jwt", ".", "Claims", ":", "if", "claimsEnforcerFunc", "!=", "nil", "&&", "claimsEnforcerFunc", "(", "s", ",", "rvals", "...", ")", "{", "return", "true", "\n", "}", "\n", "rvals", "=", "append", "(", "[", "]", "interface", "{", "}", "{", "\"", "\"", "}", ",", "rvals", "[", "1", ":", "]", "...", ")", "\n", "default", ":", "rvals", "=", "append", "(", "[", "]", "interface", "{", "}", "{", "\"", "\"", "}", ",", "rvals", "[", "1", ":", "]", "...", ")", "\n", "}", "\n", "return", "enf", ".", "Enforce", "(", "rvals", "...", ")", "\n", "}" ]
// enforce is a helper to additionally check a default role and invoke a custom claims enforcement function
[ "enforce", "is", "a", "helper", "to", "additionally", "check", "a", "default", "role", "and", "invoke", "a", "custom", "claims", "enforcement", "function" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L125-L150
161,666
argoproj/argo-cd
util/rbac/rbac.go
SetBuiltinPolicy
func (e *Enforcer) SetBuiltinPolicy(policy string) error { e.adapter.builtinPolicy = policy return e.LoadPolicy() }
go
func (e *Enforcer) SetBuiltinPolicy(policy string) error { e.adapter.builtinPolicy = policy return e.LoadPolicy() }
[ "func", "(", "e", "*", "Enforcer", ")", "SetBuiltinPolicy", "(", "policy", "string", ")", "error", "{", "e", ".", "adapter", ".", "builtinPolicy", "=", "policy", "\n", "return", "e", ".", "LoadPolicy", "(", ")", "\n", "}" ]
// SetBuiltinPolicy sets a built-in policy, which augments any user defined policies
[ "SetBuiltinPolicy", "sets", "a", "built", "-", "in", "policy", "which", "augments", "any", "user", "defined", "policies" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L153-L156
161,667
argoproj/argo-cd
util/rbac/rbac.go
SetUserPolicy
func (e *Enforcer) SetUserPolicy(policy string) error { e.adapter.userDefinedPolicy = policy return e.LoadPolicy() }
go
func (e *Enforcer) SetUserPolicy(policy string) error { e.adapter.userDefinedPolicy = policy return e.LoadPolicy() }
[ "func", "(", "e", "*", "Enforcer", ")", "SetUserPolicy", "(", "policy", "string", ")", "error", "{", "e", ".", "adapter", ".", "userDefinedPolicy", "=", "policy", "\n", "return", "e", ".", "LoadPolicy", "(", ")", "\n", "}" ]
// SetUserPolicy sets a user policy, augmenting the built-in policy
[ "SetUserPolicy", "sets", "a", "user", "policy", "augmenting", "the", "built", "-", "in", "policy" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L159-L162
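Taken together, SetBuiltinPolicy, SetUserPolicy, and Enforce form the usual wiring. A hedged sketch only: the NewEnforcer constructor signature (clientset, namespace, configmap name, claims-enforcer func) and the six-column policy CSV grammar (subject, resource, action, object, effect) are assumptions from Argo CD of roughly this vintage, not confirmed by the records above:

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/util/rbac"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Assumed constructor; the fake clientset stands in for a real cluster.
	enf := rbac.NewEnforcer(fake.NewSimpleClientset(), "argocd", "argocd-rbac-cm", nil)

	// Built-in policy is augmented by the user-defined policy (see the
	// SetBuiltinPolicy/SetUserPolicy docstrings above).
	_ = enf.SetBuiltinPolicy(`p, role:readonly, applications, get, */*, allow`)
	_ = enf.SetUserPolicy(`g, alice, role:readonly`)

	fmt.Println(enf.Enforce("alice", "applications", "get", "default/guestbook"))
	fmt.Println(enf.Enforce("alice", "applications", "delete", "default/guestbook"))
}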
161,668
argoproj/argo-cd
util/rbac/rbac.go
newInformer
func (e *Enforcer) newInformer() cache.SharedIndexInformer { tweakConfigMap := func(options *metav1.ListOptions) { cmFieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", e.configmap)) options.FieldSelector = cmFieldSelector.String() } return v1.NewFilteredConfigMapInformer(e.clientset, e.namespace, defaultRBACSyncPeriod, cache.Indexers{}, tweakConfigMap) }
go
func (e *Enforcer) newInformer() cache.SharedIndexInformer { tweakConfigMap := func(options *metav1.ListOptions) { cmFieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", e.configmap)) options.FieldSelector = cmFieldSelector.String() } return v1.NewFilteredConfigMapInformer(e.clientset, e.namespace, defaultRBACSyncPeriod, cache.Indexers{}, tweakConfigMap) }
[ "func", "(", "e", "*", "Enforcer", ")", "newInformer", "(", ")", "cache", ".", "SharedIndexInformer", "{", "tweakConfigMap", ":=", "func", "(", "options", "*", "metav1", ".", "ListOptions", ")", "{", "cmFieldSelector", ":=", "fields", ".", "ParseSelectorOrDie", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "e", ".", "configmap", ")", ")", "\n", "options", ".", "FieldSelector", "=", "cmFieldSelector", ".", "String", "(", ")", "\n", "}", "\n", "return", "v1", ".", "NewFilteredConfigMapInformer", "(", "e", ".", "clientset", ",", "e", ".", "namespace", ",", "defaultRBACSyncPeriod", ",", "cache", ".", "Indexers", "{", "}", ",", "tweakConfigMap", ")", "\n", "}" ]
// newInformer returns an informer which watches updates on the rbac configmap
[ "newInformer", "returns", "an", "informer", "which", "watches", "updates", "on", "the", "rbac", "configmap" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L165-L171
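newInformer's field-selector trick (watch a single ConfigMap by name) also works standalone with client-go's filtered informer constructor. A sketch against a fake clientset; the namespace and ConfigMap name are placeholders:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	informersv1 "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	client := fake.NewSimpleClientset()

	// Restrict the informer's list/watch to one ConfigMap by name.
	tweak := func(options *metav1.ListOptions) {
		options.FieldSelector = fields.OneTermEqualSelector("metadata.name", "argocd-rbac-cm").String()
	}
	informer := informersv1.NewFilteredConfigMapInformer(client, "argocd", 10*time.Minute, cache.Indexers{}, tweak)

	stop := make(chan struct{})
	defer close(stop)
	go informer.Run(stop)
	cache.WaitForCacheSync(stop, informer.HasSynced)
	fmt.Println("synced:", informer.HasSynced())
}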
161,669
argoproj/argo-cd
util/rbac/rbac.go
RunPolicyLoader
func (e *Enforcer) RunPolicyLoader(ctx context.Context) error { cm, err := e.clientset.CoreV1().ConfigMaps(e.namespace).Get(e.configmap, metav1.GetOptions{}) if err != nil { if !apierr.IsNotFound(err) { return err } } else { err = e.syncUpdate(cm) if err != nil { return err } } e.runInformer(ctx) return nil }
go
func (e *Enforcer) RunPolicyLoader(ctx context.Context) error { cm, err := e.clientset.CoreV1().ConfigMaps(e.namespace).Get(e.configmap, metav1.GetOptions{}) if err != nil { if !apierr.IsNotFound(err) { return err } } else { err = e.syncUpdate(cm) if err != nil { return err } } e.runInformer(ctx) return nil }
[ "func", "(", "e", "*", "Enforcer", ")", "RunPolicyLoader", "(", "ctx", "context", ".", "Context", ")", "error", "{", "cm", ",", "err", ":=", "e", ".", "clientset", ".", "CoreV1", "(", ")", ".", "ConfigMaps", "(", "e", ".", "namespace", ")", ".", "Get", "(", "e", ".", "configmap", ",", "metav1", ".", "GetOptions", "{", "}", ")", "\n", "if", "err", "!=", "nil", "{", "if", "!", "apierr", ".", "IsNotFound", "(", "err", ")", "{", "return", "err", "\n", "}", "\n", "}", "else", "{", "err", "=", "e", ".", "syncUpdate", "(", "cm", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "e", ".", "runInformer", "(", "ctx", ")", "\n", "return", "nil", "\n", "}" ]
// RunPolicyLoader runs the policy loader which watches policy updates from the configmap and reloads them
[ "RunPolicyLoader", "runs", "the", "policy", "loader", "which", "watches", "policy", "updates", "from", "the", "configmap", "and", "reloads", "them" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L174-L188
161,670
argoproj/argo-cd
util/rbac/rbac.go
syncUpdate
func (e *Enforcer) syncUpdate(cm *apiv1.ConfigMap) error { e.SetDefaultRole(cm.Data[ConfigMapPolicyDefaultKey]) policyCSV, ok := cm.Data[ConfigMapPolicyCSVKey] if !ok { policyCSV = "" } return e.SetUserPolicy(policyCSV) }
go
func (e *Enforcer) syncUpdate(cm *apiv1.ConfigMap) error { e.SetDefaultRole(cm.Data[ConfigMapPolicyDefaultKey]) policyCSV, ok := cm.Data[ConfigMapPolicyCSVKey] if !ok { policyCSV = "" } return e.SetUserPolicy(policyCSV) }
[ "func", "(", "e", "*", "Enforcer", ")", "syncUpdate", "(", "cm", "*", "apiv1", ".", "ConfigMap", ")", "error", "{", "e", ".", "SetDefaultRole", "(", "cm", ".", "Data", "[", "ConfigMapPolicyDefaultKey", "]", ")", "\n", "policyCSV", ",", "ok", ":=", "cm", ".", "Data", "[", "ConfigMapPolicyCSVKey", "]", "\n", "if", "!", "ok", "{", "policyCSV", "=", "\"", "\"", "\n", "}", "\n", "return", "e", ".", "SetUserPolicy", "(", "policyCSV", ")", "\n", "}" ]
// syncUpdate updates the enforcer
[ "syncUpdate", "updates", "the", "enforcer" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L225-L232
161,671
argoproj/argo-cd
util/rbac/rbac.go
ValidatePolicy
func ValidatePolicy(policy string) error { _, err := casbin.NewEnforcerSafe(newBuiltInModel(), newAdapter("", "", policy)) if err != nil { return fmt.Errorf("policy syntax error: %s", policy) } return nil }
go
func ValidatePolicy(policy string) error { _, err := casbin.NewEnforcerSafe(newBuiltInModel(), newAdapter("", "", policy)) if err != nil { return fmt.Errorf("policy syntax error: %s", policy) } return nil }
[ "func", "ValidatePolicy", "(", "policy", "string", ")", "error", "{", "_", ",", "err", ":=", "casbin", ".", "NewEnforcerSafe", "(", "newBuiltInModel", "(", ")", ",", "newAdapter", "(", "\"", "\"", ",", "\"", "\"", ",", "policy", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "policy", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// ValidatePolicy verifies a policy string is acceptable to casbin
[ "ValidatePolicy", "verifies", "a", "policy", "string", "is", "acceptable", "to", "casbin" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/rbac/rbac.go#L235-L241
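ValidatePolicy is the natural pre-check before persisting an RBAC ConfigMap edit. A sketch; the CSV lines follow the six-column grammar assumed in the earlier enforcer sketch and are illustrative only:

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/util/rbac"
)

func main() {
	policy := `p, role:org-admin, applications, get, */*, allow
g, alice, role:org-admin`

	// Reject the update before it ever reaches the configmap.
	if err := rbac.ValidatePolicy(policy); err != nil {
		fmt.Println("rejecting policy:", err)
		return
	}
	fmt.Println("policy accepted")
}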
161,672
argoproj/argo-cd
controller/appcontroller.go
NewApplicationController
func NewApplicationController( namespace string, settingsMgr *settings_util.SettingsManager, kubeClientset kubernetes.Interface, applicationClientset appclientset.Interface, repoClientset reposerver.Clientset, argoCache *argocache.Cache, appResyncPeriod time.Duration, ) (*ApplicationController, error) { db := db.NewDB(namespace, settingsMgr, kubeClientset) settings, err := settingsMgr.GetSettings() if err != nil { return nil, err } kubectlCmd := kube.KubectlCmd{} ctrl := ApplicationController{ cache: argoCache, namespace: namespace, kubeClientset: kubeClientset, kubectl: kubectlCmd, applicationClientset: applicationClientset, repoClientset: repoClientset, appRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), appOperationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), db: db, statusRefreshTimeout: appResyncPeriod, refreshRequestedApps: make(map[string]bool), refreshRequestedAppsMutex: &sync.Mutex{}, auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"), settingsMgr: settingsMgr, settings: settings, } appInformer, appLister := ctrl.newApplicationInformerAndLister() projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, cache.Indexers{}) metricsAddr := fmt.Sprintf("0.0.0.0:%d", common.PortArgoCDMetrics) ctrl.metricsServer = metrics.NewMetricsServer(metricsAddr, appLister) stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settings, kubectlCmd, ctrl.metricsServer, ctrl.handleAppUpdated) appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, ctrl.settings, stateCache, projInformer, ctrl.metricsServer) ctrl.appInformer = appInformer ctrl.appLister = appLister ctrl.projInformer = projInformer ctrl.appStateManager = appStateManager ctrl.stateCache = stateCache return &ctrl, nil }
go
func NewApplicationController( namespace string, settingsMgr *settings_util.SettingsManager, kubeClientset kubernetes.Interface, applicationClientset appclientset.Interface, repoClientset reposerver.Clientset, argoCache *argocache.Cache, appResyncPeriod time.Duration, ) (*ApplicationController, error) { db := db.NewDB(namespace, settingsMgr, kubeClientset) settings, err := settingsMgr.GetSettings() if err != nil { return nil, err } kubectlCmd := kube.KubectlCmd{} ctrl := ApplicationController{ cache: argoCache, namespace: namespace, kubeClientset: kubeClientset, kubectl: kubectlCmd, applicationClientset: applicationClientset, repoClientset: repoClientset, appRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), appOperationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), db: db, statusRefreshTimeout: appResyncPeriod, refreshRequestedApps: make(map[string]bool), refreshRequestedAppsMutex: &sync.Mutex{}, auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"), settingsMgr: settingsMgr, settings: settings, } appInformer, appLister := ctrl.newApplicationInformerAndLister() projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, cache.Indexers{}) metricsAddr := fmt.Sprintf("0.0.0.0:%d", common.PortArgoCDMetrics) ctrl.metricsServer = metrics.NewMetricsServer(metricsAddr, appLister) stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settings, kubectlCmd, ctrl.metricsServer, ctrl.handleAppUpdated) appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, ctrl.settings, stateCache, projInformer, ctrl.metricsServer) ctrl.appInformer = appInformer ctrl.appLister = appLister ctrl.projInformer = projInformer ctrl.appStateManager = appStateManager ctrl.stateCache = stateCache return &ctrl, nil }
[ "func", "NewApplicationController", "(", "namespace", "string", ",", "settingsMgr", "*", "settings_util", ".", "SettingsManager", ",", "kubeClientset", "kubernetes", ".", "Interface", ",", "applicationClientset", "appclientset", ".", "Interface", ",", "repoClientset", "reposerver", ".", "Clientset", ",", "argoCache", "*", "argocache", ".", "Cache", ",", "appResyncPeriod", "time", ".", "Duration", ",", ")", "(", "*", "ApplicationController", ",", "error", ")", "{", "db", ":=", "db", ".", "NewDB", "(", "namespace", ",", "settingsMgr", ",", "kubeClientset", ")", "\n", "settings", ",", "err", ":=", "settingsMgr", ".", "GetSettings", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "kubectlCmd", ":=", "kube", ".", "KubectlCmd", "{", "}", "\n", "ctrl", ":=", "ApplicationController", "{", "cache", ":", "argoCache", ",", "namespace", ":", "namespace", ",", "kubeClientset", ":", "kubeClientset", ",", "kubectl", ":", "kubectlCmd", ",", "applicationClientset", ":", "applicationClientset", ",", "repoClientset", ":", "repoClientset", ",", "appRefreshQueue", ":", "workqueue", ".", "NewRateLimitingQueue", "(", "workqueue", ".", "DefaultControllerRateLimiter", "(", ")", ")", ",", "appOperationQueue", ":", "workqueue", ".", "NewRateLimitingQueue", "(", "workqueue", ".", "DefaultControllerRateLimiter", "(", ")", ")", ",", "db", ":", "db", ",", "statusRefreshTimeout", ":", "appResyncPeriod", ",", "refreshRequestedApps", ":", "make", "(", "map", "[", "string", "]", "bool", ")", ",", "refreshRequestedAppsMutex", ":", "&", "sync", ".", "Mutex", "{", "}", ",", "auditLogger", ":", "argo", ".", "NewAuditLogger", "(", "namespace", ",", "kubeClientset", ",", "\"", "\"", ")", ",", "settingsMgr", ":", "settingsMgr", ",", "settings", ":", "settings", ",", "}", "\n", "appInformer", ",", "appLister", ":=", "ctrl", ".", "newApplicationInformerAndLister", "(", ")", "\n", "projInformer", ":=", "v1alpha1", ".", "NewAppProjectInformer", "(", "applicationClientset", ",", "namespace", ",", "appResyncPeriod", ",", "cache", ".", "Indexers", "{", "}", ")", "\n", "metricsAddr", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "common", ".", "PortArgoCDMetrics", ")", "\n", "ctrl", ".", "metricsServer", "=", "metrics", ".", "NewMetricsServer", "(", "metricsAddr", ",", "appLister", ")", "\n", "stateCache", ":=", "statecache", ".", "NewLiveStateCache", "(", "db", ",", "appInformer", ",", "ctrl", ".", "settings", ",", "kubectlCmd", ",", "ctrl", ".", "metricsServer", ",", "ctrl", ".", "handleAppUpdated", ")", "\n", "appStateManager", ":=", "NewAppStateManager", "(", "db", ",", "applicationClientset", ",", "repoClientset", ",", "namespace", ",", "kubectlCmd", ",", "ctrl", ".", "settings", ",", "stateCache", ",", "projInformer", ",", "ctrl", ".", "metricsServer", ")", "\n", "ctrl", ".", "appInformer", "=", "appInformer", "\n", "ctrl", ".", "appLister", "=", "appLister", "\n", "ctrl", ".", "projInformer", "=", "projInformer", "\n", "ctrl", ".", "appStateManager", "=", "appStateManager", "\n", "ctrl", ".", "stateCache", "=", "stateCache", "\n\n", "return", "&", "ctrl", ",", "nil", "\n", "}" ]
// NewApplicationController creates new instance of ApplicationController.
[ "NewApplicationController", "creates", "new", "instance", "of", "ApplicationController", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L81-L126
161,673
argoproj/argo-cd
controller/appcontroller.go
Run
func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int, operationProcessors int) { defer runtime.HandleCrash() defer ctrl.appRefreshQueue.ShutDown() go ctrl.appInformer.Run(ctx.Done()) go ctrl.projInformer.Run(ctx.Done()) go ctrl.watchSettings(ctx) if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced, ctrl.projInformer.HasSynced) { log.Error("Timed out waiting for caches to sync") return } go ctrl.stateCache.Run(ctx) go func() { errors.CheckError(ctrl.metricsServer.ListenAndServe()) }() for i := 0; i < statusProcessors; i++ { go wait.Until(func() { for ctrl.processAppRefreshQueueItem() { } }, time.Second, ctx.Done()) } for i := 0; i < operationProcessors; i++ { go wait.Until(func() { for ctrl.processAppOperationQueueItem() { } }, time.Second, ctx.Done()) } <-ctx.Done() }
go
func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int, operationProcessors int) { defer runtime.HandleCrash() defer ctrl.appRefreshQueue.ShutDown() go ctrl.appInformer.Run(ctx.Done()) go ctrl.projInformer.Run(ctx.Done()) go ctrl.watchSettings(ctx) if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced, ctrl.projInformer.HasSynced) { log.Error("Timed out waiting for caches to sync") return } go ctrl.stateCache.Run(ctx) go func() { errors.CheckError(ctrl.metricsServer.ListenAndServe()) }() for i := 0; i < statusProcessors; i++ { go wait.Until(func() { for ctrl.processAppRefreshQueueItem() { } }, time.Second, ctx.Done()) } for i := 0; i < operationProcessors; i++ { go wait.Until(func() { for ctrl.processAppOperationQueueItem() { } }, time.Second, ctx.Done()) } <-ctx.Done() }
[ "func", "(", "ctrl", "*", "ApplicationController", ")", "Run", "(", "ctx", "context", ".", "Context", ",", "statusProcessors", "int", ",", "operationProcessors", "int", ")", "{", "defer", "runtime", ".", "HandleCrash", "(", ")", "\n", "defer", "ctrl", ".", "appRefreshQueue", ".", "ShutDown", "(", ")", "\n\n", "go", "ctrl", ".", "appInformer", ".", "Run", "(", "ctx", ".", "Done", "(", ")", ")", "\n", "go", "ctrl", ".", "projInformer", ".", "Run", "(", "ctx", ".", "Done", "(", ")", ")", "\n", "go", "ctrl", ".", "watchSettings", "(", "ctx", ")", "\n\n", "if", "!", "cache", ".", "WaitForCacheSync", "(", "ctx", ".", "Done", "(", ")", ",", "ctrl", ".", "appInformer", ".", "HasSynced", ",", "ctrl", ".", "projInformer", ".", "HasSynced", ")", "{", "log", ".", "Error", "(", "\"", "\"", ")", "\n", "return", "\n", "}", "\n\n", "go", "ctrl", ".", "stateCache", ".", "Run", "(", "ctx", ")", "\n", "go", "func", "(", ")", "{", "errors", ".", "CheckError", "(", "ctrl", ".", "metricsServer", ".", "ListenAndServe", "(", ")", ")", "}", "(", ")", "\n\n", "for", "i", ":=", "0", ";", "i", "<", "statusProcessors", ";", "i", "++", "{", "go", "wait", ".", "Until", "(", "func", "(", ")", "{", "for", "ctrl", ".", "processAppRefreshQueueItem", "(", ")", "{", "}", "\n", "}", ",", "time", ".", "Second", ",", "ctx", ".", "Done", "(", ")", ")", "\n", "}", "\n\n", "for", "i", ":=", "0", ";", "i", "<", "operationProcessors", ";", "i", "++", "{", "go", "wait", ".", "Until", "(", "func", "(", ")", "{", "for", "ctrl", ".", "processAppOperationQueueItem", "(", ")", "{", "}", "\n", "}", ",", "time", ".", "Second", ",", "ctx", ".", "Done", "(", ")", ")", "\n", "}", "\n\n", "<-", "ctx", ".", "Done", "(", ")", "\n", "}" ]
// Run starts the Application CRD controller.
[ "Run", "starts", "the", "Application", "CRD", "controller", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L249-L280
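Run's processor loops follow the standard controller pattern: N goroutines draining a rate-limited workqueue under wait.Until until the context is cancelled. A standalone sketch of just that pattern; the queue item and process function are placeholders, not the controller's real work items:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// processNext mirrors processAppRefreshQueueItem: false means "stop".
	processNext := func() bool {
		item, shutdown := queue.Get()
		if shutdown {
			return false
		}
		defer queue.Done(item)
		fmt.Println("processed", item)
		return true
	}

	for i := 0; i < 3; i++ { // analogue of statusProcessors
		go wait.Until(func() {
			for processNext() {
			}
		}, time.Second, ctx.Done())
	}

	queue.Add("default/guestbook")
	<-ctx.Done()
	queue.ShutDown()
}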
161,674
argoproj/argo-cd
controller/appcontroller.go
needRefreshAppStatus
func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType, bool) { logCtx := log.WithFields(log.Fields{"application": app.Name}) var reason string fullRefresh := true refreshType := appv1.RefreshTypeNormal expired := app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC()) if requestedType, ok := app.IsRefreshRequested(); ok { refreshType = requestedType reason = fmt.Sprintf("%s refresh requested", refreshType) } else if requested, full := ctrl.isRefreshRequested(app.Name); requested { fullRefresh = full reason = fmt.Sprintf("controller refresh requested") } else if app.Status.Sync.Status == appv1.SyncStatusCodeUnknown && expired { reason = "comparison status unknown" } else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) { reason = "spec.source differs" } else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) { reason = "spec.destination differs" } else if expired { reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout) } if reason != "" { logCtx.Infof("Refreshing app status (%s)", reason) return true, refreshType, fullRefresh } return false, refreshType, fullRefresh }
go
func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType, bool) { logCtx := log.WithFields(log.Fields{"application": app.Name}) var reason string fullRefresh := true refreshType := appv1.RefreshTypeNormal expired := app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC()) if requestedType, ok := app.IsRefreshRequested(); ok { refreshType = requestedType reason = fmt.Sprintf("%s refresh requested", refreshType) } else if requested, full := ctrl.isRefreshRequested(app.Name); requested { fullRefresh = full reason = fmt.Sprintf("controller refresh requested") } else if app.Status.Sync.Status == appv1.SyncStatusCodeUnknown && expired { reason = "comparison status unknown" } else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) { reason = "spec.source differs" } else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) { reason = "spec.destination differs" } else if expired { reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout) } if reason != "" { logCtx.Infof("Refreshing app status (%s)", reason) return true, refreshType, fullRefresh } return false, refreshType, fullRefresh }
[ "func", "(", "ctrl", "*", "ApplicationController", ")", "needRefreshAppStatus", "(", "app", "*", "appv1", ".", "Application", ",", "statusRefreshTimeout", "time", ".", "Duration", ")", "(", "bool", ",", "appv1", ".", "RefreshType", ",", "bool", ")", "{", "logCtx", ":=", "log", ".", "WithFields", "(", "log", ".", "Fields", "{", "\"", "\"", ":", "app", ".", "Name", "}", ")", "\n", "var", "reason", "string", "\n", "fullRefresh", ":=", "true", "\n", "refreshType", ":=", "appv1", ".", "RefreshTypeNormal", "\n", "expired", ":=", "app", ".", "Status", ".", "ReconciledAt", ".", "Add", "(", "statusRefreshTimeout", ")", ".", "Before", "(", "time", ".", "Now", "(", ")", ".", "UTC", "(", ")", ")", "\n", "if", "requestedType", ",", "ok", ":=", "app", ".", "IsRefreshRequested", "(", ")", ";", "ok", "{", "refreshType", "=", "requestedType", "\n", "reason", "=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "refreshType", ")", "\n", "}", "else", "if", "requested", ",", "full", ":=", "ctrl", ".", "isRefreshRequested", "(", "app", ".", "Name", ")", ";", "requested", "{", "fullRefresh", "=", "full", "\n", "reason", "=", "fmt", ".", "Sprintf", "(", "\"", "\"", ")", "\n", "}", "else", "if", "app", ".", "Status", ".", "Sync", ".", "Status", "==", "appv1", ".", "SyncStatusCodeUnknown", "&&", "expired", "{", "reason", "=", "\"", "\"", "\n", "}", "else", "if", "!", "app", ".", "Spec", ".", "Source", ".", "Equals", "(", "app", ".", "Status", ".", "Sync", ".", "ComparedTo", ".", "Source", ")", "{", "reason", "=", "\"", "\"", "\n", "}", "else", "if", "!", "app", ".", "Spec", ".", "Destination", ".", "Equals", "(", "app", ".", "Status", ".", "Sync", ".", "ComparedTo", ".", "Destination", ")", "{", "reason", "=", "\"", "\"", "\n", "}", "else", "if", "expired", "{", "reason", "=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "app", ".", "Status", ".", "ReconciledAt", ",", "statusRefreshTimeout", ")", "\n", "}", "\n", "if", "reason", "!=", "\"", "\"", "{", "logCtx", ".", "Infof", "(", "\"", "\"", ",", "reason", ")", "\n", "return", "true", ",", "refreshType", ",", "fullRefresh", "\n", "}", "\n", "return", "false", ",", "refreshType", ",", "fullRefresh", "\n", "}" ]
// needRefreshAppStatus answers if application status needs to be refreshed. // Returns true if the application has never been compared, has changed, or the comparison result has expired. // Additionally returns whether a full refresh was requested or not. // If a full refresh is requested then target and live state should be reconciled, else only the live state tree should be updated.
[ "needRefreshAppStatus", "answers", "if", "application", "status", "needs", "to", "be", "refreshed", ".", "Returns", "true", "if", "the", "application", "has", "never", "been", "compared", "has", "changed", "or", "the", "comparison", "result", "has", "expired", ".", "Additionally", "returns", "whether", "a", "full", "refresh", "was", "requested", "or", "not", ".", "If", "a", "full", "refresh", "is", "requested", "then", "target", "and", "live", "state", "should", "be", "reconciled", "else", "only", "the", "live", "state", "tree", "should", "be", "updated", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L666-L692
161,675
argoproj/argo-cd
controller/appcontroller.go
normalizeApplication
func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Application, sourceType appv1.ApplicationSourceType) { logCtx := log.WithFields(log.Fields{"application": app.Name}) app.Spec = *argo.NormalizeApplicationSpec(&app.Spec, sourceType) patch, modified, err := diff.CreateTwoWayMergePatch(orig, app, appv1.Application{}) if err != nil { logCtx.Errorf("error constructing app spec patch: %v", err) } else if modified { appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace) _, err = appClient.Patch(app.Name, types.MergePatchType, patch) if err != nil { logCtx.Errorf("Error persisting normalized application spec: %v", err) } else { logCtx.Infof("Normalized app spec: %s", string(patch)) } } }
go
func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Application, sourceType appv1.ApplicationSourceType) { logCtx := log.WithFields(log.Fields{"application": app.Name}) app.Spec = *argo.NormalizeApplicationSpec(&app.Spec, sourceType) patch, modified, err := diff.CreateTwoWayMergePatch(orig, app, appv1.Application{}) if err != nil { logCtx.Errorf("error constructing app spec patch: %v", err) } else if modified { appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace) _, err = appClient.Patch(app.Name, types.MergePatchType, patch) if err != nil { logCtx.Errorf("Error persisting normalized application spec: %v", err) } else { logCtx.Infof("Normalized app spec: %s", string(patch)) } } }
[ "func", "(", "ctrl", "*", "ApplicationController", ")", "normalizeApplication", "(", "orig", ",", "app", "*", "appv1", ".", "Application", ",", "sourceType", "appv1", ".", "ApplicationSourceType", ")", "{", "logCtx", ":=", "log", ".", "WithFields", "(", "log", ".", "Fields", "{", "\"", "\"", ":", "app", ".", "Name", "}", ")", "\n", "app", ".", "Spec", "=", "*", "argo", ".", "NormalizeApplicationSpec", "(", "&", "app", ".", "Spec", ",", "sourceType", ")", "\n", "patch", ",", "modified", ",", "err", ":=", "diff", ".", "CreateTwoWayMergePatch", "(", "orig", ",", "app", ",", "appv1", ".", "Application", "{", "}", ")", "\n", "if", "err", "!=", "nil", "{", "logCtx", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "else", "if", "modified", "{", "appClient", ":=", "ctrl", ".", "applicationClientset", ".", "ArgoprojV1alpha1", "(", ")", ".", "Applications", "(", "app", ".", "Namespace", ")", "\n", "_", ",", "err", "=", "appClient", ".", "Patch", "(", "app", ".", "Name", ",", "types", ".", "MergePatchType", ",", "patch", ")", "\n", "if", "err", "!=", "nil", "{", "logCtx", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "else", "{", "logCtx", ".", "Infof", "(", "\"", "\"", ",", "string", "(", "patch", ")", ")", "\n", "}", "\n", "}", "\n", "}" ]
// normalizeApplication normalizes an application.spec and additionally persists updates if it changed
[ "normalizeApplication", "normalizes", "an", "application", ".", "spec", "and", "additionally", "persists", "updates", "if", "it", "changed" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L750-L765
161,676
argoproj/argo-cd
controller/appcontroller.go
persistAppStatus
func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) { logCtx := log.WithFields(log.Fields{"application": orig.Name}) if orig.Status.Sync.Status != newStatus.Sync.Status { message := fmt.Sprintf("Updated sync status: %s -> %s", orig.Status.Sync.Status, newStatus.Sync.Status) ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message) } if orig.Status.Health.Status != newStatus.Health.Status { message := fmt.Sprintf("Updated health status: %s -> %s", orig.Status.Health.Status, newStatus.Health.Status) ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message) } var newAnnotations map[string]string if orig.GetAnnotations() != nil { newAnnotations = make(map[string]string) for k, v := range orig.GetAnnotations() { newAnnotations[k] = v } delete(newAnnotations, common.AnnotationKeyRefresh) } patch, modified, err := diff.CreateTwoWayMergePatch( &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: orig.GetAnnotations()}, Status: orig.Status}, &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: newAnnotations}, Status: *newStatus}, appv1.Application{}) if err != nil { logCtx.Errorf("Error constructing app status patch: %v", err) return } if !modified { logCtx.Infof("No status changes. Skipping patch") return } logCtx.Debugf("patch: %s", string(patch)) appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace) _, err = appClient.Patch(orig.Name, types.MergePatchType, patch) if err != nil { logCtx.Warnf("Error updating application: %v", err) } else { logCtx.Infof("Update successful") } }
go
func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) { logCtx := log.WithFields(log.Fields{"application": orig.Name}) if orig.Status.Sync.Status != newStatus.Sync.Status { message := fmt.Sprintf("Updated sync status: %s -> %s", orig.Status.Sync.Status, newStatus.Sync.Status) ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message) } if orig.Status.Health.Status != newStatus.Health.Status { message := fmt.Sprintf("Updated health status: %s -> %s", orig.Status.Health.Status, newStatus.Health.Status) ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message) } var newAnnotations map[string]string if orig.GetAnnotations() != nil { newAnnotations = make(map[string]string) for k, v := range orig.GetAnnotations() { newAnnotations[k] = v } delete(newAnnotations, common.AnnotationKeyRefresh) } patch, modified, err := diff.CreateTwoWayMergePatch( &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: orig.GetAnnotations()}, Status: orig.Status}, &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: newAnnotations}, Status: *newStatus}, appv1.Application{}) if err != nil { logCtx.Errorf("Error constructing app status patch: %v", err) return } if !modified { logCtx.Infof("No status changes. Skipping patch") return } logCtx.Debugf("patch: %s", string(patch)) appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace) _, err = appClient.Patch(orig.Name, types.MergePatchType, patch) if err != nil { logCtx.Warnf("Error updating application: %v", err) } else { logCtx.Infof("Update successful") } }
[ "func", "(", "ctrl", "*", "ApplicationController", ")", "persistAppStatus", "(", "orig", "*", "appv1", ".", "Application", ",", "newStatus", "*", "appv1", ".", "ApplicationStatus", ")", "{", "logCtx", ":=", "log", ".", "WithFields", "(", "log", ".", "Fields", "{", "\"", "\"", ":", "orig", ".", "Name", "}", ")", "\n", "if", "orig", ".", "Status", ".", "Sync", ".", "Status", "!=", "newStatus", ".", "Sync", ".", "Status", "{", "message", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "orig", ".", "Status", ".", "Sync", ".", "Status", ",", "newStatus", ".", "Sync", ".", "Status", ")", "\n", "ctrl", ".", "auditLogger", ".", "LogAppEvent", "(", "orig", ",", "argo", ".", "EventInfo", "{", "Reason", ":", "argo", ".", "EventReasonResourceUpdated", ",", "Type", ":", "v1", ".", "EventTypeNormal", "}", ",", "message", ")", "\n", "}", "\n", "if", "orig", ".", "Status", ".", "Health", ".", "Status", "!=", "newStatus", ".", "Health", ".", "Status", "{", "message", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "orig", ".", "Status", ".", "Health", ".", "Status", ",", "newStatus", ".", "Health", ".", "Status", ")", "\n", "ctrl", ".", "auditLogger", ".", "LogAppEvent", "(", "orig", ",", "argo", ".", "EventInfo", "{", "Reason", ":", "argo", ".", "EventReasonResourceUpdated", ",", "Type", ":", "v1", ".", "EventTypeNormal", "}", ",", "message", ")", "\n", "}", "\n", "var", "newAnnotations", "map", "[", "string", "]", "string", "\n", "if", "orig", ".", "GetAnnotations", "(", ")", "!=", "nil", "{", "newAnnotations", "=", "make", "(", "map", "[", "string", "]", "string", ")", "\n", "for", "k", ",", "v", ":=", "range", "orig", ".", "GetAnnotations", "(", ")", "{", "newAnnotations", "[", "k", "]", "=", "v", "\n", "}", "\n", "delete", "(", "newAnnotations", ",", "common", ".", "AnnotationKeyRefresh", ")", "\n", "}", "\n", "patch", ",", "modified", ",", "err", ":=", "diff", ".", "CreateTwoWayMergePatch", "(", "&", "appv1", ".", "Application", "{", "ObjectMeta", ":", "metav1", ".", "ObjectMeta", "{", "Annotations", ":", "orig", ".", "GetAnnotations", "(", ")", "}", ",", "Status", ":", "orig", ".", "Status", "}", ",", "&", "appv1", ".", "Application", "{", "ObjectMeta", ":", "metav1", ".", "ObjectMeta", "{", "Annotations", ":", "newAnnotations", "}", ",", "Status", ":", "*", "newStatus", "}", ",", "appv1", ".", "Application", "{", "}", ")", "\n", "if", "err", "!=", "nil", "{", "logCtx", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "return", "\n", "}", "\n", "if", "!", "modified", "{", "logCtx", ".", "Infof", "(", "\"", "\"", ")", "\n", "return", "\n", "}", "\n", "logCtx", ".", "Debugf", "(", "\"", "\"", ",", "string", "(", "patch", ")", ")", "\n", "appClient", ":=", "ctrl", ".", "applicationClientset", ".", "ArgoprojV1alpha1", "(", ")", ".", "Applications", "(", "orig", ".", "Namespace", ")", "\n", "_", ",", "err", "=", "appClient", ".", "Patch", "(", "orig", ".", "Name", ",", "types", ".", "MergePatchType", ",", "patch", ")", "\n", "if", "err", "!=", "nil", "{", "logCtx", ".", "Warnf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "else", "{", "logCtx", ".", "Infof", "(", "\"", "\"", ")", "\n", "}", "\n", "}" ]
// persistAppStatus persists updates to application status. If no changes were made, it is a no-op
[ "persistAppStatus", "persists", "updates", "to", "application", "status", ".", "If", "no", "changes", "were", "made", "it", "is", "a", "no", "-", "op" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L768-L805
161,677
argoproj/argo-cd
controller/appcontroller.go
autoSync
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus) *appv1.ApplicationCondition { if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil { return nil } logCtx := log.WithFields(log.Fields{"application": app.Name}) if app.Operation != nil { logCtx.Infof("Skipping auto-sync: another operation is in progress") return nil } if app.DeletionTimestamp != nil && !app.DeletionTimestamp.IsZero() { logCtx.Infof("Skipping auto-sync: deletion in progress") return nil } // Only perform auto-sync if we detect OutOfSync status. This is to prevent us from attempting // a sync when application is already in a Synced or Unknown state if syncStatus.Status != appv1.SyncStatusCodeOutOfSync { logCtx.Infof("Skipping auto-sync: application status is %s", syncStatus.Status) return nil } desiredCommitSHA := syncStatus.Revision // It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g. // auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an // application in an infinite loop. To detect this, we only attempt the Sync if the revision // and parameter overrides are different from our most recent sync operation. if alreadyAttemptedSync(app, desiredCommitSHA) { if app.Status.OperationState.Phase != appv1.OperationSucceeded { logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA) message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message) return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message} } logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA) return nil } op := appv1.Operation{ Sync: &appv1.SyncOperation{ Revision: desiredCommitSHA, Prune: app.Spec.SyncPolicy.Automated.Prune, }, } appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace) _, err := argo.SetAppOperation(appIf, app.Name, &op) if err != nil { logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err) return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()} } message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA) ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message) logCtx.Info(message) return nil }
go
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus) *appv1.ApplicationCondition { if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil { return nil } logCtx := log.WithFields(log.Fields{"application": app.Name}) if app.Operation != nil { logCtx.Infof("Skipping auto-sync: another operation is in progress") return nil } if app.DeletionTimestamp != nil && !app.DeletionTimestamp.IsZero() { logCtx.Infof("Skipping auto-sync: deletion in progress") return nil } // Only perform auto-sync if we detect OutOfSync status. This is to prevent us from attempting // a sync when application is already in a Synced or Unknown state if syncStatus.Status != appv1.SyncStatusCodeOutOfSync { logCtx.Infof("Skipping auto-sync: application status is %s", syncStatus.Status) return nil } desiredCommitSHA := syncStatus.Revision // It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g. // auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an // application in an infinite loop. To detect this, we only attempt the Sync if the revision // and parameter overrides are different from our most recent sync operation. if alreadyAttemptedSync(app, desiredCommitSHA) { if app.Status.OperationState.Phase != appv1.OperationSucceeded { logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA) message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message) return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message} } logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA) return nil } op := appv1.Operation{ Sync: &appv1.SyncOperation{ Revision: desiredCommitSHA, Prune: app.Spec.SyncPolicy.Automated.Prune, }, } appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace) _, err := argo.SetAppOperation(appIf, app.Name, &op) if err != nil { logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err) return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()} } message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA) ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message) logCtx.Info(message) return nil }
[ "func", "(", "ctrl", "*", "ApplicationController", ")", "autoSync", "(", "app", "*", "appv1", ".", "Application", ",", "syncStatus", "*", "appv1", ".", "SyncStatus", ")", "*", "appv1", ".", "ApplicationCondition", "{", "if", "app", ".", "Spec", ".", "SyncPolicy", "==", "nil", "||", "app", ".", "Spec", ".", "SyncPolicy", ".", "Automated", "==", "nil", "{", "return", "nil", "\n", "}", "\n", "logCtx", ":=", "log", ".", "WithFields", "(", "log", ".", "Fields", "{", "\"", "\"", ":", "app", ".", "Name", "}", ")", "\n", "if", "app", ".", "Operation", "!=", "nil", "{", "logCtx", ".", "Infof", "(", "\"", "\"", ")", "\n", "return", "nil", "\n", "}", "\n", "if", "app", ".", "DeletionTimestamp", "!=", "nil", "&&", "!", "app", ".", "DeletionTimestamp", ".", "IsZero", "(", ")", "{", "logCtx", ".", "Infof", "(", "\"", "\"", ")", "\n", "return", "nil", "\n", "}", "\n", "// Only perform auto-sync if we detect OutOfSync status. This is to prevent us from attempting", "// a sync when application is already in a Synced or Unknown state", "if", "syncStatus", ".", "Status", "!=", "appv1", ".", "SyncStatusCodeOutOfSync", "{", "logCtx", ".", "Infof", "(", "\"", "\"", ",", "syncStatus", ".", "Status", ")", "\n", "return", "nil", "\n", "}", "\n", "desiredCommitSHA", ":=", "syncStatus", ".", "Revision", "\n\n", "// It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g.", "// auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an", "// application in an infinite loop. To detect this, we only attempt the Sync if the revision", "// and parameter overrides are different from our most recent sync operation.", "if", "alreadyAttemptedSync", "(", "app", ",", "desiredCommitSHA", ")", "{", "if", "app", ".", "Status", ".", "OperationState", ".", "Phase", "!=", "appv1", ".", "OperationSucceeded", "{", "logCtx", ".", "Warnf", "(", "\"", "\"", ",", "desiredCommitSHA", ")", "\n", "message", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "desiredCommitSHA", ",", "app", ".", "Status", ".", "OperationState", ".", "Message", ")", "\n", "return", "&", "appv1", ".", "ApplicationCondition", "{", "Type", ":", "appv1", ".", "ApplicationConditionSyncError", ",", "Message", ":", "message", "}", "\n", "}", "\n", "logCtx", ".", "Infof", "(", "\"", "\"", ",", "desiredCommitSHA", ")", "\n", "return", "nil", "\n", "}", "\n\n", "op", ":=", "appv1", ".", "Operation", "{", "Sync", ":", "&", "appv1", ".", "SyncOperation", "{", "Revision", ":", "desiredCommitSHA", ",", "Prune", ":", "app", ".", "Spec", ".", "SyncPolicy", ".", "Automated", ".", "Prune", ",", "}", ",", "}", "\n", "appIf", ":=", "ctrl", ".", "applicationClientset", ".", "ArgoprojV1alpha1", "(", ")", ".", "Applications", "(", "app", ".", "Namespace", ")", "\n", "_", ",", "err", ":=", "argo", ".", "SetAppOperation", "(", "appIf", ",", "app", ".", "Name", ",", "&", "op", ")", "\n", "if", "err", "!=", "nil", "{", "logCtx", ".", "Errorf", "(", "\"", "\"", ",", "desiredCommitSHA", ",", "err", ")", "\n", "return", "&", "appv1", ".", "ApplicationCondition", "{", "Type", ":", "appv1", ".", "ApplicationConditionSyncError", ",", "Message", ":", "err", ".", "Error", "(", ")", "}", "\n", "}", "\n", "message", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "desiredCommitSHA", ")", "\n", "ctrl", ".", "auditLogger", ".", "LogAppEvent", "(", "app", ",", "argo", ".", "EventInfo", "{", "Reason", ":", "argo", ".", "EventReasonOperationStarted", ",", "Type", ":", "v1", ".", "EventTypeNormal", "}", ",", "message", ")", "\n", "logCtx", ".", 
"Info", "(", "message", ")", "\n", "return", "nil", "\n", "}" ]
// autoSync will initiate a sync operation for an application configured with automated sync
[ "autoSync", "will", "initiate", "a", "sync", "operation", "for", "an", "application", "configured", "with", "automated", "sync" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L808-L859
161,678
argoproj/argo-cd
controller/appcontroller.go
alreadyAttemptedSync
func alreadyAttemptedSync(app *appv1.Application, commitSHA string) bool { if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil { return false } if app.Status.OperationState.SyncResult.Revision != commitSHA { return false } // Ignore differences in target revision, since we already just verified commitSHAs are equal, // and we do not want to trigger auto-sync due to things like HEAD != master specSource := app.Spec.Source.DeepCopy() specSource.TargetRevision = "" syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy() syncResSource.TargetRevision = "" return reflect.DeepEqual(app.Spec.Source, app.Status.OperationState.SyncResult.Source) }
go
func alreadyAttemptedSync(app *appv1.Application, commitSHA string) bool { if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil { return false } if app.Status.OperationState.SyncResult.Revision != commitSHA { return false } // Ignore differences in target revision, since we already just verified commitSHAs are equal, // and we do not want to trigger auto-sync due to things like HEAD != master specSource := app.Spec.Source.DeepCopy() specSource.TargetRevision = "" syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy() syncResSource.TargetRevision = "" return reflect.DeepEqual(app.Spec.Source, app.Status.OperationState.SyncResult.Source) }
[ "func", "alreadyAttemptedSync", "(", "app", "*", "appv1", ".", "Application", ",", "commitSHA", "string", ")", "bool", "{", "if", "app", ".", "Status", ".", "OperationState", "==", "nil", "||", "app", ".", "Status", ".", "OperationState", ".", "Operation", ".", "Sync", "==", "nil", "||", "app", ".", "Status", ".", "OperationState", ".", "SyncResult", "==", "nil", "{", "return", "false", "\n", "}", "\n", "if", "app", ".", "Status", ".", "OperationState", ".", "SyncResult", ".", "Revision", "!=", "commitSHA", "{", "return", "false", "\n", "}", "\n", "// Ignore differences in target revision, since we already just verified commitSHAs are equal,", "// and we do not want to trigger auto-sync due to things like HEAD != master", "specSource", ":=", "app", ".", "Spec", ".", "Source", ".", "DeepCopy", "(", ")", "\n", "specSource", ".", "TargetRevision", "=", "\"", "\"", "\n", "syncResSource", ":=", "app", ".", "Status", ".", "OperationState", ".", "SyncResult", ".", "Source", ".", "DeepCopy", "(", ")", "\n", "syncResSource", ".", "TargetRevision", "=", "\"", "\"", "\n", "return", "reflect", ".", "DeepEqual", "(", "app", ".", "Spec", ".", "Source", ",", "app", ".", "Status", ".", "OperationState", ".", "SyncResult", ".", "Source", ")", "\n", "}" ]
// alreadyAttemptedSync returns whether or not the most recent sync was performed against the // commitSHA and with the same app source config which are currently set in the app
[ "alreadyAttemptedSync", "returns", "whether", "or", "not", "the", "most", "recent", "sync", "was", "performed", "against", "the", "commitSHA", "and", "with", "the", "same", "app", "source", "config", "which", "are", "currently", "set", "in", "the", "app" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L863-L877
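In the record above, alreadyAttemptedSync builds specSource and syncResSource with TargetRevision blanked out but then passes the original sources to reflect.DeepEqual, so the copies go unused; comparing the copies looks like the intent, though that reading is an assumption. Below is a minimal, self-contained sketch of that revision-agnostic comparison, using a simplified stand-in type rather than appv1.ApplicationSource.
package main

import (
	"fmt"
	"reflect"
)

// source is a simplified stand-in for appv1.ApplicationSource; only the fields
// needed for the illustration are included.
type source struct {
	RepoURL        string
	Path           string
	TargetRevision string
}

// sameSourceIgnoringRevision compares two source configs with TargetRevision
// blanked out on both sides, which is the comparison the DeepCopy calls in the
// record above appear to be preparing (an assumption about intent).
func sameSourceIgnoringRevision(spec, syncRes source) bool {
	spec.TargetRevision = ""
	syncRes.TargetRevision = ""
	return reflect.DeepEqual(spec, syncRes)
}

func main() {
	a := source{RepoURL: "https://example.com/repo.git", Path: "app", TargetRevision: "HEAD"}
	b := source{RepoURL: "https://example.com/repo.git", Path: "app", TargetRevision: "master"}
	fmt.Println(sameSourceIgnoringRevision(a, b)) // true: only the revision differs
}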
161,679
argoproj/argo-cd
controller/appcontroller.go
toggledAutomatedSync
func toggledAutomatedSync(old *appv1.Application, new *appv1.Application) bool { if new.Spec.SyncPolicy == nil || new.Spec.SyncPolicy.Automated == nil { return false } // auto-sync is enabled. check if it was previously disabled if old.Spec.SyncPolicy == nil || old.Spec.SyncPolicy.Automated == nil { return true } // nothing changed return false }
go
func toggledAutomatedSync(old *appv1.Application, new *appv1.Application) bool { if new.Spec.SyncPolicy == nil || new.Spec.SyncPolicy.Automated == nil { return false } // auto-sync is enabled. check if it was previously disabled if old.Spec.SyncPolicy == nil || old.Spec.SyncPolicy.Automated == nil { return true } // nothing changed return false }
[ "func", "toggledAutomatedSync", "(", "old", "*", "appv1", ".", "Application", ",", "new", "*", "appv1", ".", "Application", ")", "bool", "{", "if", "new", ".", "Spec", ".", "SyncPolicy", "==", "nil", "||", "new", ".", "Spec", ".", "SyncPolicy", ".", "Automated", "==", "nil", "{", "return", "false", "\n", "}", "\n", "// auto-sync is enabled. check if it was previously disabled", "if", "old", ".", "Spec", ".", "SyncPolicy", "==", "nil", "||", "old", ".", "Spec", ".", "SyncPolicy", ".", "Automated", "==", "nil", "{", "return", "true", "\n", "}", "\n", "// nothing changed", "return", "false", "\n", "}" ]
// toggledAutomatedSync tests if an app went from auto-sync disabled to enabled. // if it was toggled to be enabled, the informer handler will force a refresh
[ "toggledAutomatedSync", "tests", "if", "an", "app", "went", "from", "auto", "-", "sync", "disabled", "to", "enabled", ".", "if", "it", "was", "toggled", "to", "be", "enabled", "the", "informer", "handler", "will", "force", "a", "refresh" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/appcontroller.go#L932-L942
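Since toggledAutomatedSync reduces to two nil checks, a small self-contained sketch of its truth table may help; the struct types below are simplified stand-ins for the appv1 types, not the real API.
package main

import "fmt"

// Simplified stand-ins for appv1.SyncPolicy and appv1.SyncPolicyAutomated,
// just enough to exercise the nil handling shown in the record above.
type automated struct{ Prune bool }
type syncPolicy struct{ Automated *automated }
type app struct{ SyncPolicy *syncPolicy }

// toggledOn mirrors the logic above: true only when auto-sync is enabled in
// the new spec and was not enabled in the old one.
func toggledOn(oldApp, newApp app) bool {
	if newApp.SyncPolicy == nil || newApp.SyncPolicy.Automated == nil {
		return false
	}
	return oldApp.SyncPolicy == nil || oldApp.SyncPolicy.Automated == nil
}

func main() {
	off := app{}
	on := app{SyncPolicy: &syncPolicy{Automated: &automated{}}}
	fmt.Println(toggledOn(off, on)) // true: disabled -> enabled
	fmt.Println(toggledOn(on, on))  // false: already enabled
	fmt.Println(toggledOn(on, off)) // false: auto-sync switched off
}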
161,680
argoproj/argo-cd
controller/sync.go
sync
func (sc *syncContext) sync() { syncTasks, successful := sc.generateSyncTasks() if !successful { sc.setOperationPhase(appv1.OperationFailed, "one or more synchronization tasks are not valid") return } // If no sync tasks were generated (e.g., in case all application manifests have been removed), // set the sync operation as successful. if len(syncTasks) == 0 { sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced (no manifests)") return } // Perform a `kubectl apply --dry-run` against all the manifests. This will detect most (but // not all) validation issues with the user's manifests (e.g. will detect syntax issues, but // will not not detect if they are mutating immutable fields). If anything fails, we will refuse // to perform the sync. if !sc.startedPreSyncPhase() { // Optimization: we only wish to do this once per operation, performing additional dry-runs // is harmless, but redundant. The indicator we use to detect if we have already performed // the dry-run for this operation, is if the resource or hook list is empty. if !sc.doApplySync(syncTasks, true, false, sc.syncOp.DryRun) { sc.setOperationPhase(appv1.OperationFailed, "one or more objects failed to apply (dry run)") return } if sc.syncOp.DryRun { sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced (dry run)") return } } // All objects passed a `kubectl apply --dry-run`, so we are now ready to actually perform the sync. if sc.syncOp.SyncStrategy == nil { // default sync strategy to hook if no strategy sc.syncOp.SyncStrategy = &appv1.SyncStrategy{Hook: &appv1.SyncStrategyHook{}} } if sc.syncOp.SyncStrategy.Apply != nil { if !sc.startedSyncPhase() { if !sc.doApplySync(syncTasks, false, sc.syncOp.SyncStrategy.Apply.Force, true) { sc.setOperationPhase(appv1.OperationFailed, "one or more objects failed to apply") return } // If apply was successful, return here and force an app refresh. This is so the app // will become requeued into the workqueue, to force a new sync/health assessment before // marking the operation as completed return } sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced") } else if sc.syncOp.SyncStrategy.Hook != nil { hooks, err := sc.getHooks() if err != nil { sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("failed to generate hooks resources: %v", err)) return } sc.doHookSync(syncTasks, hooks) } else { sc.setOperationPhase(appv1.OperationFailed, "Unknown sync strategy") return } }
go
func (sc *syncContext) sync() { syncTasks, successful := sc.generateSyncTasks() if !successful { sc.setOperationPhase(appv1.OperationFailed, "one or more synchronization tasks are not valid") return } // If no sync tasks were generated (e.g., in case all application manifests have been removed), // set the sync operation as successful. if len(syncTasks) == 0 { sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced (no manifests)") return } // Perform a `kubectl apply --dry-run` against all the manifests. This will detect most (but // not all) validation issues with the user's manifests (e.g. will detect syntax issues, but // will not not detect if they are mutating immutable fields). If anything fails, we will refuse // to perform the sync. if !sc.startedPreSyncPhase() { // Optimization: we only wish to do this once per operation, performing additional dry-runs // is harmless, but redundant. The indicator we use to detect if we have already performed // the dry-run for this operation, is if the resource or hook list is empty. if !sc.doApplySync(syncTasks, true, false, sc.syncOp.DryRun) { sc.setOperationPhase(appv1.OperationFailed, "one or more objects failed to apply (dry run)") return } if sc.syncOp.DryRun { sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced (dry run)") return } } // All objects passed a `kubectl apply --dry-run`, so we are now ready to actually perform the sync. if sc.syncOp.SyncStrategy == nil { // default sync strategy to hook if no strategy sc.syncOp.SyncStrategy = &appv1.SyncStrategy{Hook: &appv1.SyncStrategyHook{}} } if sc.syncOp.SyncStrategy.Apply != nil { if !sc.startedSyncPhase() { if !sc.doApplySync(syncTasks, false, sc.syncOp.SyncStrategy.Apply.Force, true) { sc.setOperationPhase(appv1.OperationFailed, "one or more objects failed to apply") return } // If apply was successful, return here and force an app refresh. This is so the app // will become requeued into the workqueue, to force a new sync/health assessment before // marking the operation as completed return } sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced") } else if sc.syncOp.SyncStrategy.Hook != nil { hooks, err := sc.getHooks() if err != nil { sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("failed to generate hooks resources: %v", err)) return } sc.doHookSync(syncTasks, hooks) } else { sc.setOperationPhase(appv1.OperationFailed, "Unknown sync strategy") return } }
[ "func", "(", "sc", "*", "syncContext", ")", "sync", "(", ")", "{", "syncTasks", ",", "successful", ":=", "sc", ".", "generateSyncTasks", "(", ")", "\n", "if", "!", "successful", "{", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationFailed", ",", "\"", "\"", ")", "\n", "return", "\n", "}", "\n\n", "// If no sync tasks were generated (e.g., in case all application manifests have been removed),", "// set the sync operation as successful.", "if", "len", "(", "syncTasks", ")", "==", "0", "{", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationSucceeded", ",", "\"", "\"", ")", "\n", "return", "\n", "}", "\n\n", "// Perform a `kubectl apply --dry-run` against all the manifests. This will detect most (but", "// not all) validation issues with the user's manifests (e.g. will detect syntax issues, but", "// will not not detect if they are mutating immutable fields). If anything fails, we will refuse", "// to perform the sync.", "if", "!", "sc", ".", "startedPreSyncPhase", "(", ")", "{", "// Optimization: we only wish to do this once per operation, performing additional dry-runs", "// is harmless, but redundant. The indicator we use to detect if we have already performed", "// the dry-run for this operation, is if the resource or hook list is empty.", "if", "!", "sc", ".", "doApplySync", "(", "syncTasks", ",", "true", ",", "false", ",", "sc", ".", "syncOp", ".", "DryRun", ")", "{", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationFailed", ",", "\"", "\"", ")", "\n", "return", "\n", "}", "\n", "if", "sc", ".", "syncOp", ".", "DryRun", "{", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationSucceeded", ",", "\"", "\"", ")", "\n", "return", "\n", "}", "\n", "}", "\n\n", "// All objects passed a `kubectl apply --dry-run`, so we are now ready to actually perform the sync.", "if", "sc", ".", "syncOp", ".", "SyncStrategy", "==", "nil", "{", "// default sync strategy to hook if no strategy", "sc", ".", "syncOp", ".", "SyncStrategy", "=", "&", "appv1", ".", "SyncStrategy", "{", "Hook", ":", "&", "appv1", ".", "SyncStrategyHook", "{", "}", "}", "\n", "}", "\n", "if", "sc", ".", "syncOp", ".", "SyncStrategy", ".", "Apply", "!=", "nil", "{", "if", "!", "sc", ".", "startedSyncPhase", "(", ")", "{", "if", "!", "sc", ".", "doApplySync", "(", "syncTasks", ",", "false", ",", "sc", ".", "syncOp", ".", "SyncStrategy", ".", "Apply", ".", "Force", ",", "true", ")", "{", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationFailed", ",", "\"", "\"", ")", "\n", "return", "\n", "}", "\n", "// If apply was successful, return here and force an app refresh. This is so the app", "// will become requeued into the workqueue, to force a new sync/health assessment before", "// marking the operation as completed", "return", "\n", "}", "\n", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationSucceeded", ",", "\"", "\"", ")", "\n", "}", "else", "if", "sc", ".", "syncOp", ".", "SyncStrategy", ".", "Hook", "!=", "nil", "{", "hooks", ",", "err", ":=", "sc", ".", "getHooks", "(", ")", "\n", "if", "err", "!=", "nil", "{", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationError", ",", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "err", ")", ")", "\n", "return", "\n", "}", "\n", "sc", ".", "doHookSync", "(", "syncTasks", ",", "hooks", ")", "\n", "}", "else", "{", "sc", ".", "setOperationPhase", "(", "appv1", ".", "OperationFailed", ",", "\"", "\"", ")", "\n", "return", "\n", "}", "\n", "}" ]
// sync performs the actual apply or hook based sync
[ "sync", "performs", "the", "actual", "apply", "or", "hook", "based", "sync" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/sync.go#L183-L243
161,681
argoproj/argo-cd
controller/sync.go
startedPostSyncPhase
func (sc *syncContext) startedPostSyncPhase() bool { for _, res := range sc.syncRes.Resources { if res.IsHook() && res.HookType == appv1.HookTypePostSync { return true } } return false }
go
func (sc *syncContext) startedPostSyncPhase() bool { for _, res := range sc.syncRes.Resources { if res.IsHook() && res.HookType == appv1.HookTypePostSync { return true } } return false }
[ "func", "(", "sc", "*", "syncContext", ")", "startedPostSyncPhase", "(", ")", "bool", "{", "for", "_", ",", "res", ":=", "range", "sc", ".", "syncRes", ".", "Resources", "{", "if", "res", ".", "IsHook", "(", ")", "&&", "res", ".", "HookType", "==", "appv1", ".", "HookTypePostSync", "{", "return", "true", "\n", "}", "\n", "}", "\n", "return", "false", "\n", "}" ]
// startedPostSyncPhase detects if we have already started the PostSync stage. This is equivalent to whether // we see any PostSync hooks
[ "startedPostSyncPhase", "detects", "if", "we", "have", "already", "started", "the", "PostSync", "stage", ".", "This", "is", "equivalent", "to", "whether", "we", "see", "any", "PostSync", "hooks" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/sync.go#L352-L359
161,682
argoproj/argo-cd
controller/sync.go
applyObject
func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun bool, force bool) appv1.ResourceResult { gvk := targetObj.GroupVersionKind() resDetails := appv1.ResourceResult{ Name: targetObj.GetName(), Group: gvk.Group, Version: gvk.Version, Kind: targetObj.GetKind(), Namespace: targetObj.GetNamespace(), } message, err := sc.kubectl.ApplyResource(sc.config, targetObj, targetObj.GetNamespace(), dryRun, force) if err != nil { resDetails.Message = err.Error() resDetails.Status = appv1.ResultCodeSyncFailed return resDetails } resDetails.Message = message resDetails.Status = appv1.ResultCodeSynced return resDetails }
go
func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun bool, force bool) appv1.ResourceResult { gvk := targetObj.GroupVersionKind() resDetails := appv1.ResourceResult{ Name: targetObj.GetName(), Group: gvk.Group, Version: gvk.Version, Kind: targetObj.GetKind(), Namespace: targetObj.GetNamespace(), } message, err := sc.kubectl.ApplyResource(sc.config, targetObj, targetObj.GetNamespace(), dryRun, force) if err != nil { resDetails.Message = err.Error() resDetails.Status = appv1.ResultCodeSyncFailed return resDetails } resDetails.Message = message resDetails.Status = appv1.ResultCodeSynced return resDetails }
[ "func", "(", "sc", "*", "syncContext", ")", "applyObject", "(", "targetObj", "*", "unstructured", ".", "Unstructured", ",", "dryRun", "bool", ",", "force", "bool", ")", "appv1", ".", "ResourceResult", "{", "gvk", ":=", "targetObj", ".", "GroupVersionKind", "(", ")", "\n", "resDetails", ":=", "appv1", ".", "ResourceResult", "{", "Name", ":", "targetObj", ".", "GetName", "(", ")", ",", "Group", ":", "gvk", ".", "Group", ",", "Version", ":", "gvk", ".", "Version", ",", "Kind", ":", "targetObj", ".", "GetKind", "(", ")", ",", "Namespace", ":", "targetObj", ".", "GetNamespace", "(", ")", ",", "}", "\n", "message", ",", "err", ":=", "sc", ".", "kubectl", ".", "ApplyResource", "(", "sc", ".", "config", ",", "targetObj", ",", "targetObj", ".", "GetNamespace", "(", ")", ",", "dryRun", ",", "force", ")", "\n", "if", "err", "!=", "nil", "{", "resDetails", ".", "Message", "=", "err", ".", "Error", "(", ")", "\n", "resDetails", ".", "Status", "=", "appv1", ".", "ResultCodeSyncFailed", "\n", "return", "resDetails", "\n", "}", "\n\n", "resDetails", ".", "Message", "=", "message", "\n", "resDetails", ".", "Status", "=", "appv1", ".", "ResultCodeSynced", "\n", "return", "resDetails", "\n", "}" ]
// applyObject performs a `kubectl apply` of a single resource
[ "applyObject", "performs", "a", "kubectl", "apply", "of", "a", "single", "resource" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/sync.go#L370-L389
161,683
argoproj/argo-cd
controller/sync.go
pruneObject
func (sc *syncContext) pruneObject(liveObj *unstructured.Unstructured, prune, dryRun bool) appv1.ResourceResult { gvk := liveObj.GroupVersionKind() resDetails := appv1.ResourceResult{ Name: liveObj.GetName(), Group: gvk.Group, Version: gvk.Version, Kind: liveObj.GetKind(), Namespace: liveObj.GetNamespace(), } if prune { if dryRun { resDetails.Message = "pruned (dry run)" resDetails.Status = appv1.ResultCodePruned } else { resDetails.Message = "pruned" resDetails.Status = appv1.ResultCodePruned // Skip deletion if object is already marked for deletion, so we don't cause a resource update hotloop deletionTimestamp := liveObj.GetDeletionTimestamp() if deletionTimestamp == nil || deletionTimestamp.IsZero() { err := sc.kubectl.DeleteResource(sc.config, liveObj.GroupVersionKind(), liveObj.GetName(), liveObj.GetNamespace(), false) if err != nil { resDetails.Message = err.Error() resDetails.Status = appv1.ResultCodeSyncFailed } } } } else { resDetails.Message = "ignored (requires pruning)" resDetails.Status = appv1.ResultCodePruneSkipped } return resDetails }
go
func (sc *syncContext) pruneObject(liveObj *unstructured.Unstructured, prune, dryRun bool) appv1.ResourceResult { gvk := liveObj.GroupVersionKind() resDetails := appv1.ResourceResult{ Name: liveObj.GetName(), Group: gvk.Group, Version: gvk.Version, Kind: liveObj.GetKind(), Namespace: liveObj.GetNamespace(), } if prune { if dryRun { resDetails.Message = "pruned (dry run)" resDetails.Status = appv1.ResultCodePruned } else { resDetails.Message = "pruned" resDetails.Status = appv1.ResultCodePruned // Skip deletion if object is already marked for deletion, so we don't cause a resource update hotloop deletionTimestamp := liveObj.GetDeletionTimestamp() if deletionTimestamp == nil || deletionTimestamp.IsZero() { err := sc.kubectl.DeleteResource(sc.config, liveObj.GroupVersionKind(), liveObj.GetName(), liveObj.GetNamespace(), false) if err != nil { resDetails.Message = err.Error() resDetails.Status = appv1.ResultCodeSyncFailed } } } } else { resDetails.Message = "ignored (requires pruning)" resDetails.Status = appv1.ResultCodePruneSkipped } return resDetails }
[ "func", "(", "sc", "*", "syncContext", ")", "pruneObject", "(", "liveObj", "*", "unstructured", ".", "Unstructured", ",", "prune", ",", "dryRun", "bool", ")", "appv1", ".", "ResourceResult", "{", "gvk", ":=", "liveObj", ".", "GroupVersionKind", "(", ")", "\n", "resDetails", ":=", "appv1", ".", "ResourceResult", "{", "Name", ":", "liveObj", ".", "GetName", "(", ")", ",", "Group", ":", "gvk", ".", "Group", ",", "Version", ":", "gvk", ".", "Version", ",", "Kind", ":", "liveObj", ".", "GetKind", "(", ")", ",", "Namespace", ":", "liveObj", ".", "GetNamespace", "(", ")", ",", "}", "\n", "if", "prune", "{", "if", "dryRun", "{", "resDetails", ".", "Message", "=", "\"", "\"", "\n", "resDetails", ".", "Status", "=", "appv1", ".", "ResultCodePruned", "\n", "}", "else", "{", "resDetails", ".", "Message", "=", "\"", "\"", "\n", "resDetails", ".", "Status", "=", "appv1", ".", "ResultCodePruned", "\n", "// Skip deletion if object is already marked for deletion, so we don't cause a resource update hotloop", "deletionTimestamp", ":=", "liveObj", ".", "GetDeletionTimestamp", "(", ")", "\n", "if", "deletionTimestamp", "==", "nil", "||", "deletionTimestamp", ".", "IsZero", "(", ")", "{", "err", ":=", "sc", ".", "kubectl", ".", "DeleteResource", "(", "sc", ".", "config", ",", "liveObj", ".", "GroupVersionKind", "(", ")", ",", "liveObj", ".", "GetName", "(", ")", ",", "liveObj", ".", "GetNamespace", "(", ")", ",", "false", ")", "\n", "if", "err", "!=", "nil", "{", "resDetails", ".", "Message", "=", "err", ".", "Error", "(", ")", "\n", "resDetails", ".", "Status", "=", "appv1", ".", "ResultCodeSyncFailed", "\n", "}", "\n", "}", "\n", "}", "\n", "}", "else", "{", "resDetails", ".", "Message", "=", "\"", "\"", "\n", "resDetails", ".", "Status", "=", "appv1", ".", "ResultCodePruneSkipped", "\n", "}", "\n", "return", "resDetails", "\n", "}" ]
// pruneObject deletes the object if both prune is true and dryRun is false. Otherwise it only records an appropriate message and status
[ "pruneObject", "deletes", "the", "object", "if", "both", "prune", "is", "true", "and", "dryRun", "is", "false", ".", "Otherwise", "it", "only", "records", "an", "appropriate", "message", "and", "status" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/sync.go#L392-L423
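The status/message combinations produced by pruneObject can be summarized in a tiny self-contained sketch; it deliberately omits the actual delete call and the deletion-timestamp guard, so treat it as an illustration of the result matrix only.
package main

import "fmt"

// pruneOutcome summarizes the result matrix of pruneObject in the record
// above: only prune=true with dryRun=false would actually delete (the delete
// itself is omitted here), and prune=false is reported as skipped.
func pruneOutcome(prune, dryRun bool) (status, message string) {
	switch {
	case !prune:
		return "PruneSkipped", "ignored (requires pruning)"
	case dryRun:
		return "Pruned", "pruned (dry run)"
	default:
		return "Pruned", "pruned"
	}
}

func main() {
	for _, c := range []struct{ prune, dryRun bool }{{false, false}, {true, true}, {true, false}} {
		status, msg := pruneOutcome(c.prune, c.dryRun)
		fmt.Printf("prune=%v dryRun=%v -> %s (%s)\n", c.prune, c.dryRun, status, msg)
	}
}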
161,684
argoproj/argo-cd
controller/sync.go
setResourceDetails
func (sc *syncContext) setResourceDetails(details *appv1.ResourceResult) { sc.lock.Lock() defer sc.lock.Unlock() for i, res := range sc.syncRes.Resources { if res.Group == details.Group && res.Kind == details.Kind && res.Namespace == details.Namespace && res.Name == details.Name { // update existing value if res.Status != details.Status { sc.log.Infof("updated resource %s/%s/%s status: %s -> %s", res.Kind, res.Namespace, res.Name, res.Status, details.Status) } if res.Message != details.Message { sc.log.Infof("updated resource %s/%s/%s message: %s -> %s", res.Kind, res.Namespace, res.Name, res.Message, details.Message) } sc.syncRes.Resources[i] = details return } } sc.log.Infof("added resource %s/%s status: %s, message: %s", details.Kind, details.Name, details.Status, details.Message) sc.syncRes.Resources = append(sc.syncRes.Resources, details) }
go
func (sc *syncContext) setResourceDetails(details *appv1.ResourceResult) { sc.lock.Lock() defer sc.lock.Unlock() for i, res := range sc.syncRes.Resources { if res.Group == details.Group && res.Kind == details.Kind && res.Namespace == details.Namespace && res.Name == details.Name { // update existing value if res.Status != details.Status { sc.log.Infof("updated resource %s/%s/%s status: %s -> %s", res.Kind, res.Namespace, res.Name, res.Status, details.Status) } if res.Message != details.Message { sc.log.Infof("updated resource %s/%s/%s message: %s -> %s", res.Kind, res.Namespace, res.Name, res.Message, details.Message) } sc.syncRes.Resources[i] = details return } } sc.log.Infof("added resource %s/%s status: %s, message: %s", details.Kind, details.Name, details.Status, details.Message) sc.syncRes.Resources = append(sc.syncRes.Resources, details) }
[ "func", "(", "sc", "*", "syncContext", ")", "setResourceDetails", "(", "details", "*", "appv1", ".", "ResourceResult", ")", "{", "sc", ".", "lock", ".", "Lock", "(", ")", "\n", "defer", "sc", ".", "lock", ".", "Unlock", "(", ")", "\n", "for", "i", ",", "res", ":=", "range", "sc", ".", "syncRes", ".", "Resources", "{", "if", "res", ".", "Group", "==", "details", ".", "Group", "&&", "res", ".", "Kind", "==", "details", ".", "Kind", "&&", "res", ".", "Namespace", "==", "details", ".", "Namespace", "&&", "res", ".", "Name", "==", "details", ".", "Name", "{", "// update existing value", "if", "res", ".", "Status", "!=", "details", ".", "Status", "{", "sc", ".", "log", ".", "Infof", "(", "\"", "\"", ",", "res", ".", "Kind", ",", "res", ".", "Namespace", ",", "res", ".", "Name", ",", "res", ".", "Status", ",", "details", ".", "Status", ")", "\n", "}", "\n", "if", "res", ".", "Message", "!=", "details", ".", "Message", "{", "sc", ".", "log", ".", "Infof", "(", "\"", "\"", ",", "res", ".", "Kind", ",", "res", ".", "Namespace", ",", "res", ".", "Name", ",", "res", ".", "Message", ",", "details", ".", "Message", ")", "\n", "}", "\n", "sc", ".", "syncRes", ".", "Resources", "[", "i", "]", "=", "details", "\n", "return", "\n", "}", "\n", "}", "\n", "sc", ".", "log", ".", "Infof", "(", "\"", "\"", ",", "details", ".", "Kind", ",", "details", ".", "Name", ",", "details", ".", "Status", ",", "details", ".", "Message", ")", "\n", "sc", ".", "syncRes", ".", "Resources", "=", "append", "(", "sc", ".", "syncRes", ".", "Resources", ",", "details", ")", "\n", "}" ]
// setResourceDetails sets the details of a resource in the SyncResult.Resources list
[ "setResourceDetails", "sets", "the", "details", "of", "a", "resource", "in", "the", "SyncResult", ".", "Resources", "list" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/controller/sync.go#L517-L535
161,685
argoproj/argo-cd
util/kustomize/kustomize.go
NewKustomizeApp
func NewKustomizeApp(path string, creds *GitCredentials) Kustomize { return &kustomize{ path: path, creds: creds, } }
go
func NewKustomizeApp(path string, creds *GitCredentials) Kustomize { return &kustomize{ path: path, creds: creds, } }
[ "func", "NewKustomizeApp", "(", "path", "string", ",", "creds", "*", "GitCredentials", ")", "Kustomize", "{", "return", "&", "kustomize", "{", "path", ":", "path", ",", "creds", ":", "creds", ",", "}", "\n", "}" ]
// NewKustomizeApp creates a new wrapper to run commands on the `kustomize` command-line tool.
[ "NewKustomizeApp", "creates", "a", "new", "wrapper", "to", "run", "commands", "on", "the", "kustomize", "command", "-", "line", "tool", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/kustomize/kustomize.go#L51-L56
161,686
argoproj/argo-cd
util/kustomize/kustomize.go
findKustomization
func (k *kustomize) findKustomization() (string, error) { for _, file := range KustomizationNames { kustomization := filepath.Join(k.path, file) if _, err := os.Stat(kustomization); err == nil { return kustomization, nil } } return "", errors.New("did not find kustomization in " + k.path) }
go
func (k *kustomize) findKustomization() (string, error) { for _, file := range KustomizationNames { kustomization := filepath.Join(k.path, file) if _, err := os.Stat(kustomization); err == nil { return kustomization, nil } } return "", errors.New("did not find kustomization in " + k.path) }
[ "func", "(", "k", "*", "kustomize", ")", "findKustomization", "(", ")", "(", "string", ",", "error", ")", "{", "for", "_", ",", "file", ":=", "range", "KustomizationNames", "{", "kustomization", ":=", "filepath", ".", "Join", "(", "k", ".", "path", ",", "file", ")", "\n", "if", "_", ",", "err", ":=", "os", ".", "Stat", "(", "kustomization", ")", ";", "err", "==", "nil", "{", "return", "kustomization", ",", "nil", "\n", "}", "\n", "}", "\n", "return", "\"", "\"", ",", "errors", ".", "New", "(", "\"", "\"", "+", "k", ".", "path", ")", "\n", "}" ]
// kustomization is a file that describes a configuration consumable by kustomize.
[ "kustomization", "is", "a", "file", "that", "describes", "a", "configuration", "consumable", "by", "kustomize", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/kustomize/kustomize.go#L150-L158
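KustomizationNames is referenced but not defined in this record; the candidate file names below are an assumption. The following is a self-contained sketch of the same first-match lookup written as a free function.
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// kustomizationNames is assumed to hold the usual kustomization file name
// candidates; the real KustomizationNames value is not part of this record.
var kustomizationNames = []string{"kustomization.yaml", "kustomization.yml", "Kustomization"}

// findKustomization returns the first candidate that exists under dir,
// mirroring the method in the record above.
func findKustomization(dir string) (string, error) {
	for _, name := range kustomizationNames {
		candidate := filepath.Join(dir, name)
		if _, err := os.Stat(candidate); err == nil {
			return candidate, nil
		}
	}
	return "", errors.New("did not find kustomization in " + dir)
}

func main() {
	path, err := findKustomization(".")
	fmt.Println(path, err)
}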
161,687
argoproj/argo-cd
util/dex/dex.go
NewDexRewriteURLRoundTripper
func NewDexRewriteURLRoundTripper(dexServerAddr string, T http.RoundTripper) DexRewriteURLRoundTripper { dexURL, _ := url.Parse(dexServerAddr) return DexRewriteURLRoundTripper{ DexURL: dexURL, T: T, } }
go
func NewDexRewriteURLRoundTripper(dexServerAddr string, T http.RoundTripper) DexRewriteURLRoundTripper { dexURL, _ := url.Parse(dexServerAddr) return DexRewriteURLRoundTripper{ DexURL: dexURL, T: T, } }
[ "func", "NewDexRewriteURLRoundTripper", "(", "dexServerAddr", "string", ",", "T", "http", ".", "RoundTripper", ")", "DexRewriteURLRoundTripper", "{", "dexURL", ",", "_", ":=", "url", ".", "Parse", "(", "dexServerAddr", ")", "\n", "return", "DexRewriteURLRoundTripper", "{", "DexURL", ":", "dexURL", ",", "T", ":", "T", ",", "}", "\n", "}" ]
// NewDexRewriteURLRoundTripper creates a new DexRewriteURLRoundTripper
[ "NewDexRewriteURLRoundTripper", "creates", "a", "new", "DexRewriteURLRoundTripper" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/dex/dex.go#L59-L65
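NewDexRewriteURLRoundTripper discards the error from url.Parse, which is only safe when dexServerAddr is known to be well-formed. Below is a hedged sketch of an error-checked variant; the function name and the trimmed-down struct are hypothetical, not part of the real package.
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// DexRewriteURLRoundTripper mirrors the struct used above only as far as the
// two fields the constructor sets; the real type also implements RoundTrip.
type DexRewriteURLRoundTripper struct {
	DexURL *url.URL
	T      http.RoundTripper
}

// newDexRewriteURLRoundTripperE is a hypothetical, error-checked variant of
// the constructor in this record: it surfaces the url.Parse failure instead
// of silently producing a nil DexURL.
func newDexRewriteURLRoundTripperE(dexServerAddr string, t http.RoundTripper) (DexRewriteURLRoundTripper, error) {
	dexURL, err := url.Parse(dexServerAddr)
	if err != nil {
		return DexRewriteURLRoundTripper{}, fmt.Errorf("invalid dex server address %q: %v", dexServerAddr, err)
	}
	return DexRewriteURLRoundTripper{DexURL: dexURL, T: t}, nil
}

func main() {
	rt, err := newDexRewriteURLRoundTripperE("http://dex-server:5556", http.DefaultTransport)
	fmt.Println(rt.DexURL, err)
}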
161,688
argoproj/argo-cd
common/installer.go
CreateServiceAccount
func CreateServiceAccount( clientset kubernetes.Interface, serviceAccountName string, namespace string, ) error { serviceAccount := apiv1.ServiceAccount{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "ServiceAccount", }, ObjectMeta: metav1.ObjectMeta{ Name: serviceAccountName, Namespace: namespace, }, } _, err := clientset.CoreV1().ServiceAccounts(namespace).Create(&serviceAccount) if err != nil { if !apierr.IsAlreadyExists(err) { return fmt.Errorf("Failed to create service account %q: %v", serviceAccountName, err) } log.Infof("ServiceAccount %q already exists", serviceAccountName) return nil } log.Infof("ServiceAccount %q created", serviceAccountName) return nil }
go
func CreateServiceAccount( clientset kubernetes.Interface, serviceAccountName string, namespace string, ) error { serviceAccount := apiv1.ServiceAccount{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "ServiceAccount", }, ObjectMeta: metav1.ObjectMeta{ Name: serviceAccountName, Namespace: namespace, }, } _, err := clientset.CoreV1().ServiceAccounts(namespace).Create(&serviceAccount) if err != nil { if !apierr.IsAlreadyExists(err) { return fmt.Errorf("Failed to create service account %q: %v", serviceAccountName, err) } log.Infof("ServiceAccount %q already exists", serviceAccountName) return nil } log.Infof("ServiceAccount %q created", serviceAccountName) return nil }
[ "func", "CreateServiceAccount", "(", "clientset", "kubernetes", ".", "Interface", ",", "serviceAccountName", "string", ",", "namespace", "string", ",", ")", "error", "{", "serviceAccount", ":=", "apiv1", ".", "ServiceAccount", "{", "TypeMeta", ":", "metav1", ".", "TypeMeta", "{", "APIVersion", ":", "\"", "\"", ",", "Kind", ":", "\"", "\"", ",", "}", ",", "ObjectMeta", ":", "metav1", ".", "ObjectMeta", "{", "Name", ":", "serviceAccountName", ",", "Namespace", ":", "namespace", ",", "}", ",", "}", "\n", "_", ",", "err", ":=", "clientset", ".", "CoreV1", "(", ")", ".", "ServiceAccounts", "(", "namespace", ")", ".", "Create", "(", "&", "serviceAccount", ")", "\n", "if", "err", "!=", "nil", "{", "if", "!", "apierr", ".", "IsAlreadyExists", "(", "err", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "serviceAccountName", ",", "err", ")", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "serviceAccountName", ")", "\n", "return", "nil", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "serviceAccountName", ")", "\n", "return", "nil", "\n", "}" ]
// CreateServiceAccount creates a service account
[ "CreateServiceAccount", "creates", "a", "service", "account" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/common/installer.go#L37-L62
161,689
argoproj/argo-cd
common/installer.go
CreateClusterRole
func CreateClusterRole( clientset kubernetes.Interface, clusterRoleName string, rules []rbacv1.PolicyRule, ) error { clusterRole := rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{ APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", }, ObjectMeta: metav1.ObjectMeta{ Name: clusterRoleName, }, Rules: rules, } crclient := clientset.RbacV1().ClusterRoles() _, err := crclient.Create(&clusterRole) if err != nil { if !apierr.IsAlreadyExists(err) { return fmt.Errorf("Failed to create ClusterRole %q: %v", clusterRoleName, err) } _, err = crclient.Update(&clusterRole) if err != nil { return fmt.Errorf("Failed to update ClusterRole %q: %v", clusterRoleName, err) } log.Infof("ClusterRole %q updated", clusterRoleName) } else { log.Infof("ClusterRole %q created", clusterRoleName) } return nil }
go
func CreateClusterRole( clientset kubernetes.Interface, clusterRoleName string, rules []rbacv1.PolicyRule, ) error { clusterRole := rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{ APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole", }, ObjectMeta: metav1.ObjectMeta{ Name: clusterRoleName, }, Rules: rules, } crclient := clientset.RbacV1().ClusterRoles() _, err := crclient.Create(&clusterRole) if err != nil { if !apierr.IsAlreadyExists(err) { return fmt.Errorf("Failed to create ClusterRole %q: %v", clusterRoleName, err) } _, err = crclient.Update(&clusterRole) if err != nil { return fmt.Errorf("Failed to update ClusterRole %q: %v", clusterRoleName, err) } log.Infof("ClusterRole %q updated", clusterRoleName) } else { log.Infof("ClusterRole %q created", clusterRoleName) } return nil }
[ "func", "CreateClusterRole", "(", "clientset", "kubernetes", ".", "Interface", ",", "clusterRoleName", "string", ",", "rules", "[", "]", "rbacv1", ".", "PolicyRule", ",", ")", "error", "{", "clusterRole", ":=", "rbacv1", ".", "ClusterRole", "{", "TypeMeta", ":", "metav1", ".", "TypeMeta", "{", "APIVersion", ":", "\"", "\"", ",", "Kind", ":", "\"", "\"", ",", "}", ",", "ObjectMeta", ":", "metav1", ".", "ObjectMeta", "{", "Name", ":", "clusterRoleName", ",", "}", ",", "Rules", ":", "rules", ",", "}", "\n", "crclient", ":=", "clientset", ".", "RbacV1", "(", ")", ".", "ClusterRoles", "(", ")", "\n", "_", ",", "err", ":=", "crclient", ".", "Create", "(", "&", "clusterRole", ")", "\n", "if", "err", "!=", "nil", "{", "if", "!", "apierr", ".", "IsAlreadyExists", "(", "err", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "clusterRoleName", ",", "err", ")", "\n", "}", "\n", "_", ",", "err", "=", "crclient", ".", "Update", "(", "&", "clusterRole", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "clusterRoleName", ",", "err", ")", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "clusterRoleName", ")", "\n", "}", "else", "{", "log", ".", "Infof", "(", "\"", "\"", ",", "clusterRoleName", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// CreateClusterRole creates a cluster role
[ "CreateClusterRole", "creates", "a", "cluster", "role" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/common/installer.go#L65-L95
161,690
argoproj/argo-cd
common/installer.go
CreateClusterRoleBinding
func CreateClusterRoleBinding( clientset kubernetes.Interface, clusterBindingRoleName, serviceAccountName, clusterRoleName string, namespace string, ) error { roleBinding := rbacv1.ClusterRoleBinding{ TypeMeta: metav1.TypeMeta{ APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRoleBinding", }, ObjectMeta: metav1.ObjectMeta{ Name: clusterBindingRoleName, }, RoleRef: rbacv1.RoleRef{ APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: clusterRoleName, }, Subjects: []rbacv1.Subject{ { Kind: rbacv1.ServiceAccountKind, Name: serviceAccountName, Namespace: namespace, }, }, } _, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding) if err != nil { if !apierr.IsAlreadyExists(err) { return fmt.Errorf("Failed to create ClusterRoleBinding %s: %v", clusterBindingRoleName, err) } log.Infof("ClusterRoleBinding %q already exists", clusterBindingRoleName) return nil } log.Infof("ClusterRoleBinding %q created, bound %q to %q", clusterBindingRoleName, serviceAccountName, clusterRoleName) return nil }
go
func CreateClusterRoleBinding( clientset kubernetes.Interface, clusterBindingRoleName, serviceAccountName, clusterRoleName string, namespace string, ) error { roleBinding := rbacv1.ClusterRoleBinding{ TypeMeta: metav1.TypeMeta{ APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRoleBinding", }, ObjectMeta: metav1.ObjectMeta{ Name: clusterBindingRoleName, }, RoleRef: rbacv1.RoleRef{ APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: clusterRoleName, }, Subjects: []rbacv1.Subject{ { Kind: rbacv1.ServiceAccountKind, Name: serviceAccountName, Namespace: namespace, }, }, } _, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding) if err != nil { if !apierr.IsAlreadyExists(err) { return fmt.Errorf("Failed to create ClusterRoleBinding %s: %v", clusterBindingRoleName, err) } log.Infof("ClusterRoleBinding %q already exists", clusterBindingRoleName) return nil } log.Infof("ClusterRoleBinding %q created, bound %q to %q", clusterBindingRoleName, serviceAccountName, clusterRoleName) return nil }
[ "func", "CreateClusterRoleBinding", "(", "clientset", "kubernetes", ".", "Interface", ",", "clusterBindingRoleName", ",", "serviceAccountName", ",", "clusterRoleName", "string", ",", "namespace", "string", ",", ")", "error", "{", "roleBinding", ":=", "rbacv1", ".", "ClusterRoleBinding", "{", "TypeMeta", ":", "metav1", ".", "TypeMeta", "{", "APIVersion", ":", "\"", "\"", ",", "Kind", ":", "\"", "\"", ",", "}", ",", "ObjectMeta", ":", "metav1", ".", "ObjectMeta", "{", "Name", ":", "clusterBindingRoleName", ",", "}", ",", "RoleRef", ":", "rbacv1", ".", "RoleRef", "{", "APIGroup", ":", "\"", "\"", ",", "Kind", ":", "\"", "\"", ",", "Name", ":", "clusterRoleName", ",", "}", ",", "Subjects", ":", "[", "]", "rbacv1", ".", "Subject", "{", "{", "Kind", ":", "rbacv1", ".", "ServiceAccountKind", ",", "Name", ":", "serviceAccountName", ",", "Namespace", ":", "namespace", ",", "}", ",", "}", ",", "}", "\n", "_", ",", "err", ":=", "clientset", ".", "RbacV1", "(", ")", ".", "ClusterRoleBindings", "(", ")", ".", "Create", "(", "&", "roleBinding", ")", "\n", "if", "err", "!=", "nil", "{", "if", "!", "apierr", ".", "IsAlreadyExists", "(", "err", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "clusterBindingRoleName", ",", "err", ")", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "clusterBindingRoleName", ")", "\n", "return", "nil", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "clusterBindingRoleName", ",", "serviceAccountName", ",", "clusterRoleName", ")", "\n", "return", "nil", "\n", "}" ]
// CreateClusterRoleBinding creates a ClusterRoleBinding
[ "CreateClusterRoleBinding", "creates", "a", "ClusterRoleBinding" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/common/installer.go#L98-L136
161,691
argoproj/argo-cd
common/installer.go
InstallClusterManagerRBAC
func InstallClusterManagerRBAC(clientset kubernetes.Interface) (string, error) { const ns = "kube-system" err := CreateServiceAccount(clientset, ArgoCDManagerServiceAccount, ns) if err != nil { return "", err } err = CreateClusterRole(clientset, ArgoCDManagerClusterRole, ArgoCDManagerPolicyRules) if err != nil { return "", err } err = CreateClusterRoleBinding(clientset, ArgoCDManagerClusterRoleBinding, ArgoCDManagerServiceAccount, ArgoCDManagerClusterRole, ns) if err != nil { return "", err } var serviceAccount *apiv1.ServiceAccount var secretName string err = wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) { serviceAccount, err = clientset.CoreV1().ServiceAccounts(ns).Get(ArgoCDManagerServiceAccount, metav1.GetOptions{}) if err != nil { return false, err } if len(serviceAccount.Secrets) == 0 { return false, nil } secretName = serviceAccount.Secrets[0].Name return true, nil }) if err != nil { return "", fmt.Errorf("Failed to wait for service account secret: %v", err) } secret, err := clientset.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("Failed to retrieve secret %q: %v", secretName, err) } token, ok := secret.Data["token"] if !ok { return "", fmt.Errorf("Secret %q for service account %q did not have a token", secretName, serviceAccount) } return string(token), nil }
go
func InstallClusterManagerRBAC(clientset kubernetes.Interface) (string, error) {
	const ns = "kube-system"

	err := CreateServiceAccount(clientset, ArgoCDManagerServiceAccount, ns)
	if err != nil {
		return "", err
	}

	err = CreateClusterRole(clientset, ArgoCDManagerClusterRole, ArgoCDManagerPolicyRules)
	if err != nil {
		return "", err
	}

	err = CreateClusterRoleBinding(clientset, ArgoCDManagerClusterRoleBinding, ArgoCDManagerServiceAccount, ArgoCDManagerClusterRole, ns)
	if err != nil {
		return "", err
	}

	var serviceAccount *apiv1.ServiceAccount
	var secretName string
	err = wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) {
		serviceAccount, err = clientset.CoreV1().ServiceAccounts(ns).Get(ArgoCDManagerServiceAccount, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if len(serviceAccount.Secrets) == 0 {
			return false, nil
		}
		secretName = serviceAccount.Secrets[0].Name
		return true, nil
	})
	if err != nil {
		return "", fmt.Errorf("Failed to wait for service account secret: %v", err)
	}
	secret, err := clientset.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("Failed to retrieve secret %q: %v", secretName, err)
	}
	token, ok := secret.Data["token"]
	if !ok {
		return "", fmt.Errorf("Secret %q for service account %q did not have a token", secretName, serviceAccount)
	}
	return string(token), nil
}
[ "func", "InstallClusterManagerRBAC", "(", "clientset", "kubernetes", ".", "Interface", ")", "(", "string", ",", "error", ")", "{", "const", "ns", "=", "\"", "\"", "\n\n", "err", ":=", "CreateServiceAccount", "(", "clientset", ",", "ArgoCDManagerServiceAccount", ",", "ns", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n\n", "err", "=", "CreateClusterRole", "(", "clientset", ",", "ArgoCDManagerClusterRole", ",", "ArgoCDManagerPolicyRules", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n\n", "err", "=", "CreateClusterRoleBinding", "(", "clientset", ",", "ArgoCDManagerClusterRoleBinding", ",", "ArgoCDManagerServiceAccount", ",", "ArgoCDManagerClusterRole", ",", "ns", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n\n", "var", "serviceAccount", "*", "apiv1", ".", "ServiceAccount", "\n", "var", "secretName", "string", "\n", "err", "=", "wait", ".", "Poll", "(", "500", "*", "time", ".", "Millisecond", ",", "30", "*", "time", ".", "Second", ",", "func", "(", ")", "(", "bool", ",", "error", ")", "{", "serviceAccount", ",", "err", "=", "clientset", ".", "CoreV1", "(", ")", ".", "ServiceAccounts", "(", "ns", ")", ".", "Get", "(", "ArgoCDManagerServiceAccount", ",", "metav1", ".", "GetOptions", "{", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "false", ",", "err", "\n", "}", "\n", "if", "len", "(", "serviceAccount", ".", "Secrets", ")", "==", "0", "{", "return", "false", ",", "nil", "\n", "}", "\n", "secretName", "=", "serviceAccount", ".", "Secrets", "[", "0", "]", ".", "Name", "\n", "return", "true", ",", "nil", "\n", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "secret", ",", "err", ":=", "clientset", ".", "CoreV1", "(", ")", ".", "Secrets", "(", "ns", ")", ".", "Get", "(", "secretName", ",", "metav1", ".", "GetOptions", "{", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "secretName", ",", "err", ")", "\n", "}", "\n", "token", ",", "ok", ":=", "secret", ".", "Data", "[", "\"", "\"", "]", "\n", "if", "!", "ok", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "secretName", ",", "serviceAccount", ")", "\n", "}", "\n", "return", "string", "(", "token", ")", ",", "nil", "\n", "}" ]
// InstallClusterManagerRBAC installs RBAC resources for a cluster manager to operate a cluster. Returns a token
[ "InstallClusterManagerRBAC", "installs", "RBAC", "resources", "for", "a", "cluster", "manager", "to", "operate", "a", "cluster", ".", "Returns", "a", "token" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/common/installer.go#L139-L182
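A hedged sketch of how a caller might use this installer to bootstrap remote-cluster access. The import path, the helper name, and the way the token is consumed afterwards are assumptions, not taken from the record.

package example

import (
	"log"

	"github.com/argoproj/argo-cd/common" // assumed import path for common/installer.go
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// bootstrapClusterAccess installs the manager service account, cluster role,
// and binding, waits for the account's token secret, and wraps the bearer
// token in a rest.Config that another process could use against this cluster.
func bootstrapClusterAccess(clientset kubernetes.Interface, apiServerURL string) (*rest.Config, error) {
	token, err := common.InstallClusterManagerRBAC(clientset)
	if err != nil {
		return nil, err
	}
	log.Printf("obtained bearer token (%d bytes)", len(token))
	// TLS settings are omitted here; a real caller would also set CA data.
	return &rest.Config{Host: apiServerURL, BearerToken: token}, nil
}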
161,692
argoproj/argo-cd
common/installer.go
UninstallClusterManagerRBAC
func UninstallClusterManagerRBAC(clientset kubernetes.Interface) error {
	return UninstallRBAC(clientset, "kube-system", ArgoCDManagerClusterRoleBinding, ArgoCDManagerClusterRole, ArgoCDManagerServiceAccount)
}
go
func UninstallClusterManagerRBAC(clientset kubernetes.Interface) error {
	return UninstallRBAC(clientset, "kube-system", ArgoCDManagerClusterRoleBinding, ArgoCDManagerClusterRole, ArgoCDManagerServiceAccount)
}
[ "func", "UninstallClusterManagerRBAC", "(", "clientset", "kubernetes", ".", "Interface", ")", "error", "{", "return", "UninstallRBAC", "(", "clientset", ",", "\"", "\"", ",", "ArgoCDManagerClusterRoleBinding", ",", "ArgoCDManagerClusterRole", ",", "ArgoCDManagerServiceAccount", ")", "\n", "}" ]
// UninstallClusterManagerRBAC removes RBAC resources for a cluster manager to operate a cluster
[ "UninstallClusterManagerRBAC", "removes", "RBAC", "resources", "for", "a", "cluster", "manager", "to", "operate", "a", "cluster" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/common/installer.go#L185-L187
161,693
argoproj/argo-cd
common/installer.go
UninstallRBAC
func UninstallRBAC(clientset kubernetes.Interface, namespace, bindingName, roleName, serviceAccount string) error {
	if err := clientset.RbacV1().ClusterRoleBindings().Delete(bindingName, &metav1.DeleteOptions{}); err != nil {
		if !apierr.IsNotFound(err) {
			return fmt.Errorf("Failed to delete ClusterRoleBinding: %v", err)
		}
		log.Infof("ClusterRoleBinding %q not found", bindingName)
	} else {
		log.Infof("ClusterRoleBinding %q deleted", bindingName)
	}

	if err := clientset.RbacV1().ClusterRoles().Delete(roleName, &metav1.DeleteOptions{}); err != nil {
		if !apierr.IsNotFound(err) {
			return fmt.Errorf("Failed to delete ClusterRole: %v", err)
		}
		log.Infof("ClusterRole %q not found", roleName)
	} else {
		log.Infof("ClusterRole %q deleted", roleName)
	}

	if err := clientset.CoreV1().ServiceAccounts(namespace).Delete(serviceAccount, &metav1.DeleteOptions{}); err != nil {
		if !apierr.IsNotFound(err) {
			return fmt.Errorf("Failed to delete ServiceAccount: %v", err)
		}
		log.Infof("ServiceAccount %q in namespace %q not found", serviceAccount, namespace)
	} else {
		log.Infof("ServiceAccount %q deleted", serviceAccount)
	}
	return nil
}
go
func UninstallRBAC(clientset kubernetes.Interface, namespace, bindingName, roleName, serviceAccount string) error {
	if err := clientset.RbacV1().ClusterRoleBindings().Delete(bindingName, &metav1.DeleteOptions{}); err != nil {
		if !apierr.IsNotFound(err) {
			return fmt.Errorf("Failed to delete ClusterRoleBinding: %v", err)
		}
		log.Infof("ClusterRoleBinding %q not found", bindingName)
	} else {
		log.Infof("ClusterRoleBinding %q deleted", bindingName)
	}

	if err := clientset.RbacV1().ClusterRoles().Delete(roleName, &metav1.DeleteOptions{}); err != nil {
		if !apierr.IsNotFound(err) {
			return fmt.Errorf("Failed to delete ClusterRole: %v", err)
		}
		log.Infof("ClusterRole %q not found", roleName)
	} else {
		log.Infof("ClusterRole %q deleted", roleName)
	}

	if err := clientset.CoreV1().ServiceAccounts(namespace).Delete(serviceAccount, &metav1.DeleteOptions{}); err != nil {
		if !apierr.IsNotFound(err) {
			return fmt.Errorf("Failed to delete ServiceAccount: %v", err)
		}
		log.Infof("ServiceAccount %q in namespace %q not found", serviceAccount, namespace)
	} else {
		log.Infof("ServiceAccount %q deleted", serviceAccount)
	}
	return nil
}
[ "func", "UninstallRBAC", "(", "clientset", "kubernetes", ".", "Interface", ",", "namespace", ",", "bindingName", ",", "roleName", ",", "serviceAccount", "string", ")", "error", "{", "if", "err", ":=", "clientset", ".", "RbacV1", "(", ")", ".", "ClusterRoleBindings", "(", ")", ".", "Delete", "(", "bindingName", ",", "&", "metav1", ".", "DeleteOptions", "{", "}", ")", ";", "err", "!=", "nil", "{", "if", "!", "apierr", ".", "IsNotFound", "(", "err", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "bindingName", ")", "\n", "}", "else", "{", "log", ".", "Infof", "(", "\"", "\"", ",", "bindingName", ")", "\n", "}", "\n\n", "if", "err", ":=", "clientset", ".", "RbacV1", "(", ")", ".", "ClusterRoles", "(", ")", ".", "Delete", "(", "roleName", ",", "&", "metav1", ".", "DeleteOptions", "{", "}", ")", ";", "err", "!=", "nil", "{", "if", "!", "apierr", ".", "IsNotFound", "(", "err", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "roleName", ")", "\n", "}", "else", "{", "log", ".", "Infof", "(", "\"", "\"", ",", "roleName", ")", "\n", "}", "\n\n", "if", "err", ":=", "clientset", ".", "CoreV1", "(", ")", ".", "ServiceAccounts", "(", "namespace", ")", ".", "Delete", "(", "serviceAccount", ",", "&", "metav1", ".", "DeleteOptions", "{", "}", ")", ";", "err", "!=", "nil", "{", "if", "!", "apierr", ".", "IsNotFound", "(", "err", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "log", ".", "Infof", "(", "\"", "\"", ",", "serviceAccount", ",", "namespace", ")", "\n", "}", "else", "{", "log", ".", "Infof", "(", "\"", "\"", ",", "serviceAccount", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// UninstallRBAC uninstalls RBAC related resources for a binding, role, and service account
[ "UninstallRBAC", "uninstalls", "RBAC", "related", "resources", "for", "a", "binding", "role", "and", "service", "account" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/common/installer.go#L190-L218
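A short sketch of tearing down a custom trio of RBAC resources with this helper; the resource names and the common import path are illustrative. UninstallClusterManagerRBAC above is just this call with the Argo CD manager constants.

package example

import (
	"github.com/argoproj/argo-cd/common" // assumed import path for common/installer.go
	"k8s.io/client-go/kubernetes"
)

// teardown deletes a binding/role/service-account trio created earlier.
// Deletion is tolerant: resources that are already gone are only logged.
func teardown(clientset kubernetes.Interface) error {
	return common.UninstallRBAC(clientset,
		"kube-system",             // namespace holding the service account
		"example-manager-binding", // ClusterRoleBinding name (illustrative)
		"example-manager-role",    // ClusterRole name (illustrative)
		"example-manager",         // ServiceAccount name (illustrative)
	)
}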
161,694
argoproj/argo-cd
util/hook/hook.go
IsHelmHook
func IsHelmHook(obj *unstructured.Unstructured) bool {
	annotations := obj.GetAnnotations()
	if annotations == nil {
		return false
	}
	hooks, ok := annotations[common.AnnotationKeyHelmHook]
	if ok && hasHook(hooks, common.AnnotationValueHelmHookCRDInstall) {
		return false
	}
	return ok
}
go
func IsHelmHook(obj *unstructured.Unstructured) bool {
	annotations := obj.GetAnnotations()
	if annotations == nil {
		return false
	}
	hooks, ok := annotations[common.AnnotationKeyHelmHook]
	if ok && hasHook(hooks, common.AnnotationValueHelmHookCRDInstall) {
		return false
	}
	return ok
}
[ "func", "IsHelmHook", "(", "obj", "*", "unstructured", ".", "Unstructured", ")", "bool", "{", "annotations", ":=", "obj", ".", "GetAnnotations", "(", ")", "\n", "if", "annotations", "==", "nil", "{", "return", "false", "\n", "}", "\n", "hooks", ",", "ok", ":=", "annotations", "[", "common", ".", "AnnotationKeyHelmHook", "]", "\n", "if", "ok", "&&", "hasHook", "(", "hooks", ",", "common", ".", "AnnotationValueHelmHookCRDInstall", ")", "{", "return", "false", "\n", "}", "\n", "return", "ok", "\n", "}" ]
// IsHelmHook indicates if the supplied object is a helm hook
[ "IsHelmHook", "indicates", "if", "the", "supplied", "object", "is", "a", "helm", "hook" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/hook/hook.go#L18-L28
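A small sketch of the predicate in use. The import paths are assumed from the repo layout, and it is assumed that common.AnnotationKeyHelmHook is Helm's standard hook annotation key; the hook value "pre-install" is illustrative.

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/common"    // assumed import path for the annotation constants
	"github.com/argoproj/argo-cd/util/hook" // assumed import path for util/hook
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{}}

	// An object carrying an ordinary Helm hook annotation is reported as a hook.
	obj.SetAnnotations(map[string]string{common.AnnotationKeyHelmHook: "pre-install"})
	fmt.Println(hook.IsHelmHook(obj)) // true

	// Without the hook annotation the object is not a hook. (crd-install hooks
	// are also excluded, since Argo CD handles CRDs itself.)
	obj.SetAnnotations(map[string]string{})
	fmt.Println(hook.IsHelmHook(obj)) // false
}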
161,695
argoproj/argo-cd
util/hash/hash.go
FNVa
func FNVa(s string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(s))
	return h.Sum32()
}
go
func FNVa(s string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(s))
	return h.Sum32()
}
[ "func", "FNVa", "(", "s", "string", ")", "uint32", "{", "h", ":=", "fnv", ".", "New32a", "(", ")", "\n", "_", ",", "_", "=", "h", ".", "Write", "(", "[", "]", "byte", "(", "s", ")", ")", "\n", "return", "h", ".", "Sum32", "(", ")", "\n", "}" ]
// FNVa computes a FNVa hash on a string
[ "FNVa", "computes", "a", "FNVa", "hash", "on", "a", "string" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/hash/hash.go#L8-L12
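Because FNVa is only a thin wrapper over the standard library, the same digest can be reproduced with hash/fnv alone. A self-contained sketch (the sample input string is arbitrary):

package main

import (
	"fmt"
	"hash/fnv"
)

// fnv32a computes the same 32-bit FNV-1a digest that FNVa returns.
func fnv32a(s string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(s)) // Write on an fnv hash never returns an error
	return h.Sum32()
}

func main() {
	// Hashes like this serve as cheap, deterministic bucketing keys;
	// FNV-1a is not a cryptographic hash.
	fmt.Println(fnv32a("guestbook"))
}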
161,696
argoproj/argo-cd
util/oidc/oidc.go
generateAppState
func (a *ClientApp) generateAppState(returnURL string) string {
	randStr := rand.RandString(10)
	if returnURL == "" {
		returnURL = "/"
	}
	err := a.cache.SetOIDCState(randStr, &cache.OIDCState{ReturnURL: returnURL})
	if err != nil {
		// This should never happen with the in-memory cache
		log.Errorf("Failed to set app state: %v", err)
	}
	return randStr
}
go
func (a *ClientApp) generateAppState(returnURL string) string {
	randStr := rand.RandString(10)
	if returnURL == "" {
		returnURL = "/"
	}
	err := a.cache.SetOIDCState(randStr, &cache.OIDCState{ReturnURL: returnURL})
	if err != nil {
		// This should never happen with the in-memory cache
		log.Errorf("Failed to set app state: %v", err)
	}
	return randStr
}
[ "func", "(", "a", "*", "ClientApp", ")", "generateAppState", "(", "returnURL", "string", ")", "string", "{", "randStr", ":=", "rand", ".", "RandString", "(", "10", ")", "\n", "if", "returnURL", "==", "\"", "\"", "{", "returnURL", "=", "\"", "\"", "\n", "}", "\n", "err", ":=", "a", ".", "cache", ".", "SetOIDCState", "(", "randStr", ",", "&", "cache", ".", "OIDCState", "{", "ReturnURL", ":", "returnURL", "}", ")", "\n", "if", "err", "!=", "nil", "{", "// This should never happen with the in-memory cache", "log", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "return", "randStr", "\n", "}" ]
// generateAppState creates an app state nonce
[ "generateAppState", "creates", "an", "app", "state", "nonce" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/oidc/oidc.go#L125-L136
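The method is unexported and depends on ClientApp's cache, so the sketch below re-creates the same pattern with only the standard library: mint a random nonce, default the return URL to "/", and remember the pair server-side. The map-based store and base64 nonce are assumptions; the real code uses its own cache and rand.RandString(10).

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// stateStore stands in for the OIDC state cache: nonce -> return URL.
var stateStore = map[string]string{}

// generateState mirrors generateAppState: random nonce, "/" as the default
// return URL, and the pair stored so the callback can verify the state later.
func generateState(returnURL string) (string, error) {
	buf := make([]byte, 10)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	nonce := base64.RawURLEncoding.EncodeToString(buf)
	if returnURL == "" {
		returnURL = "/"
	}
	stateStore[nonce] = returnURL
	return nonce, nil
}

func main() {
	state, _ := generateState("/applications")
	fmt.Println(state, "->", stateStore[state])
}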
161,697
argoproj/argo-cd
util/oidc/oidc.go
handleImplicitFlow
func (a *ClientApp) handleImplicitFlow(w http.ResponseWriter, r *http.Request, state string) {
	type implicitFlowValues struct {
		CookieName string
		ReturnURL  string
	}
	vals := implicitFlowValues{
		CookieName: common.AuthCookieName,
	}
	if state != "" {
		appState, err := a.verifyAppState(state)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		vals.ReturnURL = appState.ReturnURL
	}
	renderTemplate(w, implicitFlowTmpl, vals)
}
go
func (a *ClientApp) handleImplicitFlow(w http.ResponseWriter, r *http.Request, state string) {
	type implicitFlowValues struct {
		CookieName string
		ReturnURL  string
	}
	vals := implicitFlowValues{
		CookieName: common.AuthCookieName,
	}
	if state != "" {
		appState, err := a.verifyAppState(state)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		vals.ReturnURL = appState.ReturnURL
	}
	renderTemplate(w, implicitFlowTmpl, vals)
}
[ "func", "(", "a", "*", "ClientApp", ")", "handleImplicitFlow", "(", "w", "http", ".", "ResponseWriter", ",", "r", "*", "http", ".", "Request", ",", "state", "string", ")", "{", "type", "implicitFlowValues", "struct", "{", "CookieName", "string", "\n", "ReturnURL", "string", "\n", "}", "\n", "vals", ":=", "implicitFlowValues", "{", "CookieName", ":", "common", ".", "AuthCookieName", ",", "}", "\n", "if", "state", "!=", "\"", "\"", "{", "appState", ",", "err", ":=", "a", ".", "verifyAppState", "(", "state", ")", "\n", "if", "err", "!=", "nil", "{", "http", ".", "Error", "(", "w", ",", "err", ".", "Error", "(", ")", ",", "http", ".", "StatusBadRequest", ")", "\n", "return", "\n", "}", "\n", "vals", ".", "ReturnURL", "=", "appState", ".", "ReturnURL", "\n", "}", "\n", "renderTemplate", "(", "w", ",", "implicitFlowTmpl", ",", "vals", ")", "\n", "}" ]
// handleImplicitFlow completes an implicit OAuth2 flow. The id_token and state will be contained
// in the URL fragment. The javascript client first redirects to the callback URL, supplying the
// state nonce for verification, as well as looking up the return URL. Once verified, the client
// stores the id_token from the fragment as a cookie. Finally it performs the final redirect back to
// the return URL.
[ "handleImplicitFlow", "completes", "an", "implicit", "OAuth2", "flow", ".", "The", "id_token", "and", "state", "will", "be", "contained", "in", "the", "URL", "fragment", ".", "The", "javascript", "client", "first", "redirects", "to", "the", "callback", "URL", "supplying", "the", "state", "nonce", "for", "verification", "as", "well", "as", "looking", "up", "the", "return", "URL", ".", "Once", "verified", "the", "client", "stores", "the", "id_token", "from", "the", "fragment", "as", "a", "cookie", ".", "Finally", "it", "performs", "the", "final", "redirect", "back", "to", "the", "return", "URL", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/oidc/oidc.go#L273-L290
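Since the URL fragment never reaches the server, the interesting part is the page that gets rendered. Below is a toy, stand-alone sketch of the same idea; the template, cookie name, and route are all illustrative and are not Argo CD's actual implicitFlowTmpl.

package main

import (
	"html/template"
	"net/http"
)

// The server only injects the cookie name and return URL; client-side script
// must copy the id_token out of the URL fragment into the cookie itself.
var implicitTmpl = template.Must(template.New("implicit").Parse(`<script>
var token = new URLSearchParams(window.location.hash.substring(1)).get("id_token");
document.cookie = "{{.CookieName}}=" + token + "; path=/";
window.location.href = "{{.ReturnURL}}";
</script>`))

func implicitCallback(w http.ResponseWriter, r *http.Request) {
	vals := struct {
		CookieName string
		ReturnURL  string
	}{CookieName: "example.token", ReturnURL: "/"} // illustrative values
	_ = implicitTmpl.Execute(w, vals)
}

func main() {
	http.HandleFunc("/auth/callback", implicitCallback)
	_ = http.ListenAndServe(":8080", nil)
}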
161,698
argoproj/argo-cd
util/oidc/oidc.go
OfflineAccess
func OfflineAccess(scopes []string) bool {
	if len(scopes) == 0 {
		// scopes_supported is a "RECOMMENDED" discovery claim, not a required
		// one. If missing, assume that the provider follows the spec and has
		// an "offline_access" scope.
		return true
	}
	// See if scopes_supported has the "offline_access" scope.
	for _, scope := range scopes {
		if scope == gooidc.ScopeOfflineAccess {
			return true
		}
	}
	return false
}
go
func OfflineAccess(scopes []string) bool {
	if len(scopes) == 0 {
		// scopes_supported is a "RECOMMENDED" discovery claim, not a required
		// one. If missing, assume that the provider follows the spec and has
		// an "offline_access" scope.
		return true
	}
	// See if scopes_supported has the "offline_access" scope.
	for _, scope := range scopes {
		if scope == gooidc.ScopeOfflineAccess {
			return true
		}
	}
	return false
}
[ "func", "OfflineAccess", "(", "scopes", "[", "]", "string", ")", "bool", "{", "if", "len", "(", "scopes", ")", "==", "0", "{", "// scopes_supported is a \"RECOMMENDED\" discovery claim, not a required", "// one. If missing, assume that the provider follows the spec and has", "// an \"offline_access\" scope.", "return", "true", "\n", "}", "\n", "// See if scopes_supported has the \"offline_access\" scope.", "for", "_", ",", "scope", ":=", "range", "scopes", "{", "if", "scope", "==", "gooidc", ".", "ScopeOfflineAccess", "{", "return", "true", "\n", "}", "\n", "}", "\n", "return", "false", "\n", "}" ]
// OfflineAccess returns whether or not 'offline_access' is a supported scope
[ "OfflineAccess", "returns", "whether", "or", "not", "offline_access", "is", "a", "supported", "scope" ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/oidc/oidc.go#L332-L346
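A quick illustration of the three cases, assuming the util/oidc import path; the scope lists mirror typical scopes_supported claims from an OIDC discovery document.

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/util/oidc" // assumed import path for util/oidc
)

func main() {
	// Claim present but without offline_access: refresh tokens cannot be requested.
	fmt.Println(oidc.OfflineAccess([]string{"openid", "profile", "email"})) // false
	// Claim omitted entirely (it is only RECOMMENDED): assume the provider supports it.
	fmt.Println(oidc.OfflineAccess(nil)) // true
	// Claim present and listing offline_access explicitly.
	fmt.Println(oidc.OfflineAccess([]string{"openid", "offline_access"})) // true
}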
161,699
argoproj/argo-cd
util/config/reader.go
unmarshalObject
func unmarshalObject(data []byte, obj interface{}) error {
	// first, try unmarshaling as JSON
	// Based on technique from Kubectl, which supports both YAML and JSON:
	// https://mlafeldt.github.io/blog/teaching-go-programs-to-love-json-and-yaml/
	// http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/
	// Short version: JSON unmarshaling won't zero out null fields; YAML unmarshaling will.
	// This may have unintended effects or hard-to-catch issues when populating our application object.
	jsonData, err := yaml.YAMLToJSON(data)
	if err != nil {
		return err
	}

	err = json.Unmarshal(jsonData, &obj)
	if err != nil {
		return err
	}

	return err
}
go
func unmarshalObject(data []byte, obj interface{}) error {
	// first, try unmarshaling as JSON
	// Based on technique from Kubectl, which supports both YAML and JSON:
	// https://mlafeldt.github.io/blog/teaching-go-programs-to-love-json-and-yaml/
	// http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/
	// Short version: JSON unmarshaling won't zero out null fields; YAML unmarshaling will.
	// This may have unintended effects or hard-to-catch issues when populating our application object.
	jsonData, err := yaml.YAMLToJSON(data)
	if err != nil {
		return err
	}

	err = json.Unmarshal(jsonData, &obj)
	if err != nil {
		return err
	}

	return err
}
[ "func", "unmarshalObject", "(", "data", "[", "]", "byte", ",", "obj", "interface", "{", "}", ")", "error", "{", "// first, try unmarshaling as JSON", "// Based on technique from Kubectl, which supports both YAML and JSON:", "// https://mlafeldt.github.io/blog/teaching-go-programs-to-love-json-and-yaml/", "// http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/", "// Short version: JSON unmarshaling won't zero out null fields; YAML unmarshaling will.", "// This may have unintended effects or hard-to-catch issues when populating our application object.", "jsonData", ",", "err", ":=", "yaml", ".", "YAMLToJSON", "(", "data", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "err", "=", "json", ".", "Unmarshal", "(", "jsonData", ",", "&", "obj", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "return", "err", "\n", "}" ]
// unmarshalObject tries to convert a YAML or JSON byte array into the provided type.
[ "unmarshalObject", "tries", "to", "convert", "a", "YAML", "or", "JSON", "byte", "array", "into", "the", "provided", "type", "." ]
5c353a12f2c67d8ab0d5d9aa619c9059c5261640
https://github.com/argoproj/argo-cd/blob/5c353a12f2c67d8ab0d5d9aa619c9059c5261640/util/config/reader.go#L12-L30
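The helper is unexported, so the sketch below re-creates the YAML-to-JSON-first technique with the github.com/ghodss/yaml package (assumed to be the package behind yaml.YAMLToJSON in reader.go) and a hypothetical config type.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ghodss/yaml" // assumed to be the yaml package used by reader.go
)

// appConfig is a hypothetical target type; only json struct tags are needed
// because the input is converted to JSON before unmarshaling.
type appConfig struct {
	Name     string `json:"name"`
	Replicas int    `json:"replicas"`
}

// decode converts YAML (or JSON, a YAML subset) to JSON and then unmarshals,
// mirroring unmarshalObject so null handling follows encoding/json semantics.
func decode(data []byte, obj interface{}) error {
	jsonData, err := yaml.YAMLToJSON(data)
	if err != nil {
		return err
	}
	return json.Unmarshal(jsonData, obj)
}

func main() {
	var cfg appConfig
	if err := decode([]byte("name: guestbook\nreplicas: 3\n"), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:guestbook Replicas:3}
}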