id (int32, 0-167k) | repo (stringlengths 5-54) | path (stringlengths 4-155) | func_name (stringlengths 1-118) | original_string (stringlengths 52-85.5k) | language (stringclasses 1 value) | code (stringlengths 52-85.5k) | code_tokens (sequencelengths 21-1.41k) | docstring (stringlengths 6-2.61k) | docstring_tokens (sequencelengths 3-215) | sha (stringlengths 40-40) | url (stringlengths 85-252) |
---|---|---|---|---|---|---|---|---|---|---|---|
159,200 | keybase/client | go/engine/login_provision.go | syncedPGPKey | func (e *loginProvision) syncedPGPKey(m libkb.MetaContext) (ret libkb.GenericKey, err error) {
defer m.Trace("loginProvision#syncedPGPKey", func() error { return err })()
key, err := e.arg.User.SyncedSecretKey(m)
if err != nil {
return nil, err
}
if key == nil {
return nil, libkb.NoSyncedPGPKeyError{}
}
m.Debug("got synced secret key")
// unlock it
// XXX improve this prompt
parg := m.SecretKeyPromptArg(libkb.SecretKeyArg{}, "sign new device")
unlocked, err := key.PromptAndUnlock(m, parg, nil, e.arg.User)
if err != nil {
return nil, err
}
m.Debug("unlocked secret key")
return unlocked, nil
} | go | func (e *loginProvision) syncedPGPKey(m libkb.MetaContext) (ret libkb.GenericKey, err error) {
defer m.Trace("loginProvision#syncedPGPKey", func() error { return err })()
key, err := e.arg.User.SyncedSecretKey(m)
if err != nil {
return nil, err
}
if key == nil {
return nil, libkb.NoSyncedPGPKeyError{}
}
m.Debug("got synced secret key")
// unlock it
// XXX improve this prompt
parg := m.SecretKeyPromptArg(libkb.SecretKeyArg{}, "sign new device")
unlocked, err := key.PromptAndUnlock(m, parg, nil, e.arg.User)
if err != nil {
return nil, err
}
m.Debug("unlocked secret key")
return unlocked, nil
} | [
"func",
"(",
"e",
"*",
"loginProvision",
")",
"syncedPGPKey",
"(",
"m",
"libkb",
".",
"MetaContext",
")",
"(",
"ret",
"libkb",
".",
"GenericKey",
",",
"err",
"error",
")",
"{",
"defer",
"m",
".",
"Trace",
"(",
"\"",
"\"",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n\n",
"key",
",",
"err",
":=",
"e",
".",
"arg",
".",
"User",
".",
"SyncedSecretKey",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"key",
"==",
"nil",
"{",
"return",
"nil",
",",
"libkb",
".",
"NoSyncedPGPKeyError",
"{",
"}",
"\n",
"}",
"\n\n",
"m",
".",
"Debug",
"(",
"\"",
"\"",
")",
"\n\n",
"// unlock it",
"// XXX improve this prompt",
"parg",
":=",
"m",
".",
"SecretKeyPromptArg",
"(",
"libkb",
".",
"SecretKeyArg",
"{",
"}",
",",
"\"",
"\"",
")",
"\n",
"unlocked",
",",
"err",
":=",
"key",
".",
"PromptAndUnlock",
"(",
"m",
",",
"parg",
",",
"nil",
",",
"e",
".",
"arg",
".",
"User",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"m",
".",
"Debug",
"(",
"\"",
"\"",
")",
"\n",
"return",
"unlocked",
",",
"nil",
"\n",
"}"
] | // syncedPGPKey looks for a synced pgp key for e.user. If found,
// it unlocks it. | [
"syncedPGPKey",
"looks",
"for",
"a",
"synced",
"pgp",
"key",
"for",
"e",
".",
"user",
".",
"If",
"found",
"it",
"unlocks",
"it",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/login_provision.go#L605-L628 |
159,201 | keybase/client | go/engine/login_provision.go | gpgPrivateIndex | func (e *loginProvision) gpgPrivateIndex(m libkb.MetaContext) (*libkb.GpgKeyIndex, error) {
cli, err := e.gpgClient(m)
if err != nil {
return nil, err
}
// get an index of all the secret keys
index, _, err := cli.Index(true, "")
if err != nil {
return nil, err
}
return index, nil
} | go | func (e *loginProvision) gpgPrivateIndex(m libkb.MetaContext) (*libkb.GpgKeyIndex, error) {
cli, err := e.gpgClient(m)
if err != nil {
return nil, err
}
// get an index of all the secret keys
index, _, err := cli.Index(true, "")
if err != nil {
return nil, err
}
return index, nil
} | [
"func",
"(",
"e",
"*",
"loginProvision",
")",
"gpgPrivateIndex",
"(",
"m",
"libkb",
".",
"MetaContext",
")",
"(",
"*",
"libkb",
".",
"GpgKeyIndex",
",",
"error",
")",
"{",
"cli",
",",
"err",
":=",
"e",
".",
"gpgClient",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"// get an index of all the secret keys",
"index",
",",
"_",
",",
"err",
":=",
"cli",
".",
"Index",
"(",
"true",
",",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"index",
",",
"nil",
"\n",
"}"
] | // gpgPrivateIndex returns an index of the private gpg keys. | [
"gpgPrivateIndex",
"returns",
"an",
"index",
"of",
"the",
"private",
"gpg",
"keys",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/login_provision.go#L631-L644 |
159,202 | keybase/client | go/engine/login_provision.go | gpgClient | func (e *loginProvision) gpgClient(m libkb.MetaContext) (gpgInterface, error) {
if e.arg.DeviceType == libkb.DeviceTypeMobile {
return nil, libkb.GPGUnavailableError{}
}
if e.gpgCli != nil {
return e.gpgCli, nil
}
gpg := m.G().GetGpgClient()
ok, err := gpg.CanExec()
if err != nil {
return nil, err
}
if !ok {
return nil, libkb.GPGUnavailableError{}
}
e.gpgCli = gpg
return e.gpgCli, nil
} | go | func (e *loginProvision) gpgClient(m libkb.MetaContext) (gpgInterface, error) {
if e.arg.DeviceType == libkb.DeviceTypeMobile {
return nil, libkb.GPGUnavailableError{}
}
if e.gpgCli != nil {
return e.gpgCli, nil
}
gpg := m.G().GetGpgClient()
ok, err := gpg.CanExec()
if err != nil {
return nil, err
}
if !ok {
return nil, libkb.GPGUnavailableError{}
}
e.gpgCli = gpg
return e.gpgCli, nil
} | [
"func",
"(",
"e",
"*",
"loginProvision",
")",
"gpgClient",
"(",
"m",
"libkb",
".",
"MetaContext",
")",
"(",
"gpgInterface",
",",
"error",
")",
"{",
"if",
"e",
".",
"arg",
".",
"DeviceType",
"==",
"libkb",
".",
"DeviceTypeMobile",
"{",
"return",
"nil",
",",
"libkb",
".",
"GPGUnavailableError",
"{",
"}",
"\n",
"}",
"\n",
"if",
"e",
".",
"gpgCli",
"!=",
"nil",
"{",
"return",
"e",
".",
"gpgCli",
",",
"nil",
"\n",
"}",
"\n\n",
"gpg",
":=",
"m",
".",
"G",
"(",
")",
".",
"GetGpgClient",
"(",
")",
"\n",
"ok",
",",
"err",
":=",
"gpg",
".",
"CanExec",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"libkb",
".",
"GPGUnavailableError",
"{",
"}",
"\n",
"}",
"\n",
"e",
".",
"gpgCli",
"=",
"gpg",
"\n",
"return",
"e",
".",
"gpgCli",
",",
"nil",
"\n",
"}"
] | // gpgClient returns a gpg client. | [
"gpgClient",
"returns",
"a",
"gpg",
"client",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/login_provision.go#L647-L665 |
159,203 | keybase/client | go/engine/login_provision.go | checkArg | func (e *loginProvision) checkArg() error {
// check we have a good device type:
if e.arg.DeviceType != libkb.DeviceTypeDesktop && e.arg.DeviceType != libkb.DeviceTypeMobile {
return libkb.InvalidArgumentError{Msg: fmt.Sprintf("device type must be %q or %q, not %q", libkb.DeviceTypeDesktop, libkb.DeviceTypeMobile, e.arg.DeviceType)}
}
if e.arg.User == nil {
return libkb.InvalidArgumentError{Msg: "User cannot be nil"}
}
return nil
} | go | func (e *loginProvision) checkArg() error {
// check we have a good device type:
if e.arg.DeviceType != libkb.DeviceTypeDesktop && e.arg.DeviceType != libkb.DeviceTypeMobile {
return libkb.InvalidArgumentError{Msg: fmt.Sprintf("device type must be %q or %q, not %q", libkb.DeviceTypeDesktop, libkb.DeviceTypeMobile, e.arg.DeviceType)}
}
if e.arg.User == nil {
return libkb.InvalidArgumentError{Msg: "User cannot be nil"}
}
return nil
} | [
"func",
"(",
"e",
"*",
"loginProvision",
")",
"checkArg",
"(",
")",
"error",
"{",
"// check we have a good device type:",
"if",
"e",
".",
"arg",
".",
"DeviceType",
"!=",
"libkb",
".",
"DeviceTypeDesktop",
"&&",
"e",
".",
"arg",
".",
"DeviceType",
"!=",
"libkb",
".",
"DeviceTypeMobile",
"{",
"return",
"libkb",
".",
"InvalidArgumentError",
"{",
"Msg",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"libkb",
".",
"DeviceTypeDesktop",
",",
"libkb",
".",
"DeviceTypeMobile",
",",
"e",
".",
"arg",
".",
"DeviceType",
")",
"}",
"\n",
"}",
"\n\n",
"if",
"e",
".",
"arg",
".",
"User",
"==",
"nil",
"{",
"return",
"libkb",
".",
"InvalidArgumentError",
"{",
"Msg",
":",
"\"",
"\"",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // checkArg checks loginProvisionArg for sane arguments. | [
"checkArg",
"checks",
"loginProvisionArg",
"for",
"sane",
"arguments",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/login_provision.go#L668-L679 |
159,204 | keybase/client | go/engine/login_provision.go | getPaperKey | func getPaperKey(m libkb.MetaContext, lastErr error, expectedPrefix *string) (keys *libkb.DeviceWithKeys, prefix string, err error) {
passphrase, err := libkb.GetPaperKeyPassphrase(m, m.UIs().SecretUI, "", lastErr, expectedPrefix)
if err != nil {
return nil, "", err
}
return getPaperKeyFromString(m, passphrase)
} | go | func getPaperKey(m libkb.MetaContext, lastErr error, expectedPrefix *string) (keys *libkb.DeviceWithKeys, prefix string, err error) {
passphrase, err := libkb.GetPaperKeyPassphrase(m, m.UIs().SecretUI, "", lastErr, expectedPrefix)
if err != nil {
return nil, "", err
}
return getPaperKeyFromString(m, passphrase)
} | [
"func",
"getPaperKey",
"(",
"m",
"libkb",
".",
"MetaContext",
",",
"lastErr",
"error",
",",
"expectedPrefix",
"*",
"string",
")",
"(",
"keys",
"*",
"libkb",
".",
"DeviceWithKeys",
",",
"prefix",
"string",
",",
"err",
"error",
")",
"{",
"passphrase",
",",
"err",
":=",
"libkb",
".",
"GetPaperKeyPassphrase",
"(",
"m",
",",
"m",
".",
"UIs",
"(",
")",
".",
"SecretUI",
",",
"\"",
"\"",
",",
"lastErr",
",",
"expectedPrefix",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"\"",
"\"",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"getPaperKeyFromString",
"(",
"m",
",",
"passphrase",
")",
"\n",
"}"
] | // This is used by SaltpackDecrypt as well. | [
"This",
"is",
"used",
"by",
"SaltpackDecrypt",
"as",
"well",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/login_provision.go#L1133-L1140 |
159,205 | keybase/client | go/libkb/login_session.go | NewLoginSessionWithSalt | func NewLoginSessionWithSalt(g *GlobalContext, emailOrUsername string, salt []byte) *LoginSession {
ls := NewLoginSession(g, emailOrUsername)
ls.salt = salt
// XXX are these right? is this just so the salt can be retrieved?
ls.loaded = true
ls.cleared = true
return ls
} | go | func NewLoginSessionWithSalt(g *GlobalContext, emailOrUsername string, salt []byte) *LoginSession {
ls := NewLoginSession(g, emailOrUsername)
ls.salt = salt
// XXX are these right? is this just so the salt can be retrieved?
ls.loaded = true
ls.cleared = true
return ls
} | [
"func",
"NewLoginSessionWithSalt",
"(",
"g",
"*",
"GlobalContext",
",",
"emailOrUsername",
"string",
",",
"salt",
"[",
"]",
"byte",
")",
"*",
"LoginSession",
"{",
"ls",
":=",
"NewLoginSession",
"(",
"g",
",",
"emailOrUsername",
")",
"\n",
"ls",
".",
"salt",
"=",
"salt",
"\n",
"// XXX are these right? is this just so the salt can be retrieved?",
"ls",
".",
"loaded",
"=",
"true",
"\n",
"ls",
".",
"cleared",
"=",
"true",
"\n",
"return",
"ls",
"\n",
"}"
] | // Upon signup, a login session is created with a generated salt. | [
"Upon",
"signup",
"a",
"login",
"session",
"is",
"created",
"with",
"a",
"generated",
"salt",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/login_session.go#L40-L47 |
159,206 | keybase/client | go/libkb/login_session.go | Clear | func (s *LoginSession) Clear() error {
if s == nil {
return nil
}
if !s.loaded {
return ErrLoginSessionNotLoaded
}
s.loginSession = nil
s.loginSessionB64 = ""
s.cleared = true
return nil
} | go | func (s *LoginSession) Clear() error {
if s == nil {
return nil
}
if !s.loaded {
return ErrLoginSessionNotLoaded
}
s.loginSession = nil
s.loginSessionB64 = ""
s.cleared = true
return nil
} | [
"func",
"(",
"s",
"*",
"LoginSession",
")",
"Clear",
"(",
")",
"error",
"{",
"if",
"s",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"!",
"s",
".",
"loaded",
"{",
"return",
"ErrLoginSessionNotLoaded",
"\n",
"}",
"\n",
"s",
".",
"loginSession",
"=",
"nil",
"\n",
"s",
".",
"loginSessionB64",
"=",
"\"",
"\"",
"\n",
"s",
".",
"cleared",
"=",
"true",
"\n",
"return",
"nil",
"\n",
"}"
] | // Clear removes the loginSession value from s. It does not
// clear the salt. Unclear how this is useful. | [
"Clear",
"removes",
"the",
"loginSession",
"value",
"from",
"s",
".",
"It",
"does",
"not",
"clear",
"the",
"salt",
".",
"Unclear",
"how",
"this",
"is",
"useful",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/login_session.go#L113-L124 |
159,207 | keybase/client | go/stellar/stellargregor/stellargregor.go | autoClaim | func (h *Handler) autoClaim(mctx libkb.MetaContext, cli gregor1.IncomingInterface, category string, item gregor.Item) {
mctx.Debug("%v: %v received", h.Name(), category)
mctx.G().GetStellar().KickAutoClaimRunner(mctx, item.Metadata().MsgID())
} | go | func (h *Handler) autoClaim(mctx libkb.MetaContext, cli gregor1.IncomingInterface, category string, item gregor.Item) {
mctx.Debug("%v: %v received", h.Name(), category)
mctx.G().GetStellar().KickAutoClaimRunner(mctx, item.Metadata().MsgID())
} | [
"func",
"(",
"h",
"*",
"Handler",
")",
"autoClaim",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"cli",
"gregor1",
".",
"IncomingInterface",
",",
"category",
"string",
",",
"item",
"gregor",
".",
"Item",
")",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"h",
".",
"Name",
"(",
")",
",",
"category",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"GetStellar",
"(",
")",
".",
"KickAutoClaimRunner",
"(",
"mctx",
",",
"item",
".",
"Metadata",
"(",
")",
".",
"MsgID",
"(",
")",
")",
"\n",
"}"
] | // The server is telling the client to claim relay payments. | [
"The",
"server",
"is",
"telling",
"the",
"client",
"to",
"claim",
"relay",
"payments",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/stellargregor/stellargregor.go#L69-L72 |
159,208 | keybase/client | go/stellar/stellargregor/stellargregor.go | paymentStatus | func (h *Handler) paymentStatus(mctx libkb.MetaContext, cli gregor1.IncomingInterface, category string, item gregor.Item) {
mctx.Debug("%v: %v received IBM, ignoring it", h.Name(), category)
// We will locally dismiss for now so that each client only plays them once:
if err := h.G().GregorState.LocalDismissItem(mctx.Ctx(), item.Metadata().MsgID()); err != nil {
h.G().Log.CDebugf(mctx.Ctx(), "failed to local dismiss payment_status: %s", err)
}
} | go | func (h *Handler) paymentStatus(mctx libkb.MetaContext, cli gregor1.IncomingInterface, category string, item gregor.Item) {
mctx.Debug("%v: %v received IBM, ignoring it", h.Name(), category)
// We will locally dismiss for now so that each client only plays them once:
if err := h.G().GregorState.LocalDismissItem(mctx.Ctx(), item.Metadata().MsgID()); err != nil {
h.G().Log.CDebugf(mctx.Ctx(), "failed to local dismiss payment_status: %s", err)
}
} | [
"func",
"(",
"h",
"*",
"Handler",
")",
"paymentStatus",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"cli",
"gregor1",
".",
"IncomingInterface",
",",
"category",
"string",
",",
"item",
"gregor",
".",
"Item",
")",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"h",
".",
"Name",
"(",
")",
",",
"category",
")",
"\n\n",
"// We will locally dismiss for now so that each client only plays them once:",
"if",
"err",
":=",
"h",
".",
"G",
"(",
")",
".",
"GregorState",
".",
"LocalDismissItem",
"(",
"mctx",
".",
"Ctx",
"(",
")",
",",
"item",
".",
"Metadata",
"(",
")",
".",
"MsgID",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"h",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"mctx",
".",
"Ctx",
"(",
")",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}"
] | // paymentStatus is an old IBM and shouldn't happen anymore | [
"paymentStatus",
"is",
"an",
"old",
"IBM",
"and",
"shouldn",
"t",
"happen",
"anymore"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/stellargregor/stellargregor.go#L115-L122 |
159,209 | keybase/client | go/kbfs/tlfhandle/const_id_getter.go | GetIDForHandle | func (c ConstIDGetter) GetIDForHandle(_ context.Context, _ *Handle) (
tlf.ID, error) {
return c.ID, nil
} | go | func (c ConstIDGetter) GetIDForHandle(_ context.Context, _ *Handle) (
tlf.ID, error) {
return c.ID, nil
} | [
"func",
"(",
"c",
"ConstIDGetter",
")",
"GetIDForHandle",
"(",
"_",
"context",
".",
"Context",
",",
"_",
"*",
"Handle",
")",
"(",
"tlf",
".",
"ID",
",",
"error",
")",
"{",
"return",
"c",
".",
"ID",
",",
"nil",
"\n",
"}"
] | // GetIDForHandle implements the IDGetter interface for ConstIDGetter. | [
"GetIDForHandle",
"implements",
"the",
"IDGetter",
"interface",
"for",
"ConstIDGetter",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/tlfhandle/const_id_getter.go#L21-L24 |
159,210 | keybase/client | go/engine/pgp_verify.go | NewPGPVerify | func NewPGPVerify(g *libkb.GlobalContext, arg *PGPVerifyArg) *PGPVerify {
return &PGPVerify{
arg: arg,
Contextified: libkb.NewContextified(g),
}
} | go | func NewPGPVerify(g *libkb.GlobalContext, arg *PGPVerifyArg) *PGPVerify {
return &PGPVerify{
arg: arg,
Contextified: libkb.NewContextified(g),
}
} | [
"func",
"NewPGPVerify",
"(",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"arg",
"*",
"PGPVerifyArg",
")",
"*",
"PGPVerify",
"{",
"return",
"&",
"PGPVerify",
"{",
"arg",
":",
"arg",
",",
"Contextified",
":",
"libkb",
".",
"NewContextified",
"(",
"g",
")",
",",
"}",
"\n",
"}"
] | // NewPGPVerify creates a PGPVerify engine. | [
"NewPGPVerify",
"creates",
"a",
"PGPVerify",
"engine",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/pgp_verify.go#L37-L42 |
159,211 | keybase/client | go/engine/pgp_verify.go | runAttached | func (e *PGPVerify) runAttached(m libkb.MetaContext) error {
arg := &PGPDecryptArg{
Source: e.source,
Sink: libkb.NopWriteCloser{W: ioutil.Discard},
AssertSigned: true,
SignedBy: e.arg.SignedBy,
}
eng := NewPGPDecrypt(m.G(), arg)
if err := RunEngine2(m, eng); err != nil {
return err
}
e.signStatus = eng.SignatureStatus()
e.signer = eng.Signer()
return nil
} | go | func (e *PGPVerify) runAttached(m libkb.MetaContext) error {
arg := &PGPDecryptArg{
Source: e.source,
Sink: libkb.NopWriteCloser{W: ioutil.Discard},
AssertSigned: true,
SignedBy: e.arg.SignedBy,
}
eng := NewPGPDecrypt(m.G(), arg)
if err := RunEngine2(m, eng); err != nil {
return err
}
e.signStatus = eng.SignatureStatus()
e.signer = eng.Signer()
return nil
} | [
"func",
"(",
"e",
"*",
"PGPVerify",
")",
"runAttached",
"(",
"m",
"libkb",
".",
"MetaContext",
")",
"error",
"{",
"arg",
":=",
"&",
"PGPDecryptArg",
"{",
"Source",
":",
"e",
".",
"source",
",",
"Sink",
":",
"libkb",
".",
"NopWriteCloser",
"{",
"W",
":",
"ioutil",
".",
"Discard",
"}",
",",
"AssertSigned",
":",
"true",
",",
"SignedBy",
":",
"e",
".",
"arg",
".",
"SignedBy",
",",
"}",
"\n",
"eng",
":=",
"NewPGPDecrypt",
"(",
"m",
".",
"G",
"(",
")",
",",
"arg",
")",
"\n",
"if",
"err",
":=",
"RunEngine2",
"(",
"m",
",",
"eng",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"e",
".",
"signStatus",
"=",
"eng",
".",
"SignatureStatus",
"(",
")",
"\n",
"e",
".",
"signer",
"=",
"eng",
".",
"Signer",
"(",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // runAttached verifies an attached signature | [
"runAttached",
"verifies",
"an",
"attached",
"signature"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/pgp_verify.go#L103-L118 |
159,212 | keybase/client | go/engine/pgp_verify.go | runDetached | func (e *PGPVerify) runDetached(m libkb.MetaContext) error {
sk, err := NewScanKeys(m)
if err != nil {
return err
}
checkfn := openpgp.CheckDetachedSignature
if libkb.IsArmored(e.arg.Signature) {
checkfn = openpgp.CheckArmoredDetachedSignature
}
signer, err := checkfn(sk, e.source, bytes.NewReader(e.arg.Signature))
if err != nil {
return err
}
e.signer = sk.KeyOwnerByEntity(signer)
e.signStatus = &libkb.SignatureStatus{IsSigned: true}
if signer != nil {
if len(signer.UnverifiedRevocations) > 0 {
return libkb.BadSigError{
E: fmt.Sprintf("Key %x belonging to %q has been revoked by its designated revoker.", signer.PrimaryKey.KeyId, e.signer.GetName()),
}
}
e.signStatus.Verified = true
e.signStatus.Entity = signer
if err := e.checkSignedBy(m); err != nil {
return err
}
var r io.Reader = bytes.NewReader(e.arg.Signature)
if libkb.IsArmored(e.arg.Signature) {
block, err := armor.Decode(r)
if err != nil {
return err
}
r = block.Body
}
p, err := packet.Read(r)
if err != nil {
return err
}
if val, ok := p.(*packet.Signature); ok {
e.signStatus.SignatureTime = val.CreationTime
}
fingerprint := libkb.PGPFingerprint(signer.PrimaryKey.Fingerprint)
OutputSignatureSuccess(m, fingerprint, e.signer, e.signStatus.SignatureTime)
}
return nil
} | go | func (e *PGPVerify) runDetached(m libkb.MetaContext) error {
sk, err := NewScanKeys(m)
if err != nil {
return err
}
checkfn := openpgp.CheckDetachedSignature
if libkb.IsArmored(e.arg.Signature) {
checkfn = openpgp.CheckArmoredDetachedSignature
}
signer, err := checkfn(sk, e.source, bytes.NewReader(e.arg.Signature))
if err != nil {
return err
}
e.signer = sk.KeyOwnerByEntity(signer)
e.signStatus = &libkb.SignatureStatus{IsSigned: true}
if signer != nil {
if len(signer.UnverifiedRevocations) > 0 {
return libkb.BadSigError{
E: fmt.Sprintf("Key %x belonging to %q has been revoked by its designated revoker.", signer.PrimaryKey.KeyId, e.signer.GetName()),
}
}
e.signStatus.Verified = true
e.signStatus.Entity = signer
if err := e.checkSignedBy(m); err != nil {
return err
}
var r io.Reader = bytes.NewReader(e.arg.Signature)
if libkb.IsArmored(e.arg.Signature) {
block, err := armor.Decode(r)
if err != nil {
return err
}
r = block.Body
}
p, err := packet.Read(r)
if err != nil {
return err
}
if val, ok := p.(*packet.Signature); ok {
e.signStatus.SignatureTime = val.CreationTime
}
fingerprint := libkb.PGPFingerprint(signer.PrimaryKey.Fingerprint)
OutputSignatureSuccess(m, fingerprint, e.signer, e.signStatus.SignatureTime)
}
return nil
} | [
"func",
"(",
"e",
"*",
"PGPVerify",
")",
"runDetached",
"(",
"m",
"libkb",
".",
"MetaContext",
")",
"error",
"{",
"sk",
",",
"err",
":=",
"NewScanKeys",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"checkfn",
":=",
"openpgp",
".",
"CheckDetachedSignature",
"\n",
"if",
"libkb",
".",
"IsArmored",
"(",
"e",
".",
"arg",
".",
"Signature",
")",
"{",
"checkfn",
"=",
"openpgp",
".",
"CheckArmoredDetachedSignature",
"\n",
"}",
"\n",
"signer",
",",
"err",
":=",
"checkfn",
"(",
"sk",
",",
"e",
".",
"source",
",",
"bytes",
".",
"NewReader",
"(",
"e",
".",
"arg",
".",
"Signature",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"e",
".",
"signer",
"=",
"sk",
".",
"KeyOwnerByEntity",
"(",
"signer",
")",
"\n",
"e",
".",
"signStatus",
"=",
"&",
"libkb",
".",
"SignatureStatus",
"{",
"IsSigned",
":",
"true",
"}",
"\n\n",
"if",
"signer",
"!=",
"nil",
"{",
"if",
"len",
"(",
"signer",
".",
"UnverifiedRevocations",
")",
">",
"0",
"{",
"return",
"libkb",
".",
"BadSigError",
"{",
"E",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"signer",
".",
"PrimaryKey",
".",
"KeyId",
",",
"e",
".",
"signer",
".",
"GetName",
"(",
")",
")",
",",
"}",
"\n",
"}",
"\n\n",
"e",
".",
"signStatus",
".",
"Verified",
"=",
"true",
"\n",
"e",
".",
"signStatus",
".",
"Entity",
"=",
"signer",
"\n",
"if",
"err",
":=",
"e",
".",
"checkSignedBy",
"(",
"m",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"var",
"r",
"io",
".",
"Reader",
"=",
"bytes",
".",
"NewReader",
"(",
"e",
".",
"arg",
".",
"Signature",
")",
"\n",
"if",
"libkb",
".",
"IsArmored",
"(",
"e",
".",
"arg",
".",
"Signature",
")",
"{",
"block",
",",
"err",
":=",
"armor",
".",
"Decode",
"(",
"r",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"r",
"=",
"block",
".",
"Body",
"\n",
"}",
"\n\n",
"p",
",",
"err",
":=",
"packet",
".",
"Read",
"(",
"r",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"val",
",",
"ok",
":=",
"p",
".",
"(",
"*",
"packet",
".",
"Signature",
")",
";",
"ok",
"{",
"e",
".",
"signStatus",
".",
"SignatureTime",
"=",
"val",
".",
"CreationTime",
"\n",
"}",
"\n\n",
"fingerprint",
":=",
"libkb",
".",
"PGPFingerprint",
"(",
"signer",
".",
"PrimaryKey",
".",
"Fingerprint",
")",
"\n",
"OutputSignatureSuccess",
"(",
"m",
",",
"fingerprint",
",",
"e",
".",
"signer",
",",
"e",
".",
"signStatus",
".",
"SignatureTime",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // runDetached verifies a detached signature | [
"runDetached",
"verifies",
"a",
"detached",
"signature"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/pgp_verify.go#L121-L174 |
159,213 | keybase/client | go/engine/pgp_verify.go | runClearsign | func (e *PGPVerify) runClearsign(m libkb.MetaContext) error {
// clearsign decode only works with the whole data slice, not a reader
// so have to read it all here:
msg, err := ioutil.ReadAll(e.source)
if err != nil {
return err
}
b, _ := clearsign.Decode(msg)
if b == nil {
return errors.New("Unable to decode clearsigned message")
}
sigBody, err := ioutil.ReadAll(b.ArmoredSignature.Body)
if err != nil {
return err
}
sk, err := NewScanKeys(m)
if err != nil {
return err
}
signer, err := openpgp.CheckDetachedSignature(sk, bytes.NewReader(b.Bytes), bytes.NewReader(sigBody))
if err != nil {
return fmt.Errorf("Check sig error: %s", err)
}
e.signer = sk.KeyOwnerByEntity(signer)
e.signStatus = &libkb.SignatureStatus{IsSigned: true}
if signer != nil {
if len(signer.UnverifiedRevocations) > 0 {
return libkb.BadSigError{
E: fmt.Sprintf("Key %x belonging to %q has been revoked by its designated revoker.", signer.PrimaryKey.KeyId, e.signer.GetName()),
}
}
e.signStatus.Verified = true
e.signStatus.Entity = signer
if err := e.checkSignedBy(m); err != nil {
return err
}
p, err := packet.Read(bytes.NewReader(sigBody))
if err != nil {
return err
}
if val, ok := p.(*packet.Signature); ok {
e.signStatus.SignatureTime = val.CreationTime
}
fingerprint := libkb.PGPFingerprint(signer.PrimaryKey.Fingerprint)
OutputSignatureSuccess(m, fingerprint, e.signer, e.signStatus.SignatureTime)
}
return nil
} | go | func (e *PGPVerify) runClearsign(m libkb.MetaContext) error {
// clearsign decode only works with the whole data slice, not a reader
// so have to read it all here:
msg, err := ioutil.ReadAll(e.source)
if err != nil {
return err
}
b, _ := clearsign.Decode(msg)
if b == nil {
return errors.New("Unable to decode clearsigned message")
}
sigBody, err := ioutil.ReadAll(b.ArmoredSignature.Body)
if err != nil {
return err
}
sk, err := NewScanKeys(m)
if err != nil {
return err
}
signer, err := openpgp.CheckDetachedSignature(sk, bytes.NewReader(b.Bytes), bytes.NewReader(sigBody))
if err != nil {
return fmt.Errorf("Check sig error: %s", err)
}
e.signer = sk.KeyOwnerByEntity(signer)
e.signStatus = &libkb.SignatureStatus{IsSigned: true}
if signer != nil {
if len(signer.UnverifiedRevocations) > 0 {
return libkb.BadSigError{
E: fmt.Sprintf("Key %x belonging to %q has been revoked by its designated revoker.", signer.PrimaryKey.KeyId, e.signer.GetName()),
}
}
e.signStatus.Verified = true
e.signStatus.Entity = signer
if err := e.checkSignedBy(m); err != nil {
return err
}
p, err := packet.Read(bytes.NewReader(sigBody))
if err != nil {
return err
}
if val, ok := p.(*packet.Signature); ok {
e.signStatus.SignatureTime = val.CreationTime
}
fingerprint := libkb.PGPFingerprint(signer.PrimaryKey.Fingerprint)
OutputSignatureSuccess(m, fingerprint, e.signer, e.signStatus.SignatureTime)
}
return nil
} | [
"func",
"(",
"e",
"*",
"PGPVerify",
")",
"runClearsign",
"(",
"m",
"libkb",
".",
"MetaContext",
")",
"error",
"{",
"// clearsign decode only works with the whole data slice, not a reader",
"// so have to read it all here:",
"msg",
",",
"err",
":=",
"ioutil",
".",
"ReadAll",
"(",
"e",
".",
"source",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"b",
",",
"_",
":=",
"clearsign",
".",
"Decode",
"(",
"msg",
")",
"\n",
"if",
"b",
"==",
"nil",
"{",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"sigBody",
",",
"err",
":=",
"ioutil",
".",
"ReadAll",
"(",
"b",
".",
"ArmoredSignature",
".",
"Body",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"sk",
",",
"err",
":=",
"NewScanKeys",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"signer",
",",
"err",
":=",
"openpgp",
".",
"CheckDetachedSignature",
"(",
"sk",
",",
"bytes",
".",
"NewReader",
"(",
"b",
".",
"Bytes",
")",
",",
"bytes",
".",
"NewReader",
"(",
"sigBody",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"e",
".",
"signer",
"=",
"sk",
".",
"KeyOwnerByEntity",
"(",
"signer",
")",
"\n",
"e",
".",
"signStatus",
"=",
"&",
"libkb",
".",
"SignatureStatus",
"{",
"IsSigned",
":",
"true",
"}",
"\n\n",
"if",
"signer",
"!=",
"nil",
"{",
"if",
"len",
"(",
"signer",
".",
"UnverifiedRevocations",
")",
">",
"0",
"{",
"return",
"libkb",
".",
"BadSigError",
"{",
"E",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"signer",
".",
"PrimaryKey",
".",
"KeyId",
",",
"e",
".",
"signer",
".",
"GetName",
"(",
")",
")",
",",
"}",
"\n",
"}",
"\n\n",
"e",
".",
"signStatus",
".",
"Verified",
"=",
"true",
"\n",
"e",
".",
"signStatus",
".",
"Entity",
"=",
"signer",
"\n",
"if",
"err",
":=",
"e",
".",
"checkSignedBy",
"(",
"m",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"p",
",",
"err",
":=",
"packet",
".",
"Read",
"(",
"bytes",
".",
"NewReader",
"(",
"sigBody",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"val",
",",
"ok",
":=",
"p",
".",
"(",
"*",
"packet",
".",
"Signature",
")",
";",
"ok",
"{",
"e",
".",
"signStatus",
".",
"SignatureTime",
"=",
"val",
".",
"CreationTime",
"\n",
"}",
"\n\n",
"fingerprint",
":=",
"libkb",
".",
"PGPFingerprint",
"(",
"signer",
".",
"PrimaryKey",
".",
"Fingerprint",
")",
"\n",
"OutputSignatureSuccess",
"(",
"m",
",",
"fingerprint",
",",
"e",
".",
"signer",
",",
"e",
".",
"signStatus",
".",
"SignatureTime",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // runClearsign verifies a clearsign signature | [
"runClearsign",
"verifies",
"a",
"clearsign",
"signature"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/pgp_verify.go#L177-L234 |
159,214 | keybase/client | go/client/chat_svc_handler.go | ListV1 | func (c *chatServiceHandler) ListV1(ctx context.Context, opts listOptionsV1) Reply {
var cl ChatList
var rlimits []chat1.RateLimit
var pagination *chat1.Pagination
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
topicType, err := TopicTypeFromStrDefault(opts.TopicType)
if err != nil {
return c.errReply(err)
}
if opts.SkipUnbox {
res, err := client.GetInboxUILocal(ctx, chat1.GetInboxUILocalArg{
Query: &chat1.GetInboxLocalQuery{
Status: utils.VisibleChatConversationStatuses(),
TopicType: &topicType,
UnreadOnly: opts.UnreadOnly,
OneChatTypePerTLF: new(bool),
},
Pagination: opts.Pagination,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
})
if err != nil {
return c.errReply(err)
}
pagination = res.Pagination
rlimits = utils.AggRateLimits(res.RateLimits)
if opts.FailOffline && res.Offline {
return c.errReply(chat.OfflineError{})
}
cl = ChatList{
Offline: res.Offline,
IdentifyFailures: res.IdentifyFailures,
}
for _, conv := range res.ConversationsRemote {
cl.Conversations = append(cl.Conversations, c.exportRemoteConv(ctx, conv))
}
} else {
res, err := client.GetInboxAndUnboxLocal(ctx, chat1.GetInboxAndUnboxLocalArg{
Query: &chat1.GetInboxLocalQuery{
Status: utils.VisibleChatConversationStatuses(),
TopicType: &topicType,
UnreadOnly: opts.UnreadOnly,
OneChatTypePerTLF: new(bool),
},
Pagination: opts.Pagination,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
})
if err != nil {
return c.errReply(err)
}
pagination = res.Pagination
rlimits = utils.AggRateLimits(res.RateLimits)
if opts.FailOffline && res.Offline {
return c.errReply(chat.OfflineError{})
}
cl = ChatList{
Offline: res.Offline,
IdentifyFailures: res.IdentifyFailures,
}
for _, conv := range res.Conversations {
if !opts.ShowErrors && conv.Error != nil {
continue
}
cl.Conversations = append(cl.Conversations, c.exportLocalConv(ctx, conv))
}
}
cl.Pagination = pagination
cl.RateLimits.RateLimits = c.aggRateLimits(rlimits)
return Reply{Result: cl}
} | go | func (c *chatServiceHandler) ListV1(ctx context.Context, opts listOptionsV1) Reply {
var cl ChatList
var rlimits []chat1.RateLimit
var pagination *chat1.Pagination
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
topicType, err := TopicTypeFromStrDefault(opts.TopicType)
if err != nil {
return c.errReply(err)
}
if opts.SkipUnbox {
res, err := client.GetInboxUILocal(ctx, chat1.GetInboxUILocalArg{
Query: &chat1.GetInboxLocalQuery{
Status: utils.VisibleChatConversationStatuses(),
TopicType: &topicType,
UnreadOnly: opts.UnreadOnly,
OneChatTypePerTLF: new(bool),
},
Pagination: opts.Pagination,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
})
if err != nil {
return c.errReply(err)
}
pagination = res.Pagination
rlimits = utils.AggRateLimits(res.RateLimits)
if opts.FailOffline && res.Offline {
return c.errReply(chat.OfflineError{})
}
cl = ChatList{
Offline: res.Offline,
IdentifyFailures: res.IdentifyFailures,
}
for _, conv := range res.ConversationsRemote {
cl.Conversations = append(cl.Conversations, c.exportRemoteConv(ctx, conv))
}
} else {
res, err := client.GetInboxAndUnboxLocal(ctx, chat1.GetInboxAndUnboxLocalArg{
Query: &chat1.GetInboxLocalQuery{
Status: utils.VisibleChatConversationStatuses(),
TopicType: &topicType,
UnreadOnly: opts.UnreadOnly,
OneChatTypePerTLF: new(bool),
},
Pagination: opts.Pagination,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
})
if err != nil {
return c.errReply(err)
}
pagination = res.Pagination
rlimits = utils.AggRateLimits(res.RateLimits)
if opts.FailOffline && res.Offline {
return c.errReply(chat.OfflineError{})
}
cl = ChatList{
Offline: res.Offline,
IdentifyFailures: res.IdentifyFailures,
}
for _, conv := range res.Conversations {
if !opts.ShowErrors && conv.Error != nil {
continue
}
cl.Conversations = append(cl.Conversations, c.exportLocalConv(ctx, conv))
}
}
cl.Pagination = pagination
cl.RateLimits.RateLimits = c.aggRateLimits(rlimits)
return Reply{Result: cl}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"ListV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"listOptionsV1",
")",
"Reply",
"{",
"var",
"cl",
"ChatList",
"\n",
"var",
"rlimits",
"[",
"]",
"chat1",
".",
"RateLimit",
"\n",
"var",
"pagination",
"*",
"chat1",
".",
"Pagination",
"\n",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"topicType",
",",
"err",
":=",
"TopicTypeFromStrDefault",
"(",
"opts",
".",
"TopicType",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"opts",
".",
"SkipUnbox",
"{",
"res",
",",
"err",
":=",
"client",
".",
"GetInboxUILocal",
"(",
"ctx",
",",
"chat1",
".",
"GetInboxUILocalArg",
"{",
"Query",
":",
"&",
"chat1",
".",
"GetInboxLocalQuery",
"{",
"Status",
":",
"utils",
".",
"VisibleChatConversationStatuses",
"(",
")",
",",
"TopicType",
":",
"&",
"topicType",
",",
"UnreadOnly",
":",
"opts",
".",
"UnreadOnly",
",",
"OneChatTypePerTLF",
":",
"new",
"(",
"bool",
")",
",",
"}",
",",
"Pagination",
":",
"opts",
".",
"Pagination",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"pagination",
"=",
"res",
".",
"Pagination",
"\n",
"rlimits",
"=",
"utils",
".",
"AggRateLimits",
"(",
"res",
".",
"RateLimits",
")",
"\n",
"if",
"opts",
".",
"FailOffline",
"&&",
"res",
".",
"Offline",
"{",
"return",
"c",
".",
"errReply",
"(",
"chat",
".",
"OfflineError",
"{",
"}",
")",
"\n",
"}",
"\n",
"cl",
"=",
"ChatList",
"{",
"Offline",
":",
"res",
".",
"Offline",
",",
"IdentifyFailures",
":",
"res",
".",
"IdentifyFailures",
",",
"}",
"\n",
"for",
"_",
",",
"conv",
":=",
"range",
"res",
".",
"ConversationsRemote",
"{",
"cl",
".",
"Conversations",
"=",
"append",
"(",
"cl",
".",
"Conversations",
",",
"c",
".",
"exportRemoteConv",
"(",
"ctx",
",",
"conv",
")",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"res",
",",
"err",
":=",
"client",
".",
"GetInboxAndUnboxLocal",
"(",
"ctx",
",",
"chat1",
".",
"GetInboxAndUnboxLocalArg",
"{",
"Query",
":",
"&",
"chat1",
".",
"GetInboxLocalQuery",
"{",
"Status",
":",
"utils",
".",
"VisibleChatConversationStatuses",
"(",
")",
",",
"TopicType",
":",
"&",
"topicType",
",",
"UnreadOnly",
":",
"opts",
".",
"UnreadOnly",
",",
"OneChatTypePerTLF",
":",
"new",
"(",
"bool",
")",
",",
"}",
",",
"Pagination",
":",
"opts",
".",
"Pagination",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"pagination",
"=",
"res",
".",
"Pagination",
"\n",
"rlimits",
"=",
"utils",
".",
"AggRateLimits",
"(",
"res",
".",
"RateLimits",
")",
"\n",
"if",
"opts",
".",
"FailOffline",
"&&",
"res",
".",
"Offline",
"{",
"return",
"c",
".",
"errReply",
"(",
"chat",
".",
"OfflineError",
"{",
"}",
")",
"\n",
"}",
"\n",
"cl",
"=",
"ChatList",
"{",
"Offline",
":",
"res",
".",
"Offline",
",",
"IdentifyFailures",
":",
"res",
".",
"IdentifyFailures",
",",
"}",
"\n",
"for",
"_",
",",
"conv",
":=",
"range",
"res",
".",
"Conversations",
"{",
"if",
"!",
"opts",
".",
"ShowErrors",
"&&",
"conv",
".",
"Error",
"!=",
"nil",
"{",
"continue",
"\n",
"}",
"\n",
"cl",
".",
"Conversations",
"=",
"append",
"(",
"cl",
".",
"Conversations",
",",
"c",
".",
"exportLocalConv",
"(",
"ctx",
",",
"conv",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"cl",
".",
"Pagination",
"=",
"pagination",
"\n",
"cl",
".",
"RateLimits",
".",
"RateLimits",
"=",
"c",
".",
"aggRateLimits",
"(",
"rlimits",
")",
"\n",
"return",
"Reply",
"{",
"Result",
":",
"cl",
"}",
"\n",
"}"
] | // ListV1 implements ChatServiceHandler.ListV1. | [
"ListV1",
"implements",
"ChatServiceHandler",
".",
"ListV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L122-L193 |
159,215 | keybase/client | go/client/chat_svc_handler.go | ReadV1 | func (c *chatServiceHandler) ReadV1(ctx context.Context, opts readOptionsV1) Reply {
var rlimits []chat1.RateLimit
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
conv, rlimits, err := c.findConversation(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.GetThreadLocalArg{
ConversationID: conv.Info.Id,
Pagination: opts.Pagination,
Query: &chat1.GetThreadQuery{
MarkAsRead: !opts.Peek,
},
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
threadView, err := client.GetThreadLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, threadView.RateLimits...)
// Check to see if this was fetched offline and we should fail
if opts.FailOffline && threadView.Offline {
return c.errReply(chat.OfflineError{})
}
// This could be lower than the truth if any messages were
// posted between the last two gregor rpcs.
readMsgID := conv.ReaderInfo.ReadMsgid
selfUID := c.G().Env.GetUID()
if selfUID.IsNil() {
c.G().Log.Warning("Could not get self UID for api")
}
messages, err := c.formatMessages(ctx, threadView.Thread.Messages, conv, selfUID, readMsgID, opts.UnreadOnly)
if err != nil {
return c.errReply(err)
}
thread := Thread{
Offline: threadView.Offline,
IdentifyFailures: threadView.IdentifyFailures,
Pagination: threadView.Thread.Pagination,
Messages: messages,
}
thread.RateLimits.RateLimits = c.aggRateLimits(rlimits)
return Reply{Result: thread}
} | go | func (c *chatServiceHandler) ReadV1(ctx context.Context, opts readOptionsV1) Reply {
var rlimits []chat1.RateLimit
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
conv, rlimits, err := c.findConversation(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.GetThreadLocalArg{
ConversationID: conv.Info.Id,
Pagination: opts.Pagination,
Query: &chat1.GetThreadQuery{
MarkAsRead: !opts.Peek,
},
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
threadView, err := client.GetThreadLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, threadView.RateLimits...)
// Check to see if this was fetched offline and we should fail
if opts.FailOffline && threadView.Offline {
return c.errReply(chat.OfflineError{})
}
// This could be lower than the truth if any messages were
// posted between the last two gregor rpcs.
readMsgID := conv.ReaderInfo.ReadMsgid
selfUID := c.G().Env.GetUID()
if selfUID.IsNil() {
c.G().Log.Warning("Could not get self UID for api")
}
messages, err := c.formatMessages(ctx, threadView.Thread.Messages, conv, selfUID, readMsgID, opts.UnreadOnly)
if err != nil {
return c.errReply(err)
}
thread := Thread{
Offline: threadView.Offline,
IdentifyFailures: threadView.IdentifyFailures,
Pagination: threadView.Thread.Pagination,
Messages: messages,
}
thread.RateLimits.RateLimits = c.aggRateLimits(rlimits)
return Reply{Result: thread}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"ReadV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"readOptionsV1",
")",
"Reply",
"{",
"var",
"rlimits",
"[",
"]",
"chat1",
".",
"RateLimit",
"\n",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"conv",
",",
"rlimits",
",",
"err",
":=",
"c",
".",
"findConversation",
"(",
"ctx",
",",
"opts",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"arg",
":=",
"chat1",
".",
"GetThreadLocalArg",
"{",
"ConversationID",
":",
"conv",
".",
"Info",
".",
"Id",
",",
"Pagination",
":",
"opts",
".",
"Pagination",
",",
"Query",
":",
"&",
"chat1",
".",
"GetThreadQuery",
"{",
"MarkAsRead",
":",
"!",
"opts",
".",
"Peek",
",",
"}",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"}",
"\n",
"threadView",
",",
"err",
":=",
"client",
".",
"GetThreadLocal",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"rlimits",
"=",
"append",
"(",
"rlimits",
",",
"threadView",
".",
"RateLimits",
"...",
")",
"\n\n",
"// Check to see if this was fetched offline and we should fail",
"if",
"opts",
".",
"FailOffline",
"&&",
"threadView",
".",
"Offline",
"{",
"return",
"c",
".",
"errReply",
"(",
"chat",
".",
"OfflineError",
"{",
"}",
")",
"\n",
"}",
"\n\n",
"// This could be lower than the truth if any messages were",
"// posted between the last two gregor rpcs.",
"readMsgID",
":=",
"conv",
".",
"ReaderInfo",
".",
"ReadMsgid",
"\n\n",
"selfUID",
":=",
"c",
".",
"G",
"(",
")",
".",
"Env",
".",
"GetUID",
"(",
")",
"\n",
"if",
"selfUID",
".",
"IsNil",
"(",
")",
"{",
"c",
".",
"G",
"(",
")",
".",
"Log",
".",
"Warning",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"messages",
",",
"err",
":=",
"c",
".",
"formatMessages",
"(",
"ctx",
",",
"threadView",
".",
"Thread",
".",
"Messages",
",",
"conv",
",",
"selfUID",
",",
"readMsgID",
",",
"opts",
".",
"UnreadOnly",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"thread",
":=",
"Thread",
"{",
"Offline",
":",
"threadView",
".",
"Offline",
",",
"IdentifyFailures",
":",
"threadView",
".",
"IdentifyFailures",
",",
"Pagination",
":",
"threadView",
".",
"Thread",
".",
"Pagination",
",",
"Messages",
":",
"messages",
",",
"}",
"\n\n",
"thread",
".",
"RateLimits",
".",
"RateLimits",
"=",
"c",
".",
"aggRateLimits",
"(",
"rlimits",
")",
"\n",
"return",
"Reply",
"{",
"Result",
":",
"thread",
"}",
"\n",
"}"
] | // ReadV1 implements ChatServiceHandler.ReadV1. | [
"ReadV1",
"implements",
"ChatServiceHandler",
".",
"ReadV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L363-L417 |
159,216 | keybase/client | go/client/chat_svc_handler.go | GetV1 | func (c *chatServiceHandler) GetV1(ctx context.Context, opts getOptionsV1) Reply {
var rlimits []chat1.RateLimit
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
conv, rlimits, err := c.findConversation(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.GetMessagesLocalArg{
ConversationID: conv.Info.Id,
MessageIDs: opts.MessageIDs,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
res, err := client.GetMessagesLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
// Check to see if this was fetched offline and we should fail
if opts.FailOffline && res.Offline {
return c.errReply(chat.OfflineError{})
}
selfUID := c.G().Env.GetUID()
if selfUID.IsNil() {
c.G().Log.Warning("Could not get self UID for api")
}
messages, err := c.formatMessages(ctx, res.Messages, conv, selfUID, 0 /* readMsgID */, false /* unreadOnly */)
if err != nil {
return c.errReply(err)
}
thread := Thread{
Offline: res.Offline,
IdentifyFailures: res.IdentifyFailures,
Messages: messages,
}
thread.RateLimits.RateLimits = c.aggRateLimits(rlimits)
return Reply{Result: thread}
} | go | func (c *chatServiceHandler) GetV1(ctx context.Context, opts getOptionsV1) Reply {
var rlimits []chat1.RateLimit
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
conv, rlimits, err := c.findConversation(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.GetMessagesLocalArg{
ConversationID: conv.Info.Id,
MessageIDs: opts.MessageIDs,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
res, err := client.GetMessagesLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
// Check to see if this was fetched offline and we should fail
if opts.FailOffline && res.Offline {
return c.errReply(chat.OfflineError{})
}
selfUID := c.G().Env.GetUID()
if selfUID.IsNil() {
c.G().Log.Warning("Could not get self UID for api")
}
messages, err := c.formatMessages(ctx, res.Messages, conv, selfUID, 0 /* readMsgID */, false /* unreadOnly */)
if err != nil {
return c.errReply(err)
}
thread := Thread{
Offline: res.Offline,
IdentifyFailures: res.IdentifyFailures,
Messages: messages,
}
thread.RateLimits.RateLimits = c.aggRateLimits(rlimits)
return Reply{Result: thread}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"GetV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"getOptionsV1",
")",
"Reply",
"{",
"var",
"rlimits",
"[",
"]",
"chat1",
".",
"RateLimit",
"\n",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"conv",
",",
"rlimits",
",",
"err",
":=",
"c",
".",
"findConversation",
"(",
"ctx",
",",
"opts",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"arg",
":=",
"chat1",
".",
"GetMessagesLocalArg",
"{",
"ConversationID",
":",
"conv",
".",
"Info",
".",
"Id",
",",
"MessageIDs",
":",
"opts",
".",
"MessageIDs",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"}",
"\n\n",
"res",
",",
"err",
":=",
"client",
".",
"GetMessagesLocal",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// Check to see if this was fetched offline and we should fail",
"if",
"opts",
".",
"FailOffline",
"&&",
"res",
".",
"Offline",
"{",
"return",
"c",
".",
"errReply",
"(",
"chat",
".",
"OfflineError",
"{",
"}",
")",
"\n",
"}",
"\n\n",
"selfUID",
":=",
"c",
".",
"G",
"(",
")",
".",
"Env",
".",
"GetUID",
"(",
")",
"\n",
"if",
"selfUID",
".",
"IsNil",
"(",
")",
"{",
"c",
".",
"G",
"(",
")",
".",
"Log",
".",
"Warning",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"messages",
",",
"err",
":=",
"c",
".",
"formatMessages",
"(",
"ctx",
",",
"res",
".",
"Messages",
",",
"conv",
",",
"selfUID",
",",
"0",
"/* readMsgID */",
",",
"false",
"/* unreadOnly */",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"thread",
":=",
"Thread",
"{",
"Offline",
":",
"res",
".",
"Offline",
",",
"IdentifyFailures",
":",
"res",
".",
"IdentifyFailures",
",",
"Messages",
":",
"messages",
",",
"}",
"\n",
"thread",
".",
"RateLimits",
".",
"RateLimits",
"=",
"c",
".",
"aggRateLimits",
"(",
"rlimits",
")",
"\n",
"return",
"Reply",
"{",
"Result",
":",
"thread",
"}",
"\n",
"}"
] | // GetV1 implements ChatServiceHandler.GetV1. | [
"GetV1",
"implements",
"ChatServiceHandler",
".",
"GetV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L420-L465 |
159,217 | keybase/client | go/client/chat_svc_handler.go | SendV1 | func (c *chatServiceHandler) SendV1(ctx context.Context, opts sendOptionsV1, ui chat1.ChatUiInterface) Reply {
arg := sendArgV1{
channel: opts.Channel,
body: chat1.NewMessageBodyWithText(chat1.MessageText{Body: opts.Message.Body}),
mtype: chat1.MessageType_TEXT,
response: "message sent",
nonblock: opts.Nonblock,
ephemeralLifetime: opts.EphemeralLifetime,
replyTo: opts.ReplyTo,
}
return c.sendV1(ctx, arg, ui)
} | go | func (c *chatServiceHandler) SendV1(ctx context.Context, opts sendOptionsV1, ui chat1.ChatUiInterface) Reply {
arg := sendArgV1{
channel: opts.Channel,
body: chat1.NewMessageBodyWithText(chat1.MessageText{Body: opts.Message.Body}),
mtype: chat1.MessageType_TEXT,
response: "message sent",
nonblock: opts.Nonblock,
ephemeralLifetime: opts.EphemeralLifetime,
replyTo: opts.ReplyTo,
}
return c.sendV1(ctx, arg, ui)
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"SendV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"sendOptionsV1",
",",
"ui",
"chat1",
".",
"ChatUiInterface",
")",
"Reply",
"{",
"arg",
":=",
"sendArgV1",
"{",
"channel",
":",
"opts",
".",
"Channel",
",",
"body",
":",
"chat1",
".",
"NewMessageBodyWithText",
"(",
"chat1",
".",
"MessageText",
"{",
"Body",
":",
"opts",
".",
"Message",
".",
"Body",
"}",
")",
",",
"mtype",
":",
"chat1",
".",
"MessageType_TEXT",
",",
"response",
":",
"\"",
"\"",
",",
"nonblock",
":",
"opts",
".",
"Nonblock",
",",
"ephemeralLifetime",
":",
"opts",
".",
"EphemeralLifetime",
",",
"replyTo",
":",
"opts",
".",
"ReplyTo",
",",
"}",
"\n",
"return",
"c",
".",
"sendV1",
"(",
"ctx",
",",
"arg",
",",
"ui",
")",
"\n",
"}"
] | // SendV1 implements ChatServiceHandler.SendV1. | [
"SendV1",
"implements",
"ChatServiceHandler",
".",
"SendV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L468-L479 |
159,218 | keybase/client | go/client/chat_svc_handler.go | DeleteV1 | func (c *chatServiceHandler) DeleteV1(ctx context.Context, opts deleteOptionsV1) Reply {
messages := []chat1.MessageID{opts.MessageID}
arg := sendArgV1{
channel: opts.Channel,
mtype: chat1.MessageType_DELETE,
supersedes: opts.MessageID,
deletes: messages,
response: "message deleted",
// NOTE: The service will fill in the IDs of edit messages that also need to be deleted.
body: chat1.NewMessageBodyWithDelete(chat1.MessageDelete{MessageIDs: messages}),
}
return c.sendV1(ctx, arg, utils.DummyChatUI{})
} | go | func (c *chatServiceHandler) DeleteV1(ctx context.Context, opts deleteOptionsV1) Reply {
messages := []chat1.MessageID{opts.MessageID}
arg := sendArgV1{
channel: opts.Channel,
mtype: chat1.MessageType_DELETE,
supersedes: opts.MessageID,
deletes: messages,
response: "message deleted",
// NOTE: The service will fill in the IDs of edit messages that also need to be deleted.
body: chat1.NewMessageBodyWithDelete(chat1.MessageDelete{MessageIDs: messages}),
}
return c.sendV1(ctx, arg, utils.DummyChatUI{})
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"DeleteV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"deleteOptionsV1",
")",
"Reply",
"{",
"messages",
":=",
"[",
"]",
"chat1",
".",
"MessageID",
"{",
"opts",
".",
"MessageID",
"}",
"\n",
"arg",
":=",
"sendArgV1",
"{",
"channel",
":",
"opts",
".",
"Channel",
",",
"mtype",
":",
"chat1",
".",
"MessageType_DELETE",
",",
"supersedes",
":",
"opts",
".",
"MessageID",
",",
"deletes",
":",
"messages",
",",
"response",
":",
"\"",
"\"",
",",
"// NOTE: The service will fill in the IDs of edit messages that also need to be deleted.",
"body",
":",
"chat1",
".",
"NewMessageBodyWithDelete",
"(",
"chat1",
".",
"MessageDelete",
"{",
"MessageIDs",
":",
"messages",
"}",
")",
",",
"}",
"\n",
"return",
"c",
".",
"sendV1",
"(",
"ctx",
",",
"arg",
",",
"utils",
".",
"DummyChatUI",
"{",
"}",
")",
"\n",
"}"
] | // DeleteV1 implements ChatServiceHandler.DeleteV1. | [
"DeleteV1",
"implements",
"ChatServiceHandler",
".",
"DeleteV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L482-L495 |
159,219 | keybase/client | go/client/chat_svc_handler.go | EditV1 | func (c *chatServiceHandler) EditV1(ctx context.Context, opts editOptionsV1) Reply {
arg := sendArgV1{
channel: opts.Channel,
body: chat1.NewMessageBodyWithEdit(chat1.MessageEdit{MessageID: opts.MessageID, Body: opts.Message.Body}),
mtype: chat1.MessageType_EDIT,
supersedes: opts.MessageID,
response: "message edited",
}
return c.sendV1(ctx, arg, utils.DummyChatUI{})
} | go | func (c *chatServiceHandler) EditV1(ctx context.Context, opts editOptionsV1) Reply {
arg := sendArgV1{
channel: opts.Channel,
body: chat1.NewMessageBodyWithEdit(chat1.MessageEdit{MessageID: opts.MessageID, Body: opts.Message.Body}),
mtype: chat1.MessageType_EDIT,
supersedes: opts.MessageID,
response: "message edited",
}
return c.sendV1(ctx, arg, utils.DummyChatUI{})
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"EditV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"editOptionsV1",
")",
"Reply",
"{",
"arg",
":=",
"sendArgV1",
"{",
"channel",
":",
"opts",
".",
"Channel",
",",
"body",
":",
"chat1",
".",
"NewMessageBodyWithEdit",
"(",
"chat1",
".",
"MessageEdit",
"{",
"MessageID",
":",
"opts",
".",
"MessageID",
",",
"Body",
":",
"opts",
".",
"Message",
".",
"Body",
"}",
")",
",",
"mtype",
":",
"chat1",
".",
"MessageType_EDIT",
",",
"supersedes",
":",
"opts",
".",
"MessageID",
",",
"response",
":",
"\"",
"\"",
",",
"}",
"\n",
"return",
"c",
".",
"sendV1",
"(",
"ctx",
",",
"arg",
",",
"utils",
".",
"DummyChatUI",
"{",
"}",
")",
"\n",
"}"
] | // EditV1 implements ChatServiceHandler.EditV1. | [
"EditV1",
"implements",
"ChatServiceHandler",
".",
"EditV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L498-L508 |
159,220 | keybase/client | go/client/chat_svc_handler.go | ReactionV1 | func (c *chatServiceHandler) ReactionV1(ctx context.Context, opts reactionOptionsV1) Reply {
arg := sendArgV1{
channel: opts.Channel,
body: chat1.NewMessageBodyWithReaction(chat1.MessageReaction{MessageID: opts.MessageID, Body: opts.Message.Body}),
mtype: chat1.MessageType_REACTION,
supersedes: opts.MessageID,
response: "message reacted to",
}
return c.sendV1(ctx, arg, utils.DummyChatUI{})
} | go | func (c *chatServiceHandler) ReactionV1(ctx context.Context, opts reactionOptionsV1) Reply {
arg := sendArgV1{
channel: opts.Channel,
body: chat1.NewMessageBodyWithReaction(chat1.MessageReaction{MessageID: opts.MessageID, Body: opts.Message.Body}),
mtype: chat1.MessageType_REACTION,
supersedes: opts.MessageID,
response: "message reacted to",
}
return c.sendV1(ctx, arg, utils.DummyChatUI{})
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"ReactionV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"reactionOptionsV1",
")",
"Reply",
"{",
"arg",
":=",
"sendArgV1",
"{",
"channel",
":",
"opts",
".",
"Channel",
",",
"body",
":",
"chat1",
".",
"NewMessageBodyWithReaction",
"(",
"chat1",
".",
"MessageReaction",
"{",
"MessageID",
":",
"opts",
".",
"MessageID",
",",
"Body",
":",
"opts",
".",
"Message",
".",
"Body",
"}",
")",
",",
"mtype",
":",
"chat1",
".",
"MessageType_REACTION",
",",
"supersedes",
":",
"opts",
".",
"MessageID",
",",
"response",
":",
"\"",
"\"",
",",
"}",
"\n",
"return",
"c",
".",
"sendV1",
"(",
"ctx",
",",
"arg",
",",
"utils",
".",
"DummyChatUI",
"{",
"}",
")",
"\n",
"}"
] | // ReactionV1 implements ChatServiceHandler.ReactionV1. | [
"ReactionV1",
"implements",
"ChatServiceHandler",
".",
"ReactionV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L511-L520 |
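The three chat_svc_handler rows above (DeleteV1, EditV1, ReactionV1) all build a `sendArgV1` whose `supersedes` field points at the message being deleted, edited, or reacted to. These handlers sit behind the `keybase chat api` JSON interface, so one way to exercise them end to end is to shell out to the CLI. The sketch below is only illustrative: the JSON method and option names ("edit", "channel", "message_id") are assumptions inferred from the handler and options-struct names in these rows, not copied from API documentation.

```go
// Minimal sketch: invoking the edit handler through the `keybase chat api`
// CLI from Go. The payload shape is an assumption inferred from the
// handler/option names above (editOptionsV1: Channel, MessageID, Message),
// not from official API docs; check it against `keybase chat api --help`.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	payload := `{"method": "edit", "params": {"options": {
	  "channel": {"name": "alice,bob"},
	  "message_id": 42,
	  "message": {"body": "fixed typo"}}}}`

	out, err := exec.Command("keybase", "chat", "api", "-m", payload).CombinedOutput()
	if err != nil {
		fmt.Println("keybase chat api failed:", err)
	}
	fmt.Println(string(out))
}
```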
159,221 | keybase/client | go/client/chat_svc_handler.go | DownloadV1 | func (c *chatServiceHandler) DownloadV1(ctx context.Context, opts downloadOptionsV1,
chatUI chat1.ChatUiInterface) Reply {
if opts.NoStream && opts.Output != "-" {
return c.downloadV1NoStream(ctx, opts, chatUI)
}
var fsink Sink
if opts.Output == "-" {
fsink = &StdoutSink{}
} else {
fsink = NewFileSink(c.G(), opts.Output)
}
defer fsink.Close()
sink := c.G().XStreams.ExportWriter(fsink)
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
protocols := []rpc.Protocol{
NewStreamUIProtocol(c.G()),
chat1.ChatUiProtocol(chatUI),
}
if err := RegisterProtocolsWithContext(protocols, c.G()); err != nil {
return c.errReply(err)
}
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.DownloadAttachmentLocalArg{
ConversationID: convID,
MessageID: opts.MessageID,
Sink: sink,
Preview: opts.Preview,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
dres, err := client.DownloadAttachmentLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, dres.RateLimits...)
if opts.Output != "-" {
if err := attachments.Quarantine(ctx, opts.Output); err != nil {
c.G().Log.Warning("failed to quarantine attachment download: %s", err)
}
}
res := SendRes{
Message: fmt.Sprintf("attachment downloaded to %s", opts.Output),
RateLimits: RateLimits{
RateLimits: c.aggRateLimits(rlimits),
},
IdentifyFailures: dres.IdentifyFailures,
}
return Reply{Result: res}
} | go | func (c *chatServiceHandler) DownloadV1(ctx context.Context, opts downloadOptionsV1,
chatUI chat1.ChatUiInterface) Reply {
if opts.NoStream && opts.Output != "-" {
return c.downloadV1NoStream(ctx, opts, chatUI)
}
var fsink Sink
if opts.Output == "-" {
fsink = &StdoutSink{}
} else {
fsink = NewFileSink(c.G(), opts.Output)
}
defer fsink.Close()
sink := c.G().XStreams.ExportWriter(fsink)
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
protocols := []rpc.Protocol{
NewStreamUIProtocol(c.G()),
chat1.ChatUiProtocol(chatUI),
}
if err := RegisterProtocolsWithContext(protocols, c.G()); err != nil {
return c.errReply(err)
}
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.DownloadAttachmentLocalArg{
ConversationID: convID,
MessageID: opts.MessageID,
Sink: sink,
Preview: opts.Preview,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
dres, err := client.DownloadAttachmentLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, dres.RateLimits...)
if opts.Output != "-" {
if err := attachments.Quarantine(ctx, opts.Output); err != nil {
c.G().Log.Warning("failed to quarantine attachment download: %s", err)
}
}
res := SendRes{
Message: fmt.Sprintf("attachment downloaded to %s", opts.Output),
RateLimits: RateLimits{
RateLimits: c.aggRateLimits(rlimits),
},
IdentifyFailures: dres.IdentifyFailures,
}
return Reply{Result: res}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"DownloadV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"downloadOptionsV1",
",",
"chatUI",
"chat1",
".",
"ChatUiInterface",
")",
"Reply",
"{",
"if",
"opts",
".",
"NoStream",
"&&",
"opts",
".",
"Output",
"!=",
"\"",
"\"",
"{",
"return",
"c",
".",
"downloadV1NoStream",
"(",
"ctx",
",",
"opts",
",",
"chatUI",
")",
"\n",
"}",
"\n",
"var",
"fsink",
"Sink",
"\n",
"if",
"opts",
".",
"Output",
"==",
"\"",
"\"",
"{",
"fsink",
"=",
"&",
"StdoutSink",
"{",
"}",
"\n",
"}",
"else",
"{",
"fsink",
"=",
"NewFileSink",
"(",
"c",
".",
"G",
"(",
")",
",",
"opts",
".",
"Output",
")",
"\n",
"}",
"\n",
"defer",
"fsink",
".",
"Close",
"(",
")",
"\n",
"sink",
":=",
"c",
".",
"G",
"(",
")",
".",
"XStreams",
".",
"ExportWriter",
"(",
"fsink",
")",
"\n\n",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"protocols",
":=",
"[",
"]",
"rpc",
".",
"Protocol",
"{",
"NewStreamUIProtocol",
"(",
"c",
".",
"G",
"(",
")",
")",
",",
"chat1",
".",
"ChatUiProtocol",
"(",
"chatUI",
")",
",",
"}",
"\n",
"if",
"err",
":=",
"RegisterProtocolsWithContext",
"(",
"protocols",
",",
"c",
".",
"G",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"convID",
",",
"rlimits",
",",
"err",
":=",
"c",
".",
"resolveAPIConvID",
"(",
"ctx",
",",
"opts",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"arg",
":=",
"chat1",
".",
"DownloadAttachmentLocalArg",
"{",
"ConversationID",
":",
"convID",
",",
"MessageID",
":",
"opts",
".",
"MessageID",
",",
"Sink",
":",
"sink",
",",
"Preview",
":",
"opts",
".",
"Preview",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"}",
"\n\n",
"dres",
",",
"err",
":=",
"client",
".",
"DownloadAttachmentLocal",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"rlimits",
"=",
"append",
"(",
"rlimits",
",",
"dres",
".",
"RateLimits",
"...",
")",
"\n",
"if",
"opts",
".",
"Output",
"!=",
"\"",
"\"",
"{",
"if",
"err",
":=",
"attachments",
".",
"Quarantine",
"(",
"ctx",
",",
"opts",
".",
"Output",
")",
";",
"err",
"!=",
"nil",
"{",
"c",
".",
"G",
"(",
")",
".",
"Log",
".",
"Warning",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"res",
":=",
"SendRes",
"{",
"Message",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"opts",
".",
"Output",
")",
",",
"RateLimits",
":",
"RateLimits",
"{",
"RateLimits",
":",
"c",
".",
"aggRateLimits",
"(",
"rlimits",
")",
",",
"}",
",",
"IdentifyFailures",
":",
"dres",
".",
"IdentifyFailures",
",",
"}",
"\n\n",
"return",
"Reply",
"{",
"Result",
":",
"res",
"}",
"\n",
"}"
] | // DownloadV1 implements ChatServiceHandler.DownloadV1. | [
"DownloadV1",
"implements",
"ChatServiceHandler",
".",
"DownloadV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L611-L670 |
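DownloadV1 picks a sink before anything else: stdout when the output path is "-", a file otherwise, and only file outputs get quarantined afterwards. The sketch below isolates that selection step with standard-library writers; Keybase's Sink, XStreams, and attachment RPCs are internal and are deliberately left out.

```go
// Minimal sketch of the "-" convention used by DownloadV1: write to stdout
// when the output path is "-", otherwise to a file. Keybase's Sink/XStreams
// plumbing is replaced with plain io.Writer values for illustration.
package main

import (
	"fmt"
	"io"
	"os"
)

type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func openOutput(path string) (io.WriteCloser, error) {
	if path == "-" {
		// Wrap stdout so callers can Close() uniformly without actually
		// closing the process's stdout.
		return nopCloser{os.Stdout}, nil
	}
	return os.Create(path)
}

func main() {
	w, err := openOutput("-")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer w.Close()
	fmt.Fprintln(w, "attachment bytes would be streamed here")
}
```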
159,222 | keybase/client | go/client/chat_svc_handler.go | downloadV1NoStream | func (c *chatServiceHandler) downloadV1NoStream(ctx context.Context, opts downloadOptionsV1,
chatUI chat1.ChatUiInterface) Reply {
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
protocols := []rpc.Protocol{
NewStreamUIProtocol(c.G()),
chat1.ChatUiProtocol(chatUI),
}
if err := RegisterProtocolsWithContext(protocols, c.G()); err != nil {
return c.errReply(err)
}
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.DownloadFileAttachmentLocalArg{
ConversationID: convID,
MessageID: opts.MessageID,
Preview: opts.Preview,
Filename: opts.Output,
}
dres, err := client.DownloadFileAttachmentLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, dres.RateLimits...)
res := SendRes{
Message: fmt.Sprintf("attachment downloaded to %s", opts.Output),
RateLimits: RateLimits{
RateLimits: c.aggRateLimits(rlimits),
},
}
return Reply{Result: res}
} | go | func (c *chatServiceHandler) downloadV1NoStream(ctx context.Context, opts downloadOptionsV1,
chatUI chat1.ChatUiInterface) Reply {
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
protocols := []rpc.Protocol{
NewStreamUIProtocol(c.G()),
chat1.ChatUiProtocol(chatUI),
}
if err := RegisterProtocolsWithContext(protocols, c.G()); err != nil {
return c.errReply(err)
}
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
arg := chat1.DownloadFileAttachmentLocalArg{
ConversationID: convID,
MessageID: opts.MessageID,
Preview: opts.Preview,
Filename: opts.Output,
}
dres, err := client.DownloadFileAttachmentLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, dres.RateLimits...)
res := SendRes{
Message: fmt.Sprintf("attachment downloaded to %s", opts.Output),
RateLimits: RateLimits{
RateLimits: c.aggRateLimits(rlimits),
},
}
return Reply{Result: res}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"downloadV1NoStream",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"downloadOptionsV1",
",",
"chatUI",
"chat1",
".",
"ChatUiInterface",
")",
"Reply",
"{",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"protocols",
":=",
"[",
"]",
"rpc",
".",
"Protocol",
"{",
"NewStreamUIProtocol",
"(",
"c",
".",
"G",
"(",
")",
")",
",",
"chat1",
".",
"ChatUiProtocol",
"(",
"chatUI",
")",
",",
"}",
"\n",
"if",
"err",
":=",
"RegisterProtocolsWithContext",
"(",
"protocols",
",",
"c",
".",
"G",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"convID",
",",
"rlimits",
",",
"err",
":=",
"c",
".",
"resolveAPIConvID",
"(",
"ctx",
",",
"opts",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"arg",
":=",
"chat1",
".",
"DownloadFileAttachmentLocalArg",
"{",
"ConversationID",
":",
"convID",
",",
"MessageID",
":",
"opts",
".",
"MessageID",
",",
"Preview",
":",
"opts",
".",
"Preview",
",",
"Filename",
":",
"opts",
".",
"Output",
",",
"}",
"\n\n",
"dres",
",",
"err",
":=",
"client",
".",
"DownloadFileAttachmentLocal",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"rlimits",
"=",
"append",
"(",
"rlimits",
",",
"dres",
".",
"RateLimits",
"...",
")",
"\n\n",
"res",
":=",
"SendRes",
"{",
"Message",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"opts",
".",
"Output",
")",
",",
"RateLimits",
":",
"RateLimits",
"{",
"RateLimits",
":",
"c",
".",
"aggRateLimits",
"(",
"rlimits",
")",
",",
"}",
",",
"}",
"\n\n",
"return",
"Reply",
"{",
"Result",
":",
"res",
"}",
"\n",
"}"
] | // downloadV1NoStream uses DownloadFileAttachmentLocal instead of DownloadAttachmentLocal. | [
"downloadV1NoStream",
"uses",
"DownloadFileAttachmentLocal",
"instead",
"of",
"DownloadAttachmentLocal",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L673-L713 |
159,223 | keybase/client | go/client/chat_svc_handler.go | SetStatusV1 | func (c *chatServiceHandler) SetStatusV1(ctx context.Context, opts setStatusOptionsV1) Reply {
var rlimits []chat1.RateLimit
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
status, ok := chat1.ConversationStatusMap[strings.ToUpper(opts.Status)]
if !ok {
return c.errReply(fmt.Errorf("unsupported status: '%v'", opts.Status))
}
setStatusArg := chat1.SetConversationStatusLocalArg{
ConversationID: convID,
Status: status,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
localRes, err := client.SetConversationStatusLocal(ctx, setStatusArg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, localRes.RateLimits...)
res := EmptyRes{
RateLimits: RateLimits{
c.aggRateLimits(rlimits),
},
}
return Reply{Result: res}
} | go | func (c *chatServiceHandler) SetStatusV1(ctx context.Context, opts setStatusOptionsV1) Reply {
var rlimits []chat1.RateLimit
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
status, ok := chat1.ConversationStatusMap[strings.ToUpper(opts.Status)]
if !ok {
return c.errReply(fmt.Errorf("unsupported status: '%v'", opts.Status))
}
setStatusArg := chat1.SetConversationStatusLocalArg{
ConversationID: convID,
Status: status,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
}
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
localRes, err := client.SetConversationStatusLocal(ctx, setStatusArg)
if err != nil {
return c.errReply(err)
}
rlimits = append(rlimits, localRes.RateLimits...)
res := EmptyRes{
RateLimits: RateLimits{
c.aggRateLimits(rlimits),
},
}
return Reply{Result: res}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"SetStatusV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"setStatusOptionsV1",
")",
"Reply",
"{",
"var",
"rlimits",
"[",
"]",
"chat1",
".",
"RateLimit",
"\n\n",
"convID",
",",
"rlimits",
",",
"err",
":=",
"c",
".",
"resolveAPIConvID",
"(",
"ctx",
",",
"opts",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"status",
",",
"ok",
":=",
"chat1",
".",
"ConversationStatusMap",
"[",
"strings",
".",
"ToUpper",
"(",
"opts",
".",
"Status",
")",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"c",
".",
"errReply",
"(",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"opts",
".",
"Status",
")",
")",
"\n",
"}",
"\n\n",
"setStatusArg",
":=",
"chat1",
".",
"SetConversationStatusLocalArg",
"{",
"ConversationID",
":",
"convID",
",",
"Status",
":",
"status",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"}",
"\n\n",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"localRes",
",",
"err",
":=",
"client",
".",
"SetConversationStatusLocal",
"(",
"ctx",
",",
"setStatusArg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"rlimits",
"=",
"append",
"(",
"rlimits",
",",
"localRes",
".",
"RateLimits",
"...",
")",
"\n\n",
"res",
":=",
"EmptyRes",
"{",
"RateLimits",
":",
"RateLimits",
"{",
"c",
".",
"aggRateLimits",
"(",
"rlimits",
")",
",",
"}",
",",
"}",
"\n",
"return",
"Reply",
"{",
"Result",
":",
"res",
"}",
"\n",
"}"
] | // SetStatusV1 implements ChatServiceHandler.SetStatusV1. | [
"SetStatusV1",
"implements",
"ChatServiceHandler",
".",
"SetStatusV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L716-L750 |
159,224 | keybase/client | go/client/chat_svc_handler.go | MarkV1 | func (c *chatServiceHandler) MarkV1(ctx context.Context, opts markOptionsV1) Reply {
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
arg := chat1.MarkAsReadLocalArg{
ConversationID: convID,
MsgID: opts.MessageID,
}
res, err := client.MarkAsReadLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
allLimits := append(rlimits, res.RateLimits...)
cres := EmptyRes{
RateLimits: RateLimits{
c.aggRateLimits(allLimits),
},
}
return Reply{Result: cres}
} | go | func (c *chatServiceHandler) MarkV1(ctx context.Context, opts markOptionsV1) Reply {
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
arg := chat1.MarkAsReadLocalArg{
ConversationID: convID,
MsgID: opts.MessageID,
}
res, err := client.MarkAsReadLocal(ctx, arg)
if err != nil {
return c.errReply(err)
}
allLimits := append(rlimits, res.RateLimits...)
cres := EmptyRes{
RateLimits: RateLimits{
c.aggRateLimits(allLimits),
},
}
return Reply{Result: cres}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"MarkV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"markOptionsV1",
")",
"Reply",
"{",
"convID",
",",
"rlimits",
",",
"err",
":=",
"c",
".",
"resolveAPIConvID",
"(",
"ctx",
",",
"opts",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"arg",
":=",
"chat1",
".",
"MarkAsReadLocalArg",
"{",
"ConversationID",
":",
"convID",
",",
"MsgID",
":",
"opts",
".",
"MessageID",
",",
"}",
"\n\n",
"res",
",",
"err",
":=",
"client",
".",
"MarkAsReadLocal",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"allLimits",
":=",
"append",
"(",
"rlimits",
",",
"res",
".",
"RateLimits",
"...",
")",
"\n",
"cres",
":=",
"EmptyRes",
"{",
"RateLimits",
":",
"RateLimits",
"{",
"c",
".",
"aggRateLimits",
"(",
"allLimits",
")",
",",
"}",
",",
"}",
"\n",
"return",
"Reply",
"{",
"Result",
":",
"cres",
"}",
"\n",
"}"
] | // MarkV1 implements ChatServiceHandler.MarkV1. | [
"MarkV1",
"implements",
"ChatServiceHandler",
".",
"MarkV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L753-L781 |
159,225 | keybase/client | go/client/chat_svc_handler.go | SearchInboxV1 | func (c *chatServiceHandler) SearchInboxV1(ctx context.Context, opts searchInboxOptionsV1) Reply {
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
if opts.MaxHits <= 0 {
opts.MaxHits = 10
}
reindexMode := chat1.ReIndexingMode_NONE
if opts.ForceReindex {
reindexMode = chat1.ReIndexingMode_PRESEARCH_SYNC
}
searchOpts := chat1.SearchOpts{
ReindexMode: reindexMode,
SentBy: opts.SentBy,
MaxHits: opts.MaxHits,
BeforeContext: opts.BeforeContext,
AfterContext: opts.AfterContext,
}
if opts.SentBefore != "" && opts.SentAfter != "" {
err := fmt.Errorf("Only one of `sent_before` and `sent_after` can be specified")
return c.errReply(err)
}
if opts.SentBefore != "" {
sentBefore, err := dateparse.ParseAny(opts.SentBefore)
if err != nil {
return c.errReply(err)
}
searchOpts.SentBefore = gregor1.ToTime(sentBefore)
}
if opts.SentAfter != "" {
sentAfter, err := dateparse.ParseAny(opts.SentAfter)
if err != nil {
return c.errReply(err)
}
searchOpts.SentAfter = gregor1.ToTime(sentAfter)
}
arg := chat1.SearchInboxArg{
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
Query: opts.Query,
Opts: searchOpts,
}
res, err := client.SearchInbox(ctx, arg)
if err != nil {
return c.errReply(err)
}
searchRes := SearchInboxRes{
Results: res.Res,
RateLimits: RateLimits{
c.aggRateLimits(res.RateLimits),
},
IdentifyFailures: res.IdentifyFailures,
}
return Reply{Result: searchRes}
} | go | func (c *chatServiceHandler) SearchInboxV1(ctx context.Context, opts searchInboxOptionsV1) Reply {
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
if opts.MaxHits <= 0 {
opts.MaxHits = 10
}
reindexMode := chat1.ReIndexingMode_NONE
if opts.ForceReindex {
reindexMode = chat1.ReIndexingMode_PRESEARCH_SYNC
}
searchOpts := chat1.SearchOpts{
ReindexMode: reindexMode,
SentBy: opts.SentBy,
MaxHits: opts.MaxHits,
BeforeContext: opts.BeforeContext,
AfterContext: opts.AfterContext,
}
if opts.SentBefore != "" && opts.SentAfter != "" {
err := fmt.Errorf("Only one of `sent_before` and `sent_after` can be specified")
return c.errReply(err)
}
if opts.SentBefore != "" {
sentBefore, err := dateparse.ParseAny(opts.SentBefore)
if err != nil {
return c.errReply(err)
}
searchOpts.SentBefore = gregor1.ToTime(sentBefore)
}
if opts.SentAfter != "" {
sentAfter, err := dateparse.ParseAny(opts.SentAfter)
if err != nil {
return c.errReply(err)
}
searchOpts.SentAfter = gregor1.ToTime(sentAfter)
}
arg := chat1.SearchInboxArg{
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
Query: opts.Query,
Opts: searchOpts,
}
res, err := client.SearchInbox(ctx, arg)
if err != nil {
return c.errReply(err)
}
searchRes := SearchInboxRes{
Results: res.Res,
RateLimits: RateLimits{
c.aggRateLimits(res.RateLimits),
},
IdentifyFailures: res.IdentifyFailures,
}
return Reply{Result: searchRes}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"SearchInboxV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"searchInboxOptionsV1",
")",
"Reply",
"{",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"opts",
".",
"MaxHits",
"<=",
"0",
"{",
"opts",
".",
"MaxHits",
"=",
"10",
"\n",
"}",
"\n\n",
"reindexMode",
":=",
"chat1",
".",
"ReIndexingMode_NONE",
"\n",
"if",
"opts",
".",
"ForceReindex",
"{",
"reindexMode",
"=",
"chat1",
".",
"ReIndexingMode_PRESEARCH_SYNC",
"\n",
"}",
"\n",
"searchOpts",
":=",
"chat1",
".",
"SearchOpts",
"{",
"ReindexMode",
":",
"reindexMode",
",",
"SentBy",
":",
"opts",
".",
"SentBy",
",",
"MaxHits",
":",
"opts",
".",
"MaxHits",
",",
"BeforeContext",
":",
"opts",
".",
"BeforeContext",
",",
"AfterContext",
":",
"opts",
".",
"AfterContext",
",",
"}",
"\n\n",
"if",
"opts",
".",
"SentBefore",
"!=",
"\"",
"\"",
"&&",
"opts",
".",
"SentAfter",
"!=",
"\"",
"\"",
"{",
"err",
":=",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"opts",
".",
"SentBefore",
"!=",
"\"",
"\"",
"{",
"sentBefore",
",",
"err",
":=",
"dateparse",
".",
"ParseAny",
"(",
"opts",
".",
"SentBefore",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"searchOpts",
".",
"SentBefore",
"=",
"gregor1",
".",
"ToTime",
"(",
"sentBefore",
")",
"\n",
"}",
"\n",
"if",
"opts",
".",
"SentAfter",
"!=",
"\"",
"\"",
"{",
"sentAfter",
",",
"err",
":=",
"dateparse",
".",
"ParseAny",
"(",
"opts",
".",
"SentAfter",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"searchOpts",
".",
"SentAfter",
"=",
"gregor1",
".",
"ToTime",
"(",
"sentAfter",
")",
"\n",
"}",
"\n\n",
"arg",
":=",
"chat1",
".",
"SearchInboxArg",
"{",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"Query",
":",
"opts",
".",
"Query",
",",
"Opts",
":",
"searchOpts",
",",
"}",
"\n\n",
"res",
",",
"err",
":=",
"client",
".",
"SearchInbox",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"searchRes",
":=",
"SearchInboxRes",
"{",
"Results",
":",
"res",
".",
"Res",
",",
"RateLimits",
":",
"RateLimits",
"{",
"c",
".",
"aggRateLimits",
"(",
"res",
".",
"RateLimits",
")",
",",
"}",
",",
"IdentifyFailures",
":",
"res",
".",
"IdentifyFailures",
",",
"}",
"\n",
"return",
"Reply",
"{",
"Result",
":",
"searchRes",
"}",
"\n",
"}"
] | // SearchInbox implements ChatServiceHandler.SearchInboxV1. | [
"SearchInbox",
"implements",
"ChatServiceHandler",
".",
"SearchInboxV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L784-L844 |
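SearchInboxV1 rejects requests that set both sent_before and sent_after, then parses whichever one is present with dateparse.ParseAny. A standalone version of that validation might look like the sketch below; the import path for dateparse is assumed to be github.com/araddon/dateparse, since the row only shows the bare `dateparse` identifier.

```go
// Minimal sketch of the sent_before / sent_after validation used by the
// search handlers: the two options are mutually exclusive, and whichever is
// present is parsed with dateparse.ParseAny. The import path is an
// assumption (github.com/araddon/dateparse).
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/araddon/dateparse"
)

func parseSentWindow(sentBefore, sentAfter string) (before, after time.Time, err error) {
	if sentBefore != "" && sentAfter != "" {
		return before, after, errors.New("only one of sent_before and sent_after can be specified")
	}
	if sentBefore != "" {
		if before, err = dateparse.ParseAny(sentBefore); err != nil {
			return before, after, err
		}
	}
	if sentAfter != "" {
		if after, err = dateparse.ParseAny(sentAfter); err != nil {
			return before, after, err
		}
	}
	return before, after, nil
}

func main() {
	b, a, err := parseSentWindow("2019-01-02", "")
	fmt.Println(b, a, err)
}
```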
159,226 | keybase/client | go/client/chat_svc_handler.go | SearchRegexpV1 | func (c *chatServiceHandler) SearchRegexpV1(ctx context.Context, opts searchRegexpOptionsV1) Reply {
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
if opts.MaxHits <= 0 {
opts.MaxHits = 10
}
if opts.MaxMessages <= 0 {
opts.MaxMessages = 10000
}
searchOpts := chat1.SearchOpts{
SentBy: opts.SentBy,
MaxHits: opts.MaxHits,
MaxMessages: opts.MaxMessages,
BeforeContext: opts.BeforeContext,
AfterContext: opts.AfterContext,
}
if opts.SentBefore != "" && opts.SentAfter != "" {
err := fmt.Errorf("Only one of `sent_before` and `sent_after` can be specified")
return c.errReply(err)
}
if opts.SentBefore != "" {
sentBefore, err := dateparse.ParseAny(opts.SentBefore)
if err != nil {
return c.errReply(err)
}
searchOpts.SentBefore = gregor1.ToTime(sentBefore)
}
if opts.SentAfter != "" {
sentAfter, err := dateparse.ParseAny(opts.SentAfter)
if err != nil {
return c.errReply(err)
}
searchOpts.SentAfter = gregor1.ToTime(sentAfter)
}
searchOpts.IsRegex = opts.IsRegex
arg := chat1.SearchRegexpArg{
ConvID: convID,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
Query: opts.Query,
Opts: searchOpts,
}
res, err := client.SearchRegexp(ctx, arg)
if err != nil {
return c.errReply(err)
}
allLimits := append(rlimits, res.RateLimits...)
searchRes := SearchRegexpRes{
Hits: res.Hits,
RateLimits: RateLimits{
c.aggRateLimits(allLimits),
},
IdentifyFailures: res.IdentifyFailures,
}
return Reply{Result: searchRes}
} | go | func (c *chatServiceHandler) SearchRegexpV1(ctx context.Context, opts searchRegexpOptionsV1) Reply {
convID, rlimits, err := c.resolveAPIConvID(ctx, opts.Channel)
if err != nil {
return c.errReply(err)
}
client, err := GetChatLocalClient(c.G())
if err != nil {
return c.errReply(err)
}
if opts.MaxHits <= 0 {
opts.MaxHits = 10
}
if opts.MaxMessages <= 0 {
opts.MaxMessages = 10000
}
searchOpts := chat1.SearchOpts{
SentBy: opts.SentBy,
MaxHits: opts.MaxHits,
MaxMessages: opts.MaxMessages,
BeforeContext: opts.BeforeContext,
AfterContext: opts.AfterContext,
}
if opts.SentBefore != "" && opts.SentAfter != "" {
err := fmt.Errorf("Only one of `sent_before` and `sent_after` can be specified")
return c.errReply(err)
}
if opts.SentBefore != "" {
sentBefore, err := dateparse.ParseAny(opts.SentBefore)
if err != nil {
return c.errReply(err)
}
searchOpts.SentBefore = gregor1.ToTime(sentBefore)
}
if opts.SentAfter != "" {
sentAfter, err := dateparse.ParseAny(opts.SentAfter)
if err != nil {
return c.errReply(err)
}
searchOpts.SentAfter = gregor1.ToTime(sentAfter)
}
searchOpts.IsRegex = opts.IsRegex
arg := chat1.SearchRegexpArg{
ConvID: convID,
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_CLI,
Query: opts.Query,
Opts: searchOpts,
}
res, err := client.SearchRegexp(ctx, arg)
if err != nil {
return c.errReply(err)
}
allLimits := append(rlimits, res.RateLimits...)
searchRes := SearchRegexpRes{
Hits: res.Hits,
RateLimits: RateLimits{
c.aggRateLimits(allLimits),
},
IdentifyFailures: res.IdentifyFailures,
}
return Reply{Result: searchRes}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"SearchRegexpV1",
"(",
"ctx",
"context",
".",
"Context",
",",
"opts",
"searchRegexpOptionsV1",
")",
"Reply",
"{",
"convID",
",",
"rlimits",
",",
"err",
":=",
"c",
".",
"resolveAPIConvID",
"(",
"ctx",
",",
"opts",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"client",
",",
"err",
":=",
"GetChatLocalClient",
"(",
"c",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"opts",
".",
"MaxHits",
"<=",
"0",
"{",
"opts",
".",
"MaxHits",
"=",
"10",
"\n",
"}",
"\n\n",
"if",
"opts",
".",
"MaxMessages",
"<=",
"0",
"{",
"opts",
".",
"MaxMessages",
"=",
"10000",
"\n",
"}",
"\n\n",
"searchOpts",
":=",
"chat1",
".",
"SearchOpts",
"{",
"SentBy",
":",
"opts",
".",
"SentBy",
",",
"MaxHits",
":",
"opts",
".",
"MaxHits",
",",
"MaxMessages",
":",
"opts",
".",
"MaxMessages",
",",
"BeforeContext",
":",
"opts",
".",
"BeforeContext",
",",
"AfterContext",
":",
"opts",
".",
"AfterContext",
",",
"}",
"\n\n",
"if",
"opts",
".",
"SentBefore",
"!=",
"\"",
"\"",
"&&",
"opts",
".",
"SentAfter",
"!=",
"\"",
"\"",
"{",
"err",
":=",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"opts",
".",
"SentBefore",
"!=",
"\"",
"\"",
"{",
"sentBefore",
",",
"err",
":=",
"dateparse",
".",
"ParseAny",
"(",
"opts",
".",
"SentBefore",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"searchOpts",
".",
"SentBefore",
"=",
"gregor1",
".",
"ToTime",
"(",
"sentBefore",
")",
"\n",
"}",
"\n",
"if",
"opts",
".",
"SentAfter",
"!=",
"\"",
"\"",
"{",
"sentAfter",
",",
"err",
":=",
"dateparse",
".",
"ParseAny",
"(",
"opts",
".",
"SentAfter",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n",
"searchOpts",
".",
"SentAfter",
"=",
"gregor1",
".",
"ToTime",
"(",
"sentAfter",
")",
"\n",
"}",
"\n",
"searchOpts",
".",
"IsRegex",
"=",
"opts",
".",
"IsRegex",
"\n\n",
"arg",
":=",
"chat1",
".",
"SearchRegexpArg",
"{",
"ConvID",
":",
"convID",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_CLI",
",",
"Query",
":",
"opts",
".",
"Query",
",",
"Opts",
":",
"searchOpts",
",",
"}",
"\n\n",
"res",
",",
"err",
":=",
"client",
".",
"SearchRegexp",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"c",
".",
"errReply",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"allLimits",
":=",
"append",
"(",
"rlimits",
",",
"res",
".",
"RateLimits",
"...",
")",
"\n",
"searchRes",
":=",
"SearchRegexpRes",
"{",
"Hits",
":",
"res",
".",
"Hits",
",",
"RateLimits",
":",
"RateLimits",
"{",
"c",
".",
"aggRateLimits",
"(",
"allLimits",
")",
",",
"}",
",",
"IdentifyFailures",
":",
"res",
".",
"IdentifyFailures",
",",
"}",
"\n",
"return",
"Reply",
"{",
"Result",
":",
"searchRes",
"}",
"\n",
"}"
] | // SearchRegexpV1 implements ChatServiceHandler.SearchRegexpV1. | [
"SearchRegexpV1",
"implements",
"ChatServiceHandler",
".",
"SearchRegexpV1",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L847-L915 |
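SearchRegexpV1 forwards opts.IsRegex straight into SearchOpts, so a malformed pattern is only rejected once it reaches the service. A cheap client-side pre-check with the standard regexp package is sketched below; it is illustrative only and is not part of the handler above.

```go
// Minimal sketch: when is_regex is set, validate the query locally with
// regexp.Compile before sending it to SearchRegexp. This pre-check is an
// illustrative addition, not something the handler above performs.
package main

import (
	"fmt"
	"regexp"
)

func validateQuery(query string, isRegex bool) error {
	if !isRegex {
		return nil
	}
	if _, err := regexp.Compile(query); err != nil {
		return fmt.Errorf("invalid regex query: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(validateQuery(`foo(bar`, true)) // reports the unclosed group
}
```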
159,227 | keybase/client | go/client/chat_svc_handler.go | convertMsgBody | func (c *chatServiceHandler) convertMsgBody(mb chat1.MessageBody) MsgContent {
return MsgContent{
TypeName: strings.ToLower(chat1.MessageTypeRevMap[mb.MessageType__]),
Text: mb.Text__,
Attachment: mb.Attachment__,
Edit: mb.Edit__,
Reaction: mb.Reaction__,
Delete: mb.Delete__,
Metadata: mb.Metadata__,
Headline: mb.Headline__,
AttachmentUploaded: mb.Attachmentuploaded__,
System: mb.System__,
SendPayment: mb.Sendpayment__,
RequestPayment: mb.Requestpayment__,
Unfurl: mb.Unfurl__,
Flip: mb.Flip__,
}
} | go | func (c *chatServiceHandler) convertMsgBody(mb chat1.MessageBody) MsgContent {
return MsgContent{
TypeName: strings.ToLower(chat1.MessageTypeRevMap[mb.MessageType__]),
Text: mb.Text__,
Attachment: mb.Attachment__,
Edit: mb.Edit__,
Reaction: mb.Reaction__,
Delete: mb.Delete__,
Metadata: mb.Metadata__,
Headline: mb.Headline__,
AttachmentUploaded: mb.Attachmentuploaded__,
System: mb.System__,
SendPayment: mb.Sendpayment__,
RequestPayment: mb.Requestpayment__,
Unfurl: mb.Unfurl__,
Flip: mb.Flip__,
}
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"convertMsgBody",
"(",
"mb",
"chat1",
".",
"MessageBody",
")",
"MsgContent",
"{",
"return",
"MsgContent",
"{",
"TypeName",
":",
"strings",
".",
"ToLower",
"(",
"chat1",
".",
"MessageTypeRevMap",
"[",
"mb",
".",
"MessageType__",
"]",
")",
",",
"Text",
":",
"mb",
".",
"Text__",
",",
"Attachment",
":",
"mb",
".",
"Attachment__",
",",
"Edit",
":",
"mb",
".",
"Edit__",
",",
"Reaction",
":",
"mb",
".",
"Reaction__",
",",
"Delete",
":",
"mb",
".",
"Delete__",
",",
"Metadata",
":",
"mb",
".",
"Metadata__",
",",
"Headline",
":",
"mb",
".",
"Headline__",
",",
"AttachmentUploaded",
":",
"mb",
".",
"Attachmentuploaded__",
",",
"System",
":",
"mb",
".",
"System__",
",",
"SendPayment",
":",
"mb",
".",
"Sendpayment__",
",",
"RequestPayment",
":",
"mb",
".",
"Requestpayment__",
",",
"Unfurl",
":",
"mb",
".",
"Unfurl__",
",",
"Flip",
":",
"mb",
".",
"Flip__",
",",
"}",
"\n",
"}"
] | // need this to get message type name | [
"need",
"this",
"to",
"get",
"message",
"type",
"name"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L1147-L1164 |
159,228 | keybase/client | go/client/chat_svc_handler.go | resolveAPIConvID | func (c *chatServiceHandler) resolveAPIConvID(ctx context.Context, channel ChatChannel) (chat1.ConversationID, []chat1.RateLimit, error) {
conv, limits, err := c.findConversation(ctx, channel)
if err != nil {
return chat1.ConversationID{}, nil, err
}
return conv.Info.Id, limits, nil
} | go | func (c *chatServiceHandler) resolveAPIConvID(ctx context.Context, channel ChatChannel) (chat1.ConversationID, []chat1.RateLimit, error) {
conv, limits, err := c.findConversation(ctx, channel)
if err != nil {
return chat1.ConversationID{}, nil, err
}
return conv.Info.Id, limits, nil
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"resolveAPIConvID",
"(",
"ctx",
"context",
".",
"Context",
",",
"channel",
"ChatChannel",
")",
"(",
"chat1",
".",
"ConversationID",
",",
"[",
"]",
"chat1",
".",
"RateLimit",
",",
"error",
")",
"{",
"conv",
",",
"limits",
",",
"err",
":=",
"c",
".",
"findConversation",
"(",
"ctx",
",",
"channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"chat1",
".",
"ConversationID",
"{",
"}",
",",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"conv",
".",
"Info",
".",
"Id",
",",
"limits",
",",
"nil",
"\n",
"}"
] | // Resolve the ConvID of the specified conversation.
// Uses tlfclient and GetInboxAndUnboxLocal's ConversationsUnverified. | [
"Resolve",
"the",
"ConvID",
"of",
"the",
"specified",
"conversation",
".",
"Uses",
"tlfclient",
"and",
"GetInboxAndUnboxLocal",
"s",
"ConversationsUnverified",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L1208-L1214 |
159,229 | keybase/client | go/client/chat_svc_handler.go | findConversation | func (c *chatServiceHandler) findConversation(ctx context.Context, channel ChatChannel) (chat1.ConversationLocal, []chat1.RateLimit, error) {
var conv chat1.ConversationLocal
var rlimits []chat1.RateLimit
if channel.IsNil() {
return conv, rlimits, errors.New("missing conversation specificer")
}
existing, existingRl, err := c.getExistingConvs(ctx, channel)
if err != nil {
return conv, rlimits, err
}
rlimits = append(rlimits, existingRl...)
if len(existing) > 1 {
return conv, rlimits, fmt.Errorf("multiple conversations matched %q", channel.Name)
}
if len(existing) == 0 {
return conv, rlimits, fmt.Errorf("no conversations matched %q", channel.Name)
}
return existing[0], rlimits, nil
} | go | func (c *chatServiceHandler) findConversation(ctx context.Context, channel ChatChannel) (chat1.ConversationLocal, []chat1.RateLimit, error) {
var conv chat1.ConversationLocal
var rlimits []chat1.RateLimit
if channel.IsNil() {
return conv, rlimits, errors.New("missing conversation specificer")
}
existing, existingRl, err := c.getExistingConvs(ctx, channel)
if err != nil {
return conv, rlimits, err
}
rlimits = append(rlimits, existingRl...)
if len(existing) > 1 {
return conv, rlimits, fmt.Errorf("multiple conversations matched %q", channel.Name)
}
if len(existing) == 0 {
return conv, rlimits, fmt.Errorf("no conversations matched %q", channel.Name)
}
return existing[0], rlimits, nil
} | [
"func",
"(",
"c",
"*",
"chatServiceHandler",
")",
"findConversation",
"(",
"ctx",
"context",
".",
"Context",
",",
"channel",
"ChatChannel",
")",
"(",
"chat1",
".",
"ConversationLocal",
",",
"[",
"]",
"chat1",
".",
"RateLimit",
",",
"error",
")",
"{",
"var",
"conv",
"chat1",
".",
"ConversationLocal",
"\n",
"var",
"rlimits",
"[",
"]",
"chat1",
".",
"RateLimit",
"\n\n",
"if",
"channel",
".",
"IsNil",
"(",
")",
"{",
"return",
"conv",
",",
"rlimits",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"existing",
",",
"existingRl",
",",
"err",
":=",
"c",
".",
"getExistingConvs",
"(",
"ctx",
",",
"channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"conv",
",",
"rlimits",
",",
"err",
"\n",
"}",
"\n",
"rlimits",
"=",
"append",
"(",
"rlimits",
",",
"existingRl",
"...",
")",
"\n\n",
"if",
"len",
"(",
"existing",
")",
">",
"1",
"{",
"return",
"conv",
",",
"rlimits",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"channel",
".",
"Name",
")",
"\n",
"}",
"\n",
"if",
"len",
"(",
"existing",
")",
"==",
"0",
"{",
"return",
"conv",
",",
"rlimits",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"channel",
".",
"Name",
")",
"\n",
"}",
"\n\n",
"return",
"existing",
"[",
"0",
"]",
",",
"rlimits",
",",
"nil",
"\n",
"}"
] | // findConversation finds a conversation.
// Uses tlfclient and GetInboxAndUnboxLocal's ConversationsUnverified. | [
"findConversation",
"finds",
"a",
"conversation",
".",
"Uses",
"tlfclient",
"and",
"GetInboxAndUnboxLocal",
"s",
"ConversationsUnverified",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/chat_svc_handler.go#L1218-L1240 |
159,230 | keybase/client | go/kbfs/libcontext/delayed_cancellation.go | NewContextWithReplayFrom | func NewContextWithReplayFrom(ctx context.Context) (context.Context, error) {
if replays, ok := ctx.Value(CtxReplayKey).([]CtxReplayFunc); ok {
newCtx := context.Background()
for _, replay := range replays {
newCtx = replay(newCtx)
}
replays, _ := ctx.Value(CtxReplayKey).([]CtxReplayFunc)
newCtx = context.WithValue(newCtx, CtxReplayKey, replays)
return newCtx, nil
}
return nil, CtxNotReplayableError{}
} | go | func NewContextWithReplayFrom(ctx context.Context) (context.Context, error) {
if replays, ok := ctx.Value(CtxReplayKey).([]CtxReplayFunc); ok {
newCtx := context.Background()
for _, replay := range replays {
newCtx = replay(newCtx)
}
replays, _ := ctx.Value(CtxReplayKey).([]CtxReplayFunc)
newCtx = context.WithValue(newCtx, CtxReplayKey, replays)
return newCtx, nil
}
return nil, CtxNotReplayableError{}
} | [
"func",
"NewContextWithReplayFrom",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"context",
".",
"Context",
",",
"error",
")",
"{",
"if",
"replays",
",",
"ok",
":=",
"ctx",
".",
"Value",
"(",
"CtxReplayKey",
")",
".",
"(",
"[",
"]",
"CtxReplayFunc",
")",
";",
"ok",
"{",
"newCtx",
":=",
"context",
".",
"Background",
"(",
")",
"\n",
"for",
"_",
",",
"replay",
":=",
"range",
"replays",
"{",
"newCtx",
"=",
"replay",
"(",
"newCtx",
")",
"\n",
"}",
"\n",
"replays",
",",
"_",
":=",
"ctx",
".",
"Value",
"(",
"CtxReplayKey",
")",
".",
"(",
"[",
"]",
"CtxReplayFunc",
")",
"\n",
"newCtx",
"=",
"context",
".",
"WithValue",
"(",
"newCtx",
",",
"CtxReplayKey",
",",
"replays",
")",
"\n",
"return",
"newCtx",
",",
"nil",
"\n",
"}",
"\n",
"return",
"nil",
",",
"CtxNotReplayableError",
"{",
"}",
"\n",
"}"
] | // NewContextWithReplayFrom constructs a new context out of ctx by calling all
// attached replay functions. This disconnects any existing context.CancelFunc. | [
"NewContextWithReplayFrom",
"constructs",
"a",
"new",
"context",
"out",
"of",
"ctx",
"by",
"calling",
"all",
"attached",
"replay",
"functions",
".",
"This",
"disconnects",
"any",
"existing",
"context",
".",
"CancelFunc",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libcontext/delayed_cancellation.go#L116-L127 |
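NewContextWithReplayFrom rebuilds a context by replaying stored functions onto context.Background(), which drops any deadline or CancelFunc from the original while keeping its values reachable. The self-contained sketch below reproduces that pattern with its own hypothetical key and replay types standing in for Keybase's CtxReplayKey and CtxReplayFunc.

```go
// Minimal sketch of the replay-onto-Background pattern: values are attached
// through replay functions that can later be re-run on a fresh context, so
// cancellation of the original context no longer applies. The key and types
// here are hypothetical stand-ins for Keybase's CtxReplayKey/CtxReplayFunc.
package main

import (
	"context"
	"fmt"
)

type replayFunc func(context.Context) context.Context

type ctxKey string

const replayKey ctxKey = "replays"

func withReplayableValue(ctx context.Context, key ctxKey, v interface{}) context.Context {
	replay := func(c context.Context) context.Context { return context.WithValue(c, key, v) }
	replays, _ := ctx.Value(replayKey).([]replayFunc)
	ctx = context.WithValue(ctx, replayKey, append(replays, replay))
	return replay(ctx)
}

func replayFrom(ctx context.Context) (context.Context, error) {
	replays, ok := ctx.Value(replayKey).([]replayFunc)
	if !ok {
		return nil, fmt.Errorf("context is not replayable")
	}
	newCtx := context.Background()
	for _, r := range replays {
		newCtx = r(newCtx)
	}
	return context.WithValue(newCtx, replayKey, replays), nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ctx = withReplayableValue(ctx, "user", "alice")
	cancel() // the original chain is now canceled...

	fresh, err := replayFrom(ctx)
	if err != nil {
		panic(err)
	}
	// ...but the rebuilt context still carries the value and is not canceled.
	fmt.Println(fresh.Value(ctxKey("user")), fresh.Err()) // alice <nil>
}
```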
159,231 | keybase/client | go/kbfs/libcontext/delayed_cancellation.go | BackgroundContextWithCancellationDelayer | func BackgroundContextWithCancellationDelayer() context.Context {
if ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
context.Background(), func(c context.Context) context.Context {
return c
})); err != nil {
panic(err)
} else {
return ctx
}
} | go | func BackgroundContextWithCancellationDelayer() context.Context {
if ctx, err := NewContextWithCancellationDelayer(NewContextReplayable(
context.Background(), func(c context.Context) context.Context {
return c
})); err != nil {
panic(err)
} else {
return ctx
}
} | [
"func",
"BackgroundContextWithCancellationDelayer",
"(",
")",
"context",
".",
"Context",
"{",
"if",
"ctx",
",",
"err",
":=",
"NewContextWithCancellationDelayer",
"(",
"NewContextReplayable",
"(",
"context",
".",
"Background",
"(",
")",
",",
"func",
"(",
"c",
"context",
".",
"Context",
")",
"context",
".",
"Context",
"{",
"return",
"c",
"\n",
"}",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"else",
"{",
"return",
"ctx",
"\n",
"}",
"\n",
"}"
] | // BackgroundContextWithCancellationDelayer generate a "Background"
// context that is cancellation delayable | [
"BackgroundContextWithCancellationDelayer",
"generate",
"a",
"Background",
"context",
"that",
"is",
"cancellation",
"delayable"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libcontext/delayed_cancellation.go#L237-L246 |
159,232 | keybase/client | go/client/cmd_simplefs_sync_disable.go | NewCmdSimpleFSSyncDisable | func NewCmdSimpleFSSyncDisable(
cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
return cli.Command{
Name: "disable",
ArgumentHelp: "[path-to-folder]",
Usage: "Stops syncing the given folder to local storage",
Action: func(c *cli.Context) {
cl.ChooseCommand(&CmdSimpleFSSyncDisable{
Contextified: libkb.NewContextified(g)}, "disable", c)
cl.SetNoStandalone()
},
}
} | go | func NewCmdSimpleFSSyncDisable(
cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
return cli.Command{
Name: "disable",
ArgumentHelp: "[path-to-folder]",
Usage: "Stops syncing the given folder to local storage",
Action: func(c *cli.Context) {
cl.ChooseCommand(&CmdSimpleFSSyncDisable{
Contextified: libkb.NewContextified(g)}, "disable", c)
cl.SetNoStandalone()
},
}
} | [
"func",
"NewCmdSimpleFSSyncDisable",
"(",
"cl",
"*",
"libcmdline",
".",
"CommandLine",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
")",
"cli",
".",
"Command",
"{",
"return",
"cli",
".",
"Command",
"{",
"Name",
":",
"\"",
"\"",
",",
"ArgumentHelp",
":",
"\"",
"\"",
",",
"Usage",
":",
"\"",
"\"",
",",
"Action",
":",
"func",
"(",
"c",
"*",
"cli",
".",
"Context",
")",
"{",
"cl",
".",
"ChooseCommand",
"(",
"&",
"CmdSimpleFSSyncDisable",
"{",
"Contextified",
":",
"libkb",
".",
"NewContextified",
"(",
"g",
")",
"}",
",",
"\"",
"\"",
",",
"c",
")",
"\n",
"cl",
".",
"SetNoStandalone",
"(",
")",
"\n",
"}",
",",
"}",
"\n",
"}"
] | // NewCmdSimpleFSSyncDisable creates a new cli.Command. | [
"NewCmdSimpleFSSyncDisable",
"creates",
"a",
"new",
"cli",
".",
"Command",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/cmd_simplefs_sync_disable.go#L24-L36 |
159,233 | keybase/client | go/libkb/util_windows.go | SafeWriteToFile | func SafeWriteToFile(g SafeWriteLogger, t SafeWriter, mode os.FileMode) error {
var err error
for i := 0; i < 5; i++ {
if err != nil {
g.Debug("Retrying failed safeWriteToFileOnce - %s", err)
time.Sleep(10 * time.Millisecond)
}
err = safeWriteToFileOnce(g, t, mode)
if err == nil {
break
}
}
return err
} | go | func SafeWriteToFile(g SafeWriteLogger, t SafeWriter, mode os.FileMode) error {
var err error
for i := 0; i < 5; i++ {
if err != nil {
g.Debug("Retrying failed safeWriteToFileOnce - %s", err)
time.Sleep(10 * time.Millisecond)
}
err = safeWriteToFileOnce(g, t, mode)
if err == nil {
break
}
}
return err
} | [
"func",
"SafeWriteToFile",
"(",
"g",
"SafeWriteLogger",
",",
"t",
"SafeWriter",
",",
"mode",
"os",
".",
"FileMode",
")",
"error",
"{",
"var",
"err",
"error",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"5",
";",
"i",
"++",
"{",
"if",
"err",
"!=",
"nil",
"{",
"g",
".",
"Debug",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"time",
".",
"Sleep",
"(",
"10",
"*",
"time",
".",
"Millisecond",
")",
"\n",
"}",
"\n",
"err",
"=",
"safeWriteToFileOnce",
"(",
"g",
",",
"t",
",",
"mode",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}"
] | // SafeWriteToFile retries safeWriteToFileOnce a few times on Windows,
// in case AV programs interfere with 2 writes in quick succession. | [
"SafeWriteToFile",
"retries",
"safeWriteToFileOnce",
"a",
"few",
"times",
"on",
"Windows",
"in",
"case",
"AV",
"programs",
"interfere",
"with",
"2",
"writes",
"in",
"quick",
"succession",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/util_windows.go#L133-L147 |
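SafeWriteToFile (and renameFile in the next row) wrap a single filesystem operation in a fixed number of retries with a short sleep, to ride out transient locks from antivirus scanners on Windows. A generic version of that loop, with the same attempt count and delay, is sketched below; the file names in main are examples only.

```go
// Minimal sketch of the fixed-count retry loop used by SafeWriteToFile and
// renameFile: retry a flaky operation a few times with a short pause between
// attempts. The attempt count and delay mirror the values above.
package main

import (
	"fmt"
	"os"
	"time"
)

func retry(attempts int, delay time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if i > 0 {
			time.Sleep(delay)
		}
		if err = op(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	err := retry(5, 10*time.Millisecond, func() error {
		return os.Rename("config.tmp", "config.json")
	})
	fmt.Println("rename result:", err)
}
```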
159,234 | keybase/client | go/libkb/util_windows.go | renameFile | func renameFile(g *GlobalContext, src string, dest string) error {
var err error
for i := 0; i < 5; i++ {
if err != nil {
g.Log.Debug("Retrying failed os.Rename - %s", err)
time.Sleep(10 * time.Millisecond)
}
err = os.Rename(src, dest)
if err == nil {
break
}
}
return err
} | go | func renameFile(g *GlobalContext, src string, dest string) error {
var err error
for i := 0; i < 5; i++ {
if err != nil {
g.Log.Debug("Retrying failed os.Rename - %s", err)
time.Sleep(10 * time.Millisecond)
}
err = os.Rename(src, dest)
if err == nil {
break
}
}
return err
} | [
"func",
"renameFile",
"(",
"g",
"*",
"GlobalContext",
",",
"src",
"string",
",",
"dest",
"string",
")",
"error",
"{",
"var",
"err",
"error",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"5",
";",
"i",
"++",
"{",
"if",
"err",
"!=",
"nil",
"{",
"g",
".",
"Log",
".",
"Debug",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"time",
".",
"Sleep",
"(",
"10",
"*",
"time",
".",
"Millisecond",
")",
"\n",
"}",
"\n",
"err",
"=",
"os",
".",
"Rename",
"(",
"src",
",",
"dest",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}"
] | // renameFile performs some retries on Windows,
// similar to SafeWriteToFile | [
"renameFile",
"performs",
"some",
"retries",
"on",
"Windows",
"similar",
"to",
"SafeWriteToFile"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/util_windows.go#L151-L164 |
159,235 | keybase/client | go/libkb/util_windows.go | notifyShell | func notifyShell(path string) {
pathEncoded := utf16.Encode([]rune(path))
if len(pathEncoded) > 0 {
shChangeNotifyProc.Call(
uintptr(0x00002000), // SHCNE_UPDATEITEM
uintptr(0x0005), // SHCNF_PATHW
uintptr(unsafe.Pointer(&pathEncoded[0])),
0)
}
} | go | func notifyShell(path string) {
pathEncoded := utf16.Encode([]rune(path))
if len(pathEncoded) > 0 {
shChangeNotifyProc.Call(
uintptr(0x00002000), // SHCNE_UPDATEITEM
uintptr(0x0005), // SHCNF_PATHW
uintptr(unsafe.Pointer(&pathEncoded[0])),
0)
}
} | [
"func",
"notifyShell",
"(",
"path",
"string",
")",
"{",
"pathEncoded",
":=",
"utf16",
".",
"Encode",
"(",
"[",
"]",
"rune",
"(",
"path",
")",
")",
"\n",
"if",
"len",
"(",
"pathEncoded",
")",
">",
"0",
"{",
"shChangeNotifyProc",
".",
"Call",
"(",
"uintptr",
"(",
"0x00002000",
")",
",",
"// SHCNE_UPDATEITEM",
"uintptr",
"(",
"0x0005",
")",
",",
"// SHCNF_PATHW",
"uintptr",
"(",
"unsafe",
".",
"Pointer",
"(",
"&",
"pathEncoded",
"[",
"0",
"]",
")",
")",
",",
"0",
")",
"\n",
"}",
"\n",
"}"
] | // Notify the shell that the thing located at path has changed | [
"Notify",
"the",
"shell",
"that",
"the",
"thing",
"located",
"at",
"path",
"has",
"changed"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/util_windows.go#L189-L198 |
159,236 | keybase/client | go/libkb/util_windows.go | ChangeMountIcon | func ChangeMountIcon(oldMount string, newMount string) error {
if oldMount != "" {
// DeleteKey doesn't work if there are subkeys
registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+oldMount[:1]+`\DefaultIcon`)
registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+oldMount[:1]+`\DefaultLabel`)
registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+oldMount[:1])
notifyShell(oldMount)
}
if newMount == "" {
return nil
}
k, _, err := registry.CreateKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+newMount[:1]+`\DefaultIcon`, registry.SET_VALUE|registry.CREATE_SUB_KEY|registry.WRITE)
defer k.Close()
if err != nil {
return err
}
keybaseExe, err := utils.BinPath()
if err != nil {
return err
}
// Use the second icon bound into keybase.exe - hence the 1
err = k.SetStringValue("", keybaseExe+",1")
if err != nil {
return err
}
// Also give a nice label
k2, _, err := registry.CreateKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+newMount[:1]+`\DefaultLabel`, registry.SET_VALUE|registry.CREATE_SUB_KEY|registry.WRITE)
defer k2.Close()
err = k2.SetStringValue("", "Keybase")
notifyShell(newMount)
return err
} | go | func ChangeMountIcon(oldMount string, newMount string) error {
if oldMount != "" {
// DeleteKey doesn't work if there are subkeys
registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+oldMount[:1]+`\DefaultIcon`)
registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+oldMount[:1]+`\DefaultLabel`)
registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+oldMount[:1])
notifyShell(oldMount)
}
if newMount == "" {
return nil
}
k, _, err := registry.CreateKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+newMount[:1]+`\DefaultIcon`, registry.SET_VALUE|registry.CREATE_SUB_KEY|registry.WRITE)
defer k.Close()
if err != nil {
return err
}
keybaseExe, err := utils.BinPath()
if err != nil {
return err
}
// Use the second icon bound into keybase.exe - hence the 1
err = k.SetStringValue("", keybaseExe+",1")
if err != nil {
return err
}
// Also give a nice label
k2, _, err := registry.CreateKey(registry.CURRENT_USER, `SOFTWARE\Classes\Applications\Explorer.exe\Drives\`+newMount[:1]+`\DefaultLabel`, registry.SET_VALUE|registry.CREATE_SUB_KEY|registry.WRITE)
defer k2.Close()
err = k2.SetStringValue("", "Keybase")
notifyShell(newMount)
return err
} | [
"func",
"ChangeMountIcon",
"(",
"oldMount",
"string",
",",
"newMount",
"string",
")",
"error",
"{",
"if",
"oldMount",
"!=",
"\"",
"\"",
"{",
"// DeleteKey doesn't work if there are subkeys",
"registry",
".",
"DeleteKey",
"(",
"registry",
".",
"CURRENT_USER",
",",
"`SOFTWARE\\Classes\\Applications\\Explorer.exe\\Drives\\`",
"+",
"oldMount",
"[",
":",
"1",
"]",
"+",
"`\\DefaultIcon`",
")",
"\n",
"registry",
".",
"DeleteKey",
"(",
"registry",
".",
"CURRENT_USER",
",",
"`SOFTWARE\\Classes\\Applications\\Explorer.exe\\Drives\\`",
"+",
"oldMount",
"[",
":",
"1",
"]",
"+",
"`\\DefaultLabel`",
")",
"\n",
"registry",
".",
"DeleteKey",
"(",
"registry",
".",
"CURRENT_USER",
",",
"`SOFTWARE\\Classes\\Applications\\Explorer.exe\\Drives\\`",
"+",
"oldMount",
"[",
":",
"1",
"]",
")",
"\n",
"notifyShell",
"(",
"oldMount",
")",
"\n",
"}",
"\n",
"if",
"newMount",
"==",
"\"",
"\"",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"k",
",",
"_",
",",
"err",
":=",
"registry",
".",
"CreateKey",
"(",
"registry",
".",
"CURRENT_USER",
",",
"`SOFTWARE\\Classes\\Applications\\Explorer.exe\\Drives\\`",
"+",
"newMount",
"[",
":",
"1",
"]",
"+",
"`\\DefaultIcon`",
",",
"registry",
".",
"SET_VALUE",
"|",
"registry",
".",
"CREATE_SUB_KEY",
"|",
"registry",
".",
"WRITE",
")",
"\n",
"defer",
"k",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"keybaseExe",
",",
"err",
":=",
"utils",
".",
"BinPath",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"// Use the second icon bound into keybase.exe - hence the 1",
"err",
"=",
"k",
".",
"SetStringValue",
"(",
"\"",
"\"",
",",
"keybaseExe",
"+",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Also give a nice label",
"k2",
",",
"_",
",",
"err",
":=",
"registry",
".",
"CreateKey",
"(",
"registry",
".",
"CURRENT_USER",
",",
"`SOFTWARE\\Classes\\Applications\\Explorer.exe\\Drives\\`",
"+",
"newMount",
"[",
":",
"1",
"]",
"+",
"`\\DefaultLabel`",
",",
"registry",
".",
"SET_VALUE",
"|",
"registry",
".",
"CREATE_SUB_KEY",
"|",
"registry",
".",
"WRITE",
")",
"\n",
"defer",
"k2",
".",
"Close",
"(",
")",
"\n",
"err",
"=",
"k2",
".",
"SetStringValue",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"notifyShell",
"(",
"newMount",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // Manipulate registry entries to reflect the mount point icon in the shell | [
"Manipulate",
"registry",
"entries",
"to",
"reflect",
"the",
"mount",
"point",
"icon",
"in",
"the",
"shell"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/util_windows.go#L201-L233 |
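ChangeMountIcon is plain registry bookkeeping: create or delete per-drive keys under Explorer's Drives hive and set their default values, then poke the shell. The standalone sketch below shows only the label half, using the same registry API as the row; the golang.org/x/sys/windows/registry import path and the drive letter are assumptions, and the program builds on Windows only.

```go
//go:build windows

// Minimal sketch of the registry write ChangeMountIcon performs for the
// drive label: create the per-drive DefaultLabel key and set its default
// string value. The import path golang.org/x/sys/windows/registry is an
// assumption based on the identifiers above; the drive letter is an example.
package main

import (
	"log"

	"golang.org/x/sys/windows/registry"
)

func setDriveLabel(mount, label string) error {
	path := `SOFTWARE\Classes\Applications\Explorer.exe\Drives\` + mount[:1] + `\DefaultLabel`
	k, _, err := registry.CreateKey(
		registry.CURRENT_USER, path,
		registry.SET_VALUE|registry.CREATE_SUB_KEY|registry.WRITE)
	if err != nil {
		return err
	}
	defer k.Close()
	return k.SetStringValue("", label)
}

func main() {
	if err := setDriveLabel("K:", "Keybase"); err != nil {
		log.Fatal(err)
	}
}
```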
159,237 | keybase/client | go/kbfs/libfuse/folderlist.go | PathType | func (fl *FolderList) PathType() tlfhandle.PathType {
switch fl.tlfType {
case tlf.Private:
return tlfhandle.PrivatePathType
case tlf.Public:
return tlfhandle.PublicPathType
case tlf.SingleTeam:
return tlfhandle.SingleTeamPathType
default:
panic(fmt.Sprintf("Unsupported tlf type: %s", fl.tlfType))
}
} | go | func (fl *FolderList) PathType() tlfhandle.PathType {
switch fl.tlfType {
case tlf.Private:
return tlfhandle.PrivatePathType
case tlf.Public:
return tlfhandle.PublicPathType
case tlf.SingleTeam:
return tlfhandle.SingleTeamPathType
default:
panic(fmt.Sprintf("Unsupported tlf type: %s", fl.tlfType))
}
} | [
"func",
"(",
"fl",
"*",
"FolderList",
")",
"PathType",
"(",
")",
"tlfhandle",
".",
"PathType",
"{",
"switch",
"fl",
".",
"tlfType",
"{",
"case",
"tlf",
".",
"Private",
":",
"return",
"tlfhandle",
".",
"PrivatePathType",
"\n",
"case",
"tlf",
".",
"Public",
":",
"return",
"tlfhandle",
".",
"PublicPathType",
"\n",
"case",
"tlf",
".",
"SingleTeam",
":",
"return",
"tlfhandle",
".",
"SingleTeamPathType",
"\n",
"default",
":",
"panic",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"fl",
".",
"tlfType",
")",
")",
"\n",
"}",
"\n",
"}"
] | // PathType returns PathType for this folder | [
"PathType",
"returns",
"PathType",
"for",
"this",
"folder"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/folderlist.go#L137-L148 |
159,238 | keybase/client | go/kbfs/libfuse/folderlist.go | Create | func (fl *FolderList) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (_ fs.Node, _ fs.Handle, err error) {
fl.fs.vlog.CLogf(ctx, libkb.VLog1, "FL Create")
tlfName := tlf.CanonicalName(req.Name)
defer func() { err = fl.processError(ctx, libkbfs.WriteMode, tlfName, err) }()
if strings.HasPrefix(req.Name, "._") {
// Quietly ignore writes to special macOS files, without
// triggering a notification.
return nil, nil, syscall.ENOENT
}
return nil, nil, libkbfs.NewWriteUnsupportedError(tlfhandle.BuildCanonicalPath(fl.PathType(), string(tlfName)))
} | go | func (fl *FolderList) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (_ fs.Node, _ fs.Handle, err error) {
fl.fs.vlog.CLogf(ctx, libkb.VLog1, "FL Create")
tlfName := tlf.CanonicalName(req.Name)
defer func() { err = fl.processError(ctx, libkbfs.WriteMode, tlfName, err) }()
if strings.HasPrefix(req.Name, "._") {
// Quietly ignore writes to special macOS files, without
// triggering a notification.
return nil, nil, syscall.ENOENT
}
return nil, nil, libkbfs.NewWriteUnsupportedError(tlfhandle.BuildCanonicalPath(fl.PathType(), string(tlfName)))
} | [
"func",
"(",
"fl",
"*",
"FolderList",
")",
"Create",
"(",
"ctx",
"context",
".",
"Context",
",",
"req",
"*",
"fuse",
".",
"CreateRequest",
",",
"resp",
"*",
"fuse",
".",
"CreateResponse",
")",
"(",
"_",
"fs",
".",
"Node",
",",
"_",
"fs",
".",
"Handle",
",",
"err",
"error",
")",
"{",
"fl",
".",
"fs",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
")",
"\n",
"tlfName",
":=",
"tlf",
".",
"CanonicalName",
"(",
"req",
".",
"Name",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"err",
"=",
"fl",
".",
"processError",
"(",
"ctx",
",",
"libkbfs",
".",
"WriteMode",
",",
"tlfName",
",",
"err",
")",
"}",
"(",
")",
"\n",
"if",
"strings",
".",
"HasPrefix",
"(",
"req",
".",
"Name",
",",
"\"",
"\"",
")",
"{",
"// Quietly ignore writes to special macOS files, without",
"// triggering a notification.",
"return",
"nil",
",",
"nil",
",",
"syscall",
".",
"ENOENT",
"\n",
"}",
"\n",
"return",
"nil",
",",
"nil",
",",
"libkbfs",
".",
"NewWriteUnsupportedError",
"(",
"tlfhandle",
".",
"BuildCanonicalPath",
"(",
"fl",
".",
"PathType",
"(",
")",
",",
"string",
"(",
"tlfName",
")",
")",
")",
"\n",
"}"
] | // Create implements the fs.NodeCreater interface for FolderList. | [
"Create",
"implements",
"the",
"fs",
".",
"NodeCreater",
"interface",
"for",
"FolderList",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/folderlist.go#L151-L161 |
159,239 | keybase/client | go/kbfs/libfuse/folderlist.go | Mkdir | func (fl *FolderList) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (_ fs.Node, err error) {
fl.fs.vlog.CLogf(ctx, libkb.VLog1, "FL Mkdir")
tlfName := tlf.CanonicalName(req.Name)
defer func() { err = fl.processError(ctx, libkbfs.WriteMode, tlfName, err) }()
return nil, libkbfs.NewWriteUnsupportedError(tlfhandle.BuildCanonicalPath(fl.PathType(), string(tlfName)))
} | go | func (fl *FolderList) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (_ fs.Node, err error) {
fl.fs.vlog.CLogf(ctx, libkb.VLog1, "FL Mkdir")
tlfName := tlf.CanonicalName(req.Name)
defer func() { err = fl.processError(ctx, libkbfs.WriteMode, tlfName, err) }()
return nil, libkbfs.NewWriteUnsupportedError(tlfhandle.BuildCanonicalPath(fl.PathType(), string(tlfName)))
} | [
"func",
"(",
"fl",
"*",
"FolderList",
")",
"Mkdir",
"(",
"ctx",
"context",
".",
"Context",
",",
"req",
"*",
"fuse",
".",
"MkdirRequest",
")",
"(",
"_",
"fs",
".",
"Node",
",",
"err",
"error",
")",
"{",
"fl",
".",
"fs",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
")",
"\n",
"tlfName",
":=",
"tlf",
".",
"CanonicalName",
"(",
"req",
".",
"Name",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"err",
"=",
"fl",
".",
"processError",
"(",
"ctx",
",",
"libkbfs",
".",
"WriteMode",
",",
"tlfName",
",",
"err",
")",
"}",
"(",
")",
"\n",
"return",
"nil",
",",
"libkbfs",
".",
"NewWriteUnsupportedError",
"(",
"tlfhandle",
".",
"BuildCanonicalPath",
"(",
"fl",
".",
"PathType",
"(",
")",
",",
"string",
"(",
"tlfName",
")",
")",
")",
"\n",
"}"
] | // Mkdir implements the fs.NodeMkdirer interface for FolderList. | [
"Mkdir",
"implements",
"the",
"fs",
".",
"NodeMkdirer",
"interface",
"for",
"FolderList",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/folderlist.go#L164-L169 |
159,240 | keybase/client | go/kbfs/libfuse/folderlist.go | Remove | func (fl *FolderList) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
fl.fs.vlog.CLogf(ctx, libkb.VLog1, "FolderList Remove %s", req.Name)
defer func() { err = fl.fs.processError(ctx, libkbfs.WriteMode, err) }()
h, err := tlfhandle.ParseHandlePreferredQuick(
ctx, fl.fs.config.KBPKI(), fl.fs.config, req.Name, fl.tlfType)
switch err := errors.Cause(err).(type) {
case nil:
func() {
fl.mu.Lock()
defer fl.mu.Unlock()
if tlf, ok := fl.folders[req.Name]; ok {
// Fake future attr calls for this TLF until the user
// actually opens the TLF again, because some OSes (*cough
// OS X cough*) like to do immediate lookup/attr calls
// right after doing a remove, which would otherwise end
// up re-adding the favorite.
tlf.clearStoredDir()
}
}()
// TODO how to handle closing down the folderbranchops
// object? Open files may still exist long after removing
// the favorite.
return fl.fs.config.KBFSOps().DeleteFavorite(ctx, h.ToFavorite())
case idutil.TlfNameNotCanonical:
return nil
default:
return err
}
} | go | func (fl *FolderList) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
fl.fs.vlog.CLogf(ctx, libkb.VLog1, "FolderList Remove %s", req.Name)
defer func() { err = fl.fs.processError(ctx, libkbfs.WriteMode, err) }()
h, err := tlfhandle.ParseHandlePreferredQuick(
ctx, fl.fs.config.KBPKI(), fl.fs.config, req.Name, fl.tlfType)
switch err := errors.Cause(err).(type) {
case nil:
func() {
fl.mu.Lock()
defer fl.mu.Unlock()
if tlf, ok := fl.folders[req.Name]; ok {
// Fake future attr calls for this TLF until the user
// actually opens the TLF again, because some OSes (*cough
// OS X cough*) like to do immediate lookup/attr calls
// right after doing a remove, which would otherwise end
// up re-adding the favorite.
tlf.clearStoredDir()
}
}()
// TODO how to handle closing down the folderbranchops
// object? Open files may still exist long after removing
// the favorite.
return fl.fs.config.KBFSOps().DeleteFavorite(ctx, h.ToFavorite())
case idutil.TlfNameNotCanonical:
return nil
default:
return err
}
} | [
"func",
"(",
"fl",
"*",
"FolderList",
")",
"Remove",
"(",
"ctx",
"context",
".",
"Context",
",",
"req",
"*",
"fuse",
".",
"RemoveRequest",
")",
"(",
"err",
"error",
")",
"{",
"fl",
".",
"fs",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"req",
".",
"Name",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"err",
"=",
"fl",
".",
"fs",
".",
"processError",
"(",
"ctx",
",",
"libkbfs",
".",
"WriteMode",
",",
"err",
")",
"}",
"(",
")",
"\n\n",
"h",
",",
"err",
":=",
"tlfhandle",
".",
"ParseHandlePreferredQuick",
"(",
"ctx",
",",
"fl",
".",
"fs",
".",
"config",
".",
"KBPKI",
"(",
")",
",",
"fl",
".",
"fs",
".",
"config",
",",
"req",
".",
"Name",
",",
"fl",
".",
"tlfType",
")",
"\n\n",
"switch",
"err",
":=",
"errors",
".",
"Cause",
"(",
"err",
")",
".",
"(",
"type",
")",
"{",
"case",
"nil",
":",
"func",
"(",
")",
"{",
"fl",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"fl",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"if",
"tlf",
",",
"ok",
":=",
"fl",
".",
"folders",
"[",
"req",
".",
"Name",
"]",
";",
"ok",
"{",
"// Fake future attr calls for this TLF until the user",
"// actually opens the TLF again, because some OSes (*cough",
"// OS X cough*) like to do immediate lookup/attr calls",
"// right after doing a remove, which would otherwise end",
"// up re-adding the favorite.",
"tlf",
".",
"clearStoredDir",
"(",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"// TODO how to handle closing down the folderbranchops",
"// object? Open files may still exist long after removing",
"// the favorite.",
"return",
"fl",
".",
"fs",
".",
"config",
".",
"KBFSOps",
"(",
")",
".",
"DeleteFavorite",
"(",
"ctx",
",",
"h",
".",
"ToFavorite",
"(",
")",
")",
"\n\n",
"case",
"idutil",
".",
"TlfNameNotCanonical",
":",
"return",
"nil",
"\n\n",
"default",
":",
"return",
"err",
"\n",
"}",
"\n",
"}"
] | // Remove implements the fs.NodeRemover interface for FolderList. | [
"Remove",
"implements",
"the",
"fs",
".",
"NodeRemover",
"interface",
"for",
"FolderList",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfuse/folderlist.go#L287-L320 |
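The FolderList records above (PathType, Create, Mkdir, Remove) are handlers that the bazil.org/fuse serve loop invokes when the kernel issues syscalls under the KBFS mount point; they are not called directly by application code. A minimal, hedged sketch of how such handlers get driven — the function name and mount wiring here are illustration only, not this repository's actual mount code:

import (
	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
)

// serveKBFS mounts the given filesystem and runs the FUSE request loop.
// Create/Mkdir/Remove on FolderList nodes are invoked from inside
// fusefs.Serve when userspace touches paths under the mount point.
func serveKBFS(mountpoint string, filesys fusefs.FS) error {
	conn, err := fuse.Mount(mountpoint)
	if err != nil {
		return err
	}
	defer conn.Close()
	return fusefs.Serve(conn, filesys)
}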
159,241 | keybase/client | go/kbfs/libgit/autogit_manager.go | NewAutogitManager | func NewAutogitManager(
config libkbfs.Config, browserCacheSize int) *AutogitManager {
log := config.MakeLogger("")
browserCache, err := lru.New(browserCacheSize)
if err != nil {
panic(err.Error())
}
sharedCache, err := newLRUSharedInBrowserCache()
if err != nil {
panic(err.Error())
}
return &AutogitManager{
config: config,
log: log,
deferLog: log.CloneWithAddedDepth(1),
registeredFBs: make(map[data.FolderBranch]bool),
repoNodesForWatchedIDs: make(map[libkbfs.NodeID]*repoDirNode),
deleteCancels: make(map[string]context.CancelFunc),
browserCache: browserCache,
sharedInBrowserCache: sharedCache,
}
} | go | func NewAutogitManager(
config libkbfs.Config, browserCacheSize int) *AutogitManager {
log := config.MakeLogger("")
browserCache, err := lru.New(browserCacheSize)
if err != nil {
panic(err.Error())
}
sharedCache, err := newLRUSharedInBrowserCache()
if err != nil {
panic(err.Error())
}
return &AutogitManager{
config: config,
log: log,
deferLog: log.CloneWithAddedDepth(1),
registeredFBs: make(map[data.FolderBranch]bool),
repoNodesForWatchedIDs: make(map[libkbfs.NodeID]*repoDirNode),
deleteCancels: make(map[string]context.CancelFunc),
browserCache: browserCache,
sharedInBrowserCache: sharedCache,
}
} | [
"func",
"NewAutogitManager",
"(",
"config",
"libkbfs",
".",
"Config",
",",
"browserCacheSize",
"int",
")",
"*",
"AutogitManager",
"{",
"log",
":=",
"config",
".",
"MakeLogger",
"(",
"\"",
"\"",
")",
"\n",
"browserCache",
",",
"err",
":=",
"lru",
".",
"New",
"(",
"browserCacheSize",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"sharedCache",
",",
"err",
":=",
"newLRUSharedInBrowserCache",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n\n",
"return",
"&",
"AutogitManager",
"{",
"config",
":",
"config",
",",
"log",
":",
"log",
",",
"deferLog",
":",
"log",
".",
"CloneWithAddedDepth",
"(",
"1",
")",
",",
"registeredFBs",
":",
"make",
"(",
"map",
"[",
"data",
".",
"FolderBranch",
"]",
"bool",
")",
",",
"repoNodesForWatchedIDs",
":",
"make",
"(",
"map",
"[",
"libkbfs",
".",
"NodeID",
"]",
"*",
"repoDirNode",
")",
",",
"deleteCancels",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"context",
".",
"CancelFunc",
")",
",",
"browserCache",
":",
"browserCache",
",",
"sharedInBrowserCache",
":",
"sharedCache",
",",
"}",
"\n",
"}"
] | // NewAutogitManager constructs a new AutogitManager instance. | [
"NewAutogitManager",
"constructs",
"a",
"new",
"AutogitManager",
"instance",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/autogit_manager.go#L76-L98 |
159,242 | keybase/client | go/kbfs/libgit/autogit_manager.go | Shutdown | func (am *AutogitManager) Shutdown() {
am.registryLock.Lock()
defer am.registryLock.Unlock()
am.shutdown = true
for _, cancel := range am.deleteCancels {
cancel()
}
} | go | func (am *AutogitManager) Shutdown() {
am.registryLock.Lock()
defer am.registryLock.Unlock()
am.shutdown = true
for _, cancel := range am.deleteCancels {
cancel()
}
} | [
"func",
"(",
"am",
"*",
"AutogitManager",
")",
"Shutdown",
"(",
")",
"{",
"am",
".",
"registryLock",
".",
"Lock",
"(",
")",
"\n",
"defer",
"am",
".",
"registryLock",
".",
"Unlock",
"(",
")",
"\n",
"am",
".",
"shutdown",
"=",
"true",
"\n",
"for",
"_",
",",
"cancel",
":=",
"range",
"am",
".",
"deleteCancels",
"{",
"cancel",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Shutdown shuts down this manager. | [
"Shutdown",
"shuts",
"down",
"this",
"manager",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/autogit_manager.go#L101-L108 |
159,243 | keybase/client | go/kbfs/libgit/autogit_manager.go | LocalChange | func (am *AutogitManager) LocalChange(
ctx context.Context, node libkbfs.Node, wr libkbfs.WriteRange) {
// Do nothing.
} | go | func (am *AutogitManager) LocalChange(
ctx context.Context, node libkbfs.Node, wr libkbfs.WriteRange) {
// Do nothing.
} | [
"func",
"(",
"am",
"*",
"AutogitManager",
")",
"LocalChange",
"(",
"ctx",
"context",
".",
"Context",
",",
"node",
"libkbfs",
".",
"Node",
",",
"wr",
"libkbfs",
".",
"WriteRange",
")",
"{",
"// Do nothing.",
"}"
] | // LocalChange implements the libkbfs.Observer interface for AutogitManager. | [
"LocalChange",
"implements",
"the",
"libkbfs",
".",
"Observer",
"interface",
"for",
"AutogitManager",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/autogit_manager.go#L223-L226 |
159,244 | keybase/client | go/kbfs/libgit/autogit_manager.go | BatchChanges | func (am *AutogitManager) BatchChanges(
ctx context.Context, _ []libkbfs.NodeChange,
affectedNodeIDs []libkbfs.NodeID) {
nodes, repoNodeIDs := am.getNodesToInvalidate(affectedNodeIDs)
go am.clearInvalidatedBrowsers(repoNodeIDs)
for _, node := range nodes {
node := node
go func() {
ctx := libkbfs.CtxWithRandomIDReplayable(
context.Background(), ctxAutogitIDKey, ctxAutogitOpID, am.log)
am.config.KBFSOps().InvalidateNodeAndChildren(ctx, node)
}()
}
} | go | func (am *AutogitManager) BatchChanges(
ctx context.Context, _ []libkbfs.NodeChange,
affectedNodeIDs []libkbfs.NodeID) {
nodes, repoNodeIDs := am.getNodesToInvalidate(affectedNodeIDs)
go am.clearInvalidatedBrowsers(repoNodeIDs)
for _, node := range nodes {
node := node
go func() {
ctx := libkbfs.CtxWithRandomIDReplayable(
context.Background(), ctxAutogitIDKey, ctxAutogitOpID, am.log)
am.config.KBFSOps().InvalidateNodeAndChildren(ctx, node)
}()
}
} | [
"func",
"(",
"am",
"*",
"AutogitManager",
")",
"BatchChanges",
"(",
"ctx",
"context",
".",
"Context",
",",
"_",
"[",
"]",
"libkbfs",
".",
"NodeChange",
",",
"affectedNodeIDs",
"[",
"]",
"libkbfs",
".",
"NodeID",
")",
"{",
"nodes",
",",
"repoNodeIDs",
":=",
"am",
".",
"getNodesToInvalidate",
"(",
"affectedNodeIDs",
")",
"\n",
"go",
"am",
".",
"clearInvalidatedBrowsers",
"(",
"repoNodeIDs",
")",
"\n",
"for",
"_",
",",
"node",
":=",
"range",
"nodes",
"{",
"node",
":=",
"node",
"\n",
"go",
"func",
"(",
")",
"{",
"ctx",
":=",
"libkbfs",
".",
"CtxWithRandomIDReplayable",
"(",
"context",
".",
"Background",
"(",
")",
",",
"ctxAutogitIDKey",
",",
"ctxAutogitOpID",
",",
"am",
".",
"log",
")",
"\n",
"am",
".",
"config",
".",
"KBFSOps",
"(",
")",
".",
"InvalidateNodeAndChildren",
"(",
"ctx",
",",
"node",
")",
"\n",
"}",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // BatchChanges implements the libkbfs.Observer interface for AutogitManager. | [
"BatchChanges",
"implements",
"the",
"libkbfs",
".",
"Observer",
"interface",
"for",
"AutogitManager",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/autogit_manager.go#L276-L289 |
159,245 | keybase/client | go/kbfs/libgit/autogit_manager.go | GetBrowserForRepo | func (am *AutogitManager) GetBrowserForRepo(
ctx context.Context, gitFS *libfs.FS, repoName string,
branch plumbing.ReferenceName, subdir string) (*libfs.FS, *Browser, error) {
am.browserLock.Lock()
defer am.browserLock.Unlock()
return am.getBrowserForRepoLocked(ctx, gitFS, repoName, branch, subdir)
} | go | func (am *AutogitManager) GetBrowserForRepo(
ctx context.Context, gitFS *libfs.FS, repoName string,
branch plumbing.ReferenceName, subdir string) (*libfs.FS, *Browser, error) {
am.browserLock.Lock()
defer am.browserLock.Unlock()
return am.getBrowserForRepoLocked(ctx, gitFS, repoName, branch, subdir)
} | [
"func",
"(",
"am",
"*",
"AutogitManager",
")",
"GetBrowserForRepo",
"(",
"ctx",
"context",
".",
"Context",
",",
"gitFS",
"*",
"libfs",
".",
"FS",
",",
"repoName",
"string",
",",
"branch",
"plumbing",
".",
"ReferenceName",
",",
"subdir",
"string",
")",
"(",
"*",
"libfs",
".",
"FS",
",",
"*",
"Browser",
",",
"error",
")",
"{",
"am",
".",
"browserLock",
".",
"Lock",
"(",
")",
"\n",
"defer",
"am",
".",
"browserLock",
".",
"Unlock",
"(",
")",
"\n",
"return",
"am",
".",
"getBrowserForRepoLocked",
"(",
"ctx",
",",
"gitFS",
",",
"repoName",
",",
"branch",
",",
"subdir",
")",
"\n",
"}"
] | // GetBrowserForRepo returns the root FS for the specified repo and a
// `Browser` for the branch and subdir. | [
"GetBrowserForRepo",
"returns",
"the",
"root",
"FS",
"for",
"the",
"specified",
"repo",
"and",
"a",
"Browser",
"for",
"the",
"branch",
"and",
"subdir",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/autogit_manager.go#L356-L362 |
159,246 | keybase/client | go/kbfs/libgit/autogit_manager.go | StartAutogit | func StartAutogit(config libkbfs.Config, browserCacheSize int) func() {
am := NewAutogitManager(config, browserCacheSize)
rw := rootWrapper{am}
config.AddRootNodeWrapper(rw.wrap)
return am.Shutdown
} | go | func StartAutogit(config libkbfs.Config, browserCacheSize int) func() {
am := NewAutogitManager(config, browserCacheSize)
rw := rootWrapper{am}
config.AddRootNodeWrapper(rw.wrap)
return am.Shutdown
} | [
"func",
"StartAutogit",
"(",
"config",
"libkbfs",
".",
"Config",
",",
"browserCacheSize",
"int",
")",
"func",
"(",
")",
"{",
"am",
":=",
"NewAutogitManager",
"(",
"config",
",",
"browserCacheSize",
")",
"\n",
"rw",
":=",
"rootWrapper",
"{",
"am",
"}",
"\n",
"config",
".",
"AddRootNodeWrapper",
"(",
"rw",
".",
"wrap",
")",
"\n",
"return",
"am",
".",
"Shutdown",
"\n",
"}"
] | // StartAutogit launches autogit, and returns a function that should
// be called on shutdown. | [
"StartAutogit",
"launches",
"autogit",
"and",
"returns",
"a",
"function",
"that",
"should",
"be",
"called",
"on",
"shutdown",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/autogit_manager.go#L366-L371 |
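A minimal usage sketch, assuming an already-initialized libkbfs.Config named config obtained elsewhere; the cache size of 25 and the wrapper name are arbitrary illustration values:

import (
	"github.com/keybase/client/go/kbfs/libgit"
	"github.com/keybase/client/go/kbfs/libkbfs"
)

// enableAutogit registers the autogit root-node wrapper on config and
// returns the shutdown func that StartAutogit hands back.
func enableAutogit(config libkbfs.Config) (shutdown func()) {
	return libgit.StartAutogit(config, 25)
}

The returned function should be called (or deferred) when tearing the service down; per AutogitManager.Shutdown above, that cancels any in-flight repo deletions.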
159,247 | keybase/client | go/install/install.go | Description | func (c ComponentName) Description() string {
switch c {
case ComponentNameService:
return "Service"
case ComponentNameKBFS:
return "KBFS"
case ComponentNameApp:
return "App"
case ComponentNameCLI:
return "Command Line"
case ComponentNameUpdater:
return "Updater"
case ComponentNameFuse:
return "Fuse"
case ComponentNameHelper:
return "Privileged Helper Tool"
case ComponentNameKBNM:
return "Browser Native Messaging"
case ComponentNameCLIPaths:
return "Command Line (privileged)"
case ComponentNameRedirector:
return "Redirector (privileged)"
}
return "Unknown"
} | go | func (c ComponentName) Description() string {
switch c {
case ComponentNameService:
return "Service"
case ComponentNameKBFS:
return "KBFS"
case ComponentNameApp:
return "App"
case ComponentNameCLI:
return "Command Line"
case ComponentNameUpdater:
return "Updater"
case ComponentNameFuse:
return "Fuse"
case ComponentNameHelper:
return "Privileged Helper Tool"
case ComponentNameKBNM:
return "Browser Native Messaging"
case ComponentNameCLIPaths:
return "Command Line (privileged)"
case ComponentNameRedirector:
return "Redirector (privileged)"
}
return "Unknown"
} | [
"func",
"(",
"c",
"ComponentName",
")",
"Description",
"(",
")",
"string",
"{",
"switch",
"c",
"{",
"case",
"ComponentNameService",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameKBFS",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameApp",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameCLI",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameUpdater",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameFuse",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameHelper",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameKBNM",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameCLIPaths",
":",
"return",
"\"",
"\"",
"\n",
"case",
"ComponentNameRedirector",
":",
"return",
"\"",
"\"",
"\n",
"}",
"\n",
"return",
"\"",
"\"",
"\n",
"}"
] | // Description returns description for component name | [
"Description",
"returns",
"description",
"for",
"component",
"name"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/install/install.go#L79-L103 |
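A small usage sketch of the mapping from component constants to display labels. The printed identifiers depend on the constants' underlying string values, which are defined elsewhere in the install package; the function name is made up for illustration:

import (
	"fmt"

	"github.com/keybase/client/go/install"
)

func printComponentLabels() {
	for _, c := range []install.ComponentName{
		install.ComponentNameService,
		install.ComponentNameKBFS,
		install.ComponentNameUpdater,
	} {
		// Description maps each constant to its human-readable label.
		fmt.Printf("%s -> %s\n", string(c), c.Description())
	}
}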
159,248 | keybase/client | go/install/install.go | ComponentNameFromString | func ComponentNameFromString(s string) ComponentName {
switch s {
case string(ComponentNameCLI):
return ComponentNameCLI
case string(ComponentNameService):
return ComponentNameService
case string(ComponentNameKBFS):
return ComponentNameKBFS
case string(ComponentNameKBNM):
return ComponentNameKBNM
case string(ComponentNameUpdater):
return ComponentNameUpdater
case string(ComponentNameApp):
return ComponentNameApp
case string(ComponentNameFuse):
return ComponentNameFuse
case string(ComponentNameHelper):
return ComponentNameHelper
case string(ComponentNameCLIPaths):
return ComponentNameCLIPaths
case string(ComponentNameRedirector):
return ComponentNameRedirector
}
return ComponentNameUnknown
} | go | func ComponentNameFromString(s string) ComponentName {
switch s {
case string(ComponentNameCLI):
return ComponentNameCLI
case string(ComponentNameService):
return ComponentNameService
case string(ComponentNameKBFS):
return ComponentNameKBFS
case string(ComponentNameKBNM):
return ComponentNameKBNM
case string(ComponentNameUpdater):
return ComponentNameUpdater
case string(ComponentNameApp):
return ComponentNameApp
case string(ComponentNameFuse):
return ComponentNameFuse
case string(ComponentNameHelper):
return ComponentNameHelper
case string(ComponentNameCLIPaths):
return ComponentNameCLIPaths
case string(ComponentNameRedirector):
return ComponentNameRedirector
}
return ComponentNameUnknown
} | [
"func",
"ComponentNameFromString",
"(",
"s",
"string",
")",
"ComponentName",
"{",
"switch",
"s",
"{",
"case",
"string",
"(",
"ComponentNameCLI",
")",
":",
"return",
"ComponentNameCLI",
"\n",
"case",
"string",
"(",
"ComponentNameService",
")",
":",
"return",
"ComponentNameService",
"\n",
"case",
"string",
"(",
"ComponentNameKBFS",
")",
":",
"return",
"ComponentNameKBFS",
"\n",
"case",
"string",
"(",
"ComponentNameKBNM",
")",
":",
"return",
"ComponentNameKBNM",
"\n",
"case",
"string",
"(",
"ComponentNameUpdater",
")",
":",
"return",
"ComponentNameUpdater",
"\n",
"case",
"string",
"(",
"ComponentNameApp",
")",
":",
"return",
"ComponentNameApp",
"\n",
"case",
"string",
"(",
"ComponentNameFuse",
")",
":",
"return",
"ComponentNameFuse",
"\n",
"case",
"string",
"(",
"ComponentNameHelper",
")",
":",
"return",
"ComponentNameHelper",
"\n",
"case",
"string",
"(",
"ComponentNameCLIPaths",
")",
":",
"return",
"ComponentNameCLIPaths",
"\n",
"case",
"string",
"(",
"ComponentNameRedirector",
")",
":",
"return",
"ComponentNameRedirector",
"\n",
"}",
"\n",
"return",
"ComponentNameUnknown",
"\n",
"}"
] | // ComponentNameFromString returns ComponentName from a string | [
"ComponentNameFromString",
"returns",
"ComponentName",
"from",
"a",
"string"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/install/install.go#L106-L130 |
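Complementing the above, a hedged helper showing the round trip from a string identifier to a display label; unrecognized strings come back as ComponentNameUnknown (whose Description is "Unknown") rather than an error. The helper name is illustrative:

// componentLabel resolves a component identifier to its display name,
// falling back to "Unknown" for strings that match no known component.
func componentLabel(s string) string {
	return install.ComponentNameFromString(s).Description()
}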
159,249 | keybase/client | go/install/install.go | ResolveInstallStatus | func ResolveInstallStatus(version string, bundleVersion string, lastExitStatus string, log Log) (installStatus keybase1.InstallStatus, installAction keybase1.InstallAction, status keybase1.Status) {
installStatus = keybase1.InstallStatus_UNKNOWN
installAction = keybase1.InstallAction_UNKNOWN
if version != "" && bundleVersion != "" {
sv, err := semver.Make(version)
if err != nil {
installStatus = keybase1.InstallStatus_ERROR
installAction = keybase1.InstallAction_REINSTALL
status = keybase1.StatusFromCode(keybase1.StatusCode_SCInvalidVersionError, err.Error())
return
}
bsv, err := semver.Make(bundleVersion)
// Invalid bundle version
if err != nil {
installStatus = keybase1.InstallStatus_ERROR
installAction = keybase1.InstallAction_NONE
status = keybase1.StatusFromCode(keybase1.StatusCode_SCInvalidVersionError, err.Error())
return
}
if bsv.GT(sv) {
installStatus = keybase1.InstallStatus_INSTALLED
installAction = keybase1.InstallAction_UPGRADE
} else if bsv.EQ(sv) {
installStatus = keybase1.InstallStatus_INSTALLED
installAction = keybase1.InstallAction_NONE
} else if bsv.LT(sv) {
// It's ok if we have a bundled version less than what was installed
log.Warning("Bundle version (%s) is less than installed version (%s)", bundleVersion, version)
installStatus = keybase1.InstallStatus_INSTALLED
installAction = keybase1.InstallAction_NONE
}
} else if version != "" && bundleVersion == "" {
installStatus = keybase1.InstallStatus_INSTALLED
} else if version == "" && bundleVersion != "" {
installStatus = keybase1.InstallStatus_NOT_INSTALLED
installAction = keybase1.InstallAction_INSTALL
}
// If we have an unknown install status, then let's try to re-install.
if bundleVersion != "" && installStatus == keybase1.InstallStatus_UNKNOWN && (version != "" || lastExitStatus != "") {
installAction = keybase1.InstallAction_REINSTALL
installStatus = keybase1.InstallStatus_INSTALLED
}
status = keybase1.StatusOK("")
return
} | go | func ResolveInstallStatus(version string, bundleVersion string, lastExitStatus string, log Log) (installStatus keybase1.InstallStatus, installAction keybase1.InstallAction, status keybase1.Status) {
installStatus = keybase1.InstallStatus_UNKNOWN
installAction = keybase1.InstallAction_UNKNOWN
if version != "" && bundleVersion != "" {
sv, err := semver.Make(version)
if err != nil {
installStatus = keybase1.InstallStatus_ERROR
installAction = keybase1.InstallAction_REINSTALL
status = keybase1.StatusFromCode(keybase1.StatusCode_SCInvalidVersionError, err.Error())
return
}
bsv, err := semver.Make(bundleVersion)
// Invalid bundle version
if err != nil {
installStatus = keybase1.InstallStatus_ERROR
installAction = keybase1.InstallAction_NONE
status = keybase1.StatusFromCode(keybase1.StatusCode_SCInvalidVersionError, err.Error())
return
}
if bsv.GT(sv) {
installStatus = keybase1.InstallStatus_INSTALLED
installAction = keybase1.InstallAction_UPGRADE
} else if bsv.EQ(sv) {
installStatus = keybase1.InstallStatus_INSTALLED
installAction = keybase1.InstallAction_NONE
} else if bsv.LT(sv) {
// It's ok if we have a bundled version less than what was installed
log.Warning("Bundle version (%s) is less than installed version (%s)", bundleVersion, version)
installStatus = keybase1.InstallStatus_INSTALLED
installAction = keybase1.InstallAction_NONE
}
} else if version != "" && bundleVersion == "" {
installStatus = keybase1.InstallStatus_INSTALLED
} else if version == "" && bundleVersion != "" {
installStatus = keybase1.InstallStatus_NOT_INSTALLED
installAction = keybase1.InstallAction_INSTALL
}
// If we have an unknown install status, then let's try to re-install.
if bundleVersion != "" && installStatus == keybase1.InstallStatus_UNKNOWN && (version != "" || lastExitStatus != "") {
installAction = keybase1.InstallAction_REINSTALL
installStatus = keybase1.InstallStatus_INSTALLED
}
status = keybase1.StatusOK("")
return
} | [
"func",
"ResolveInstallStatus",
"(",
"version",
"string",
",",
"bundleVersion",
"string",
",",
"lastExitStatus",
"string",
",",
"log",
"Log",
")",
"(",
"installStatus",
"keybase1",
".",
"InstallStatus",
",",
"installAction",
"keybase1",
".",
"InstallAction",
",",
"status",
"keybase1",
".",
"Status",
")",
"{",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_UNKNOWN",
"\n",
"installAction",
"=",
"keybase1",
".",
"InstallAction_UNKNOWN",
"\n",
"if",
"version",
"!=",
"\"",
"\"",
"&&",
"bundleVersion",
"!=",
"\"",
"\"",
"{",
"sv",
",",
"err",
":=",
"semver",
".",
"Make",
"(",
"version",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_ERROR",
"\n",
"installAction",
"=",
"keybase1",
".",
"InstallAction_REINSTALL",
"\n",
"status",
"=",
"keybase1",
".",
"StatusFromCode",
"(",
"keybase1",
".",
"StatusCode_SCInvalidVersionError",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"bsv",
",",
"err",
":=",
"semver",
".",
"Make",
"(",
"bundleVersion",
")",
"\n",
"// Invalid bundle version",
"if",
"err",
"!=",
"nil",
"{",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_ERROR",
"\n",
"installAction",
"=",
"keybase1",
".",
"InstallAction_NONE",
"\n",
"status",
"=",
"keybase1",
".",
"StatusFromCode",
"(",
"keybase1",
".",
"StatusCode_SCInvalidVersionError",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"bsv",
".",
"GT",
"(",
"sv",
")",
"{",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_INSTALLED",
"\n",
"installAction",
"=",
"keybase1",
".",
"InstallAction_UPGRADE",
"\n",
"}",
"else",
"if",
"bsv",
".",
"EQ",
"(",
"sv",
")",
"{",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_INSTALLED",
"\n",
"installAction",
"=",
"keybase1",
".",
"InstallAction_NONE",
"\n",
"}",
"else",
"if",
"bsv",
".",
"LT",
"(",
"sv",
")",
"{",
"// It's ok if we have a bundled version less than what was installed",
"log",
".",
"Warning",
"(",
"\"",
"\"",
",",
"bundleVersion",
",",
"version",
")",
"\n",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_INSTALLED",
"\n",
"installAction",
"=",
"keybase1",
".",
"InstallAction_NONE",
"\n",
"}",
"\n",
"}",
"else",
"if",
"version",
"!=",
"\"",
"\"",
"&&",
"bundleVersion",
"==",
"\"",
"\"",
"{",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_INSTALLED",
"\n",
"}",
"else",
"if",
"version",
"==",
"\"",
"\"",
"&&",
"bundleVersion",
"!=",
"\"",
"\"",
"{",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_NOT_INSTALLED",
"\n",
"installAction",
"=",
"keybase1",
".",
"InstallAction_INSTALL",
"\n",
"}",
"\n\n",
"// If we have an unknown install status, then let's try to re-install.",
"if",
"bundleVersion",
"!=",
"\"",
"\"",
"&&",
"installStatus",
"==",
"keybase1",
".",
"InstallStatus_UNKNOWN",
"&&",
"(",
"version",
"!=",
"\"",
"\"",
"||",
"lastExitStatus",
"!=",
"\"",
"\"",
")",
"{",
"installAction",
"=",
"keybase1",
".",
"InstallAction_REINSTALL",
"\n",
"installStatus",
"=",
"keybase1",
".",
"InstallStatus_INSTALLED",
"\n",
"}",
"\n\n",
"status",
"=",
"keybase1",
".",
"StatusOK",
"(",
"\"",
"\"",
")",
"\n",
"return",
"\n",
"}"
] | // ResolveInstallStatus will determine necessary install actions for the current environment | [
"ResolveInstallStatus",
"will",
"determine",
"necessary",
"install",
"actions",
"for",
"the",
"current",
"environment"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/install/install.go#L133-L179 |
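A hedged sketch of the common caller question, "does the bundled version require an upgrade?". The logger argument is assumed to satisfy the package's Log interface (only Warning is used, on the downgrade path above), and the helper name is an illustration, not part of the package:

import (
	"github.com/keybase/client/go/install"
	"github.com/keybase/client/go/protocol/keybase1"
)

// needsUpgrade reports whether the version bundled with this binary is
// newer than the installed one, per ResolveInstallStatus's semver compare.
func needsUpgrade(installedVersion, bundledVersion string, logger install.Log) bool {
	_, action, _ := install.ResolveInstallStatus(installedVersion, bundledVersion, "", logger)
	return action == keybase1.InstallAction_UPGRADE
}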
159,250 | keybase/client | go/install/install.go | UpdaterBinPath | func UpdaterBinPath() (string, error) {
path, err := BinPath()
if err != nil {
return "", err
}
name, err := updaterBinName()
if err != nil {
return "", err
}
return filepath.Join(filepath.Dir(path), name), nil
} | go | func UpdaterBinPath() (string, error) {
path, err := BinPath()
if err != nil {
return "", err
}
name, err := updaterBinName()
if err != nil {
return "", err
}
return filepath.Join(filepath.Dir(path), name), nil
} | [
"func",
"UpdaterBinPath",
"(",
")",
"(",
"string",
",",
"error",
")",
"{",
"path",
",",
"err",
":=",
"BinPath",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"err",
"\n",
"}",
"\n",
"name",
",",
"err",
":=",
"updaterBinName",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"err",
"\n",
"}",
"\n",
"return",
"filepath",
".",
"Join",
"(",
"filepath",
".",
"Dir",
"(",
"path",
")",
",",
"name",
")",
",",
"nil",
"\n",
"}"
] | // UpdaterBinPath returns the path to the updater executable, which by default is in
// the same directory as the keybase executable. | [
"UpdaterBinPath",
"returns",
"the",
"path",
"to",
"the",
"updater",
"executable",
"by",
"default",
"is",
"in",
"the",
"same",
"directory",
"as",
"the",
"keybase",
"executable",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/install/install.go#L276-L286 |
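Usage is a straightforward lookup; a short sketch (the wrapper and the print statement are only for illustration):

import (
	"fmt"

	"github.com/keybase/client/go/install"
)

func reportUpdaterPath() error {
	// Resolves to <directory of the keybase binary>/<platform updater name>.
	path, err := install.UpdaterBinPath()
	if err != nil {
		return err
	}
	fmt.Println("updater binary:", path)
	return nil
}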
159,251 | keybase/client | go/teams/box_public_summary.go | newBoxPublicSummaryFromTable | func newBoxPublicSummaryFromTable(table boxPublicSummaryTable) (*boxPublicSummary, error) {
ret := boxPublicSummary{
table: table,
}
err := ret.encode()
if err != nil {
return nil, err
}
return &ret, nil
} | go | func newBoxPublicSummaryFromTable(table boxPublicSummaryTable) (*boxPublicSummary, error) {
ret := boxPublicSummary{
table: table,
}
err := ret.encode()
if err != nil {
return nil, err
}
return &ret, nil
} | [
"func",
"newBoxPublicSummaryFromTable",
"(",
"table",
"boxPublicSummaryTable",
")",
"(",
"*",
"boxPublicSummary",
",",
"error",
")",
"{",
"ret",
":=",
"boxPublicSummary",
"{",
"table",
":",
"table",
",",
"}",
"\n",
"err",
":=",
"ret",
".",
"encode",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"&",
"ret",
",",
"nil",
"\n",
"}"
] | // encode only ever gets called while the boxPublicSummary is being constructed. This means
// we don't allow mutation. Thus, we just encode it once, since if ever canonical encoding
// stops working, it won't matter, we'll still get consistent results. | [
"encode",
"only",
"ever",
"gets",
"called",
"with",
"the",
"boxPublicSummary",
"is",
"being",
"constructed",
".",
"This",
"means",
"we",
"don",
"t",
"allow",
"mutation",
".",
"Thus",
"we",
"just",
"encode",
"it",
"once",
"since",
"if",
"ever",
"canonical",
"encoding",
"stops",
"working",
"it",
"won",
"t",
"matter",
"we",
"ll",
"still",
"get",
"consistent",
"results",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/box_public_summary.go#L33-L42 |
159,252 | keybase/client | go/libkb/secret_store_secretservice.go | GetUsersWithStoredSecrets | func (s *SecretStoreRevokableSecretService) GetUsersWithStoredSecrets(mctx MetaContext) (usernames []string, err error) {
defer mctx.TraceTimed("SecretStoreRevokableSecretService.GetUsersWithStoredSecrets", func() error { return err })()
identifierKeystore := s.identifierKeystore(mctx)
suffixedUsernames, err := identifierKeystore.AllKeys(mctx, identifierKeystoreSuffix)
if err != nil {
return nil, err
}
for _, suffixedUsername := range suffixedUsernames {
usernames = append(usernames, strings.TrimSuffix(suffixedUsername, identifierKeystoreSuffix))
}
return usernames, nil
} | go | func (s *SecretStoreRevokableSecretService) GetUsersWithStoredSecrets(mctx MetaContext) (usernames []string, err error) {
defer mctx.TraceTimed("SecretStoreRevokableSecretService.GetUsersWithStoredSecrets", func() error { return err })()
identifierKeystore := s.identifierKeystore(mctx)
suffixedUsernames, err := identifierKeystore.AllKeys(mctx, identifierKeystoreSuffix)
if err != nil {
return nil, err
}
for _, suffixedUsername := range suffixedUsernames {
usernames = append(usernames, strings.TrimSuffix(suffixedUsername, identifierKeystoreSuffix))
}
return usernames, nil
} | [
"func",
"(",
"s",
"*",
"SecretStoreRevokableSecretService",
")",
"GetUsersWithStoredSecrets",
"(",
"mctx",
"MetaContext",
")",
"(",
"usernames",
"[",
"]",
"string",
",",
"err",
"error",
")",
"{",
"defer",
"mctx",
".",
"TraceTimed",
"(",
"\"",
"\"",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n",
"identifierKeystore",
":=",
"s",
".",
"identifierKeystore",
"(",
"mctx",
")",
"\n",
"suffixedUsernames",
",",
"err",
":=",
"identifierKeystore",
".",
"AllKeys",
"(",
"mctx",
",",
"identifierKeystoreSuffix",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"for",
"_",
",",
"suffixedUsername",
":=",
"range",
"suffixedUsernames",
"{",
"usernames",
"=",
"append",
"(",
"usernames",
",",
"strings",
".",
"TrimSuffix",
"(",
"suffixedUsername",
",",
"identifierKeystoreSuffix",
")",
")",
"\n",
"}",
"\n",
"return",
"usernames",
",",
"nil",
"\n",
"}"
] | // Note that in the case of corruption, not all of these usernames may actually
// be able to be logged in as due to the noise file being corrupted, the
// keyring being uninstalled, etc. | [
"Note",
"that",
"in",
"the",
"case",
"of",
"corruption",
"not",
"all",
"of",
"these",
"usernames",
"may",
"actually",
"be",
"able",
"to",
"be",
"logged",
"in",
"as",
"due",
"to",
"the",
"noise",
"file",
"being",
"corrupted",
"the",
"keyring",
"being",
"uninstalled",
"etc",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/secret_store_secretservice.go#L269-L280 |
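A minimal sketch of enumerating the stored-secret users. The MetaContext and the store instance are assumed to come from an initialized libkb global context (this store implementation is the Secret Service keyring backend, per the file name), and the wrapper name is made up:

import (
	"fmt"

	"github.com/keybase/client/go/libkb"
)

func listStoredSecretUsers(mctx libkb.MetaContext, store *libkb.SecretStoreRevokableSecretService) error {
	usernames, err := store.GetUsersWithStoredSecrets(mctx)
	if err != nil {
		return err
	}
	for _, username := range usernames {
		// Each entry is an identifier-keystore key with its suffix stripped.
		fmt.Println("stored secret for:", username)
	}
	return nil
}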
159,253 | keybase/client | go/stellar/build.go | identifyForReview | func identifyForReview(mctx libkb.MetaContext, assertion string,
successCh chan<- struct{},
trackFailCh chan<- struct{},
errCh chan<- error) {
// Goroutines that are blocked on otherwise unreachable channels are not GC'd.
// So use ctx to clean up.
sendSuccess := func() {
mctx.Debug("identifyForReview(%v) -> success", assertion)
select {
case successCh <- struct{}{}:
case <-mctx.Ctx().Done():
}
}
sendTrackFail := func() {
mctx.Debug("identifyForReview(%v) -> fail", assertion)
select {
case trackFailCh <- struct{}{}:
case <-mctx.Ctx().Done():
}
}
sendErr := func(err error) {
mctx.Debug("identifyForReview(%v) -> err %v", assertion, err)
select {
case errCh <- err:
case <-mctx.Ctx().Done():
}
}
mctx.Debug("identifyForReview(%v)", assertion)
reason := fmt.Sprintf("Identify transaction recipient: %s", assertion)
eng := engine.NewResolveThenIdentify2(mctx.G(), &keybase1.Identify2Arg{
UserAssertion: assertion,
CanSuppressUI: true,
NoErrorOnTrackFailure: true, // take heed
Reason: keybase1.IdentifyReason{Reason: reason},
IdentifyBehavior: keybase1.TLFIdentifyBehavior_RESOLVE_AND_CHECK,
})
err := engine.RunEngine2(mctx, eng)
if err != nil {
sendErr(err)
return
}
idRes, err := eng.Result(mctx)
if err != nil {
sendErr(err)
return
}
if idRes == nil {
sendErr(fmt.Errorf("missing identify result"))
return
}
mctx.Debug("identifyForReview: uv: %v", idRes.Upk.Current.ToUserVersion())
if idRes.TrackBreaks != nil {
sendTrackFail()
return
}
sendSuccess()
} | go | func identifyForReview(mctx libkb.MetaContext, assertion string,
successCh chan<- struct{},
trackFailCh chan<- struct{},
errCh chan<- error) {
// Goroutines that are blocked on otherwise unreachable channels are not GC'd.
// So use ctx to clean up.
sendSuccess := func() {
mctx.Debug("identifyForReview(%v) -> success", assertion)
select {
case successCh <- struct{}{}:
case <-mctx.Ctx().Done():
}
}
sendTrackFail := func() {
mctx.Debug("identifyForReview(%v) -> fail", assertion)
select {
case trackFailCh <- struct{}{}:
case <-mctx.Ctx().Done():
}
}
sendErr := func(err error) {
mctx.Debug("identifyForReview(%v) -> err %v", assertion, err)
select {
case errCh <- err:
case <-mctx.Ctx().Done():
}
}
mctx.Debug("identifyForReview(%v)", assertion)
reason := fmt.Sprintf("Identify transaction recipient: %s", assertion)
eng := engine.NewResolveThenIdentify2(mctx.G(), &keybase1.Identify2Arg{
UserAssertion: assertion,
CanSuppressUI: true,
NoErrorOnTrackFailure: true, // take heed
Reason: keybase1.IdentifyReason{Reason: reason},
IdentifyBehavior: keybase1.TLFIdentifyBehavior_RESOLVE_AND_CHECK,
})
err := engine.RunEngine2(mctx, eng)
if err != nil {
sendErr(err)
return
}
idRes, err := eng.Result(mctx)
if err != nil {
sendErr(err)
return
}
if idRes == nil {
sendErr(fmt.Errorf("missing identify result"))
return
}
mctx.Debug("identifyForReview: uv: %v", idRes.Upk.Current.ToUserVersion())
if idRes.TrackBreaks != nil {
sendTrackFail()
return
}
sendSuccess()
} | [
"func",
"identifyForReview",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"assertion",
"string",
",",
"successCh",
"chan",
"<-",
"struct",
"{",
"}",
",",
"trackFailCh",
"chan",
"<-",
"struct",
"{",
"}",
",",
"errCh",
"chan",
"<-",
"error",
")",
"{",
"// Goroutines that are blocked on otherwise unreachable channels are not GC'd.",
"// So use ctx to clean up.",
"sendSuccess",
":=",
"func",
"(",
")",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"assertion",
")",
"\n",
"select",
"{",
"case",
"successCh",
"<-",
"struct",
"{",
"}",
"{",
"}",
":",
"case",
"<-",
"mctx",
".",
"Ctx",
"(",
")",
".",
"Done",
"(",
")",
":",
"}",
"\n",
"}",
"\n",
"sendTrackFail",
":=",
"func",
"(",
")",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"assertion",
")",
"\n",
"select",
"{",
"case",
"trackFailCh",
"<-",
"struct",
"{",
"}",
"{",
"}",
":",
"case",
"<-",
"mctx",
".",
"Ctx",
"(",
")",
".",
"Done",
"(",
")",
":",
"}",
"\n",
"}",
"\n",
"sendErr",
":=",
"func",
"(",
"err",
"error",
")",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"assertion",
",",
"err",
")",
"\n",
"select",
"{",
"case",
"errCh",
"<-",
"err",
":",
"case",
"<-",
"mctx",
".",
"Ctx",
"(",
")",
".",
"Done",
"(",
")",
":",
"}",
"\n",
"}",
"\n\n",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"assertion",
")",
"\n",
"reason",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"assertion",
")",
"\n",
"eng",
":=",
"engine",
".",
"NewResolveThenIdentify2",
"(",
"mctx",
".",
"G",
"(",
")",
",",
"&",
"keybase1",
".",
"Identify2Arg",
"{",
"UserAssertion",
":",
"assertion",
",",
"CanSuppressUI",
":",
"true",
",",
"NoErrorOnTrackFailure",
":",
"true",
",",
"// take heed",
"Reason",
":",
"keybase1",
".",
"IdentifyReason",
"{",
"Reason",
":",
"reason",
"}",
",",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_RESOLVE_AND_CHECK",
",",
"}",
")",
"\n",
"err",
":=",
"engine",
".",
"RunEngine2",
"(",
"mctx",
",",
"eng",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"sendErr",
"(",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"idRes",
",",
"err",
":=",
"eng",
".",
"Result",
"(",
"mctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"sendErr",
"(",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"idRes",
"==",
"nil",
"{",
"sendErr",
"(",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"idRes",
".",
"Upk",
".",
"Current",
".",
"ToUserVersion",
"(",
")",
")",
"\n",
"if",
"idRes",
".",
"TrackBreaks",
"!=",
"nil",
"{",
"sendTrackFail",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"sendSuccess",
"(",
")",
"\n",
"}"
] | // identifyForReview runs identify on a user, looking only for tracking breaks.
// Sends a value to exactly one of the three channels. | [
"identifyForReview",
"runs",
"identify",
"on",
"a",
"user",
"looking",
"only",
"for",
"tracking",
"breaks",
".",
"Sends",
"a",
"value",
"to",
"exactly",
"one",
"of",
"the",
"three",
"channels",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/build.go#L507-L564 |
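Because identifyForReview reports its outcome on exactly one of three channels, a caller inside the stellar package would typically run it in a goroutine and select on the channels, also watching the context. A hedged sketch of that consuming side (the wrapper name and error messages are illustration only):

import (
	"fmt"

	"github.com/keybase/client/go/libkb"
)

func awaitRecipientIdentify(mctx libkb.MetaContext, assertion string) error {
	successCh := make(chan struct{}, 1)
	trackFailCh := make(chan struct{}, 1)
	errCh := make(chan error, 1)

	go identifyForReview(mctx, assertion, successCh, trackFailCh, errCh)

	select {
	case <-successCh:
		return nil // no tracking breaks; the payment review can proceed
	case <-trackFailCh:
		return fmt.Errorf("recipient %q has broken proofs; explicit approval needed", assertion)
	case err := <-errCh:
		return err // identify failed outright
	case <-mctx.Ctx().Done():
		return mctx.Ctx().Err()
	}
}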
159,254 | keybase/client | go/stellar/build.go | isFollowingForReview | func isFollowingForReview(mctx libkb.MetaContext, assertion string) (isFollowing bool, err error) {
// The 'following' check blocks sending, and is not that important, so impose a timeout.
var cancel func()
mctx, cancel = mctx.WithTimeout(time.Second * 5)
defer cancel()
err = mctx.G().GetFullSelfer().WithSelf(mctx.Ctx(), func(u *libkb.User) error {
idTable := u.IDTable()
if idTable == nil {
return nil
}
targetUsername := libkb.NewNormalizedUsername(assertion)
for _, track := range idTable.GetTrackList() {
if trackedUsername, err := track.GetTrackedUsername(); err == nil {
if trackedUsername.Eq(targetUsername) {
isFollowing = true
return nil
}
}
}
return nil
})
return isFollowing, err
} | go | func isFollowingForReview(mctx libkb.MetaContext, assertion string) (isFollowing bool, err error) {
// The 'following' check blocks sending, and is not that important, so impose a timeout.
var cancel func()
mctx, cancel = mctx.WithTimeout(time.Second * 5)
defer cancel()
err = mctx.G().GetFullSelfer().WithSelf(mctx.Ctx(), func(u *libkb.User) error {
idTable := u.IDTable()
if idTable == nil {
return nil
}
targetUsername := libkb.NewNormalizedUsername(assertion)
for _, track := range idTable.GetTrackList() {
if trackedUsername, err := track.GetTrackedUsername(); err == nil {
if trackedUsername.Eq(targetUsername) {
isFollowing = true
return nil
}
}
}
return nil
})
return isFollowing, err
} | [
"func",
"isFollowingForReview",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"assertion",
"string",
")",
"(",
"isFollowing",
"bool",
",",
"err",
"error",
")",
"{",
"// The 'following' check blocks sending, and is not that important, so impose a timeout.",
"var",
"cancel",
"func",
"(",
")",
"\n",
"mctx",
",",
"cancel",
"=",
"mctx",
".",
"WithTimeout",
"(",
"time",
".",
"Second",
"*",
"5",
")",
"\n",
"defer",
"cancel",
"(",
")",
"\n",
"err",
"=",
"mctx",
".",
"G",
"(",
")",
".",
"GetFullSelfer",
"(",
")",
".",
"WithSelf",
"(",
"mctx",
".",
"Ctx",
"(",
")",
",",
"func",
"(",
"u",
"*",
"libkb",
".",
"User",
")",
"error",
"{",
"idTable",
":=",
"u",
".",
"IDTable",
"(",
")",
"\n",
"if",
"idTable",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"targetUsername",
":=",
"libkb",
".",
"NewNormalizedUsername",
"(",
"assertion",
")",
"\n",
"for",
"_",
",",
"track",
":=",
"range",
"idTable",
".",
"GetTrackList",
"(",
")",
"{",
"if",
"trackedUsername",
",",
"err",
":=",
"track",
".",
"GetTrackedUsername",
"(",
")",
";",
"err",
"==",
"nil",
"{",
"if",
"trackedUsername",
".",
"Eq",
"(",
"targetUsername",
")",
"{",
"isFollowing",
"=",
"true",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"return",
"isFollowing",
",",
"err",
"\n",
"}"
] | // Whether the logged-in user is following the recipient.
// Unresolved assertions will yield a false negative. | [
"Whether",
"the",
"logged",
"-",
"in",
"user",
"following",
"the",
"recipient",
".",
"Unresolved",
"assertions",
"will",
"false",
"negative",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/build.go#L568-L590 |
159,255 | keybase/client | go/stellar/build.go | SubtractFeeSoft | func SubtractFeeSoft(mctx libkb.MetaContext, availableStr string, baseFee uint64) string {
available, err := stellarnet.ParseStellarAmount(availableStr)
if err != nil {
mctx.Debug("error parsing available balance: %v", err)
return availableStr
}
available -= int64(baseFee)
if available < 0 {
available = 0
}
return stellarnet.StringFromStellarAmount(available)
} | go | func SubtractFeeSoft(mctx libkb.MetaContext, availableStr string, baseFee uint64) string {
available, err := stellarnet.ParseStellarAmount(availableStr)
if err != nil {
mctx.Debug("error parsing available balance: %v", err)
return availableStr
}
available -= int64(baseFee)
if available < 0 {
available = 0
}
return stellarnet.StringFromStellarAmount(available)
} | [
"func",
"SubtractFeeSoft",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"availableStr",
"string",
",",
"baseFee",
"uint64",
")",
"string",
"{",
"available",
",",
"err",
":=",
"stellarnet",
".",
"ParseStellarAmount",
"(",
"availableStr",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"availableStr",
"\n",
"}",
"\n",
"available",
"-=",
"int64",
"(",
"baseFee",
")",
"\n",
"if",
"available",
"<",
"0",
"{",
"available",
"=",
"0",
"\n",
"}",
"\n",
"return",
"stellarnet",
".",
"StringFromStellarAmount",
"(",
"available",
")",
"\n",
"}"
] | // Subtract baseFee from the available balance.
// This shows the real available balance assuming an intent to send a 1 op tx.
// Does not error out, just shows the inaccurate answer. | [
"Subtract",
"baseFee",
"from",
"the",
"available",
"balance",
".",
"This",
"shows",
"the",
"real",
"available",
"balance",
"assuming",
"an",
"intent",
"to",
"send",
"a",
"1",
"op",
"tx",
".",
"Does",
"not",
"error",
"out",
"just",
"shows",
"the",
"inaccurate",
"answer",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/build.go#L892-L903 |
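A short sketch with concrete numbers. Stellar amounts carry seven decimal places, so a base fee of 100 stroops corresponds to 0.00001 XLM; the exact output string is whatever StringFromStellarAmount produces, and the wrapper name is illustrative:

import (
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/stellar"
)

func spendableAfterFee(mctx libkb.MetaContext) string {
	// With a 10 XLM balance and a 100-stroop base fee, the result is just
	// under 10 XLM; on a parse error the original string is returned as-is.
	return stellar.SubtractFeeSoft(mctx, "10.0", 100)
}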
159,256 | keybase/client | go/stellar/build.go | CheckReadyToSend | func (b *buildPaymentData) CheckReadyToSend(arg stellar1.SendPaymentLocalArg) error {
if !b.ReadyToSend {
if !b.ReadyToReview {
// Payment is not even ready for review.
return fmt.Errorf("this payment is not ready to send")
}
// Payment is ready to review but has not been reviewed.
return fmt.Errorf("this payment has not been reviewed")
}
if b.Frozen == nil {
return fmt.Errorf("payment is ready to send but missing frozen values")
}
if !arg.From.Eq(b.Frozen.From) {
return fmt.Errorf("mismatched from account: %v != %v", arg.From, b.Frozen.From)
}
if arg.To != b.Frozen.To {
return fmt.Errorf("mismatched recipient: %v != %v", arg.To, b.Frozen.To)
}
if arg.ToIsAccountID != b.Frozen.ToIsAccountID {
return fmt.Errorf("mismatches account ID type (expected %v)", b.Frozen.ToIsAccountID)
}
// Check the true amount and asset that will be sent.
// Don't bother checking the display worth. It's finicky and the server does a coarse check.
if arg.Amount != b.Frozen.Amount {
return fmt.Errorf("mismatched amount: %v != %v", arg.Amount, b.Frozen.Amount)
}
if !arg.Asset.SameAsset(b.Frozen.Asset) {
return fmt.Errorf("mismatched asset: %v != %v", arg.Asset, b.Frozen.Asset)
}
return nil
} | go | func (b *buildPaymentData) CheckReadyToSend(arg stellar1.SendPaymentLocalArg) error {
if !b.ReadyToSend {
if !b.ReadyToReview {
// Payment is not even ready for review.
return fmt.Errorf("this payment is not ready to send")
}
// Payment is ready to review but has not been reviewed.
return fmt.Errorf("this payment has not been reviewed")
}
if b.Frozen == nil {
return fmt.Errorf("payment is ready to send but missing frozen values")
}
if !arg.From.Eq(b.Frozen.From) {
return fmt.Errorf("mismatched from account: %v != %v", arg.From, b.Frozen.From)
}
if arg.To != b.Frozen.To {
return fmt.Errorf("mismatched recipient: %v != %v", arg.To, b.Frozen.To)
}
if arg.ToIsAccountID != b.Frozen.ToIsAccountID {
return fmt.Errorf("mismatches account ID type (expected %v)", b.Frozen.ToIsAccountID)
}
// Check the true amount and asset that will be sent.
// Don't bother checking the display worth. It's finicky and the server does a coarse check.
if arg.Amount != b.Frozen.Amount {
return fmt.Errorf("mismatched amount: %v != %v", arg.Amount, b.Frozen.Amount)
}
if !arg.Asset.SameAsset(b.Frozen.Asset) {
return fmt.Errorf("mismatched asset: %v != %v", arg.Asset, b.Frozen.Asset)
}
return nil
} | [
"func",
"(",
"b",
"*",
"buildPaymentData",
")",
"CheckReadyToSend",
"(",
"arg",
"stellar1",
".",
"SendPaymentLocalArg",
")",
"error",
"{",
"if",
"!",
"b",
".",
"ReadyToSend",
"{",
"if",
"!",
"b",
".",
"ReadyToReview",
"{",
"// Payment is not even ready for review.",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"// Payment is ready to review but has not been reviewed.",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"b",
".",
"Frozen",
"==",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"!",
"arg",
".",
"From",
".",
"Eq",
"(",
"b",
".",
"Frozen",
".",
"From",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"arg",
".",
"From",
",",
"b",
".",
"Frozen",
".",
"From",
")",
"\n",
"}",
"\n",
"if",
"arg",
".",
"To",
"!=",
"b",
".",
"Frozen",
".",
"To",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"arg",
".",
"To",
",",
"b",
".",
"Frozen",
".",
"To",
")",
"\n",
"}",
"\n",
"if",
"arg",
".",
"ToIsAccountID",
"!=",
"b",
".",
"Frozen",
".",
"ToIsAccountID",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"b",
".",
"Frozen",
".",
"ToIsAccountID",
")",
"\n",
"}",
"\n",
"// Check the true amount and asset that will be sent.",
"// Don't bother checking the display worth. It's finicky and the server does a coarse check.",
"if",
"arg",
".",
"Amount",
"!=",
"b",
".",
"Frozen",
".",
"Amount",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"arg",
".",
"Amount",
",",
"b",
".",
"Frozen",
".",
"Amount",
")",
"\n",
"}",
"\n",
"if",
"!",
"arg",
".",
"Asset",
".",
"SameAsset",
"(",
"b",
".",
"Frozen",
".",
"Asset",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"arg",
".",
"Asset",
",",
"b",
".",
"Frozen",
".",
"Asset",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // CheckReadyToSend decides whether the frozen payment has been prechecked and
// the Send request matches it. | [
"Ready",
"decides",
"whether",
"the",
"frozen",
"payment",
"has",
"been",
"prechecked",
"and",
"the",
"Send",
"request",
"matches",
"it",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/stellar/build.go#L945-L975 |
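CheckReadyToSend guards against the send path submitting different values than the ones that were reviewed and frozen. Since buildPaymentData is unexported, the sketch below is a hypothetical in-package caller, for illustration only:

// precheckSend refuses to send unless the reviewed, frozen build state
// matches the from/to/amount/asset the caller is now asking to send.
func precheckSend(b *buildPaymentData, arg stellar1.SendPaymentLocalArg) error {
	return b.CheckReadyToSend(arg)
}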
159,257 | keybase/client | go/kbfs/libfs/tlf_edit_history_file.go | GetEncodedTlfEditHistory | func GetEncodedTlfEditHistory(ctx context.Context, config libkbfs.Config,
folderBranch data.FolderBranch) (
data []byte, t time.Time, err error) {
edits, err := config.KBFSOps().GetEditHistory(ctx, folderBranch)
if err != nil {
return nil, time.Time{}, err
}
data, err = PrettyJSON(edits)
return data, time.Time{}, err
} | go | func GetEncodedTlfEditHistory(ctx context.Context, config libkbfs.Config,
folderBranch data.FolderBranch) (
data []byte, t time.Time, err error) {
edits, err := config.KBFSOps().GetEditHistory(ctx, folderBranch)
if err != nil {
return nil, time.Time{}, err
}
data, err = PrettyJSON(edits)
return data, time.Time{}, err
} | [
"func",
"GetEncodedTlfEditHistory",
"(",
"ctx",
"context",
".",
"Context",
",",
"config",
"libkbfs",
".",
"Config",
",",
"folderBranch",
"data",
".",
"FolderBranch",
")",
"(",
"data",
"[",
"]",
"byte",
",",
"t",
"time",
".",
"Time",
",",
"err",
"error",
")",
"{",
"edits",
",",
"err",
":=",
"config",
".",
"KBFSOps",
"(",
")",
".",
"GetEditHistory",
"(",
"ctx",
",",
"folderBranch",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"time",
".",
"Time",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"data",
",",
"err",
"=",
"PrettyJSON",
"(",
"edits",
")",
"\n",
"return",
"data",
",",
"time",
".",
"Time",
"{",
"}",
",",
"err",
"\n",
"}"
] | // GetEncodedTlfEditHistory returns serialized JSON containing the
// file edit history for a folder. | [
"GetEncodedTlfEditHistory",
"returns",
"serialized",
"JSON",
"containing",
"the",
"file",
"edit",
"history",
"for",
"a",
"folder",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/tlf_edit_history_file.go#L17-L27 |
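A minimal sketch dumping a folder's edit history. The config and folder-branch handle are assumed to come from an existing KBFS session, and the import paths are my assumption of where these packages live in this repository:

import (
	"context"
	"fmt"

	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/libfs"
	"github.com/keybase/client/go/kbfs/libkbfs"
)

func printEditHistory(ctx context.Context, config libkbfs.Config, fb data.FolderBranch) error {
	jsonBytes, _, err := libfs.GetEncodedTlfEditHistory(ctx, config, fb)
	if err != nil {
		return err
	}
	// The second return value (a time.Time) is always zero for this file.
	fmt.Println(string(jsonBytes))
	return nil
}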
159,258 | keybase/client | go/kbfs/libkbfs/crypto_common.go | MakeCryptoCommon | func MakeCryptoCommon(
codec kbfscodec.Codec,
blockCryptVersioner blockCryptVersioner) CryptoCommon {
return CryptoCommon{codec, blockCryptVersioner}
} | go | func MakeCryptoCommon(
codec kbfscodec.Codec,
blockCryptVersioner blockCryptVersioner) CryptoCommon {
return CryptoCommon{codec, blockCryptVersioner}
} | [
"func",
"MakeCryptoCommon",
"(",
"codec",
"kbfscodec",
".",
"Codec",
",",
"blockCryptVersioner",
"blockCryptVersioner",
")",
"CryptoCommon",
"{",
"return",
"CryptoCommon",
"{",
"codec",
",",
"blockCryptVersioner",
"}",
"\n",
"}"
] | // MakeCryptoCommon returns a default CryptoCommon object. | [
"MakeCryptoCommon",
"returns",
"a",
"default",
"CryptoCommon",
"object",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L30-L34 |
159,259 | keybase/client | go/kbfs/libkbfs/crypto_common.go | MakeRandomTlfID | func (c CryptoCommon) MakeRandomTlfID(t tlf.Type) (tlf.ID, error) {
return tlf.MakeRandomID(t)
} | go | func (c CryptoCommon) MakeRandomTlfID(t tlf.Type) (tlf.ID, error) {
return tlf.MakeRandomID(t)
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"MakeRandomTlfID",
"(",
"t",
"tlf",
".",
"Type",
")",
"(",
"tlf",
".",
"ID",
",",
"error",
")",
"{",
"return",
"tlf",
".",
"MakeRandomID",
"(",
"t",
")",
"\n",
"}"
] | // MakeRandomTlfID implements the Crypto interface for CryptoCommon. | [
"MakeRandomTlfID",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L37-L39 |
159,260 | keybase/client | go/kbfs/libkbfs/crypto_common.go | MakeBlockRefNonce | func (c CryptoCommon) MakeBlockRefNonce() (nonce kbfsblock.RefNonce, err error) {
return kbfsblock.MakeRefNonce()
} | go | func (c CryptoCommon) MakeBlockRefNonce() (nonce kbfsblock.RefNonce, err error) {
return kbfsblock.MakeRefNonce()
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"MakeBlockRefNonce",
"(",
")",
"(",
"nonce",
"kbfsblock",
".",
"RefNonce",
",",
"err",
"error",
")",
"{",
"return",
"kbfsblock",
".",
"MakeRefNonce",
"(",
")",
"\n",
"}"
] | // MakeBlockRefNonce implements the Crypto interface for CryptoCommon. | [
"MakeBlockRefNonce",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L52-L54 |
159,261 | keybase/client | go/kbfs/libkbfs/crypto_common.go | MakeRandomTLFEphemeralKeys | func (c CryptoCommon) MakeRandomTLFEphemeralKeys() (
kbfscrypto.TLFEphemeralPublicKey, kbfscrypto.TLFEphemeralPrivateKey,
error) {
return kbfscrypto.MakeRandomTLFEphemeralKeys()
} | go | func (c CryptoCommon) MakeRandomTLFEphemeralKeys() (
kbfscrypto.TLFEphemeralPublicKey, kbfscrypto.TLFEphemeralPrivateKey,
error) {
return kbfscrypto.MakeRandomTLFEphemeralKeys()
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"MakeRandomTLFEphemeralKeys",
"(",
")",
"(",
"kbfscrypto",
".",
"TLFEphemeralPublicKey",
",",
"kbfscrypto",
".",
"TLFEphemeralPrivateKey",
",",
"error",
")",
"{",
"return",
"kbfscrypto",
".",
"MakeRandomTLFEphemeralKeys",
"(",
")",
"\n",
"}"
] | // MakeRandomTLFEphemeralKeys implements the Crypto interface for
// CryptoCommon. | [
"MakeRandomTLFEphemeralKeys",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L65-L69 |
159,262 | keybase/client | go/kbfs/libkbfs/crypto_common.go | MakeRandomTLFKeys | func (c CryptoCommon) MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey,
kbfscrypto.TLFPrivateKey, kbfscrypto.TLFCryptKey, error) {
publicKey, privateKey, err := box.GenerateKey(rand.Reader)
if err != nil {
return kbfscrypto.TLFPublicKey{}, kbfscrypto.TLFPrivateKey{},
kbfscrypto.TLFCryptKey{}, errors.WithStack(err)
}
pubKey := kbfscrypto.MakeTLFPublicKey(*publicKey)
privKey := kbfscrypto.MakeTLFPrivateKey(*privateKey)
cryptKey, err := kbfscrypto.MakeRandomTLFCryptKey()
if err != nil {
return kbfscrypto.TLFPublicKey{}, kbfscrypto.TLFPrivateKey{},
kbfscrypto.TLFCryptKey{}, err
}
return pubKey, privKey, cryptKey, nil
} | go | func (c CryptoCommon) MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey,
kbfscrypto.TLFPrivateKey, kbfscrypto.TLFCryptKey, error) {
publicKey, privateKey, err := box.GenerateKey(rand.Reader)
if err != nil {
return kbfscrypto.TLFPublicKey{}, kbfscrypto.TLFPrivateKey{},
kbfscrypto.TLFCryptKey{}, errors.WithStack(err)
}
pubKey := kbfscrypto.MakeTLFPublicKey(*publicKey)
privKey := kbfscrypto.MakeTLFPrivateKey(*privateKey)
cryptKey, err := kbfscrypto.MakeRandomTLFCryptKey()
if err != nil {
return kbfscrypto.TLFPublicKey{}, kbfscrypto.TLFPrivateKey{},
kbfscrypto.TLFCryptKey{}, err
}
return pubKey, privKey, cryptKey, nil
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"MakeRandomTLFKeys",
"(",
")",
"(",
"kbfscrypto",
".",
"TLFPublicKey",
",",
"kbfscrypto",
".",
"TLFPrivateKey",
",",
"kbfscrypto",
".",
"TLFCryptKey",
",",
"error",
")",
"{",
"publicKey",
",",
"privateKey",
",",
"err",
":=",
"box",
".",
"GenerateKey",
"(",
"rand",
".",
"Reader",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"kbfscrypto",
".",
"TLFPublicKey",
"{",
"}",
",",
"kbfscrypto",
".",
"TLFPrivateKey",
"{",
"}",
",",
"kbfscrypto",
".",
"TLFCryptKey",
"{",
"}",
",",
"errors",
".",
"WithStack",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"pubKey",
":=",
"kbfscrypto",
".",
"MakeTLFPublicKey",
"(",
"*",
"publicKey",
")",
"\n",
"privKey",
":=",
"kbfscrypto",
".",
"MakeTLFPrivateKey",
"(",
"*",
"privateKey",
")",
"\n\n",
"cryptKey",
",",
"err",
":=",
"kbfscrypto",
".",
"MakeRandomTLFCryptKey",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"kbfscrypto",
".",
"TLFPublicKey",
"{",
"}",
",",
"kbfscrypto",
".",
"TLFPrivateKey",
"{",
"}",
",",
"kbfscrypto",
".",
"TLFCryptKey",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"pubKey",
",",
"privKey",
",",
"cryptKey",
",",
"nil",
"\n",
"}"
] | // MakeRandomTLFKeys implements the Crypto interface for CryptoCommon. | [
"MakeRandomTLFKeys",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L72-L90 |
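A hedged sketch of how a caller might use the key-generation helper documented above; it assumes the code sits in package libkbfs and takes an already-constructed CryptoCommon. The comments about where each key ends up are inferences, not statements from this record.

```go
// Sketch only (assumed package libkbfs): generate fresh key material for a
// new TLF using MakeRandomTLFKeys as documented above.
func exampleNewTLFKeys(cc CryptoCommon) (kbfscrypto.TLFCryptKey, error) {
	pubKey, privKey, cryptKey, err := cc.MakeRandomTLFKeys()
	if err != nil {
		return kbfscrypto.TLFCryptKey{}, err
	}
	// pubKey would be published with the TLF metadata; privKey and cryptKey
	// stay secret (cryptKey is what the private-metadata encryption below uses).
	_, _ = pubKey, privKey
	return cryptKey, nil
}
```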
159,263 | keybase/client | go/kbfs/libkbfs/crypto_common.go | EncryptPrivateMetadata | func (c CryptoCommon) EncryptPrivateMetadata(
pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) (
encryptedPmd kbfscrypto.EncryptedPrivateMetadata, err error) {
encodedPmd, err := c.codec.Encode(pmd)
if err != nil {
return kbfscrypto.EncryptedPrivateMetadata{}, err
}
return kbfscrypto.EncryptEncodedPrivateMetadata(encodedPmd, key)
} | go | func (c CryptoCommon) EncryptPrivateMetadata(
pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) (
encryptedPmd kbfscrypto.EncryptedPrivateMetadata, err error) {
encodedPmd, err := c.codec.Encode(pmd)
if err != nil {
return kbfscrypto.EncryptedPrivateMetadata{}, err
}
return kbfscrypto.EncryptEncodedPrivateMetadata(encodedPmd, key)
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"EncryptPrivateMetadata",
"(",
"pmd",
"PrivateMetadata",
",",
"key",
"kbfscrypto",
".",
"TLFCryptKey",
")",
"(",
"encryptedPmd",
"kbfscrypto",
".",
"EncryptedPrivateMetadata",
",",
"err",
"error",
")",
"{",
"encodedPmd",
",",
"err",
":=",
"c",
".",
"codec",
".",
"Encode",
"(",
"pmd",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"kbfscrypto",
".",
"EncryptedPrivateMetadata",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"kbfscrypto",
".",
"EncryptEncodedPrivateMetadata",
"(",
"encodedPmd",
",",
"key",
")",
"\n",
"}"
] | // EncryptPrivateMetadata implements the Crypto interface for CryptoCommon. | [
"EncryptPrivateMetadata",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L93-L102 |
159,264 | keybase/client | go/kbfs/libkbfs/crypto_common.go | DecryptPrivateMetadata | func (c CryptoCommon) DecryptPrivateMetadata(
encryptedPmd kbfscrypto.EncryptedPrivateMetadata, key kbfscrypto.TLFCryptKey) (
PrivateMetadata, error) {
encodedPrivateMetadata, err := kbfscrypto.DecryptPrivateMetadata(
encryptedPmd, key)
if err != nil {
return PrivateMetadata{}, err
}
var pmd PrivateMetadata
err = c.codec.Decode(encodedPrivateMetadata, &pmd)
if err != nil {
return PrivateMetadata{}, err
}
return pmd, nil
} | go | func (c CryptoCommon) DecryptPrivateMetadata(
encryptedPmd kbfscrypto.EncryptedPrivateMetadata, key kbfscrypto.TLFCryptKey) (
PrivateMetadata, error) {
encodedPrivateMetadata, err := kbfscrypto.DecryptPrivateMetadata(
encryptedPmd, key)
if err != nil {
return PrivateMetadata{}, err
}
var pmd PrivateMetadata
err = c.codec.Decode(encodedPrivateMetadata, &pmd)
if err != nil {
return PrivateMetadata{}, err
}
return pmd, nil
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"DecryptPrivateMetadata",
"(",
"encryptedPmd",
"kbfscrypto",
".",
"EncryptedPrivateMetadata",
",",
"key",
"kbfscrypto",
".",
"TLFCryptKey",
")",
"(",
"PrivateMetadata",
",",
"error",
")",
"{",
"encodedPrivateMetadata",
",",
"err",
":=",
"kbfscrypto",
".",
"DecryptPrivateMetadata",
"(",
"encryptedPmd",
",",
"key",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"PrivateMetadata",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"var",
"pmd",
"PrivateMetadata",
"\n",
"err",
"=",
"c",
".",
"codec",
".",
"Decode",
"(",
"encodedPrivateMetadata",
",",
"&",
"pmd",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"PrivateMetadata",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"pmd",
",",
"nil",
"\n",
"}"
] | // DecryptPrivateMetadata implements the Crypto interface for CryptoCommon. | [
"DecryptPrivateMetadata",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L105-L121 |
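EncryptPrivateMetadata and DecryptPrivateMetadata above are natural inverses. The round-trip sketch below assumes the zero-value PrivateMetadata encodes cleanly and relies on kbfscrypto.MakeRandomTLFCryptKey(), which the MakeRandomTLFKeys record above already calls.

```go
// Sketch only (assumed package libkbfs): encrypt private metadata with a TLF
// crypt key, then decrypt it back.
func examplePmdRoundTrip(cc CryptoCommon) error {
	key, err := kbfscrypto.MakeRandomTLFCryptKey()
	if err != nil {
		return err
	}
	var pmd PrivateMetadata // zero value is enough for illustration
	encPmd, err := cc.EncryptPrivateMetadata(pmd, key)
	if err != nil {
		return err
	}
	decPmd, err := cc.DecryptPrivateMetadata(encPmd, key)
	if err != nil {
		return err
	}
	_ = decPmd // expected to match pmd up to codec defaults
	return nil
}
```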
159,265 | keybase/client | go/kbfs/libkbfs/crypto_common.go | EncryptBlock | func (c CryptoCommon) EncryptBlock(
block data.Block, tlfCryptKey kbfscrypto.TLFCryptKey,
blockServerHalf kbfscrypto.BlockCryptKeyServerHalf) (
plainSize int, encryptedBlock kbfscrypto.EncryptedBlock, err error) {
encodedBlock, err := c.codec.Encode(block)
if err != nil {
return -1, kbfscrypto.EncryptedBlock{}, err
}
paddedBlock, err := kbfscrypto.PadBlock(encodedBlock)
if err != nil {
return -1, kbfscrypto.EncryptedBlock{}, err
}
encryptedBlock, err =
kbfscrypto.EncryptPaddedEncodedBlock(
paddedBlock, tlfCryptKey, blockServerHalf,
c.blockCryptVersioner.BlockCryptVersion())
if err != nil {
return -1, kbfscrypto.EncryptedBlock{}, err
}
plainSize = len(encodedBlock)
return plainSize, encryptedBlock, nil
} | go | func (c CryptoCommon) EncryptBlock(
block data.Block, tlfCryptKey kbfscrypto.TLFCryptKey,
blockServerHalf kbfscrypto.BlockCryptKeyServerHalf) (
plainSize int, encryptedBlock kbfscrypto.EncryptedBlock, err error) {
encodedBlock, err := c.codec.Encode(block)
if err != nil {
return -1, kbfscrypto.EncryptedBlock{}, err
}
paddedBlock, err := kbfscrypto.PadBlock(encodedBlock)
if err != nil {
return -1, kbfscrypto.EncryptedBlock{}, err
}
encryptedBlock, err =
kbfscrypto.EncryptPaddedEncodedBlock(
paddedBlock, tlfCryptKey, blockServerHalf,
c.blockCryptVersioner.BlockCryptVersion())
if err != nil {
return -1, kbfscrypto.EncryptedBlock{}, err
}
plainSize = len(encodedBlock)
return plainSize, encryptedBlock, nil
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"EncryptBlock",
"(",
"block",
"data",
".",
"Block",
",",
"tlfCryptKey",
"kbfscrypto",
".",
"TLFCryptKey",
",",
"blockServerHalf",
"kbfscrypto",
".",
"BlockCryptKeyServerHalf",
")",
"(",
"plainSize",
"int",
",",
"encryptedBlock",
"kbfscrypto",
".",
"EncryptedBlock",
",",
"err",
"error",
")",
"{",
"encodedBlock",
",",
"err",
":=",
"c",
".",
"codec",
".",
"Encode",
"(",
"block",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"-",
"1",
",",
"kbfscrypto",
".",
"EncryptedBlock",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"paddedBlock",
",",
"err",
":=",
"kbfscrypto",
".",
"PadBlock",
"(",
"encodedBlock",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"-",
"1",
",",
"kbfscrypto",
".",
"EncryptedBlock",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"encryptedBlock",
",",
"err",
"=",
"kbfscrypto",
".",
"EncryptPaddedEncodedBlock",
"(",
"paddedBlock",
",",
"tlfCryptKey",
",",
"blockServerHalf",
",",
"c",
".",
"blockCryptVersioner",
".",
"BlockCryptVersion",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"-",
"1",
",",
"kbfscrypto",
".",
"EncryptedBlock",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"plainSize",
"=",
"len",
"(",
"encodedBlock",
")",
"\n",
"return",
"plainSize",
",",
"encryptedBlock",
",",
"nil",
"\n",
"}"
] | // EncryptBlock implements the Crypto interface for CryptoCommon. | [
"EncryptBlock",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L124-L148 |
159,266 | keybase/client | go/kbfs/libkbfs/crypto_common.go | DecryptBlock | func (c CryptoCommon) DecryptBlock(
encryptedBlock kbfscrypto.EncryptedBlock,
tlfCryptKey kbfscrypto.TLFCryptKey,
blockServerHalf kbfscrypto.BlockCryptKeyServerHalf, block data.Block) error {
var paddedBlock []byte
paddedBlock, err := kbfscrypto.DecryptBlock(
encryptedBlock, tlfCryptKey, blockServerHalf)
if err != nil {
return err
}
encodedBlock, err := kbfscrypto.DepadBlock(paddedBlock)
if err != nil {
return err
}
err = c.codec.Decode(encodedBlock, &block)
if err != nil {
return errors.WithStack(BlockDecodeError{err})
}
return nil
} | go | func (c CryptoCommon) DecryptBlock(
encryptedBlock kbfscrypto.EncryptedBlock,
tlfCryptKey kbfscrypto.TLFCryptKey,
blockServerHalf kbfscrypto.BlockCryptKeyServerHalf, block data.Block) error {
var paddedBlock []byte
paddedBlock, err := kbfscrypto.DecryptBlock(
encryptedBlock, tlfCryptKey, blockServerHalf)
if err != nil {
return err
}
encodedBlock, err := kbfscrypto.DepadBlock(paddedBlock)
if err != nil {
return err
}
err = c.codec.Decode(encodedBlock, &block)
if err != nil {
return errors.WithStack(BlockDecodeError{err})
}
return nil
} | [
"func",
"(",
"c",
"CryptoCommon",
")",
"DecryptBlock",
"(",
"encryptedBlock",
"kbfscrypto",
".",
"EncryptedBlock",
",",
"tlfCryptKey",
"kbfscrypto",
".",
"TLFCryptKey",
",",
"blockServerHalf",
"kbfscrypto",
".",
"BlockCryptKeyServerHalf",
",",
"block",
"data",
".",
"Block",
")",
"error",
"{",
"var",
"paddedBlock",
"[",
"]",
"byte",
"\n",
"paddedBlock",
",",
"err",
":=",
"kbfscrypto",
".",
"DecryptBlock",
"(",
"encryptedBlock",
",",
"tlfCryptKey",
",",
"blockServerHalf",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"encodedBlock",
",",
"err",
":=",
"kbfscrypto",
".",
"DepadBlock",
"(",
"paddedBlock",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"err",
"=",
"c",
".",
"codec",
".",
"Decode",
"(",
"encodedBlock",
",",
"&",
"block",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"errors",
".",
"WithStack",
"(",
"BlockDecodeError",
"{",
"err",
"}",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // DecryptBlock implements the Crypto interface for CryptoCommon. | [
"DecryptBlock",
"implements",
"the",
"Crypto",
"interface",
"for",
"CryptoCommon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/crypto_common.go#L151-L172 |
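EncryptBlock and DecryptBlock above also pair up as a round trip. The sketch below assumes data.FileBlock (with a Contents field) is an acceptable data.Block implementation and that kbfscrypto.MakeRandomBlockCryptKeyServerHalf() exists; neither appears in these records, so treat both as assumptions.

```go
// Sketch only (assumed package libkbfs): encrypt a file block, then decrypt
// it back into a fresh block, using the two methods documented above.
func exampleBlockRoundTrip(cc CryptoCommon, tlfKey kbfscrypto.TLFCryptKey) error {
	serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
	if err != nil {
		return err
	}
	block := &data.FileBlock{Contents: []byte("hello")}
	plainSize, encBlock, err := cc.EncryptBlock(block, tlfKey, serverHalf)
	if err != nil {
		return err
	}
	_ = plainSize // size of the encoded block before padding
	var decoded data.FileBlock
	return cc.DecryptBlock(encBlock, tlfKey, serverHalf, &decoded)
}
```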
159,267 | keybase/client | go/chat/convsource.go | patchPaginationLast | func (s *baseConversationSource) patchPaginationLast(ctx context.Context, conv types.UnboxConversationInfo, uid gregor1.UID,
page *chat1.Pagination, msgs []chat1.MessageUnboxed) {
if page == nil || page.Last {
return
}
if len(msgs) == 0 {
s.Debug(ctx, "patchPaginationLast: true - no msgs")
page.Last = true
return
}
expunge := conv.GetExpunge()
if expunge == nil {
s.Debug(ctx, "patchPaginationLast: no expunge info")
return
}
end1 := msgs[0].GetMessageID()
end2 := msgs[len(msgs)-1].GetMessageID()
if end1.Min(end2) <= expunge.Upto {
s.Debug(ctx, "patchPaginationLast: true - hit upto")
// If any message is prior to the nukepoint, say this is the last page.
page.Last = true
}
} | go | func (s *baseConversationSource) patchPaginationLast(ctx context.Context, conv types.UnboxConversationInfo, uid gregor1.UID,
page *chat1.Pagination, msgs []chat1.MessageUnboxed) {
if page == nil || page.Last {
return
}
if len(msgs) == 0 {
s.Debug(ctx, "patchPaginationLast: true - no msgs")
page.Last = true
return
}
expunge := conv.GetExpunge()
if expunge == nil {
s.Debug(ctx, "patchPaginationLast: no expunge info")
return
}
end1 := msgs[0].GetMessageID()
end2 := msgs[len(msgs)-1].GetMessageID()
if end1.Min(end2) <= expunge.Upto {
s.Debug(ctx, "patchPaginationLast: true - hit upto")
// If any message is prior to the nukepoint, say this is the last page.
page.Last = true
}
} | [
"func",
"(",
"s",
"*",
"baseConversationSource",
")",
"patchPaginationLast",
"(",
"ctx",
"context",
".",
"Context",
",",
"conv",
"types",
".",
"UnboxConversationInfo",
",",
"uid",
"gregor1",
".",
"UID",
",",
"page",
"*",
"chat1",
".",
"Pagination",
",",
"msgs",
"[",
"]",
"chat1",
".",
"MessageUnboxed",
")",
"{",
"if",
"page",
"==",
"nil",
"||",
"page",
".",
"Last",
"{",
"return",
"\n",
"}",
"\n",
"if",
"len",
"(",
"msgs",
")",
"==",
"0",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"page",
".",
"Last",
"=",
"true",
"\n",
"return",
"\n",
"}",
"\n",
"expunge",
":=",
"conv",
".",
"GetExpunge",
"(",
")",
"\n",
"if",
"expunge",
"==",
"nil",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"return",
"\n",
"}",
"\n",
"end1",
":=",
"msgs",
"[",
"0",
"]",
".",
"GetMessageID",
"(",
")",
"\n",
"end2",
":=",
"msgs",
"[",
"len",
"(",
"msgs",
")",
"-",
"1",
"]",
".",
"GetMessageID",
"(",
")",
"\n",
"if",
"end1",
".",
"Min",
"(",
"end2",
")",
"<=",
"expunge",
".",
"Upto",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"// If any message is prior to the nukepoint, say this is the last page.",
"page",
".",
"Last",
"=",
"true",
"\n",
"}",
"\n",
"}"
] | // patchPaginationLast turns on page.Last if the messages are before InboxSource's view of Expunge. | [
"patchPaginationLast",
"turns",
"on",
"page",
".",
"Last",
"if",
"the",
"messages",
"are",
"before",
"InboxSource",
"s",
"view",
"of",
"Expunge",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/convsource.go#L163-L185 |
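The rule encoded by patchPaginationLast above is easier to see in isolation: a page is marked last when the oldest message on it falls at or below the conversation's retention nuke point. A simplified, self-contained sketch with plain integer IDs (stand-ins for the chat1 types):

```go
// Simplified illustration of the patchPaginationLast rule above.
// firstID/lastID are the two ends of the page; expungeUpto is the nuke point.
func pageIsLast(firstID, lastID, expungeUpto uint64) bool {
	oldest := firstID
	if lastID < oldest {
		oldest = lastID
	}
	// Any message at or before the nuke point means nothing older is left
	// to fetch, so this page is the last one.
	return oldest <= expungeUpto
}
```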
159,268 | keybase/client | go/chat/convsource.go | completeAttachmentUpload | func (s *HybridConversationSource) completeAttachmentUpload(ctx context.Context, msg chat1.MessageUnboxed) {
if msg.GetMessageType() == chat1.MessageType_ATTACHMENT {
outboxID := msg.OutboxID()
if outboxID != nil {
s.G().AttachmentUploader.Complete(ctx, *outboxID)
}
}
} | go | func (s *HybridConversationSource) completeAttachmentUpload(ctx context.Context, msg chat1.MessageUnboxed) {
if msg.GetMessageType() == chat1.MessageType_ATTACHMENT {
outboxID := msg.OutboxID()
if outboxID != nil {
s.G().AttachmentUploader.Complete(ctx, *outboxID)
}
}
} | [
"func",
"(",
"s",
"*",
"HybridConversationSource",
")",
"completeAttachmentUpload",
"(",
"ctx",
"context",
".",
"Context",
",",
"msg",
"chat1",
".",
"MessageUnboxed",
")",
"{",
"if",
"msg",
".",
"GetMessageType",
"(",
")",
"==",
"chat1",
".",
"MessageType_ATTACHMENT",
"{",
"outboxID",
":=",
"msg",
".",
"OutboxID",
"(",
")",
"\n",
"if",
"outboxID",
"!=",
"nil",
"{",
"s",
".",
"G",
"(",
")",
".",
"AttachmentUploader",
".",
"Complete",
"(",
"ctx",
",",
"*",
"outboxID",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // completeAttachmentUpload removes any attachment previews from pending preview storage | [
"completeAttachmentUpload",
"removes",
"any",
"attachment",
"previews",
"from",
"pending",
"preview",
"storage"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/convsource.go#L418-L425 |
159,269 | keybase/client | go/chat/convsource.go | notifyReactionUpdates | func (s *HybridConversationSource) notifyReactionUpdates(ctx context.Context, uid gregor1.UID,
convID chat1.ConversationID, msgs []chat1.MessageUnboxed) {
s.Debug(ctx, "notifyReactionUpdates: %d msgs to update", len(msgs))
if len(msgs) > 0 {
conv, err := utils.GetVerifiedConv(ctx, s.G(), uid, convID, types.InboxSourceDataSourceAll)
if err != nil {
s.Debug(ctx, "notifyReactionUpdates: failed to get conversations: %s", err)
return
}
msgs, err = s.TransformSupersedes(ctx, conv, uid, msgs)
if err != nil {
s.Debug(ctx, "notifyReactionUpdates: failed to transform supersedes: %s", err)
return
}
reactionUpdates := []chat1.ReactionUpdate{}
for _, msg := range msgs {
if msg.IsValid() {
reactionUpdates = append(reactionUpdates, chat1.ReactionUpdate{
Reactions: msg.Valid().Reactions,
TargetMsgID: msg.GetMessageID(),
})
}
}
if len(reactionUpdates) > 0 {
userReacjis := storage.NewReacjiStore(s.G()).UserReacjis(ctx, uid)
activity := chat1.NewChatActivityWithReactionUpdate(chat1.ReactionUpdateNotif{
UserReacjis: userReacjis,
ReactionUpdates: reactionUpdates,
ConvID: convID,
})
s.G().ActivityNotifier.Activity(ctx, uid, conv.GetTopicType(), &activity,
chat1.ChatActivitySource_LOCAL)
}
}
} | go | func (s *HybridConversationSource) notifyReactionUpdates(ctx context.Context, uid gregor1.UID,
convID chat1.ConversationID, msgs []chat1.MessageUnboxed) {
s.Debug(ctx, "notifyReactionUpdates: %d msgs to update", len(msgs))
if len(msgs) > 0 {
conv, err := utils.GetVerifiedConv(ctx, s.G(), uid, convID, types.InboxSourceDataSourceAll)
if err != nil {
s.Debug(ctx, "notifyReactionUpdates: failed to get conversations: %s", err)
return
}
msgs, err = s.TransformSupersedes(ctx, conv, uid, msgs)
if err != nil {
s.Debug(ctx, "notifyReactionUpdates: failed to transform supersedes: %s", err)
return
}
reactionUpdates := []chat1.ReactionUpdate{}
for _, msg := range msgs {
if msg.IsValid() {
reactionUpdates = append(reactionUpdates, chat1.ReactionUpdate{
Reactions: msg.Valid().Reactions,
TargetMsgID: msg.GetMessageID(),
})
}
}
if len(reactionUpdates) > 0 {
userReacjis := storage.NewReacjiStore(s.G()).UserReacjis(ctx, uid)
activity := chat1.NewChatActivityWithReactionUpdate(chat1.ReactionUpdateNotif{
UserReacjis: userReacjis,
ReactionUpdates: reactionUpdates,
ConvID: convID,
})
s.G().ActivityNotifier.Activity(ctx, uid, conv.GetTopicType(), &activity,
chat1.ChatActivitySource_LOCAL)
}
}
} | [
"func",
"(",
"s",
"*",
"HybridConversationSource",
")",
"notifyReactionUpdates",
"(",
"ctx",
"context",
".",
"Context",
",",
"uid",
"gregor1",
".",
"UID",
",",
"convID",
"chat1",
".",
"ConversationID",
",",
"msgs",
"[",
"]",
"chat1",
".",
"MessageUnboxed",
")",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"len",
"(",
"msgs",
")",
")",
"\n",
"if",
"len",
"(",
"msgs",
")",
">",
"0",
"{",
"conv",
",",
"err",
":=",
"utils",
".",
"GetVerifiedConv",
"(",
"ctx",
",",
"s",
".",
"G",
"(",
")",
",",
"uid",
",",
"convID",
",",
"types",
".",
"InboxSourceDataSourceAll",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"msgs",
",",
"err",
"=",
"s",
".",
"TransformSupersedes",
"(",
"ctx",
",",
"conv",
",",
"uid",
",",
"msgs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"reactionUpdates",
":=",
"[",
"]",
"chat1",
".",
"ReactionUpdate",
"{",
"}",
"\n",
"for",
"_",
",",
"msg",
":=",
"range",
"msgs",
"{",
"if",
"msg",
".",
"IsValid",
"(",
")",
"{",
"reactionUpdates",
"=",
"append",
"(",
"reactionUpdates",
",",
"chat1",
".",
"ReactionUpdate",
"{",
"Reactions",
":",
"msg",
".",
"Valid",
"(",
")",
".",
"Reactions",
",",
"TargetMsgID",
":",
"msg",
".",
"GetMessageID",
"(",
")",
",",
"}",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"len",
"(",
"reactionUpdates",
")",
">",
"0",
"{",
"userReacjis",
":=",
"storage",
".",
"NewReacjiStore",
"(",
"s",
".",
"G",
"(",
")",
")",
".",
"UserReacjis",
"(",
"ctx",
",",
"uid",
")",
"\n",
"activity",
":=",
"chat1",
".",
"NewChatActivityWithReactionUpdate",
"(",
"chat1",
".",
"ReactionUpdateNotif",
"{",
"UserReacjis",
":",
"userReacjis",
",",
"ReactionUpdates",
":",
"reactionUpdates",
",",
"ConvID",
":",
"convID",
",",
"}",
")",
"\n",
"s",
".",
"G",
"(",
")",
".",
"ActivityNotifier",
".",
"Activity",
"(",
"ctx",
",",
"uid",
",",
"conv",
".",
"GetTopicType",
"(",
")",
",",
"&",
"activity",
",",
"chat1",
".",
"ChatActivitySource_LOCAL",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // notifyReactionUpdates notifies the GUI after reactions are received | [
"notifyReactionUpdates",
"notifies",
"the",
"GUI",
"after",
"reactions",
"are",
"received"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/convsource.go#L944-L978 |
159,270 | keybase/client | go/chat/convsource.go | notifyEphemeralPurge | func (s *HybridConversationSource) notifyEphemeralPurge(ctx context.Context, uid gregor1.UID, convID chat1.ConversationID, explodedMsgs []chat1.MessageUnboxed) {
s.Debug(ctx, "notifyEphemeralPurge: exploded: %d", len(explodedMsgs))
if len(explodedMsgs) > 0 {
// Blast out an EphemeralPurgeNotifInfo since it's time sensitive for the UI
// to update.
purgedMsgs := []chat1.UIMessage{}
for _, msg := range explodedMsgs {
purgedMsgs = append(purgedMsgs, utils.PresentMessageUnboxed(ctx, s.G(), msg, uid, convID))
}
act := chat1.NewChatActivityWithEphemeralPurge(chat1.EphemeralPurgeNotifInfo{
ConvID: convID,
Msgs: purgedMsgs,
})
s.G().ActivityNotifier.Activity(ctx, uid, chat1.TopicType_CHAT, &act, chat1.ChatActivitySource_LOCAL)
// Send an additional notification to refresh the thread after we bump
// the local inbox version
if err := storage.NewInbox(s.G()).IncrementLocalConvVersion(ctx, uid, convID); err != nil {
s.Debug(ctx, "notifyEphemeralPurge: unablle to IncrementLocalConvVersion, err", err)
}
s.G().ActivityNotifier.ThreadsStale(ctx, uid, []chat1.ConversationStaleUpdate{
chat1.ConversationStaleUpdate{
ConvID: convID,
UpdateType: chat1.StaleUpdateType_CONVUPDATE,
},
})
s.notifyUpdated(ctx, uid, convID, s.storage.GetExplodedReplies(ctx, convID, uid, explodedMsgs))
}
} | go | func (s *HybridConversationSource) notifyEphemeralPurge(ctx context.Context, uid gregor1.UID, convID chat1.ConversationID, explodedMsgs []chat1.MessageUnboxed) {
s.Debug(ctx, "notifyEphemeralPurge: exploded: %d", len(explodedMsgs))
if len(explodedMsgs) > 0 {
// Blast out an EphemeralPurgeNotifInfo since it's time sensitive for the UI
// to update.
purgedMsgs := []chat1.UIMessage{}
for _, msg := range explodedMsgs {
purgedMsgs = append(purgedMsgs, utils.PresentMessageUnboxed(ctx, s.G(), msg, uid, convID))
}
act := chat1.NewChatActivityWithEphemeralPurge(chat1.EphemeralPurgeNotifInfo{
ConvID: convID,
Msgs: purgedMsgs,
})
s.G().ActivityNotifier.Activity(ctx, uid, chat1.TopicType_CHAT, &act, chat1.ChatActivitySource_LOCAL)
// Send an additional notification to refresh the thread after we bump
// the local inbox version
if err := storage.NewInbox(s.G()).IncrementLocalConvVersion(ctx, uid, convID); err != nil {
s.Debug(ctx, "notifyEphemeralPurge: unablle to IncrementLocalConvVersion, err", err)
}
s.G().ActivityNotifier.ThreadsStale(ctx, uid, []chat1.ConversationStaleUpdate{
chat1.ConversationStaleUpdate{
ConvID: convID,
UpdateType: chat1.StaleUpdateType_CONVUPDATE,
},
})
s.notifyUpdated(ctx, uid, convID, s.storage.GetExplodedReplies(ctx, convID, uid, explodedMsgs))
}
} | [
"func",
"(",
"s",
"*",
"HybridConversationSource",
")",
"notifyEphemeralPurge",
"(",
"ctx",
"context",
".",
"Context",
",",
"uid",
"gregor1",
".",
"UID",
",",
"convID",
"chat1",
".",
"ConversationID",
",",
"explodedMsgs",
"[",
"]",
"chat1",
".",
"MessageUnboxed",
")",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"len",
"(",
"explodedMsgs",
")",
")",
"\n",
"if",
"len",
"(",
"explodedMsgs",
")",
">",
"0",
"{",
"// Blast out an EphemeralPurgeNotifInfo since it's time sensitive for the UI",
"// to update.",
"purgedMsgs",
":=",
"[",
"]",
"chat1",
".",
"UIMessage",
"{",
"}",
"\n",
"for",
"_",
",",
"msg",
":=",
"range",
"explodedMsgs",
"{",
"purgedMsgs",
"=",
"append",
"(",
"purgedMsgs",
",",
"utils",
".",
"PresentMessageUnboxed",
"(",
"ctx",
",",
"s",
".",
"G",
"(",
")",
",",
"msg",
",",
"uid",
",",
"convID",
")",
")",
"\n",
"}",
"\n",
"act",
":=",
"chat1",
".",
"NewChatActivityWithEphemeralPurge",
"(",
"chat1",
".",
"EphemeralPurgeNotifInfo",
"{",
"ConvID",
":",
"convID",
",",
"Msgs",
":",
"purgedMsgs",
",",
"}",
")",
"\n",
"s",
".",
"G",
"(",
")",
".",
"ActivityNotifier",
".",
"Activity",
"(",
"ctx",
",",
"uid",
",",
"chat1",
".",
"TopicType_CHAT",
",",
"&",
"act",
",",
"chat1",
".",
"ChatActivitySource_LOCAL",
")",
"\n\n",
"// Send an additional notification to refresh the thread after we bump",
"// the local inbox version",
"if",
"err",
":=",
"storage",
".",
"NewInbox",
"(",
"s",
".",
"G",
"(",
")",
")",
".",
"IncrementLocalConvVersion",
"(",
"ctx",
",",
"uid",
",",
"convID",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"s",
".",
"G",
"(",
")",
".",
"ActivityNotifier",
".",
"ThreadsStale",
"(",
"ctx",
",",
"uid",
",",
"[",
"]",
"chat1",
".",
"ConversationStaleUpdate",
"{",
"chat1",
".",
"ConversationStaleUpdate",
"{",
"ConvID",
":",
"convID",
",",
"UpdateType",
":",
"chat1",
".",
"StaleUpdateType_CONVUPDATE",
",",
"}",
",",
"}",
")",
"\n",
"s",
".",
"notifyUpdated",
"(",
"ctx",
",",
"uid",
",",
"convID",
",",
"s",
".",
"storage",
".",
"GetExplodedReplies",
"(",
"ctx",
",",
"convID",
",",
"uid",
",",
"explodedMsgs",
")",
")",
"\n",
"}",
"\n",
"}"
] | // notifyEphemeralPurge notifies the GUI after messages are exploded. | [
"notifyEphemeralPurge",
"notifies",
"the",
"GUI",
"after",
"messages",
"are",
"exploded",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/convsource.go#L981-L1009 |
159,271 | keybase/client | go/chat/convsource.go | Expunge | func (s *HybridConversationSource) Expunge(ctx context.Context,
convID chat1.ConversationID, uid gregor1.UID, expunge chat1.Expunge) (err error) {
defer s.Trace(ctx, func() error { return err }, "Expunge")()
s.Debug(ctx, "Expunge: convID: %s uid: %s upto: %v", convID, uid, expunge.Upto)
if expunge.Upto == 0 {
// just get out of here as quickly as possible with a 0 upto
return nil
}
s.lockTab.Acquire(ctx, uid, convID)
defer s.lockTab.Release(ctx, uid, convID)
mergeRes, err := s.storage.Expunge(ctx, convID, uid, expunge)
if err != nil {
return err
}
s.notifyExpunge(ctx, uid, convID, mergeRes)
return nil
} | go | func (s *HybridConversationSource) Expunge(ctx context.Context,
convID chat1.ConversationID, uid gregor1.UID, expunge chat1.Expunge) (err error) {
defer s.Trace(ctx, func() error { return err }, "Expunge")()
s.Debug(ctx, "Expunge: convID: %s uid: %s upto: %v", convID, uid, expunge.Upto)
if expunge.Upto == 0 {
// just get out of here as quickly as possible with a 0 upto
return nil
}
s.lockTab.Acquire(ctx, uid, convID)
defer s.lockTab.Release(ctx, uid, convID)
mergeRes, err := s.storage.Expunge(ctx, convID, uid, expunge)
if err != nil {
return err
}
s.notifyExpunge(ctx, uid, convID, mergeRes)
return nil
} | [
"func",
"(",
"s",
"*",
"HybridConversationSource",
")",
"Expunge",
"(",
"ctx",
"context",
".",
"Context",
",",
"convID",
"chat1",
".",
"ConversationID",
",",
"uid",
"gregor1",
".",
"UID",
",",
"expunge",
"chat1",
".",
"Expunge",
")",
"(",
"err",
"error",
")",
"{",
"defer",
"s",
".",
"Trace",
"(",
"ctx",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
",",
"\"",
"\"",
")",
"(",
")",
"\n",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"convID",
",",
"uid",
",",
"expunge",
".",
"Upto",
")",
"\n",
"if",
"expunge",
".",
"Upto",
"==",
"0",
"{",
"// just get out of here as quickly as possible with a 0 upto",
"return",
"nil",
"\n",
"}",
"\n\n",
"s",
".",
"lockTab",
".",
"Acquire",
"(",
"ctx",
",",
"uid",
",",
"convID",
")",
"\n",
"defer",
"s",
".",
"lockTab",
".",
"Release",
"(",
"ctx",
",",
"uid",
",",
"convID",
")",
"\n",
"mergeRes",
",",
"err",
":=",
"s",
".",
"storage",
".",
"Expunge",
"(",
"ctx",
",",
"convID",
",",
"uid",
",",
"expunge",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"s",
".",
"notifyExpunge",
"(",
"ctx",
",",
"uid",
",",
"convID",
",",
"mergeRes",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // Expunge from storage and maybe notify the gui of staleness | [
"Expunge",
"from",
"storage",
"and",
"maybe",
"notify",
"the",
"gui",
"of",
"staleness"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/convsource.go#L1012-L1030 |
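A caller-side sketch for the Expunge entry above. The chat1.Expunge literal is an assumption; only its Upto field is visible in this record, and the struct may carry other fields that real callers populate.

```go
// Sketch only: retention-delete the local copy of everything prior to `upto`
// in one conversation, using the method signature documented above.
func exampleExpunge(ctx context.Context, cs *HybridConversationSource,
	convID chat1.ConversationID, uid gregor1.UID, upto chat1.MessageID) error {
	return cs.Expunge(ctx, convID, uid, chat1.Expunge{Upto: upto})
}
```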
159,272 | keybase/client | go/chat/convsource.go | mergeMaybeNotify | func (s *HybridConversationSource) mergeMaybeNotify(ctx context.Context,
convID chat1.ConversationID, uid gregor1.UID, msgs []chat1.MessageUnboxed) error {
mergeRes, err := s.storage.Merge(ctx, convID, uid, msgs)
if err != nil {
return err
}
s.notifyExpunge(ctx, uid, convID, mergeRes)
s.notifyEphemeralPurge(ctx, uid, convID, mergeRes.Exploded)
s.notifyReactionUpdates(ctx, uid, convID, mergeRes.ReactionTargets)
s.notifyUpdated(ctx, uid, convID, mergeRes.UnfurlTargets)
s.notifyUpdated(ctx, uid, convID, mergeRes.RepliesAffected)
return nil
} | go | func (s *HybridConversationSource) mergeMaybeNotify(ctx context.Context,
convID chat1.ConversationID, uid gregor1.UID, msgs []chat1.MessageUnboxed) error {
mergeRes, err := s.storage.Merge(ctx, convID, uid, msgs)
if err != nil {
return err
}
s.notifyExpunge(ctx, uid, convID, mergeRes)
s.notifyEphemeralPurge(ctx, uid, convID, mergeRes.Exploded)
s.notifyReactionUpdates(ctx, uid, convID, mergeRes.ReactionTargets)
s.notifyUpdated(ctx, uid, convID, mergeRes.UnfurlTargets)
s.notifyUpdated(ctx, uid, convID, mergeRes.RepliesAffected)
return nil
} | [
"func",
"(",
"s",
"*",
"HybridConversationSource",
")",
"mergeMaybeNotify",
"(",
"ctx",
"context",
".",
"Context",
",",
"convID",
"chat1",
".",
"ConversationID",
",",
"uid",
"gregor1",
".",
"UID",
",",
"msgs",
"[",
"]",
"chat1",
".",
"MessageUnboxed",
")",
"error",
"{",
"mergeRes",
",",
"err",
":=",
"s",
".",
"storage",
".",
"Merge",
"(",
"ctx",
",",
"convID",
",",
"uid",
",",
"msgs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"s",
".",
"notifyExpunge",
"(",
"ctx",
",",
"uid",
",",
"convID",
",",
"mergeRes",
")",
"\n",
"s",
".",
"notifyEphemeralPurge",
"(",
"ctx",
",",
"uid",
",",
"convID",
",",
"mergeRes",
".",
"Exploded",
")",
"\n",
"s",
".",
"notifyReactionUpdates",
"(",
"ctx",
",",
"uid",
",",
"convID",
",",
"mergeRes",
".",
"ReactionTargets",
")",
"\n",
"s",
".",
"notifyUpdated",
"(",
"ctx",
",",
"uid",
",",
"convID",
",",
"mergeRes",
".",
"UnfurlTargets",
")",
"\n",
"s",
".",
"notifyUpdated",
"(",
"ctx",
",",
"uid",
",",
"convID",
",",
"mergeRes",
".",
"RepliesAffected",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // Merge with storage and maybe notify the gui of staleness | [
"Merge",
"with",
"storage",
"and",
"maybe",
"notify",
"the",
"gui",
"of",
"staleness"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/convsource.go#L1033-L1046 |
159,273 | keybase/client | go/chat/convsource.go | ClearFromDelete | func (s *HybridConversationSource) ClearFromDelete(ctx context.Context, uid gregor1.UID,
convID chat1.ConversationID, deleteID chat1.MessageID) bool {
defer s.Trace(ctx, func() error { return nil }, "ClearFromDelete")()
// Check to see if we have the message stored
stored, err := s.storage.FetchMessages(ctx, convID, uid, []chat1.MessageID{deleteID})
if err == nil && stored[0] != nil {
// Any error is grounds to load this guy into the conv loader aggressively
s.Debug(ctx, "ClearFromDelete: delete message stored, doing nothing")
return false
}
// Fire off a background load of the thread with a post hook to delete the bodies cache
s.Debug(ctx, "ClearFromDelete: delete not found, clearing")
p := &chat1.Pagination{Num: s.numExpungeReload}
s.G().ConvLoader.Queue(ctx, types.NewConvLoaderJob(convID, nil /*query */, p, types.ConvLoaderPriorityHighest,
func(ctx context.Context, tv chat1.ThreadView, job types.ConvLoaderJob) {
if len(tv.Messages) == 0 {
return
}
bound := tv.Messages[0].GetMessageID().Min(tv.Messages[len(tv.Messages)-1].GetMessageID())
if err := s.storage.ClearBefore(ctx, convID, uid, bound); err != nil {
s.Debug(ctx, "ClearFromDelete: failed to clear messages: %s", err)
}
}))
return true
} | go | func (s *HybridConversationSource) ClearFromDelete(ctx context.Context, uid gregor1.UID,
convID chat1.ConversationID, deleteID chat1.MessageID) bool {
defer s.Trace(ctx, func() error { return nil }, "ClearFromDelete")()
// Check to see if we have the message stored
stored, err := s.storage.FetchMessages(ctx, convID, uid, []chat1.MessageID{deleteID})
if err == nil && stored[0] != nil {
// Any error is grounds to load this guy into the conv loader aggressively
s.Debug(ctx, "ClearFromDelete: delete message stored, doing nothing")
return false
}
// Fire off a background load of the thread with a post hook to delete the bodies cache
s.Debug(ctx, "ClearFromDelete: delete not found, clearing")
p := &chat1.Pagination{Num: s.numExpungeReload}
s.G().ConvLoader.Queue(ctx, types.NewConvLoaderJob(convID, nil /*query */, p, types.ConvLoaderPriorityHighest,
func(ctx context.Context, tv chat1.ThreadView, job types.ConvLoaderJob) {
if len(tv.Messages) == 0 {
return
}
bound := tv.Messages[0].GetMessageID().Min(tv.Messages[len(tv.Messages)-1].GetMessageID())
if err := s.storage.ClearBefore(ctx, convID, uid, bound); err != nil {
s.Debug(ctx, "ClearFromDelete: failed to clear messages: %s", err)
}
}))
return true
} | [
"func",
"(",
"s",
"*",
"HybridConversationSource",
")",
"ClearFromDelete",
"(",
"ctx",
"context",
".",
"Context",
",",
"uid",
"gregor1",
".",
"UID",
",",
"convID",
"chat1",
".",
"ConversationID",
",",
"deleteID",
"chat1",
".",
"MessageID",
")",
"bool",
"{",
"defer",
"s",
".",
"Trace",
"(",
"ctx",
",",
"func",
"(",
")",
"error",
"{",
"return",
"nil",
"}",
",",
"\"",
"\"",
")",
"(",
")",
"\n\n",
"// Check to see if we have the message stored",
"stored",
",",
"err",
":=",
"s",
".",
"storage",
".",
"FetchMessages",
"(",
"ctx",
",",
"convID",
",",
"uid",
",",
"[",
"]",
"chat1",
".",
"MessageID",
"{",
"deleteID",
"}",
")",
"\n",
"if",
"err",
"==",
"nil",
"&&",
"stored",
"[",
"0",
"]",
"!=",
"nil",
"{",
"// Any error is grounds to load this guy into the conv loader aggressively",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"return",
"false",
"\n",
"}",
"\n\n",
"// Fire off a background load of the thread with a post hook to delete the bodies cache",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"p",
":=",
"&",
"chat1",
".",
"Pagination",
"{",
"Num",
":",
"s",
".",
"numExpungeReload",
"}",
"\n",
"s",
".",
"G",
"(",
")",
".",
"ConvLoader",
".",
"Queue",
"(",
"ctx",
",",
"types",
".",
"NewConvLoaderJob",
"(",
"convID",
",",
"nil",
"/*query */",
",",
"p",
",",
"types",
".",
"ConvLoaderPriorityHighest",
",",
"func",
"(",
"ctx",
"context",
".",
"Context",
",",
"tv",
"chat1",
".",
"ThreadView",
",",
"job",
"types",
".",
"ConvLoaderJob",
")",
"{",
"if",
"len",
"(",
"tv",
".",
"Messages",
")",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n",
"bound",
":=",
"tv",
".",
"Messages",
"[",
"0",
"]",
".",
"GetMessageID",
"(",
")",
".",
"Min",
"(",
"tv",
".",
"Messages",
"[",
"len",
"(",
"tv",
".",
"Messages",
")",
"-",
"1",
"]",
".",
"GetMessageID",
"(",
")",
")",
"\n",
"if",
"err",
":=",
"s",
".",
"storage",
".",
"ClearBefore",
"(",
"ctx",
",",
"convID",
",",
"uid",
",",
"bound",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
")",
")",
"\n",
"return",
"true",
"\n",
"}"
] | // ClearFromDelete clears the current cache if there is a delete that we don't know about
// and returns true to the caller if it schedules a background loader job | [
"ClearFromDelete",
"clears",
"the",
"current",
"cache",
"if",
"there",
"is",
"a",
"delete",
"that",
"we",
"don",
"t",
"know",
"about",
"and",
"returns",
"true",
"to",
"the",
"caller",
"if",
"it",
"schedules",
"a",
"background",
"loader",
"job"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/convsource.go#L1063-L1089 |
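A caller-side sketch for ClearFromDelete above; the meaning of the return value (true when a background conversation load was queued) comes from the docstring, while the surrounding handler shape is assumed.

```go
// Sketch only: when a delete arrives for a message we never stored locally,
// ask the conversation source to clear its cached tail and reload.
// Returns true if a background load was scheduled, in which case the UI will
// later be told the thread is stale.
func exampleHandleUnknownDelete(ctx context.Context, cs *HybridConversationSource,
	uid gregor1.UID, convID chat1.ConversationID, deleteID chat1.MessageID) bool {
	return cs.ClearFromDelete(ctx, uid, convID, deleteID)
}
```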
159,274 | keybase/client | go/kbfs/data/prev_revisions.go | AddRevision | func (pr PrevRevisions) AddRevision(
r, minRev kbfsmd.Revision) (ret PrevRevisions) {
newLength := len(pr)
if newLength < len(minPrevRevisionSlotCounts) {
newLength++
}
ret = make(PrevRevisions, newLength)
copy(ret, pr)
earliestGoodSlot := 0
numDropped := 0
// First we eliminate any revisions in the current list that don't
// make sense anymore, either because they're greater or equal to
// `r`, or they're smaller or equal to `minRev` (and thus have
// been GC'd). For example:
//
// pr = [27, 25, 15, 10, 5] (revision numbers only)
// r = 27
// minRev = 11
//
// After this next block, we should have:
//
// ret = [0, 25, 15, 0, 0]
// earliestGoodSlot = 1
// numDropped = 2
//
// Then the next block of code will trim it appropriately.
for i, prc := range ret {
if prc.Count == 255 {
// This count on this revision is too large, so remove it
// before it overflows. This may happen when revisions
// are repeatedly overwritten when on an unmerged branch,
// as in the case below.
ret[i] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
numDropped++
continue
} else if prc.Revision >= r {
if numDropped > 0 {
panic("Revision too large after dropping one")
}
// The revision number is bigger than expected (e.g. it
// was made on an unmerged branch).
ret[i] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
earliestGoodSlot = i + 1
continue
} else if prc.Revision <= minRev {
// This revision is too old (or is empty), so remove it.
ret[i] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
numDropped++
continue
} else if numDropped > 0 {
panic("Once we've dropped one, we should drop all the rest")
}
// `minRev` < `prc.Revision` < `r`, so we keep it in the new
// slice and increment its count.
ret[i].Count++
}
// Cut out the revisions that are newer than `r` (e.g., because
// they are from an unmerged branch).
//
// Continuing the example above, this code will leave us with:
//
// ret = [25, 15, 0, 0]
if earliestGoodSlot > 0 {
if earliestGoodSlot == len(ret) {
// Always leave at least one empty slot.
earliestGoodSlot--
}
ret = ret[earliestGoodSlot:]
}
// Drop revisions off the end that are too old, but leave an empty
// slot available at the end for shifting everything over and
// putting `r` in slot 0.
//
// Continuing the example above, this code will leave us with:
//
// ret = [25, 15, 0]
if numDropped == len(ret) {
// Leave the first slot available for overwriting.
ret = ret[:1]
} else if numDropped > 1 {
ret = ret[:len(ret)-(numDropped-1)]
}
// Starting at the end, shift revisions to the right if either a)
// that slot is already empty or b) they satisfy the count of the
// slot to the right. If a revision is not going to shifted, but
// it is too close (in terms of count) to the revision on its
// right, just drop it and let the other revisions slide over --
// this makes sure we have a nicely-spaced set of revision numbers
// even when the total number of revisions for the entry is small.
//
// Continuing the example above, this code will leave us with:
//
// ret = [0, 25, 15]
for i := len(ret) - 1; i >= 1; i-- {
// Check if we can shift over the entry in slot i-1.
minCount := minPrevRevisionSlotCounts[i]
if ret[i].Count == 0 || ret[i-1].Count >= minCount {
ret[i], ret[i-1] = ret[i-1], PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
} else if ret[i].Count-ret[i-1].Count < minCount/5 {
// This revision is not being shifted, but it's
// uncomfortablely close to its neighbor on the right, so
// just drop it.
ret[i-1] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
}
}
// Finally, overwrite whatever's left in the first slot with `r`
// and a count of 1.
//
// Continuing the example above, this code will leave us with:
//
// ret = [27, 25, 15]
ret[0] = PrevRevisionAndCount{
Revision: r,
Count: 1,
}
return ret
} | go | func (pr PrevRevisions) AddRevision(
r, minRev kbfsmd.Revision) (ret PrevRevisions) {
newLength := len(pr)
if newLength < len(minPrevRevisionSlotCounts) {
newLength++
}
ret = make(PrevRevisions, newLength)
copy(ret, pr)
earliestGoodSlot := 0
numDropped := 0
// First we eliminate any revisions in the current list that don't
// make sense anymore, either because they're greater or equal to
// `r`, or they're smaller or equal to `minRev` (and thus have
// been GC'd). For example:
//
// pr = [27, 25, 15, 10, 5] (revision numbers only)
// r = 27
// minRev = 11
//
// After this next block, we should have:
//
// ret = [0, 25, 15, 0, 0]
// earliestGoodSlot = 1
// numDropped = 2
//
// Then the next block of code will trim it appropriately.
for i, prc := range ret {
if prc.Count == 255 {
// This count on this revision is too large, so remove it
// before it overflows. This may happen when revisions
// are repeatedly overwritten when on an unmerged branch,
// as in the case below.
ret[i] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
numDropped++
continue
} else if prc.Revision >= r {
if numDropped > 0 {
panic("Revision too large after dropping one")
}
// The revision number is bigger than expected (e.g. it
// was made on an unmerged branch).
ret[i] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
earliestGoodSlot = i + 1
continue
} else if prc.Revision <= minRev {
// This revision is too old (or is empty), so remove it.
ret[i] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
numDropped++
continue
} else if numDropped > 0 {
panic("Once we've dropped one, we should drop all the rest")
}
// `minRev` < `prc.Revision` < `r`, so we keep it in the new
// slice and increment its count.
ret[i].Count++
}
// Cut out the revisions that are newer than `r` (e.g., because
// they are from an unmerged branch).
//
// Continuing the example above, this code will leave us with:
//
// ret = [25, 15, 0, 0]
if earliestGoodSlot > 0 {
if earliestGoodSlot == len(ret) {
// Always leave at least one empty slot.
earliestGoodSlot--
}
ret = ret[earliestGoodSlot:]
}
// Drop revisions off the end that are too old, but leave an empty
// slot available at the end for shifting everything over and
// putting `r` in slot 0.
//
// Continuing the example above, this code will leave us with:
//
// ret = [25, 15, 0]
if numDropped == len(ret) {
// Leave the first slot available for overwriting.
ret = ret[:1]
} else if numDropped > 1 {
ret = ret[:len(ret)-(numDropped-1)]
}
// Starting at the end, shift revisions to the right if either a)
// that slot is already empty or b) they satisfy the count of the
// slot to the right. If a revision is not going to shifted, but
// it is too close (in terms of count) to the revision on its
// right, just drop it and let the other revisions slide over --
// this makes sure we have a nicely-spaced set of revision numbers
// even when the total number of revisions for the entry is small.
//
// Continuing the example above, this code will leave us with:
//
// ret = [0, 25, 15]
for i := len(ret) - 1; i >= 1; i-- {
// Check if we can shift over the entry in slot i-1.
minCount := minPrevRevisionSlotCounts[i]
if ret[i].Count == 0 || ret[i-1].Count >= minCount {
ret[i], ret[i-1] = ret[i-1], PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
} else if ret[i].Count-ret[i-1].Count < minCount/5 {
// This revision is not being shifted, but it's
// uncomfortablely close to its neighbor on the right, so
// just drop it.
ret[i-1] = PrevRevisionAndCount{
Revision: kbfsmd.RevisionUninitialized,
Count: 0,
}
}
}
// Finally, overwrite whatever's left in the first slot with `r`
// and a count of 1.
//
// Continuing the example above, this code will leave us with:
//
// ret = [27, 25, 15]
ret[0] = PrevRevisionAndCount{
Revision: r,
Count: 1,
}
return ret
} | [
"func",
"(",
"pr",
"PrevRevisions",
")",
"AddRevision",
"(",
"r",
",",
"minRev",
"kbfsmd",
".",
"Revision",
")",
"(",
"ret",
"PrevRevisions",
")",
"{",
"newLength",
":=",
"len",
"(",
"pr",
")",
"\n",
"if",
"newLength",
"<",
"len",
"(",
"minPrevRevisionSlotCounts",
")",
"{",
"newLength",
"++",
"\n",
"}",
"\n",
"ret",
"=",
"make",
"(",
"PrevRevisions",
",",
"newLength",
")",
"\n",
"copy",
"(",
"ret",
",",
"pr",
")",
"\n",
"earliestGoodSlot",
":=",
"0",
"\n",
"numDropped",
":=",
"0",
"\n\n",
"// First we eliminate any revisions in the current list that don't",
"// make sense anymore, either because they're greater or equal to",
"// `r`, or they're smaller or equal to `minRev` (and thus have",
"// been GC'd). For example:",
"//",
"// pr = [27, 25, 15, 10, 5] (revision numbers only)",
"// r = 27",
"// minRev = 11",
"//",
"// After this next block, we should have:",
"//",
"// ret = [0, 25, 15, 0, 0]",
"// earliestGoodSlot = 1",
"// numDropped = 2",
"//",
"// Then the next block of code will trim it appropriately.",
"for",
"i",
",",
"prc",
":=",
"range",
"ret",
"{",
"if",
"prc",
".",
"Count",
"==",
"255",
"{",
"// This count on this revision is too large, so remove it",
"// before it overflows. This may happen when revisions",
"// are repeatedly overwritten when on an unmerged branch,",
"// as in the case below.",
"ret",
"[",
"i",
"]",
"=",
"PrevRevisionAndCount",
"{",
"Revision",
":",
"kbfsmd",
".",
"RevisionUninitialized",
",",
"Count",
":",
"0",
",",
"}",
"\n",
"numDropped",
"++",
"\n",
"continue",
"\n",
"}",
"else",
"if",
"prc",
".",
"Revision",
">=",
"r",
"{",
"if",
"numDropped",
">",
"0",
"{",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"// The revision number is bigger than expected (e.g. it",
"// was made on an unmerged branch).",
"ret",
"[",
"i",
"]",
"=",
"PrevRevisionAndCount",
"{",
"Revision",
":",
"kbfsmd",
".",
"RevisionUninitialized",
",",
"Count",
":",
"0",
",",
"}",
"\n",
"earliestGoodSlot",
"=",
"i",
"+",
"1",
"\n",
"continue",
"\n",
"}",
"else",
"if",
"prc",
".",
"Revision",
"<=",
"minRev",
"{",
"// This revision is too old (or is empty), so remove it.",
"ret",
"[",
"i",
"]",
"=",
"PrevRevisionAndCount",
"{",
"Revision",
":",
"kbfsmd",
".",
"RevisionUninitialized",
",",
"Count",
":",
"0",
",",
"}",
"\n",
"numDropped",
"++",
"\n",
"continue",
"\n",
"}",
"else",
"if",
"numDropped",
">",
"0",
"{",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"// `minRev` < `prc.Revision` < `r`, so we keep it in the new",
"// slice and increment its count.",
"ret",
"[",
"i",
"]",
".",
"Count",
"++",
"\n",
"}",
"\n\n",
"// Cut out the revisions that are newer than `r` (e.g., because",
"// they are from an unmerged branch).",
"//",
"// Continuing the example above, this code will leave us with:",
"//",
"// ret = [25, 15, 0, 0]",
"if",
"earliestGoodSlot",
">",
"0",
"{",
"if",
"earliestGoodSlot",
"==",
"len",
"(",
"ret",
")",
"{",
"// Always leave at least one empty slot.",
"earliestGoodSlot",
"--",
"\n",
"}",
"\n",
"ret",
"=",
"ret",
"[",
"earliestGoodSlot",
":",
"]",
"\n",
"}",
"\n\n",
"// Drop revisions off the end that are too old, but leave an empty",
"// slot available at the end for shifting everything over and",
"// putting `r` in slot 0.",
"//",
"// Continuing the example above, this code will leave us with:",
"//",
"// ret = [25, 15, 0]",
"if",
"numDropped",
"==",
"len",
"(",
"ret",
")",
"{",
"// Leave the first slot available for overwriting.",
"ret",
"=",
"ret",
"[",
":",
"1",
"]",
"\n",
"}",
"else",
"if",
"numDropped",
">",
"1",
"{",
"ret",
"=",
"ret",
"[",
":",
"len",
"(",
"ret",
")",
"-",
"(",
"numDropped",
"-",
"1",
")",
"]",
"\n",
"}",
"\n\n",
"// Starting at the end, shift revisions to the right if either a)",
"// that slot is already empty or b) they satisfy the count of the",
"// slot to the right. If a revision is not going to shifted, but",
"// it is too close (in terms of count) to the revision on its",
"// right, just drop it and let the other revisions slide over --",
"// this makes sure we have a nicely-spaced set of revision numbers",
"// even when the total number of revisions for the entry is small.",
"//",
"// Continuing the example above, this code will leave us with:",
"//",
"// ret = [0, 25, 15]",
"for",
"i",
":=",
"len",
"(",
"ret",
")",
"-",
"1",
";",
"i",
">=",
"1",
";",
"i",
"--",
"{",
"// Check if we can shift over the entry in slot i-1.",
"minCount",
":=",
"minPrevRevisionSlotCounts",
"[",
"i",
"]",
"\n",
"if",
"ret",
"[",
"i",
"]",
".",
"Count",
"==",
"0",
"||",
"ret",
"[",
"i",
"-",
"1",
"]",
".",
"Count",
">=",
"minCount",
"{",
"ret",
"[",
"i",
"]",
",",
"ret",
"[",
"i",
"-",
"1",
"]",
"=",
"ret",
"[",
"i",
"-",
"1",
"]",
",",
"PrevRevisionAndCount",
"{",
"Revision",
":",
"kbfsmd",
".",
"RevisionUninitialized",
",",
"Count",
":",
"0",
",",
"}",
"\n",
"}",
"else",
"if",
"ret",
"[",
"i",
"]",
".",
"Count",
"-",
"ret",
"[",
"i",
"-",
"1",
"]",
".",
"Count",
"<",
"minCount",
"/",
"5",
"{",
"// This revision is not being shifted, but it's",
"// uncomfortablely close to its neighbor on the right, so",
"// just drop it.",
"ret",
"[",
"i",
"-",
"1",
"]",
"=",
"PrevRevisionAndCount",
"{",
"Revision",
":",
"kbfsmd",
".",
"RevisionUninitialized",
",",
"Count",
":",
"0",
",",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Finally, overwrite whatever's left in the first slot with `r`",
"// and a count of 1.",
"//",
"// Continuing the example above, this code will leave us with:",
"//",
"// ret = [27, 25, 15]",
"ret",
"[",
"0",
"]",
"=",
"PrevRevisionAndCount",
"{",
"Revision",
":",
"r",
",",
"Count",
":",
"1",
",",
"}",
"\n",
"return",
"ret",
"\n",
"}"
] | // AddRevision returns a copy of `pr` with a new immediately-previous
// revision added, with the existing entries moved or overwritten to
// accomodate the new entry, and with increased counts. Any existing
// revisions smaller than or equal to minRev will be removed. | [
"AddRevision",
"returns",
"a",
"copy",
"of",
"pr",
"with",
"a",
"new",
"immediately",
"-",
"previous",
"revision",
"added",
"with",
"the",
"existing",
"entries",
"moved",
"or",
"overwritten",
"to",
"accomodate",
"the",
"new",
"entry",
"and",
"with",
"increased",
"counts",
".",
"Any",
"existing",
"revisions",
"smaller",
"than",
"or",
"equal",
"to",
"minRev",
"will",
"be",
"removed",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/prev_revisions.go#L36-L172 |
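A usage sketch for AddRevision above, with made-up revision numbers that mirror the worked example in the function's comments (new revision 27, GC floor 11); the surrounding function and package placement are assumptions.

```go
// Sketch only (assumed package data): record that revision 27 now points at
// this entry, with everything at or below revision 11 already GC'd. The
// receiver list is returned updated, shifted, and trimmed as described above.
func exampleAddRevision(pr PrevRevisions) PrevRevisions {
	return pr.AddRevision(kbfsmd.Revision(27), kbfsmd.Revision(11))
}
```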
159,275 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | GetState | func (fbo *folderBlockOps) GetState(
lState *kbfssync.LockState) overallBlockState {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
if len(fbo.dirtyFiles) == 0 && len(fbo.dirtyDirs) == 0 &&
fbo.dirtyRootDirEntry == nil {
return cleanState
}
return dirtyState
} | go | func (fbo *folderBlockOps) GetState(
lState *kbfssync.LockState) overallBlockState {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
if len(fbo.dirtyFiles) == 0 && len(fbo.dirtyDirs) == 0 &&
fbo.dirtyRootDirEntry == nil {
return cleanState
}
return dirtyState
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"GetState",
"(",
"lState",
"*",
"kbfssync",
".",
"LockState",
")",
"overallBlockState",
"{",
"fbo",
".",
"blockLock",
".",
"RLock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"RUnlock",
"(",
"lState",
")",
"\n",
"if",
"len",
"(",
"fbo",
".",
"dirtyFiles",
")",
"==",
"0",
"&&",
"len",
"(",
"fbo",
".",
"dirtyDirs",
")",
"==",
"0",
"&&",
"fbo",
".",
"dirtyRootDirEntry",
"==",
"nil",
"{",
"return",
"cleanState",
"\n",
"}",
"\n",
"return",
"dirtyState",
"\n",
"}"
] | // GetState returns the overall block state of this TLF. | [
"GetState",
"returns",
"the",
"overall",
"block",
"state",
"of",
"this",
"TLF",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L254-L263 |
159,276 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | getCleanEncodedBlockSizeLocked | func (fbo *folderBlockOps) getCleanEncodedBlockSizeLocked(ctx context.Context,
lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
branch data.BranchName, rtype data.BlockReqType, assumeCacheIsLive bool) (
size uint32, status keybase1.BlockStatus, err error) {
if rtype != data.BlockReadParallel {
if rtype == data.BlockWrite {
panic("Cannot get the size of a block for writing")
}
fbo.blockLock.AssertAnyLocked(lState)
} else if lState != nil {
panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " +
"with blockReadParallel")
}
if !ptr.IsValid() {
return 0, 0, InvalidBlockRefError{ptr.Ref()}
}
if assumeCacheIsLive {
// If we're assuming all blocks in the cache are live, we just
// need to get the block size, which we can do from either one
// of the caches.
if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
return block.GetEncodedSize(), keybase1.BlockStatus_LIVE, nil
}
if diskBCache := fbo.config.DiskBlockCache(); diskBCache != nil {
cacheType := DiskBlockAnyCache
if fbo.config.IsSyncedTlf(fbo.id()) {
cacheType = DiskBlockSyncCache
}
if buf, _, _, err := diskBCache.Get(
ctx, fbo.id(), ptr.ID, cacheType); err == nil {
return uint32(len(buf)), keybase1.BlockStatus_LIVE, nil
}
}
}
if err := checkDataVersion(fbo.config, data.Path{}, ptr); err != nil {
return 0, 0, err
}
defer func() {
fbo.vlog.CLogf(
ctx, libkb.VLog1, "GetEncodedSize ptr=%v size=%d status=%s: %+v",
ptr, size, status, err)
// In certain testing situations, a block might be represented
// with a 0 size in our journal or be missing from our local
// data stores, and we need to reconstruct the size using the
// cache in order to make the accounting work out for the test.
_, isBlockNotFound :=
errors.Cause(err).(kbfsblock.ServerErrorBlockNonExistent)
if isBlockNotFound || size == 0 {
if block, cerr := fbo.config.BlockCache().Get(ptr); cerr == nil {
fbo.vlog.CLogf(
ctx, libkb.VLog1,
"Fixing encoded size of %v with cached copy", ptr)
size = block.GetEncodedSize()
err = nil
}
}
}()
// Unlock the blockLock while we wait for the network, only if
// it's locked for reading by a single goroutine. If it's locked
// for writing, that indicates we are performing an atomic write
// operation, and we need to ensure that nothing else comes in and
// modifies the blocks, so don't unlock.
//
// If there may be multiple goroutines fetching blocks under the
// same lState, we can't safely unlock since some of the other
// goroutines may be operating on the data assuming they have the
// lock.
bops := fbo.config.BlockOps()
if rtype != data.BlockReadParallel && rtype != data.BlockLookup {
fbo.blockLock.DoRUnlockedIfPossible(lState, func(*kbfssync.LockState) {
size, status, err = bops.GetEncodedSize(ctx, kmd, ptr)
})
} else {
size, status, err = bops.GetEncodedSize(ctx, kmd, ptr)
}
if err != nil {
return 0, 0, err
}
return size, status, nil
} | go | func (fbo *folderBlockOps) getCleanEncodedBlockSizeLocked(ctx context.Context,
lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
branch data.BranchName, rtype data.BlockReqType, assumeCacheIsLive bool) (
size uint32, status keybase1.BlockStatus, err error) {
if rtype != data.BlockReadParallel {
if rtype == data.BlockWrite {
panic("Cannot get the size of a block for writing")
}
fbo.blockLock.AssertAnyLocked(lState)
} else if lState != nil {
panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " +
"with blockReadParallel")
}
if !ptr.IsValid() {
return 0, 0, InvalidBlockRefError{ptr.Ref()}
}
if assumeCacheIsLive {
// If we're assuming all blocks in the cache are live, we just
// need to get the block size, which we can do from either one
// of the caches.
if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
return block.GetEncodedSize(), keybase1.BlockStatus_LIVE, nil
}
if diskBCache := fbo.config.DiskBlockCache(); diskBCache != nil {
cacheType := DiskBlockAnyCache
if fbo.config.IsSyncedTlf(fbo.id()) {
cacheType = DiskBlockSyncCache
}
if buf, _, _, err := diskBCache.Get(
ctx, fbo.id(), ptr.ID, cacheType); err == nil {
return uint32(len(buf)), keybase1.BlockStatus_LIVE, nil
}
}
}
if err := checkDataVersion(fbo.config, data.Path{}, ptr); err != nil {
return 0, 0, err
}
defer func() {
fbo.vlog.CLogf(
ctx, libkb.VLog1, "GetEncodedSize ptr=%v size=%d status=%s: %+v",
ptr, size, status, err)
// In certain testing situations, a block might be represented
// with a 0 size in our journal or be missing from our local
// data stores, and we need to reconstruct the size using the
// cache in order to make the accounting work out for the test.
_, isBlockNotFound :=
errors.Cause(err).(kbfsblock.ServerErrorBlockNonExistent)
if isBlockNotFound || size == 0 {
if block, cerr := fbo.config.BlockCache().Get(ptr); cerr == nil {
fbo.vlog.CLogf(
ctx, libkb.VLog1,
"Fixing encoded size of %v with cached copy", ptr)
size = block.GetEncodedSize()
err = nil
}
}
}()
// Unlock the blockLock while we wait for the network, only if
// it's locked for reading by a single goroutine. If it's locked
// for writing, that indicates we are performing an atomic write
// operation, and we need to ensure that nothing else comes in and
// modifies the blocks, so don't unlock.
//
// If there may be multiple goroutines fetching blocks under the
// same lState, we can't safely unlock since some of the other
// goroutines may be operating on the data assuming they have the
// lock.
bops := fbo.config.BlockOps()
if rtype != data.BlockReadParallel && rtype != data.BlockLookup {
fbo.blockLock.DoRUnlockedIfPossible(lState, func(*kbfssync.LockState) {
size, status, err = bops.GetEncodedSize(ctx, kmd, ptr)
})
} else {
size, status, err = bops.GetEncodedSize(ctx, kmd, ptr)
}
if err != nil {
return 0, 0, err
}
return size, status, nil
} | [
] | // getCleanEncodedBlockSizeLocked retrieves the encoded size of the
// clean block pointed to by ptr, which must be valid, either from the
// cache or from the server. If `rtype` is `blockReadParallel`, it's
// assumed that some coordinating goroutine is holding the correct
// locks, and in that case `lState` must be `nil`. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L270-L355 |
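The function above tries the in-memory block cache, then the disk block cache, and only then asks the server for the encoded size. A rough, self-contained sketch of that cache-first/fallback shape follows; the sizeCache type and fetch callback are hypothetical stand-ins for illustration, not KBFS APIs.

package main

import (
	"errors"
	"fmt"
)

// sizeCache stands in for the in-memory/disk block caches above.
type sizeCache map[string]uint32

// lookupSize answers from the cache when it can and falls back to the
// (possibly slow, network-backed) fetch function otherwise.
func lookupSize(cache sizeCache, id string, fetch func(string) (uint32, error)) (uint32, error) {
	if sz, ok := cache[id]; ok {
		return sz, nil
	}
	sz, err := fetch(id)
	if err != nil {
		return 0, err
	}
	cache[id] = sz
	return sz, nil
}

func main() {
	cache := sizeCache{"blk1": 4096}
	fetch := func(id string) (uint32, error) {
		if id == "blk2" {
			return 8192, nil
		}
		return 0, errors.New("block not found")
	}
	for _, id := range []string{"blk1", "blk2"} {
		sz, err := lookupSize(cache, id, fetch)
		fmt.Println(id, sz, err)
	}
}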
159,277 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | ClearChargedTo | func (fbo *folderBlockOps) ClearChargedTo(lState *kbfssync.LockState) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.chargedTo = keybase1.UserOrTeamID("")
} | go | func (fbo *folderBlockOps) ClearChargedTo(lState *kbfssync.LockState) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.chargedTo = keybase1.UserOrTeamID("")
} | [
] | // ClearChargedTo clears out the cached chargedTo UID for this FBO. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L797-L801 |
159,278 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | deepCopyFileLocked | func (fbo *folderBlockOps) deepCopyFileLocked(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
dirtyBcache data.DirtyBlockCacheSimple, dataVer data.Ver) (
newTopPtr data.BlockPointer, allChildPtrs []data.BlockPointer, err error) {
// Deep copying doesn't alter any data in use, it only makes copy,
// so only a read lock is needed.
fbo.blockLock.AssertRLocked(lState)
chargedTo, err := chargedToForTLF(
ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), fbo.config,
kmd.GetTlfHandle())
if err != nil {
return data.BlockPointer{}, nil, err
}
fd := fbo.newFileDataWithCache(
lState, file, chargedTo, kmd, dirtyBcache)
return fd.DeepCopy(ctx, dataVer)
} | go | func (fbo *folderBlockOps) deepCopyFileLocked(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
dirtyBcache data.DirtyBlockCacheSimple, dataVer data.Ver) (
newTopPtr data.BlockPointer, allChildPtrs []data.BlockPointer, err error) {
// Deep copying doesn't alter any data in use, it only makes copy,
// so only a read lock is needed.
fbo.blockLock.AssertRLocked(lState)
chargedTo, err := chargedToForTLF(
ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), fbo.config,
kmd.GetTlfHandle())
if err != nil {
return data.BlockPointer{}, nil, err
}
fd := fbo.newFileDataWithCache(
lState, file, chargedTo, kmd, dirtyBcache)
return fd.DeepCopy(ctx, dataVer)
} | [
] | // deepCopyFileLocked makes a complete copy of the given file, deduping leaf
// blocks and making new random BlockPointers for all indirect blocks.
// It returns the new top pointer of the copy, and all the new child
// pointers in the copy. It takes a custom DirtyBlockCache, which
// directs where the resulting block copies are stored. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L808-L824 |
159,279 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | newDirDataWithDBMLocked | func (fbo *folderBlockOps) newDirDataWithDBMLocked(lState *kbfssync.LockState,
dir data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata,
dbm dirBlockMap) *data.DirData {
fbo.blockLock.AssertRLocked(lState)
return data.NewDirData(dir, chargedTo, fbo.config.BlockSplitter(), kmd,
func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
dir data.Path, rtype data.BlockReqType) (*data.DirBlock, bool, error) {
hasBlock, err := dbm.hasBlock(ctx, ptr)
if err != nil {
return nil, false, err
}
if hasBlock {
block, err := dbm.getBlock(ctx, ptr)
if err != nil {
return nil, false, err
}
return block, true, nil
}
localLState := lState
getRtype := rtype
switch rtype {
case data.BlockReadParallel:
localLState = nil
case data.BlockWrite:
getRtype = data.BlockRead
}
block, wasDirty, err := fbo.getDirLocked(
ctx, localLState, kmd, ptr, dir, getRtype)
if err != nil {
return nil, false, err
}
if rtype == data.BlockWrite {
// Make a copy before we stick it in the local block cache.
block = block.DeepCopy()
err = dbm.putBlock(ctx, ptr, block)
if err != nil {
return nil, false, err
}
}
return block, wasDirty, nil
},
func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
return dbm.putBlock(ctx, ptr, block.(*data.DirBlock))
}, fbo.log, fbo.vlog)
} | go | func (fbo *folderBlockOps) newDirDataWithDBMLocked(lState *kbfssync.LockState,
dir data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata,
dbm dirBlockMap) *data.DirData {
fbo.blockLock.AssertRLocked(lState)
return data.NewDirData(dir, chargedTo, fbo.config.BlockSplitter(), kmd,
func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
dir data.Path, rtype data.BlockReqType) (*data.DirBlock, bool, error) {
hasBlock, err := dbm.hasBlock(ctx, ptr)
if err != nil {
return nil, false, err
}
if hasBlock {
block, err := dbm.getBlock(ctx, ptr)
if err != nil {
return nil, false, err
}
return block, true, nil
}
localLState := lState
getRtype := rtype
switch rtype {
case data.BlockReadParallel:
localLState = nil
case data.BlockWrite:
getRtype = data.BlockRead
}
block, wasDirty, err := fbo.getDirLocked(
ctx, localLState, kmd, ptr, dir, getRtype)
if err != nil {
return nil, false, err
}
if rtype == data.BlockWrite {
// Make a copy before we stick it in the local block cache.
block = block.DeepCopy()
err = dbm.putBlock(ctx, ptr, block)
if err != nil {
return nil, false, err
}
}
return block, wasDirty, nil
},
func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
return dbm.putBlock(ctx, ptr, block.(*data.DirBlock))
}, fbo.log, fbo.vlog)
} | [
] | // newDirDataWithDBMLocked creates a new `dirData` that reads from and
// puts into a local dir block cache. If it reads a block out from
// anything but the `dbm`, it makes a copy of it before inserting it
// into the `dbm`. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L978-L1025 |
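The getter closure above copies a directory block before putting it into the local dbm on a write request, so later mutations never touch the shared copy. A small sketch of that copy-before-caching idea with made-up types (not the KBFS dirBlockMap):

package main

import "fmt"

type block struct{ entries map[string]int }

func (b *block) copy() *block {
	nb := &block{entries: make(map[string]int, len(b.entries))}
	for k, v := range b.entries {
		nb.entries[k] = v
	}
	return nb
}

// getForWrite returns the overlay copy if present; otherwise it deep-copies
// the shared block into the overlay so later writes cannot race with readers.
func getForWrite(overlay, shared map[string]*block, id string) *block {
	if b, ok := overlay[id]; ok {
		return b
	}
	b := shared[id].copy()
	overlay[id] = b
	return b
}

func main() {
	shared := map[string]*block{"root": {entries: map[string]int{"a.txt": 1}}}
	overlay := map[string]*block{}
	b := getForWrite(overlay, shared, "root")
	b.entries["b.txt"] = 2
	fmt.Println(len(shared["root"].entries), len(overlay["root"].entries)) // 1 2
}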
159,280 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | newDirDataWithDBM | func (fbo *folderBlockOps) newDirDataWithDBM(
lState *kbfssync.LockState, dir data.Path, chargedTo keybase1.UserOrTeamID,
kmd libkey.KeyMetadata, dbm dirBlockMap) (*data.DirData, func()) {
// Lock and fetch for reading only, we want any dirty
// blocks to go into the dbm.
fbo.blockLock.RLock(lState)
cleanupFn := func() { fbo.blockLock.RUnlock(lState) }
return fbo.newDirDataWithDBMLocked(lState, dir, chargedTo, kmd, dbm),
cleanupFn
} | go | func (fbo *folderBlockOps) newDirDataWithDBM(
lState *kbfssync.LockState, dir data.Path, chargedTo keybase1.UserOrTeamID,
kmd libkey.KeyMetadata, dbm dirBlockMap) (*data.DirData, func()) {
// Lock and fetch for reading only, we want any dirty
// blocks to go into the dbm.
fbo.blockLock.RLock(lState)
cleanupFn := func() { fbo.blockLock.RUnlock(lState) }
return fbo.newDirDataWithDBMLocked(lState, dir, chargedTo, kmd, dbm),
cleanupFn
} | [
] | // newDirDataWithDBM is like `newDirDataWithDBMLocked`, but it must be
// called with `blockLock` unlocked, and the returned function must be
// called when the returned `dirData` is no longer in use. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1030-L1039 |
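newDirDataWithDBM hands back a cleanup function that releases blockLock, and callers defer it for the lifetime of the returned dirData. The same shape with a plain sync.RWMutex, as an illustration only:

package main

import (
	"fmt"
	"sync"
)

// withReadLock locks mu for reading and returns the value together with a
// cleanup function the caller must invoke when done with the value.
func withReadLock(mu *sync.RWMutex, value string) (string, func()) {
	mu.RLock()
	return value, mu.RUnlock
}

func main() {
	var mu sync.RWMutex
	v, done := withReadLock(&mu, "dir data")
	defer done()
	fmt.Println(v)
}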
159,281 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | AddDirEntryInCache | func (fbo *folderBlockOps) AddDirEntryInCache(
ctx context.Context, lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry,
dir data.Path, newName string, newDe data.DirEntry) (dirCacheUndoFn, error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fn, err := fbo.addDirEntryInCacheLocked(
ctx, lState, kmd, dir, newName, newDe)
if err != nil {
return nil, err
}
return fbo.wrapWithBlockLock(fn), nil
} | go | func (fbo *folderBlockOps) AddDirEntryInCache(
ctx context.Context, lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry,
dir data.Path, newName string, newDe data.DirEntry) (dirCacheUndoFn, error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fn, err := fbo.addDirEntryInCacheLocked(
ctx, lState, kmd, dir, newName, newDe)
if err != nil {
return nil, err
}
return fbo.wrapWithBlockLock(fn), nil
} | [
] | // AddDirEntryInCache adds a brand new entry to the given directory
// and updates the directory's own mtime and ctime. It returns a
// function that can be called if the change needs to be undone. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1147-L1158 |
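The undo function returned above lets the caller roll the cached directory back if the operation later fails. A toy version of the same idea over a plain map (hypothetical names, not the KBFS dirCacheUndoFn machinery):

package main

import "fmt"

// addEntry inserts name into dir and returns a function that reverts it.
func addEntry(dir map[string]int, name string, v int) (undo func()) {
	old, existed := dir[name]
	dir[name] = v
	return func() {
		if existed {
			dir[name] = old
		} else {
			delete(dir, name)
		}
	}
}

func main() {
	dir := map[string]int{"a": 1}
	undo := addEntry(dir, "b", 2)
	fmt.Println(dir) // map[a:1 b:2]
	undo()
	fmt.Println(dir) // map[a:1]
}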
159,282 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | SetAttrInDirEntryInCache | func (fbo *folderBlockOps) SetAttrInDirEntryInCache(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, p data.Path, newDe data.DirEntry, attr attrChange) (
dirCacheUndoFn, error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
return fbo.setCachedAttrLocked(
ctx, lState, kmd, *p.ParentPath(), p.TailName(), attr, newDe)
} | go | func (fbo *folderBlockOps) SetAttrInDirEntryInCache(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, p data.Path, newDe data.DirEntry, attr attrChange) (
dirCacheUndoFn, error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
return fbo.setCachedAttrLocked(
ctx, lState, kmd, *p.ParentPath(), p.TailName(), attr, newDe)
} | [
] | // SetAttrInDirEntryInCache updates an entry from the given directory. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1366-L1374 |
159,283 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | GetDirtyDirCopy | func (fbo *folderBlockOps) GetDirtyDirCopy(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path,
rtype data.BlockReqType) (*data.DirBlock, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
dblock, err := fbo.getDirtyDirLocked(ctx, lState, kmd, dir, rtype)
if err != nil {
return nil, err
}
// Copy it while under lock. Otherwise, another operation like
// `Write` can modify it while the caller is trying to copy it,
// leading to a panic like in KBFS-3407.
return dblock.DeepCopy(), nil
} | go | func (fbo *folderBlockOps) GetDirtyDirCopy(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path,
rtype data.BlockReqType) (*data.DirBlock, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
dblock, err := fbo.getDirtyDirLocked(ctx, lState, kmd, dir, rtype)
if err != nil {
return nil, err
}
// Copy it while under lock. Otherwise, another operation like
// `Write` can modify it while the caller is trying to copy it,
// leading to a panic like in KBFS-3407.
return dblock.DeepCopy(), nil
} | [
] | // GetDirtyDirCopy returns a deep copy of the directory block for a
// dirty directory, while under lock, updated with all cached dirty
// entries. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1396-L1409 |
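The comment in the entry above is the key point: the deep copy is taken while the read lock is still held, so a concurrent Write cannot mutate the block mid-copy (the KBFS-3407 panic). A generic sketch of copy-under-lock with a sync.RWMutex, purely illustrative:

package main

import (
	"fmt"
	"sync"
)

type dirBlock struct {
	mu      sync.RWMutex
	entries map[string]string
}

// snapshot returns a copy taken entirely under the read lock.
func (d *dirBlock) snapshot() map[string]string {
	d.mu.RLock()
	defer d.mu.RUnlock()
	out := make(map[string]string, len(d.entries))
	for k, v := range d.entries {
		out[k] = v
	}
	return out
}

func main() {
	d := &dirBlock{entries: map[string]string{"file": "v1"}}
	fmt.Println(d.snapshot())
}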
159,284 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | updateEntryLocked | func (fbo *folderBlockOps) updateEntryLocked(ctx context.Context,
lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, file data.Path,
de data.DirEntry, includeDeleted bool) error {
fbo.blockLock.AssertAnyLocked(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return err
}
parentPath := *file.ParentPath()
dd := fbo.newDirDataLocked(lState, parentPath, chargedTo, kmd)
unrefs, err := dd.UpdateEntry(ctx, file.TailName(), de)
_, noExist := errors.Cause(err).(idutil.NoSuchNameError)
if noExist && includeDeleted {
unlinkedNode := fbo.nodeCache.Get(file.TailPointer().Ref())
if unlinkedNode != nil && fbo.nodeCache.IsUnlinked(unlinkedNode) {
fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, de)
return nil
}
return err
} else if err != nil {
return err
} else {
_ = fbo.makeDirDirtyLocked(lState, parentPath.TailPointer(), unrefs)
}
return nil
} | go | func (fbo *folderBlockOps) updateEntryLocked(ctx context.Context,
lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, file data.Path,
de data.DirEntry, includeDeleted bool) error {
fbo.blockLock.AssertAnyLocked(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return err
}
parentPath := *file.ParentPath()
dd := fbo.newDirDataLocked(lState, parentPath, chargedTo, kmd)
unrefs, err := dd.UpdateEntry(ctx, file.TailName(), de)
_, noExist := errors.Cause(err).(idutil.NoSuchNameError)
if noExist && includeDeleted {
unlinkedNode := fbo.nodeCache.Get(file.TailPointer().Ref())
if unlinkedNode != nil && fbo.nodeCache.IsUnlinked(unlinkedNode) {
fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, de)
return nil
}
return err
} else if err != nil {
return err
} else {
_ = fbo.makeDirDirtyLocked(lState, parentPath.TailPointer(), unrefs)
}
return nil
} | [
] | // file must have a valid parent. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1463-L1489 |
159,285 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | GetEntry | func (fbo *folderBlockOps) GetEntry(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, file data.Path) (data.DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getEntryLocked(ctx, lState, kmd, file, false)
} | go | func (fbo *folderBlockOps) GetEntry(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, file data.Path) (data.DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
return fbo.getEntryLocked(ctx, lState, kmd, file, false)
} | [
] | // GetEntry returns the possibly-dirty DirEntry of the given file in
// its parent DirBlock. file must have a valid parent. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1493-L1499 |
159,286 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | Lookup | func (fbo *folderBlockOps) Lookup(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, dir Node, name string) (
Node, data.DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
dirPath := fbo.nodeCache.PathFromNode(dir)
if !dirPath.IsValid() {
return nil, data.DirEntry{}, errors.WithStack(InvalidPathError{dirPath})
}
childPath := dirPath.ChildPathNoPtr(name)
de, err := fbo.getEntryLocked(ctx, lState, kmd, childPath, false)
if err != nil {
return nil, data.DirEntry{}, err
}
node, err := fbo.getChildNodeLocked(lState, dir, name, de)
if err != nil {
return nil, data.DirEntry{}, err
}
return node, de, nil
} | go | func (fbo *folderBlockOps) Lookup(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, dir Node, name string) (
Node, data.DirEntry, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
dirPath := fbo.nodeCache.PathFromNode(dir)
if !dirPath.IsValid() {
return nil, data.DirEntry{}, errors.WithStack(InvalidPathError{dirPath})
}
childPath := dirPath.ChildPathNoPtr(name)
de, err := fbo.getEntryLocked(ctx, lState, kmd, childPath, false)
if err != nil {
return nil, data.DirEntry{}, err
}
node, err := fbo.getChildNodeLocked(lState, dir, name, de)
if err != nil {
return nil, data.DirEntry{}, err
}
return node, de, nil
} | [
] | // Lookup returns the possibly-dirty DirEntry of the given file in its
// parent DirBlock, and a Node for the file if it exists. It has to
// do all of this under the block lock to avoid races with
// UpdatePointers. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1536-L1559 |
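Lookup above resolves the entry and materializes the node inside one read-lock critical section, so a concurrent pointer update cannot slip in between the two steps. A generic version of that idea over a map plus RWMutex, purely illustrative:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu    sync.RWMutex
	ptrs  map[string]string // name -> block pointer
	nodes map[string]string // block pointer -> node
}

// lookup resolves name to its node in a single critical section, so the
// pointer cannot be swapped out between the two map reads.
func (s *store) lookup(name string) (string, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	ptr, ok := s.ptrs[name]
	if !ok {
		return "", false
	}
	node, ok := s.nodes[ptr]
	return node, ok
}

func main() {
	s := &store{
		ptrs:  map[string]string{"a.txt": "ptr1"},
		nodes: map[string]string{"ptr1": "node-a"},
	}
	fmt.Println(s.lookup("a.txt"))
}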
159,287 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | cacheBlockIfNotYetDirtyLocked | func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
ctx context.Context, lState *kbfssync.LockState, ptr data.BlockPointer,
file data.Path, block data.Block) error {
fbo.blockLock.AssertLocked(lState)
df := fbo.getOrCreateDirtyFileLocked(lState, file)
needsCaching, isSyncing := df.SetBlockDirty(ptr)
if needsCaching {
err := fbo.config.DirtyBlockCache().Put(
ctx, fbo.id(), ptr, file.Branch, block)
if err != nil {
return err
}
}
if isSyncing {
fbo.doDeferWrite = true
}
return nil
} | go | func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
ctx context.Context, lState *kbfssync.LockState, ptr data.BlockPointer,
file data.Path, block data.Block) error {
fbo.blockLock.AssertLocked(lState)
df := fbo.getOrCreateDirtyFileLocked(lState, file)
needsCaching, isSyncing := df.SetBlockDirty(ptr)
if needsCaching {
err := fbo.config.DirtyBlockCache().Put(
ctx, fbo.id(), ptr, file.Branch, block)
if err != nil {
return err
}
}
if isSyncing {
fbo.doDeferWrite = true
}
return nil
} | [
] | // cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only
// does so if the block isn't already marked as dirty in the cache.
// This is useful when operating on a dirty copy of a block that may
// already be in the cache. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1577-L1596 |
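SetBlockDirty above reports whether the block still needs to be written to the dirty cache, so a block is cached at most once per dirty transition. An illustrative stand-alone version; the dirtyTracker type is invented for the example:

package main

import "fmt"

type dirtyTracker struct{ dirty map[string]bool }

// setDirty reports whether the block still needs to be written to the
// dirty cache (i.e. this is the first time it is marked dirty).
func (t *dirtyTracker) setDirty(id string) (needsCaching bool) {
	if t.dirty[id] {
		return false
	}
	t.dirty[id] = true
	return true
}

func main() {
	t := &dirtyTracker{dirty: map[string]bool{}}
	fmt.Println(t.setDirty("blk")) // true: cache it
	fmt.Println(t.setDirty("blk")) // false: already dirty
}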
159,288 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | GetDirtyFileBlockRefs | func (fbo *folderBlockOps) GetDirtyFileBlockRefs(
lState *kbfssync.LockState) []data.BlockRef {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
var dirtyRefs []data.BlockRef
for ref := range fbo.unrefCache {
dirtyRefs = append(dirtyRefs, ref)
}
return dirtyRefs
} | go | func (fbo *folderBlockOps) GetDirtyFileBlockRefs(
lState *kbfssync.LockState) []data.BlockRef {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
var dirtyRefs []data.BlockRef
for ref := range fbo.unrefCache {
dirtyRefs = append(dirtyRefs, ref)
}
return dirtyRefs
} | [
] | // GetDirtyFileBlockRefs returns a list of references of all known dirty
// files. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1619-L1628 |
159,289 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | GetDirtyDirBlockRefs | func (fbo *folderBlockOps) GetDirtyDirBlockRefs(
lState *kbfssync.LockState) []data.BlockRef {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
var dirtyRefs []data.BlockRef
for ptr := range fbo.dirtyDirs {
dirtyRefs = append(dirtyRefs, ptr.Ref())
}
return dirtyRefs
} | go | func (fbo *folderBlockOps) GetDirtyDirBlockRefs(
lState *kbfssync.LockState) []data.BlockRef {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
var dirtyRefs []data.BlockRef
for ptr := range fbo.dirtyDirs {
dirtyRefs = append(dirtyRefs, ptr.Ref())
}
return dirtyRefs
} | [
] | // GetDirtyDirBlockRefs returns a list of references of all known dirty
// directories. | [
159,290 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | getDirtyDirUnrefsLocked | func (fbo *folderBlockOps) getDirtyDirUnrefsLocked(
lState *kbfssync.LockState, ptr data.BlockPointer) []data.BlockInfo {
fbo.blockLock.AssertRLocked(lState)
return fbo.dirtyDirs[ptr]
} | go | func (fbo *folderBlockOps) getDirtyDirUnrefsLocked(
lState *kbfssync.LockState, ptr data.BlockPointer) []data.BlockInfo {
fbo.blockLock.AssertRLocked(lState)
return fbo.dirtyDirs[ptr]
} | [
] | // getDirtyDirUnrefsLocked returns a list of block infos that need to be
// unreferenced for the given directory. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1645-L1649 |
159,291 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | Read | func (fbo *folderBlockOps) Read(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file Node,
dest []byte, off int64) (int64, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
filePath := fbo.nodeCache.PathFromNode(file)
fbo.vlog.CLogf(ctx, libkb.VLog1, "Reading from %v", filePath.TailPointer())
var id keybase1.UserOrTeamID // Data reads don't depend on the id.
fd := fbo.newFileData(lState, filePath, id, kmd)
return fd.Read(ctx, dest, data.Int64Offset(off))
} | go | func (fbo *folderBlockOps) Read(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file Node,
dest []byte, off int64) (int64, error) {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
filePath := fbo.nodeCache.PathFromNode(file)
fbo.vlog.CLogf(ctx, libkb.VLog1, "Reading from %v", filePath.TailPointer())
var id keybase1.UserOrTeamID // Data reads don't depend on the id.
fd := fbo.newFileData(lState, filePath, id, kmd)
return fd.Read(ctx, dest, data.Int64Offset(off))
} | [
] | // Read reads from the given file into the given buffer at the given
// offset. It returns the number of bytes read and nil, or 0 and the
// error if there was one. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1834-L1847 |
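The Read contract above (fill dest starting at off, return the bytes read or an error) matches Go's io.ReaderAt; a plain in-memory example using bytes.Reader:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	r := bytes.NewReader([]byte("hello, kbfs"))
	dest := make([]byte, 4)
	// Read 4 bytes starting at offset 7.
	n, err := r.ReadAt(dest, 7)
	fmt.Println(n, err, string(dest[:n])) // 4 <nil> kbfs
}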
159,292 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | writeGetFileLocked | func (fbo *folderBlockOps) writeGetFileLocked(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
file data.Path) (*data.FileBlock, error) {
fbo.blockLock.AssertLocked(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
isWriter, err := kmd.IsWriter(
ctx, fbo.config.KBPKI(), fbo.config, session.UID, session.VerifyingKey)
if err != nil {
return nil, err
}
if !isWriter {
return nil, tlfhandle.NewWriteAccessError(kmd.GetTlfHandle(),
session.Name, file.String())
}
fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, data.BlockWrite)
if err != nil {
return nil, err
}
return fblock, nil
} | go | func (fbo *folderBlockOps) writeGetFileLocked(
ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
file data.Path) (*data.FileBlock, error) {
fbo.blockLock.AssertLocked(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
isWriter, err := kmd.IsWriter(
ctx, fbo.config.KBPKI(), fbo.config, session.UID, session.VerifyingKey)
if err != nil {
return nil, err
}
if !isWriter {
return nil, tlfhandle.NewWriteAccessError(kmd.GetTlfHandle(),
session.Name, file.String())
}
fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, data.BlockWrite)
if err != nil {
return nil, err
}
return fblock, nil
} | [
] | // writeGetFileLocked checks write permissions explicitly for
// writeDataLocked, truncateLocked etc., and returns the file block for writing. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L1934-L1957 |
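The entry above gates the block fetch on an explicit writer check against the folder's handle. Reduced to its shape (hypothetical isWriter flag and fetch callback, not the KBPKI session check):

package main

import (
	"errors"
	"fmt"
)

var errWriteAccess = errors.New("write access denied")

// getFileForWrite only fetches the block once the caller is known to be a
// writer, mirroring the check-then-fetch ordering above.
func getFileForWrite(isWriter bool, fetch func() (string, error)) (string, error) {
	if !isWriter {
		return "", errWriteAccess
	}
	return fetch()
}

func main() {
	fetch := func() (string, error) { return "file block", nil }
	fmt.Println(getFileForWrite(true, fetch))
	fmt.Println(getFileForWrite(false, fetch))
}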
159,293 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | truncateExtendLocked | func (fbo *folderBlockOps) truncateExtendLocked(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, file data.Path, size uint64,
parentBlocks []data.ParentBlockAndChildIndex) (
WriteRange, []data.BlockPointer, error) {
fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
if err != nil {
return WriteRange{}, nil, err
}
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return WriteRange{}, nil, err
}
fd := fbo.newFileData(lState, file, chargedTo, kmd)
de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
if err != nil {
return WriteRange{}, nil, err
}
df := fbo.getOrCreateDirtyFileLocked(lState, file)
newDe, dirtyPtrs, err := fd.TruncateExtend(
ctx, size, fblock, parentBlocks, de, df)
if err != nil {
return WriteRange{}, nil, err
}
now := fbo.nowUnixNano()
newDe.Mtime = now
newDe.Ctime = now
err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
if err != nil {
return WriteRange{}, nil, err
}
si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
if err != nil {
return WriteRange{}, nil, err
}
latestWrite := si.op.addTruncate(size)
if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
select {
// If we can't send on the channel, that means a sync is
// already in progress
case fbo.forceSyncChan <- struct{}{}:
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Forcing a sync due to full buffer")
default:
}
}
fbo.vlog.CLogf(ctx, libkb.VLog1, "truncateExtendLocked: done")
return latestWrite, dirtyPtrs, nil
} | go | func (fbo *folderBlockOps) truncateExtendLocked(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, file data.Path, size uint64,
parentBlocks []data.ParentBlockAndChildIndex) (
WriteRange, []data.BlockPointer, error) {
fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
if err != nil {
return WriteRange{}, nil, err
}
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return WriteRange{}, nil, err
}
fd := fbo.newFileData(lState, file, chargedTo, kmd)
de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
if err != nil {
return WriteRange{}, nil, err
}
df := fbo.getOrCreateDirtyFileLocked(lState, file)
newDe, dirtyPtrs, err := fd.TruncateExtend(
ctx, size, fblock, parentBlocks, de, df)
if err != nil {
return WriteRange{}, nil, err
}
now := fbo.nowUnixNano()
newDe.Mtime = now
newDe.Ctime = now
err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
if err != nil {
return WriteRange{}, nil, err
}
si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
if err != nil {
return WriteRange{}, nil, err
}
latestWrite := si.op.addTruncate(size)
if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
select {
// If we can't send on the channel, that means a sync is
// already in progress
case fbo.forceSyncChan <- struct{}{}:
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Forcing a sync due to full buffer")
default:
}
}
fbo.vlog.CLogf(ctx, libkb.VLog1, "truncateExtendLocked: done")
return latestWrite, dirtyPtrs, nil
} | [
] | // truncateExtendLocked is called by truncateLocked to extend a file and
// creates a hole. | [
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2130-L2185 |
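The select with a default branch above is the standard non-blocking signal: if a sync request is already pending on forceSyncChan, the extra request is simply dropped. A stand-alone sketch of the pattern:

package main

import "fmt"

func main() {
	forceSync := make(chan struct{}, 1)
	signal := func() bool {
		select {
		case forceSync <- struct{}{}:
			return true // sync requested
		default:
			return false // a request is already pending; drop this one
		}
	}
	fmt.Println(signal()) // true
	fmt.Println(signal()) // false: buffer full, sync already scheduled
}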
159,294 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | Truncate | func (fbo *folderBlockOps) Truncate(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, file Node, size uint64) error {
// If there is too much unflushed data, we should wait until some
	// of it gets flushed so our memory usage doesn't grow without
// bound.
//
// Assume the whole remaining file will be dirty after this
// truncate. TODO: try to figure out how many bytes actually will
// be dirtied ahead of time?
c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
fbo.id(), int64(size))
if err != nil {
return err
}
defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
-int64(size), false)
err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
if err != nil {
return err
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
if err != nil {
return err
}
defer func() {
fbo.doDeferWrite = false
}()
latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
ctx, lState, kmd, filePath, size)
if err != nil {
return err
}
if latestWrite != nil {
fbo.observers.localChange(ctx, file, *latestWrite)
}
if fbo.doDeferWrite {
// There's an ongoing sync, and this truncate altered
// dirty blocks that are in the process of syncing. So,
// we have to redo this truncate once the sync is complete,
// using the new file path.
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Deferring a truncate to file %v",
filePath.TailPointer())
ds := fbo.deferred[filePath.TailRef()]
ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
ds.writes = append(ds.writes,
func(ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, f data.Path) error {
// We are about to re-dirty these bytes, so mark that
// they will no longer be synced via the old file.
df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
df.UpdateNotYetSyncingBytes(-newlyDirtiedChildBytes)
// Truncate the file again. We know this won't be
// deferred, so no need to check the new ptrs.
_, _, _, err := fbo.truncateLocked(
ctx, lState, kmd, f, size)
return err
})
ds.waitBytes += newlyDirtiedChildBytes
fbo.deferred[filePath.TailRef()] = ds
}
return nil
} | go | func (fbo *folderBlockOps) Truncate(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, file Node, size uint64) error {
// If there is too much unflushed data, we should wait until some
	// of it gets flushed so our memory usage doesn't grow without
// bound.
//
// Assume the whole remaining file will be dirty after this
// truncate. TODO: try to figure out how many bytes actually will
// be dirtied ahead of time?
c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
fbo.id(), int64(size))
if err != nil {
return err
}
defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
-int64(size), false)
err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
if err != nil {
return err
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
if err != nil {
return err
}
defer func() {
fbo.doDeferWrite = false
}()
latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
ctx, lState, kmd, filePath, size)
if err != nil {
return err
}
if latestWrite != nil {
fbo.observers.localChange(ctx, file, *latestWrite)
}
if fbo.doDeferWrite {
// There's an ongoing sync, and this truncate altered
// dirty blocks that are in the process of syncing. So,
// we have to redo this truncate once the sync is complete,
// using the new file path.
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Deferring a truncate to file %v",
filePath.TailPointer())
ds := fbo.deferred[filePath.TailRef()]
ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
ds.writes = append(ds.writes,
func(ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, f data.Path) error {
// We are about to re-dirty these bytes, so mark that
// they will no longer be synced via the old file.
df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
df.UpdateNotYetSyncingBytes(-newlyDirtiedChildBytes)
// Truncate the file again. We know this won't be
// deferred, so no need to check the new ptrs.
_, _, _, err := fbo.truncateLocked(
ctx, lState, kmd, f, size)
return err
})
ds.waitBytes += newlyDirtiedChildBytes
fbo.deferred[filePath.TailRef()] = ds
}
return nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"Truncate",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"kmd",
"KeyMetadataWithRootDirEntry",
",",
"file",
"Node",
",",
"size",
"uint64",
")",
"error",
"{",
"// If there is too much unflushed data, we should wait until some",
"// of it gets flush so our memory usage doesn't grow without",
"// bound.",
"//",
"// Assume the whole remaining file will be dirty after this",
"// truncate. TODO: try to figure out how many bytes actually will",
"// be dirtied ahead of time?",
"c",
",",
"err",
":=",
"fbo",
".",
"config",
".",
"DirtyBlockCache",
"(",
")",
".",
"RequestPermissionToDirty",
"(",
"ctx",
",",
"fbo",
".",
"id",
"(",
")",
",",
"int64",
"(",
"size",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"defer",
"fbo",
".",
"config",
".",
"DirtyBlockCache",
"(",
")",
".",
"UpdateUnsyncedBytes",
"(",
"fbo",
".",
"id",
"(",
")",
",",
"-",
"int64",
"(",
"size",
")",
",",
"false",
")",
"\n",
"err",
"=",
"fbo",
".",
"maybeWaitOnDeferredWrites",
"(",
"ctx",
",",
"lState",
",",
"file",
",",
"c",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"fbo",
".",
"blockLock",
".",
"Lock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"Unlock",
"(",
"lState",
")",
"\n\n",
"filePath",
",",
"err",
":=",
"fbo",
".",
"pathFromNodeForBlockWriteLocked",
"(",
"lState",
",",
"file",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"defer",
"func",
"(",
")",
"{",
"fbo",
".",
"doDeferWrite",
"=",
"false",
"\n",
"}",
"(",
")",
"\n\n",
"latestWrite",
",",
"dirtyPtrs",
",",
"newlyDirtiedChildBytes",
",",
"err",
":=",
"fbo",
".",
"truncateLocked",
"(",
"ctx",
",",
"lState",
",",
"kmd",
",",
"filePath",
",",
"size",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"latestWrite",
"!=",
"nil",
"{",
"fbo",
".",
"observers",
".",
"localChange",
"(",
"ctx",
",",
"file",
",",
"*",
"latestWrite",
")",
"\n",
"}",
"\n\n",
"if",
"fbo",
".",
"doDeferWrite",
"{",
"// There's an ongoing sync, and this truncate altered",
"// dirty blocks that are in the process of syncing. So,",
"// we have to redo this truncate once the sync is complete,",
"// using the new file path.",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"filePath",
".",
"TailPointer",
"(",
")",
")",
"\n",
"ds",
":=",
"fbo",
".",
"deferred",
"[",
"filePath",
".",
"TailRef",
"(",
")",
"]",
"\n",
"ds",
".",
"dirtyDeletes",
"=",
"append",
"(",
"ds",
".",
"dirtyDeletes",
",",
"dirtyPtrs",
"...",
")",
"\n",
"ds",
".",
"writes",
"=",
"append",
"(",
"ds",
".",
"writes",
",",
"func",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"kmd",
"KeyMetadataWithRootDirEntry",
",",
"f",
"data",
".",
"Path",
")",
"error",
"{",
"// We are about to re-dirty these bytes, so mark that",
"// they will no longer be synced via the old file.",
"df",
":=",
"fbo",
".",
"getOrCreateDirtyFileLocked",
"(",
"lState",
",",
"filePath",
")",
"\n",
"df",
".",
"UpdateNotYetSyncingBytes",
"(",
"-",
"newlyDirtiedChildBytes",
")",
"\n\n",
"// Truncate the file again. We know this won't be",
"// deferred, so no need to check the new ptrs.",
"_",
",",
"_",
",",
"_",
",",
"err",
":=",
"fbo",
".",
"truncateLocked",
"(",
"ctx",
",",
"lState",
",",
"kmd",
",",
"f",
",",
"size",
")",
"\n",
"return",
"err",
"\n",
"}",
")",
"\n",
"ds",
".",
"waitBytes",
"+=",
"newlyDirtiedChildBytes",
"\n",
"fbo",
".",
"deferred",
"[",
"filePath",
".",
"TailRef",
"(",
")",
"]",
"=",
"ds",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // Truncate truncates or extends the given file to the given size.
// May block if there is too much unflushed data; in that case, it
// will be unblocked by a future sync. | [
"Truncate",
"truncates",
"or",
"extends",
"the",
"given",
"file",
"to",
"the",
"given",
"size",
".",
"May",
"block",
"if",
"there",
"is",
"too",
"much",
"unflushed",
"data",
";",
"in",
"that",
"case",
"it",
"will",
"be",
"unblocked",
"by",
"a",
"future",
"sync",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2280-L2353 |
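Truncate above records the operation as a closure in fbo.deferred when it races with an in-flight sync, so it can be replayed against the new file path once the sync completes. The following is a small self-contained sketch of that deferral pattern under assumed names (deferredOps, deferOp, replay); it is not the KBFS implementation.

// Deferral pattern: operations that race with an in-flight sync are
// recorded as closures and replayed against the new state afterwards.
package main

import (
	"context"
	"fmt"
	"sync"
)

type deferredOps struct {
	mu     sync.Mutex
	writes []func(ctx context.Context) error
}

// deferOp records an operation to be replayed later.
func (d *deferredOps) deferOp(op func(ctx context.Context) error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.writes = append(d.writes, op)
}

// replay runs and clears all deferred operations, stopping on the first error.
func (d *deferredOps) replay(ctx context.Context) error {
	d.mu.Lock()
	ops := d.writes
	d.writes = nil
	d.mu.Unlock()
	for _, op := range ops {
		if err := op(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var d deferredOps
	d.deferOp(func(context.Context) error {
		fmt.Println("re-applying truncate to size 4096")
		return nil
	})
	fmt.Println(d.replay(context.Background()))
}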
159,295 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | IsDirty | func (fbo *folderBlockOps) IsDirty(lState *kbfssync.LockState, file data.Path) bool {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
// A dirty file should probably match all three of these, but
// check them individually just in case.
if fbo.config.DirtyBlockCache().IsDirty(
fbo.id(), file.TailPointer(), file.Branch) {
return true
}
if _, ok := fbo.dirtyFiles[file.TailPointer()]; ok {
return ok
}
_, ok := fbo.unrefCache[file.TailRef()]
return ok
} | go | func (fbo *folderBlockOps) IsDirty(lState *kbfssync.LockState, file data.Path) bool {
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
// A dirty file should probably match all three of these, but
// check them individually just in case.
if fbo.config.DirtyBlockCache().IsDirty(
fbo.id(), file.TailPointer(), file.Branch) {
return true
}
if _, ok := fbo.dirtyFiles[file.TailPointer()]; ok {
return ok
}
_, ok := fbo.unrefCache[file.TailRef()]
return ok
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"IsDirty",
"(",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"file",
"data",
".",
"Path",
")",
"bool",
"{",
"fbo",
".",
"blockLock",
".",
"RLock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"RUnlock",
"(",
"lState",
")",
"\n",
"// A dirty file should probably match all three of these, but",
"// check them individually just in case.",
"if",
"fbo",
".",
"config",
".",
"DirtyBlockCache",
"(",
")",
".",
"IsDirty",
"(",
"fbo",
".",
"id",
"(",
")",
",",
"file",
".",
"TailPointer",
"(",
")",
",",
"file",
".",
"Branch",
")",
"{",
"return",
"true",
"\n",
"}",
"\n\n",
"if",
"_",
",",
"ok",
":=",
"fbo",
".",
"dirtyFiles",
"[",
"file",
".",
"TailPointer",
"(",
")",
"]",
";",
"ok",
"{",
"return",
"ok",
"\n",
"}",
"\n\n",
"_",
",",
"ok",
":=",
"fbo",
".",
"unrefCache",
"[",
"file",
".",
"TailRef",
"(",
")",
"]",
"\n",
"return",
"ok",
"\n",
"}"
] | // IsDirty returns whether the given file is dirty; if false is
// returned, then the file doesn't need to be synced. | [
"IsDirty",
"returns",
"whether",
"the",
"given",
"file",
"is",
"dirty",
";",
"if",
"false",
"is",
"returned",
"then",
"the",
"file",
"doesn",
"t",
"need",
"to",
"be",
"synced",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2357-L2373 |
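IsDirty above treats the file as dirty if any one of three trackers knows about it. A minimal sketch of that check, with hypothetical types standing in for the KBFS caches:

// Check several trackers; any one of them marks the file dirty.
package main

import "fmt"

type ref string

type dirtyState struct {
	dirtyBlocks map[ref]bool
	dirtyFiles  map[ref]bool
	unrefCache  map[ref]bool
}

// isDirty reports whether any of the trackers knows about the file.
func (s *dirtyState) isDirty(r ref) bool {
	if s.dirtyBlocks[r] {
		return true
	}
	if _, ok := s.dirtyFiles[r]; ok {
		return true
	}
	_, ok := s.unrefCache[r]
	return ok
}

func main() {
	s := &dirtyState{
		dirtyBlocks: map[ref]bool{},
		dirtyFiles:  map[ref]bool{"a": true},
		unrefCache:  map[ref]bool{},
	}
	fmt.Println(s.isDirty("a"), s.isDirty("b")) // true false
}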
159,296 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | ClearCacheInfo | func (fbo *folderBlockOps) ClearCacheInfo(
lState *kbfssync.LockState, file data.Path) error {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
return fbo.clearCacheInfoLocked(lState, file)
} | go | func (fbo *folderBlockOps) ClearCacheInfo(
lState *kbfssync.LockState, file data.Path) error {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
return fbo.clearCacheInfoLocked(lState, file)
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"ClearCacheInfo",
"(",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"file",
"data",
".",
"Path",
")",
"error",
"{",
"fbo",
".",
"blockLock",
".",
"Lock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"Unlock",
"(",
"lState",
")",
"\n",
"return",
"fbo",
".",
"clearCacheInfoLocked",
"(",
"lState",
",",
"file",
")",
"\n",
"}"
] | // ClearCacheInfo removes any cached info for the given file. | [
"ClearCacheInfo",
"removes",
"any",
"cached",
"info",
"for",
"the",
"the",
"given",
"file",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2428-L2433 |
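ClearCacheInfo follows the convention used throughout this file: the exported method takes blockLock and delegates to a *Locked helper that assumes the lock is already held. A tiny illustration of that convention with invented names (not the KBFS types):

// Lock-wrapper convention: exported methods acquire the lock, then call
// an unexported *Locked variant that assumes the lock is held.
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu   sync.Mutex
	info map[string]string
}

// Clear is the public entry point; it acquires the lock.
func (c *cache) Clear(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.clearLocked(key)
}

// clearLocked assumes c.mu is already held by the caller.
func (c *cache) clearLocked(key string) {
	delete(c.info, key)
}

func main() {
	c := &cache{info: map[string]string{"f": "dirty"}}
	c.Clear("f")
	fmt.Println(len(c.info)) // 0
}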
159,297 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | revertSyncInfoAfterRecoverableError | func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
ctx context.Context, blocksToRemove []data.BlockPointer, result fileSyncState) {
si := result.si
savedSi := result.savedSi
// Save the blocks we need to clean up on the next attempt.
toClean := si.toCleanIfUnused
newIndirect := make(map[data.BlockPointer]bool)
for _, ptr := range result.newIndirectFileBlockPtrs {
newIndirect[ptr] = true
}
// Propagate all unrefs forward, except those that belong to new
// blocks that were created during the sync.
unrefs := make([]data.BlockInfo, 0, len(si.unrefs))
for _, unref := range si.unrefs {
if newIndirect[unref.BlockPointer] {
fbo.vlog.CLogf(
nil, libkb.VLog1, "Dropping unref %v", unref)
continue
}
unrefs = append(unrefs, unref)
}
// This sync will be retried and needs new blocks, so
// reset everything in the sync info.
*si = *savedSi
si.toCleanIfUnused = toClean
si.unrefs = unrefs
if si.bps == nil {
return
}
// Mark any bad pointers so they get skipped next time.
blocksToRemoveSet := make(map[data.BlockPointer]bool)
for _, ptr := range blocksToRemove {
blocksToRemoveSet[ptr] = true
}
newBps, err := savedSi.bps.deepCopyWithBlacklist(ctx, blocksToRemoveSet)
if err != nil {
return
}
si.bps = newBps
} | go | func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
ctx context.Context, blocksToRemove []data.BlockPointer, result fileSyncState) {
si := result.si
savedSi := result.savedSi
// Save the blocks we need to clean up on the next attempt.
toClean := si.toCleanIfUnused
newIndirect := make(map[data.BlockPointer]bool)
for _, ptr := range result.newIndirectFileBlockPtrs {
newIndirect[ptr] = true
}
// Propagate all unrefs forward, except those that belong to new
// blocks that were created during the sync.
unrefs := make([]data.BlockInfo, 0, len(si.unrefs))
for _, unref := range si.unrefs {
if newIndirect[unref.BlockPointer] {
fbo.vlog.CLogf(
nil, libkb.VLog1, "Dropping unref %v", unref)
continue
}
unrefs = append(unrefs, unref)
}
// This sync will be retried and needs new blocks, so
// reset everything in the sync info.
*si = *savedSi
si.toCleanIfUnused = toClean
si.unrefs = unrefs
if si.bps == nil {
return
}
// Mark any bad pointers so they get skipped next time.
blocksToRemoveSet := make(map[data.BlockPointer]bool)
for _, ptr := range blocksToRemove {
blocksToRemoveSet[ptr] = true
}
newBps, err := savedSi.bps.deepCopyWithBlacklist(ctx, blocksToRemoveSet)
if err != nil {
return
}
si.bps = newBps
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"revertSyncInfoAfterRecoverableError",
"(",
"ctx",
"context",
".",
"Context",
",",
"blocksToRemove",
"[",
"]",
"data",
".",
"BlockPointer",
",",
"result",
"fileSyncState",
")",
"{",
"si",
":=",
"result",
".",
"si",
"\n",
"savedSi",
":=",
"result",
".",
"savedSi",
"\n\n",
"// Save the blocks we need to clean up on the next attempt.",
"toClean",
":=",
"si",
".",
"toCleanIfUnused",
"\n\n",
"newIndirect",
":=",
"make",
"(",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"bool",
")",
"\n",
"for",
"_",
",",
"ptr",
":=",
"range",
"result",
".",
"newIndirectFileBlockPtrs",
"{",
"newIndirect",
"[",
"ptr",
"]",
"=",
"true",
"\n",
"}",
"\n\n",
"// Propagate all unrefs forward, except those that belong to new",
"// blocks that were created during the sync.",
"unrefs",
":=",
"make",
"(",
"[",
"]",
"data",
".",
"BlockInfo",
",",
"0",
",",
"len",
"(",
"si",
".",
"unrefs",
")",
")",
"\n",
"for",
"_",
",",
"unref",
":=",
"range",
"si",
".",
"unrefs",
"{",
"if",
"newIndirect",
"[",
"unref",
".",
"BlockPointer",
"]",
"{",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"nil",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"unref",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"unrefs",
"=",
"append",
"(",
"unrefs",
",",
"unref",
")",
"\n",
"}",
"\n\n",
"// This sync will be retried and needs new blocks, so",
"// reset everything in the sync info.",
"*",
"si",
"=",
"*",
"savedSi",
"\n",
"si",
".",
"toCleanIfUnused",
"=",
"toClean",
"\n",
"si",
".",
"unrefs",
"=",
"unrefs",
"\n",
"if",
"si",
".",
"bps",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n\n",
"// Mark any bad pointers so they get skipped next time.",
"blocksToRemoveSet",
":=",
"make",
"(",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"bool",
")",
"\n",
"for",
"_",
",",
"ptr",
":=",
"range",
"blocksToRemove",
"{",
"blocksToRemoveSet",
"[",
"ptr",
"]",
"=",
"true",
"\n",
"}",
"\n\n",
"newBps",
",",
"err",
":=",
"savedSi",
".",
"bps",
".",
"deepCopyWithBlacklist",
"(",
"ctx",
",",
"blocksToRemoveSet",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"si",
".",
"bps",
"=",
"newBps",
"\n",
"}"
] | // revertSyncInfoAfterRecoverableError updates the saved sync info to
// include all the blocks from before the error, except for those that
// have encountered recoverable block errors themselves. | [
"revertSyncInfoAfterRecoverableError",
"updates",
"the",
"saved",
"sync",
"info",
"to",
"include",
"all",
"the",
"blocks",
"from",
"before",
"the",
"error",
"except",
"for",
"those",
"that",
"have",
"encountered",
"recoverable",
"block",
"errors",
"themselves",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2438-L2483 |
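revertSyncInfoAfterRecoverableError above rebuilds the unref list by copying it forward while skipping entries created by the failed sync, and blacklists bad pointers for the retry. A short sketch of that copy-and-skip step, using generic names rather than KBFS types:

// Copy a slice forward while dropping entries that belong to a skip set.
package main

import "fmt"

type blockPtr string

// filterOut returns the entries of in that are not present in skip,
// preserving order and leaving the input slice untouched.
func filterOut(in []blockPtr, skip map[blockPtr]bool) []blockPtr {
	out := make([]blockPtr, 0, len(in))
	for _, p := range in {
		if skip[p] {
			continue // drop pointers created by the failed sync itself
		}
		out = append(out, p)
	}
	return out
}

func main() {
	skip := map[blockPtr]bool{"new1": true}
	fmt.Println(filterOut([]blockPtr{"old1", "new1", "old2"}, skip)) // [old1 old2]
}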
159,298 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | mergeDirtyEntryWithDBM | func (fbo *folderBlockOps) mergeDirtyEntryWithDBM(
ctx context.Context, lState *kbfssync.LockState, file data.Path, md libkey.KeyMetadata,
dbm dirBlockMap, dirtyDe data.DirEntry) error {
// Lock and fetch for reading only, any dirty blocks will go into
// the dbm.
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, md)
if err != nil {
return err
}
dd := fbo.newDirDataWithDBMLocked(
lState, *file.ParentPath(), chargedTo, md, dbm)
unrefs, err := dd.SetEntry(ctx, file.TailName(), dirtyDe)
if err != nil {
return err
}
if len(unrefs) != 0 {
return errors.Errorf(
"Merging dirty entry produced %d new unrefs", len(unrefs))
}
return nil
} | go | func (fbo *folderBlockOps) mergeDirtyEntryWithDBM(
ctx context.Context, lState *kbfssync.LockState, file data.Path, md libkey.KeyMetadata,
dbm dirBlockMap, dirtyDe data.DirEntry) error {
// Lock and fetch for reading only, any dirty blocks will go into
// the dbm.
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, md)
if err != nil {
return err
}
dd := fbo.newDirDataWithDBMLocked(
lState, *file.ParentPath(), chargedTo, md, dbm)
unrefs, err := dd.SetEntry(ctx, file.TailName(), dirtyDe)
if err != nil {
return err
}
if len(unrefs) != 0 {
return errors.Errorf(
"Merging dirty entry produced %d new unrefs", len(unrefs))
}
return nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"mergeDirtyEntryWithDBM",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"file",
"data",
".",
"Path",
",",
"md",
"libkey",
".",
"KeyMetadata",
",",
"dbm",
"dirBlockMap",
",",
"dirtyDe",
"data",
".",
"DirEntry",
")",
"error",
"{",
"// Lock and fetch for reading only, any dirty blocks will go into",
"// the dbm.",
"fbo",
".",
"blockLock",
".",
"RLock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"RUnlock",
"(",
"lState",
")",
"\n\n",
"chargedTo",
",",
"err",
":=",
"fbo",
".",
"getChargedToLocked",
"(",
"ctx",
",",
"lState",
",",
"md",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"dd",
":=",
"fbo",
".",
"newDirDataWithDBMLocked",
"(",
"lState",
",",
"*",
"file",
".",
"ParentPath",
"(",
")",
",",
"chargedTo",
",",
"md",
",",
"dbm",
")",
"\n",
"unrefs",
",",
"err",
":=",
"dd",
".",
"SetEntry",
"(",
"ctx",
",",
"file",
".",
"TailName",
"(",
")",
",",
"dirtyDe",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"len",
"(",
"unrefs",
")",
"!=",
"0",
"{",
"return",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"len",
"(",
"unrefs",
")",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // mergeDirtyEntryWithDBM sets the entry for a file into a directory,
// storing all the affected blocks into `dbm` rather than the dirty
// block cache. It must only be called with an entry that's already
// been written to the dirty block cache, such that no new blocks are
// dirtied. | [
"mergeDirtyEntryWithDBM",
"sets",
"the",
"entry",
"for",
"a",
"file",
"into",
"a",
"directory",
"storing",
"all",
"the",
"affected",
"blocks",
"into",
"dbm",
"rather",
"than",
"the",
"dirty",
"block",
"cache",
".",
"It",
"must",
"only",
"be",
"called",
"with",
"an",
"entry",
"that",
"s",
"already",
"been",
"written",
"to",
"the",
"dirty",
"block",
"cache",
"such",
"that",
"no",
"new",
"blocks",
"are",
"dirtied",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2695-L2719 |
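mergeDirtyEntryWithDBM above routes directory-block updates into a dirBlockMap instead of the dirty block cache. One way to read that design is as writing through a sink chosen by the caller; the sketch below illustrates the idea with invented interface and type names, not the actual KBFS types:

// Write-through-a-sink pattern: the same update code can target either a
// live cache or a staging map, depending on what the caller passes in.
package main

import "fmt"

type blockSink interface {
	put(id string, blk []byte)
}

type stagingMap map[string][]byte

func (m stagingMap) put(id string, blk []byte) { m[id] = blk }

// updateEntry writes a (fake) directory block into whichever sink it is given.
func updateEntry(sink blockSink, name string) {
	sink.put("dir-block", []byte("entry:"+name))
}

func main() {
	staging := stagingMap{}
	updateEntry(staging, "file.txt")
	fmt.Println(string(staging["dir-block"])) // entry:file.txt
}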
159,299 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | cleanUpUnusedBlocks | func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error {
numToClean := len(syncState.si.toCleanIfUnused)
if numToClean == 0 {
return nil
}
// What blocks are referenced in the successful MD?
refs := make(map[data.BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr == data.ZeroPtr {
panic("Unexpected zero ref ptr in a sync MD revision")
}
refs[ptr] = true
}
for _, update := range op.allUpdates() {
if update.Ref == data.ZeroPtr {
panic("Unexpected zero update ref ptr in a sync MD revision")
}
refs[update.Ref] = true
}
}
// For each MD to clean, clean up the old failed blocks
// immediately if the merge status matches the successful put, if
// they didn't get referenced in the successful put. If the merge
// status is different (e.g., we ended up on a conflict branch),
// clean it up only if the original revision failed. If the same
// block appears more than once, the one with a different merged
// status takes precedence (which will always come earlier in the
// list of MDs).
blocksSeen := make(map[data.BlockPointer]bool)
for _, oldMD := range syncState.si.toCleanIfUnused {
bdType := blockDeleteAlways
if oldMD.md.MergedStatus() != md.MergedStatus() {
bdType = blockDeleteOnMDFail
}
failedBps := newBlockPutStateMemory(oldMD.bps.numBlocks())
for _, ptr := range oldMD.bps.ptrs() {
if ptr == data.ZeroPtr {
panic("Unexpected zero block ptr in an old sync MD revision")
}
if blocksSeen[ptr] {
continue
}
blocksSeen[ptr] = true
if refs[ptr] && bdType == blockDeleteAlways {
continue
}
failedBps.blockStates[ptr] = blockState{}
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Cleaning up block %v from a previous "+
"failed revision %d (oldMD is %s, bdType=%d)", ptr,
oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
}
if len(failedBps.blockStates) > 0 {
fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
}
}
return nil
} | go | func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error {
numToClean := len(syncState.si.toCleanIfUnused)
if numToClean == 0 {
return nil
}
// What blocks are referenced in the successful MD?
refs := make(map[data.BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr == data.ZeroPtr {
panic("Unexpected zero ref ptr in a sync MD revision")
}
refs[ptr] = true
}
for _, update := range op.allUpdates() {
if update.Ref == data.ZeroPtr {
panic("Unexpected zero update ref ptr in a sync MD revision")
}
refs[update.Ref] = true
}
}
// For each MD to clean, clean up the old failed blocks
// immediately if the merge status matches the successful put, if
// they didn't get referenced in the successful put. If the merge
// status is different (e.g., we ended up on a conflict branch),
// clean it up only if the original revision failed. If the same
// block appears more than once, the one with a different merged
// status takes precedence (which will always come earlier in the
// list of MDs).
blocksSeen := make(map[data.BlockPointer]bool)
for _, oldMD := range syncState.si.toCleanIfUnused {
bdType := blockDeleteAlways
if oldMD.md.MergedStatus() != md.MergedStatus() {
bdType = blockDeleteOnMDFail
}
failedBps := newBlockPutStateMemory(oldMD.bps.numBlocks())
for _, ptr := range oldMD.bps.ptrs() {
if ptr == data.ZeroPtr {
panic("Unexpected zero block ptr in an old sync MD revision")
}
if blocksSeen[ptr] {
continue
}
blocksSeen[ptr] = true
if refs[ptr] && bdType == blockDeleteAlways {
continue
}
failedBps.blockStates[ptr] = blockState{}
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Cleaning up block %v from a previous "+
"failed revision %d (oldMD is %s, bdType=%d)", ptr,
oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
}
if len(failedBps.blockStates) > 0 {
fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
}
}
return nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"cleanUpUnusedBlocks",
"(",
"ctx",
"context",
".",
"Context",
",",
"md",
"ReadOnlyRootMetadata",
",",
"syncState",
"fileSyncState",
",",
"fbm",
"*",
"folderBlockManager",
")",
"error",
"{",
"numToClean",
":=",
"len",
"(",
"syncState",
".",
"si",
".",
"toCleanIfUnused",
")",
"\n",
"if",
"numToClean",
"==",
"0",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"// What blocks are referenced in the successful MD?",
"refs",
":=",
"make",
"(",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"bool",
")",
"\n",
"for",
"_",
",",
"op",
":=",
"range",
"md",
".",
"data",
".",
"Changes",
".",
"Ops",
"{",
"for",
"_",
",",
"ptr",
":=",
"range",
"op",
".",
"Refs",
"(",
")",
"{",
"if",
"ptr",
"==",
"data",
".",
"ZeroPtr",
"{",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"refs",
"[",
"ptr",
"]",
"=",
"true",
"\n",
"}",
"\n",
"for",
"_",
",",
"update",
":=",
"range",
"op",
".",
"allUpdates",
"(",
")",
"{",
"if",
"update",
".",
"Ref",
"==",
"data",
".",
"ZeroPtr",
"{",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"refs",
"[",
"update",
".",
"Ref",
"]",
"=",
"true",
"\n",
"}",
"\n",
"}",
"\n\n",
"// For each MD to clean, clean up the old failed blocks",
"// immediately if the merge status matches the successful put, if",
"// they didn't get referenced in the successful put. If the merge",
"// status is different (e.g., we ended up on a conflict branch),",
"// clean it up only if the original revision failed. If the same",
"// block appears more than once, the one with a different merged",
"// status takes precedence (which will always come earlier in the",
"// list of MDs).",
"blocksSeen",
":=",
"make",
"(",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"bool",
")",
"\n",
"for",
"_",
",",
"oldMD",
":=",
"range",
"syncState",
".",
"si",
".",
"toCleanIfUnused",
"{",
"bdType",
":=",
"blockDeleteAlways",
"\n",
"if",
"oldMD",
".",
"md",
".",
"MergedStatus",
"(",
")",
"!=",
"md",
".",
"MergedStatus",
"(",
")",
"{",
"bdType",
"=",
"blockDeleteOnMDFail",
"\n",
"}",
"\n\n",
"failedBps",
":=",
"newBlockPutStateMemory",
"(",
"oldMD",
".",
"bps",
".",
"numBlocks",
"(",
")",
")",
"\n",
"for",
"_",
",",
"ptr",
":=",
"range",
"oldMD",
".",
"bps",
".",
"ptrs",
"(",
")",
"{",
"if",
"ptr",
"==",
"data",
".",
"ZeroPtr",
"{",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"blocksSeen",
"[",
"ptr",
"]",
"{",
"continue",
"\n",
"}",
"\n",
"blocksSeen",
"[",
"ptr",
"]",
"=",
"true",
"\n",
"if",
"refs",
"[",
"ptr",
"]",
"&&",
"bdType",
"==",
"blockDeleteAlways",
"{",
"continue",
"\n",
"}",
"\n",
"failedBps",
".",
"blockStates",
"[",
"ptr",
"]",
"=",
"blockState",
"{",
"}",
"\n",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"ptr",
",",
"oldMD",
".",
"md",
".",
"Revision",
"(",
")",
",",
"oldMD",
".",
"md",
".",
"MergedStatus",
"(",
")",
",",
"bdType",
")",
"\n",
"}",
"\n\n",
"if",
"len",
"(",
"failedBps",
".",
"blockStates",
")",
">",
"0",
"{",
"fbm",
".",
"cleanUpBlockState",
"(",
"oldMD",
".",
"md",
",",
"failedBps",
",",
"bdType",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // cleanUpUnusedBlocks cleans up the blocks from any previous failed
// sync attempts. | [
"cleanUpUnusedBlocks",
"cleans",
"up",
"the",
"blocks",
"from",
"any",
"previous",
"failed",
"sync",
"attempts",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2847-L2911 |
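cleanUpUnusedBlocks above deduplicates pointers across the failed revisions with a seen-set, so the first revision that mentions a pointer decides how it is cleaned up. A minimal illustration of that first-occurrence-wins rule (not KBFS code):

// First-occurrence-wins dedup across an ordered list of revisions.
package main

import "fmt"

func firstWins(revisions [][]string) map[string]int {
	decided := make(map[string]int) // pointer -> index of deciding revision
	seen := make(map[string]bool)
	for i, ptrs := range revisions {
		for _, p := range ptrs {
			if seen[p] {
				continue // an earlier revision already decided this pointer
			}
			seen[p] = true
			decided[p] = i
		}
	}
	return decided
}

func main() {
	fmt.Println(firstWins([][]string{{"a", "b"}, {"b", "c"}})) // map[a:0 b:0 c:1]
}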