id int32 0-167k | repo stringlengths 5-54 | path stringlengths 4-155 | func_name stringlengths 1-118 | original_string stringlengths 52-85.5k | language stringclasses 1 value | code stringlengths 52-85.5k | code_tokens sequencelengths 21-1.41k | docstring stringlengths 6-2.61k | docstring_tokens sequencelengths 3-215 | sha stringlengths 40-40 | url stringlengths 85-252 |
---|---|---|---|---|---|---|---|---|---|---|---|
159,300 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | FinishSyncLocked | func (fbo *folderBlockOps) FinishSyncLocked(
ctx context.Context, lState *kbfssync.LockState,
oldPath, newPath data.Path, md ReadOnlyRootMetadata,
syncState fileSyncState, fbm *folderBlockManager) (
stillDirty bool, err error) {
fbo.blockLock.AssertLocked(lState)
dirtyBcache := fbo.config.DirtyBlockCache()
for _, ptr := range syncState.oldFileBlockPtrs {
fbo.vlog.CLogf(ctx, libkb.VLog1, "Deleting dirty ptr %v", ptr)
if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
return true, err
}
}
bcache := fbo.config.BlockCache()
for _, ptr := range syncState.newIndirectFileBlockPtrs {
err := bcache.DeletePermanent(ptr.ID)
if err != nil {
fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
ptr.ID, err)
}
}
stillDirty, err = fbo.doDeferredWritesLocked(
ctx, lState, md, oldPath, newPath)
if err != nil {
return true, err
}
// Clear cached info for the old path. We are guaranteed that any
// concurrent write to this file was deferred, even if it was to a
// block that wasn't currently being sync'd, since the top-most
// block is always in dirtyFiles and is always dirtied during a
// write/truncate.
//
// Also, we can get rid of all the sync state that might have
// happened during the sync, since we will replay the writes
// below anyway.
if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
return true, err
}
if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
return true, err
}
return stillDirty, nil
} | go | func (fbo *folderBlockOps) FinishSyncLocked(
ctx context.Context, lState *kbfssync.LockState,
oldPath, newPath data.Path, md ReadOnlyRootMetadata,
syncState fileSyncState, fbm *folderBlockManager) (
stillDirty bool, err error) {
fbo.blockLock.AssertLocked(lState)
dirtyBcache := fbo.config.DirtyBlockCache()
for _, ptr := range syncState.oldFileBlockPtrs {
fbo.vlog.CLogf(ctx, libkb.VLog1, "Deleting dirty ptr %v", ptr)
if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
return true, err
}
}
bcache := fbo.config.BlockCache()
for _, ptr := range syncState.newIndirectFileBlockPtrs {
err := bcache.DeletePermanent(ptr.ID)
if err != nil {
fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
ptr.ID, err)
}
}
stillDirty, err = fbo.doDeferredWritesLocked(
ctx, lState, md, oldPath, newPath)
if err != nil {
return true, err
}
// Clear cached info for the old path. We are guaranteed that any
// concurrent write to this file was deferred, even if it was to a
// block that wasn't currently being sync'd, since the top-most
// block is always in dirtyFiles and is always dirtied during a
// write/truncate.
//
// Also, we can get rid of all the sync state that might have
// happened during the sync, since we will replay the writes
// below anyway.
if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
return true, err
}
if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
return true, err
}
return stillDirty, nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"FinishSyncLocked",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"oldPath",
",",
"newPath",
"data",
".",
"Path",
",",
"md",
"ReadOnlyRootMetadata",
",",
"syncState",
"fileSyncState",
",",
"fbm",
"*",
"folderBlockManager",
")",
"(",
"stillDirty",
"bool",
",",
"err",
"error",
")",
"{",
"fbo",
".",
"blockLock",
".",
"AssertLocked",
"(",
"lState",
")",
"\n\n",
"dirtyBcache",
":=",
"fbo",
".",
"config",
".",
"DirtyBlockCache",
"(",
")",
"\n",
"for",
"_",
",",
"ptr",
":=",
"range",
"syncState",
".",
"oldFileBlockPtrs",
"{",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"ptr",
")",
"\n",
"if",
"err",
":=",
"dirtyBcache",
".",
"Delete",
"(",
"fbo",
".",
"id",
"(",
")",
",",
"ptr",
",",
"fbo",
".",
"branch",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"true",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"bcache",
":=",
"fbo",
".",
"config",
".",
"BlockCache",
"(",
")",
"\n",
"for",
"_",
",",
"ptr",
":=",
"range",
"syncState",
".",
"newIndirectFileBlockPtrs",
"{",
"err",
":=",
"bcache",
".",
"DeletePermanent",
"(",
"ptr",
".",
"ID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"fbo",
".",
"log",
".",
"CWarningf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"ptr",
".",
"ID",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"stillDirty",
",",
"err",
"=",
"fbo",
".",
"doDeferredWritesLocked",
"(",
"ctx",
",",
"lState",
",",
"md",
",",
"oldPath",
",",
"newPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"true",
",",
"err",
"\n",
"}",
"\n\n",
"// Clear cached info for the old path. We are guaranteed that any",
"// concurrent write to this file was deferred, even if it was to a",
"// block that wasn't currently being sync'd, since the top-most",
"// block is always in dirtyFiles and is always dirtied during a",
"// write/truncate.",
"//",
"// Also, we can get rid of all the sync state that might have",
"// happened during the sync, since we will replay the writes",
"// below anyway.",
"if",
"err",
":=",
"fbo",
".",
"clearCacheInfoLocked",
"(",
"lState",
",",
"oldPath",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"true",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"fbo",
".",
"cleanUpUnusedBlocks",
"(",
"ctx",
",",
"md",
",",
"syncState",
",",
"fbm",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"true",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"stillDirty",
",",
"nil",
"\n",
"}"
] | // FinishSyncLocked finishes the sync process for a file, given the
// state from StartSync. Specifically, it re-applies any writes that
// happened since the call to StartSync. | [
"FinishSyncLocked",
"finishes",
"the",
"sync",
"process",
"for",
"a",
"file",
"given",
"the",
"state",
"from",
"StartSync",
".",
"Specifically",
"it",
"re",
"-",
"applies",
"any",
"writes",
"that",
"happened",
"since",
"the",
"call",
"to",
"StartSync",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L2949-L2997 |
159,301 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | notifyErrListenersLocked | func (fbo *folderBlockOps) notifyErrListenersLocked(
lState *kbfssync.LockState, ptr data.BlockPointer, err error) {
fbo.blockLock.AssertLocked(lState)
if isRecoverableBlockError(err) {
// Don't bother any listeners with this error, since the sync
// will be retried. Unless the sync has reached its retry
// limit, but in that case the listeners will just proceed as
// normal once the dirty block cache bytes are freed, and
// that's ok since this error isn't fatal.
return
}
df := fbo.dirtyFiles[ptr]
if df != nil {
df.NotifyErrListeners(err)
}
} | go | func (fbo *folderBlockOps) notifyErrListenersLocked(
lState *kbfssync.LockState, ptr data.BlockPointer, err error) {
fbo.blockLock.AssertLocked(lState)
if isRecoverableBlockError(err) {
// Don't bother any listeners with this error, since the sync
// will be retried. Unless the sync has reached its retry
// limit, but in that case the listeners will just proceed as
// normal once the dirty block cache bytes are freed, and
// that's ok since this error isn't fatal.
return
}
df := fbo.dirtyFiles[ptr]
if df != nil {
df.NotifyErrListeners(err)
}
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"notifyErrListenersLocked",
"(",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"ptr",
"data",
".",
"BlockPointer",
",",
"err",
"error",
")",
"{",
"fbo",
".",
"blockLock",
".",
"AssertLocked",
"(",
"lState",
")",
"\n",
"if",
"isRecoverableBlockError",
"(",
"err",
")",
"{",
"// Don't bother any listeners with this error, since the sync",
"// will be retried. Unless the sync has reached its retry",
"// limit, but in that case the listeners will just proceed as",
"// normal once the dirty block cache bytes are freed, and",
"// that's ok since this error isn't fatal.",
"return",
"\n",
"}",
"\n",
"df",
":=",
"fbo",
".",
"dirtyFiles",
"[",
"ptr",
"]",
"\n",
"if",
"df",
"!=",
"nil",
"{",
"df",
".",
"NotifyErrListeners",
"(",
"err",
")",
"\n",
"}",
"\n",
"}"
] | // notifyErrListeners notifies any write operations that are blocked
// on a file so that they can learn about unrecoverable sync errors. | [
"notifyErrListeners",
"notifies",
"any",
"write",
"operations",
"that",
"are",
"blocked",
"on",
"a",
"file",
"so",
"that",
"they",
"can",
"learn",
"about",
"unrecoverable",
"sync",
"errors",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3001-L3016 |
159,302 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | searchForNodesInDirLocked | func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
lState *kbfssync.LockState, cache NodeCache, newPtrs map[data.BlockPointer]bool,
kmd libkey.KeyMetadata, rootNode Node, currDir data.Path, nodeMap map[data.BlockPointer]Node,
numNodesFoundSoFar int) (int, error) {
fbo.blockLock.AssertAnyLocked(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return 0, err
}
dd := fbo.newDirDataLocked(lState, currDir, chargedTo, kmd)
entries, err := dd.GetEntries(ctx)
if err != nil {
return 0, err
}
// getDirLocked may have unlocked blockLock, which means the cache
// could have changed out from under us. Verify that didn't
// happen, so we can avoid messing it up with nodes from an old MD
// version. If it did happen, return a special error that lets
// the caller know they should retry with a fresh cache.
if currDir.Path[0].BlockPointer !=
cache.PathFromNode(rootNode).TailPointer() {
return 0, searchWithOutOfDateCacheError{}
}
if numNodesFoundSoFar >= len(nodeMap) {
return 0, nil
}
numNodesFound := 0
for name, de := range entries {
if _, ok := nodeMap[de.BlockPointer]; ok {
childPath := currDir.ChildPath(name, de.BlockPointer)
// make a node for every pathnode
n := rootNode
for i, pn := range childPath.Path[1:] {
if !pn.BlockPointer.IsValid() {
// Temporary debugging output for KBFS-1764 -- the
// GetOrCreate call below will panic.
fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
"path.path=%v (index %d), name=%s, de=%#v, "+
"nodeMap=%v, newPtrs=%v, kmd=%#v",
childPath, childPath.Path, i, name, de, nodeMap,
newPtrs, kmd)
}
et := data.Dir
if i == len(childPath.Path)-2 {
et = de.Type
}
n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n, et)
if err != nil {
return 0, err
}
}
nodeMap[de.BlockPointer] = n
numNodesFound++
if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
return numNodesFound, nil
}
}
// otherwise, recurse if this represents an updated block
if _, ok := newPtrs[de.BlockPointer]; de.Type == data.Dir && ok {
childPath := currDir.ChildPath(name, de.BlockPointer)
n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
newPtrs, kmd, rootNode, childPath, nodeMap,
numNodesFoundSoFar+numNodesFound)
if err != nil {
return 0, err
}
numNodesFound += n
if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
return numNodesFound, nil
}
}
}
return numNodesFound, nil
} | go | func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
lState *kbfssync.LockState, cache NodeCache, newPtrs map[data.BlockPointer]bool,
kmd libkey.KeyMetadata, rootNode Node, currDir data.Path, nodeMap map[data.BlockPointer]Node,
numNodesFoundSoFar int) (int, error) {
fbo.blockLock.AssertAnyLocked(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return 0, err
}
dd := fbo.newDirDataLocked(lState, currDir, chargedTo, kmd)
entries, err := dd.GetEntries(ctx)
if err != nil {
return 0, err
}
// getDirLocked may have unlocked blockLock, which means the cache
// could have changed out from under us. Verify that didn't
// happen, so we can avoid messing it up with nodes from an old MD
// version. If it did happen, return a special error that lets
// the caller know they should retry with a fresh cache.
if currDir.Path[0].BlockPointer !=
cache.PathFromNode(rootNode).TailPointer() {
return 0, searchWithOutOfDateCacheError{}
}
if numNodesFoundSoFar >= len(nodeMap) {
return 0, nil
}
numNodesFound := 0
for name, de := range entries {
if _, ok := nodeMap[de.BlockPointer]; ok {
childPath := currDir.ChildPath(name, de.BlockPointer)
// make a node for every pathnode
n := rootNode
for i, pn := range childPath.Path[1:] {
if !pn.BlockPointer.IsValid() {
// Temporary debugging output for KBFS-1764 -- the
// GetOrCreate call below will panic.
fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
"path.path=%v (index %d), name=%s, de=%#v, "+
"nodeMap=%v, newPtrs=%v, kmd=%#v",
childPath, childPath.Path, i, name, de, nodeMap,
newPtrs, kmd)
}
et := data.Dir
if i == len(childPath.Path)-2 {
et = de.Type
}
n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n, et)
if err != nil {
return 0, err
}
}
nodeMap[de.BlockPointer] = n
numNodesFound++
if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
return numNodesFound, nil
}
}
// otherwise, recurse if this represents an updated block
if _, ok := newPtrs[de.BlockPointer]; de.Type == data.Dir && ok {
childPath := currDir.ChildPath(name, de.BlockPointer)
n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
newPtrs, kmd, rootNode, childPath, nodeMap,
numNodesFoundSoFar+numNodesFound)
if err != nil {
return 0, err
}
numNodesFound += n
if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
return numNodesFound, nil
}
}
}
return numNodesFound, nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"searchForNodesInDirLocked",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"cache",
"NodeCache",
",",
"newPtrs",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"bool",
",",
"kmd",
"libkey",
".",
"KeyMetadata",
",",
"rootNode",
"Node",
",",
"currDir",
"data",
".",
"Path",
",",
"nodeMap",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"Node",
",",
"numNodesFoundSoFar",
"int",
")",
"(",
"int",
",",
"error",
")",
"{",
"fbo",
".",
"blockLock",
".",
"AssertAnyLocked",
"(",
"lState",
")",
"\n\n",
"chargedTo",
",",
"err",
":=",
"fbo",
".",
"getChargedToLocked",
"(",
"ctx",
",",
"lState",
",",
"kmd",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"dd",
":=",
"fbo",
".",
"newDirDataLocked",
"(",
"lState",
",",
"currDir",
",",
"chargedTo",
",",
"kmd",
")",
"\n",
"entries",
",",
"err",
":=",
"dd",
".",
"GetEntries",
"(",
"ctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"// getDirLocked may have unlocked blockLock, which means the cache",
"// could have changed out from under us. Verify that didn't",
"// happen, so we can avoid messing it up with nodes from an old MD",
"// version. If it did happen, return a special error that lets",
"// the caller know they should retry with a fresh cache.",
"if",
"currDir",
".",
"Path",
"[",
"0",
"]",
".",
"BlockPointer",
"!=",
"cache",
".",
"PathFromNode",
"(",
"rootNode",
")",
".",
"TailPointer",
"(",
")",
"{",
"return",
"0",
",",
"searchWithOutOfDateCacheError",
"{",
"}",
"\n",
"}",
"\n\n",
"if",
"numNodesFoundSoFar",
">=",
"len",
"(",
"nodeMap",
")",
"{",
"return",
"0",
",",
"nil",
"\n",
"}",
"\n\n",
"numNodesFound",
":=",
"0",
"\n",
"for",
"name",
",",
"de",
":=",
"range",
"entries",
"{",
"if",
"_",
",",
"ok",
":=",
"nodeMap",
"[",
"de",
".",
"BlockPointer",
"]",
";",
"ok",
"{",
"childPath",
":=",
"currDir",
".",
"ChildPath",
"(",
"name",
",",
"de",
".",
"BlockPointer",
")",
"\n",
"// make a node for every pathnode",
"n",
":=",
"rootNode",
"\n",
"for",
"i",
",",
"pn",
":=",
"range",
"childPath",
".",
"Path",
"[",
"1",
":",
"]",
"{",
"if",
"!",
"pn",
".",
"BlockPointer",
".",
"IsValid",
"(",
")",
"{",
"// Temporary debugging output for KBFS-1764 -- the",
"// GetOrCreate call below will panic.",
"fbo",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
"+",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"childPath",
",",
"childPath",
".",
"Path",
",",
"i",
",",
"name",
",",
"de",
",",
"nodeMap",
",",
"newPtrs",
",",
"kmd",
")",
"\n",
"}",
"\n",
"et",
":=",
"data",
".",
"Dir",
"\n",
"if",
"i",
"==",
"len",
"(",
"childPath",
".",
"Path",
")",
"-",
"2",
"{",
"et",
"=",
"de",
".",
"Type",
"\n",
"}",
"\n",
"n",
",",
"err",
"=",
"cache",
".",
"GetOrCreate",
"(",
"pn",
".",
"BlockPointer",
",",
"pn",
".",
"Name",
",",
"n",
",",
"et",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"nodeMap",
"[",
"de",
".",
"BlockPointer",
"]",
"=",
"n",
"\n",
"numNodesFound",
"++",
"\n",
"if",
"numNodesFoundSoFar",
"+",
"numNodesFound",
">=",
"len",
"(",
"nodeMap",
")",
"{",
"return",
"numNodesFound",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n\n",
"// otherwise, recurse if this represents an updated block",
"if",
"_",
",",
"ok",
":=",
"newPtrs",
"[",
"de",
".",
"BlockPointer",
"]",
";",
"de",
".",
"Type",
"==",
"data",
".",
"Dir",
"&&",
"ok",
"{",
"childPath",
":=",
"currDir",
".",
"ChildPath",
"(",
"name",
",",
"de",
".",
"BlockPointer",
")",
"\n",
"n",
",",
"err",
":=",
"fbo",
".",
"searchForNodesInDirLocked",
"(",
"ctx",
",",
"lState",
",",
"cache",
",",
"newPtrs",
",",
"kmd",
",",
"rootNode",
",",
"childPath",
",",
"nodeMap",
",",
"numNodesFoundSoFar",
"+",
"numNodesFound",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"numNodesFound",
"+=",
"n",
"\n",
"if",
"numNodesFoundSoFar",
"+",
"numNodesFound",
">=",
"len",
"(",
"nodeMap",
")",
"{",
"return",
"numNodesFound",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"numNodesFound",
",",
"nil",
"\n",
"}"
] | // searchForNodesInDirLocked recursively tries to find a path, and
// ultimately a node, to ptr, given the set of pointers that were
// updated in a particular operation. The keys in nodeMap make up the
// set of BlockPointers that are being searched for, and nodeMap is
// updated in place to include the corresponding discovered nodes.
//
// Returns the number of nodes found by this invocation. If the error
// it returns is searchWithOutOfDateCache, the search should be
// retried by the caller with a clean cache. | [
"searchForNodesInDirLocked",
"recursively",
"tries",
"to",
"find",
"a",
"path",
"and",
"ultimately",
"a",
"node",
"to",
"ptr",
"given",
"the",
"set",
"of",
"pointers",
"that",
"were",
"updated",
"in",
"a",
"particular",
"operation",
".",
"The",
"keys",
"in",
"nodeMap",
"make",
"up",
"the",
"set",
"of",
"BlockPointers",
"that",
"are",
"being",
"searched",
"for",
"and",
"nodeMap",
"is",
"updated",
"in",
"place",
"to",
"include",
"the",
"corresponding",
"discovered",
"nodes",
".",
"Returns",
"the",
"number",
"of",
"nodes",
"found",
"by",
"this",
"invocation",
".",
"If",
"the",
"error",
"it",
"returns",
"is",
"searchWithOutOfDateCache",
"the",
"search",
"should",
"be",
"retried",
"by",
"the",
"caller",
"with",
"a",
"clean",
"cache",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3035-L3114 |
159,303 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | SearchForPaths | func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
cache NodeCache, ptrs []data.BlockPointer, newPtrs map[data.BlockPointer]bool,
kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (map[data.BlockPointer]data.Path, error) {
lState := makeFBOLockState()
// Hold the lock while processing the paths so they can't be changed.
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
nodeMap, cache, err :=
fbo.searchForNodesLocked(
ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
if err != nil {
return nil, err
}
paths := make(map[data.BlockPointer]data.Path)
for ptr, n := range nodeMap {
if n == nil {
paths[ptr] = data.Path{}
continue
}
p := cache.PathFromNode(n)
if p.TailPointer() != ptr {
return nil, NodeNotFoundError{ptr}
}
paths[ptr] = p
}
return paths, nil
} | go | func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
cache NodeCache, ptrs []data.BlockPointer, newPtrs map[data.BlockPointer]bool,
kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (map[data.BlockPointer]data.Path, error) {
lState := makeFBOLockState()
// Hold the lock while processing the paths so they can't be changed.
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
nodeMap, cache, err :=
fbo.searchForNodesLocked(
ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
if err != nil {
return nil, err
}
paths := make(map[data.BlockPointer]data.Path)
for ptr, n := range nodeMap {
if n == nil {
paths[ptr] = data.Path{}
continue
}
p := cache.PathFromNode(n)
if p.TailPointer() != ptr {
return nil, NodeNotFoundError{ptr}
}
paths[ptr] = p
}
return paths, nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"SearchForPaths",
"(",
"ctx",
"context",
".",
"Context",
",",
"cache",
"NodeCache",
",",
"ptrs",
"[",
"]",
"data",
".",
"BlockPointer",
",",
"newPtrs",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"bool",
",",
"kmd",
"libkey",
".",
"KeyMetadata",
",",
"rootPtr",
"data",
".",
"BlockPointer",
")",
"(",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"data",
".",
"Path",
",",
"error",
")",
"{",
"lState",
":=",
"makeFBOLockState",
"(",
")",
"\n",
"// Hold the lock while processing the paths so they can't be changed.",
"fbo",
".",
"blockLock",
".",
"RLock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"RUnlock",
"(",
"lState",
")",
"\n",
"nodeMap",
",",
"cache",
",",
"err",
":=",
"fbo",
".",
"searchForNodesLocked",
"(",
"ctx",
",",
"lState",
",",
"cache",
",",
"ptrs",
",",
"newPtrs",
",",
"kmd",
",",
"rootPtr",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"paths",
":=",
"make",
"(",
"map",
"[",
"data",
".",
"BlockPointer",
"]",
"data",
".",
"Path",
")",
"\n",
"for",
"ptr",
",",
"n",
":=",
"range",
"nodeMap",
"{",
"if",
"n",
"==",
"nil",
"{",
"paths",
"[",
"ptr",
"]",
"=",
"data",
".",
"Path",
"{",
"}",
"\n",
"continue",
"\n",
"}",
"\n\n",
"p",
":=",
"cache",
".",
"PathFromNode",
"(",
"n",
")",
"\n",
"if",
"p",
".",
"TailPointer",
"(",
")",
"!=",
"ptr",
"{",
"return",
"nil",
",",
"NodeNotFoundError",
"{",
"ptr",
"}",
"\n",
"}",
"\n",
"paths",
"[",
"ptr",
"]",
"=",
"p",
"\n",
"}",
"\n\n",
"return",
"paths",
",",
"nil",
"\n",
"}"
] | // SearchForPaths is like SearchForNodes, except it returns a
// consistent view of all the paths of the searched-for pointers. | [
"SearchForPaths",
"is",
"like",
"SearchForNodes",
"except",
"it",
"returns",
"a",
"consistent",
"view",
"of",
"all",
"the",
"paths",
"of",
"the",
"searched",
"-",
"for",
"pointers",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3261-L3290 |
159,304 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | UpdateCachedEntryAttributesOnRemovedFile | func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, op *setAttrOp, p data.Path, de data.DirEntry) error {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
_, err := fbo.setCachedAttrLocked(
ctx, lState, kmd, *p.ParentPath(), p.TailName(), op.Attr, de)
return err
} | go | func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
ctx context.Context, lState *kbfssync.LockState,
kmd KeyMetadataWithRootDirEntry, op *setAttrOp, p data.Path, de data.DirEntry) error {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
_, err := fbo.setCachedAttrLocked(
ctx, lState, kmd, *p.ParentPath(), p.TailName(), op.Attr, de)
return err
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"UpdateCachedEntryAttributesOnRemovedFile",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"kmd",
"KeyMetadataWithRootDirEntry",
",",
"op",
"*",
"setAttrOp",
",",
"p",
"data",
".",
"Path",
",",
"de",
"data",
".",
"DirEntry",
")",
"error",
"{",
"fbo",
".",
"blockLock",
".",
"Lock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"Unlock",
"(",
"lState",
")",
"\n",
"_",
",",
"err",
":=",
"fbo",
".",
"setCachedAttrLocked",
"(",
"ctx",
",",
"lState",
",",
"kmd",
",",
"*",
"p",
".",
"ParentPath",
"(",
")",
",",
"p",
".",
"TailName",
"(",
")",
",",
"op",
".",
"Attr",
",",
"de",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
// for the given path of an unlinked file, according to the given op,
// and it makes a new dirty cache entry if one doesn't exist yet. We
// assume Sync will be called eventually on the corresponding open
// file handle, which will clear out the entry. | [
"UpdateCachedEntryAttributesOnRemovedFile",
"updates",
"any",
"cached",
"entry",
"for",
"the",
"given",
"path",
"of",
"an",
"unlinked",
"file",
"according",
"to",
"the",
"given",
"op",
"and",
"it",
"makes",
"a",
"new",
"dirty",
"cache",
"entry",
"if",
"one",
"doesn",
"t",
"exist",
"yet",
".",
"We",
"assume",
"Sync",
"will",
"be",
"called",
"eventually",
"on",
"the",
"corresponding",
"open",
"file",
"handle",
"which",
"will",
"clear",
"out",
"the",
"entry",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3297-L3305 |
159,305 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | UpdatePointers | func (fbo *folderBlockOps) UpdatePointers(
kmd libkey.KeyMetadata, lState *kbfssync.LockState, op op, shouldPrefetch bool,
afterUpdateFn func() error) (affectedNodeIDs []NodeID, err error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
for _, update := range op.allUpdates() {
updatedNode := fbo.updatePointer(
kmd, update.Unref, update.Ref, shouldPrefetch)
if updatedNode != nil {
affectedNodeIDs = append(affectedNodeIDs, updatedNode)
}
}
// Cancel any prefetches for all unreferenced block pointers.
for _, unref := range op.Unrefs() {
fbo.config.BlockOps().Prefetcher().CancelPrefetch(unref)
}
if afterUpdateFn == nil {
return affectedNodeIDs, nil
}
return affectedNodeIDs, afterUpdateFn()
} | go | func (fbo *folderBlockOps) UpdatePointers(
kmd libkey.KeyMetadata, lState *kbfssync.LockState, op op, shouldPrefetch bool,
afterUpdateFn func() error) (affectedNodeIDs []NodeID, err error) {
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
for _, update := range op.allUpdates() {
updatedNode := fbo.updatePointer(
kmd, update.Unref, update.Ref, shouldPrefetch)
if updatedNode != nil {
affectedNodeIDs = append(affectedNodeIDs, updatedNode)
}
}
// Cancel any prefetches for all unreferenced block pointers.
for _, unref := range op.Unrefs() {
fbo.config.BlockOps().Prefetcher().CancelPrefetch(unref)
}
if afterUpdateFn == nil {
return affectedNodeIDs, nil
}
return affectedNodeIDs, afterUpdateFn()
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"UpdatePointers",
"(",
"kmd",
"libkey",
".",
"KeyMetadata",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"op",
"op",
",",
"shouldPrefetch",
"bool",
",",
"afterUpdateFn",
"func",
"(",
")",
"error",
")",
"(",
"affectedNodeIDs",
"[",
"]",
"NodeID",
",",
"err",
"error",
")",
"{",
"fbo",
".",
"blockLock",
".",
"Lock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"Unlock",
"(",
"lState",
")",
"\n",
"for",
"_",
",",
"update",
":=",
"range",
"op",
".",
"allUpdates",
"(",
")",
"{",
"updatedNode",
":=",
"fbo",
".",
"updatePointer",
"(",
"kmd",
",",
"update",
".",
"Unref",
",",
"update",
".",
"Ref",
",",
"shouldPrefetch",
")",
"\n",
"if",
"updatedNode",
"!=",
"nil",
"{",
"affectedNodeIDs",
"=",
"append",
"(",
"affectedNodeIDs",
",",
"updatedNode",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Cancel any prefetches for all unreferenced block pointers.",
"for",
"_",
",",
"unref",
":=",
"range",
"op",
".",
"Unrefs",
"(",
")",
"{",
"fbo",
".",
"config",
".",
"BlockOps",
"(",
")",
".",
"Prefetcher",
"(",
")",
".",
"CancelPrefetch",
"(",
"unref",
")",
"\n",
"}",
"\n\n",
"if",
"afterUpdateFn",
"==",
"nil",
"{",
"return",
"affectedNodeIDs",
",",
"nil",
"\n",
"}",
"\n\n",
"return",
"affectedNodeIDs",
",",
"afterUpdateFn",
"(",
")",
"\n",
"}"
] | // UpdatePointers updates all the pointers in the node cache
// atomically. If `afterUpdateFn` is non-nil, it's called under the
// same block lock under which the pointers were updated. | [
"UpdatePointers",
"updates",
"all",
"the",
"pointers",
"in",
"the",
"node",
"cache",
"atomically",
".",
"If",
"afterUpdateFn",
"is",
"non",
"-",
"nil",
"it",
"s",
"called",
"under",
"the",
"same",
"block",
"lock",
"under",
"which",
"the",
"pointers",
"were",
"updated",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3352-L3375 |
159,306 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | FastForwardAllNodes | func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
lState *kbfssync.LockState, md ReadOnlyRootMetadata) (
changes []NodeChange, affectedNodeIDs []NodeID, err error) {
if fbo.nodeCache == nil {
// Nothing needs to be done!
return nil, nil, nil
}
// Take a hard lock through this whole process. TODO: is there
// any way to relax this? It could lead to file system operation
// timeouts, even on reads, if we hold it too long.
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
nodes := fbo.nodeCache.AllNodes()
if len(nodes) == 0 {
// Nothing needs to be done!
return nil, nil, nil
}
fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forwarding %d nodes", len(nodes))
defer func() {
fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forward complete: %v", err)
}()
rootPath, children := fbo.makeChildrenTreeFromNodesLocked(lState, nodes)
if !rootPath.IsValid() {
return nil, nil, errors.New("Couldn't find the root path")
}
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Fast-forwarding root %v -> %v",
rootPath.Path[0].BlockPointer, md.data.Dir.BlockPointer)
fbo.updatePointer(md, rootPath.Path[0].BlockPointer,
md.data.Dir.BlockPointer, false)
rootPath.Path[0].BlockPointer = md.data.Dir.BlockPointer
rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
if rootNode != nil {
change := NodeChange{Node: rootNode}
for child := range children[rootPath.String()] {
change.DirUpdated = append(change.DirUpdated, child.Name)
}
changes = append(changes, change)
affectedNodeIDs = append(affectedNodeIDs, rootNode.GetID())
}
childChanges, childAffectedNodeIDs, err :=
fbo.fastForwardDirAndChildrenLocked(
ctx, lState, rootPath, children, md)
if err != nil {
return nil, nil, err
}
changes = append(changes, childChanges...)
affectedNodeIDs = append(affectedNodeIDs, childAffectedNodeIDs...)
// Unlink any children that remain.
for _, childPNs := range children {
for child := range childPNs {
fbo.unlinkDuringFastForwardLocked(
ctx, lState, md, child.BlockPointer.Ref())
}
}
return changes, affectedNodeIDs, nil
} | go | func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
lState *kbfssync.LockState, md ReadOnlyRootMetadata) (
changes []NodeChange, affectedNodeIDs []NodeID, err error) {
if fbo.nodeCache == nil {
// Nothing needs to be done!
return nil, nil, nil
}
// Take a hard lock through this whole process. TODO: is there
// any way to relax this? It could lead to file system operation
// timeouts, even on reads, if we hold it too long.
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
nodes := fbo.nodeCache.AllNodes()
if len(nodes) == 0 {
// Nothing needs to be done!
return nil, nil, nil
}
fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forwarding %d nodes", len(nodes))
defer func() {
fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forward complete: %v", err)
}()
rootPath, children := fbo.makeChildrenTreeFromNodesLocked(lState, nodes)
if !rootPath.IsValid() {
return nil, nil, errors.New("Couldn't find the root path")
}
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Fast-forwarding root %v -> %v",
rootPath.Path[0].BlockPointer, md.data.Dir.BlockPointer)
fbo.updatePointer(md, rootPath.Path[0].BlockPointer,
md.data.Dir.BlockPointer, false)
rootPath.Path[0].BlockPointer = md.data.Dir.BlockPointer
rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
if rootNode != nil {
change := NodeChange{Node: rootNode}
for child := range children[rootPath.String()] {
change.DirUpdated = append(change.DirUpdated, child.Name)
}
changes = append(changes, change)
affectedNodeIDs = append(affectedNodeIDs, rootNode.GetID())
}
childChanges, childAffectedNodeIDs, err :=
fbo.fastForwardDirAndChildrenLocked(
ctx, lState, rootPath, children, md)
if err != nil {
return nil, nil, err
}
changes = append(changes, childChanges...)
affectedNodeIDs = append(affectedNodeIDs, childAffectedNodeIDs...)
// Unlink any children that remain.
for _, childPNs := range children {
for child := range childPNs {
fbo.unlinkDuringFastForwardLocked(
ctx, lState, md, child.BlockPointer.Ref())
}
}
return changes, affectedNodeIDs, nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"FastForwardAllNodes",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"md",
"ReadOnlyRootMetadata",
")",
"(",
"changes",
"[",
"]",
"NodeChange",
",",
"affectedNodeIDs",
"[",
"]",
"NodeID",
",",
"err",
"error",
")",
"{",
"if",
"fbo",
".",
"nodeCache",
"==",
"nil",
"{",
"// Nothing needs to be done!",
"return",
"nil",
",",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"// Take a hard lock through this whole process. TODO: is there",
"// any way to relax this? It could lead to file system operation",
"// timeouts, even on reads, if we hold it too long.",
"fbo",
".",
"blockLock",
".",
"Lock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"Unlock",
"(",
"lState",
")",
"\n\n",
"nodes",
":=",
"fbo",
".",
"nodeCache",
".",
"AllNodes",
"(",
")",
"\n",
"if",
"len",
"(",
"nodes",
")",
"==",
"0",
"{",
"// Nothing needs to be done!",
"return",
"nil",
",",
"nil",
",",
"nil",
"\n",
"}",
"\n",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"len",
"(",
"nodes",
")",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"(",
")",
"\n\n",
"rootPath",
",",
"children",
":=",
"fbo",
".",
"makeChildrenTreeFromNodesLocked",
"(",
"lState",
",",
"nodes",
")",
"\n",
"if",
"!",
"rootPath",
".",
"IsValid",
"(",
")",
"{",
"return",
"nil",
",",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"rootPath",
".",
"Path",
"[",
"0",
"]",
".",
"BlockPointer",
",",
"md",
".",
"data",
".",
"Dir",
".",
"BlockPointer",
")",
"\n",
"fbo",
".",
"updatePointer",
"(",
"md",
",",
"rootPath",
".",
"Path",
"[",
"0",
"]",
".",
"BlockPointer",
",",
"md",
".",
"data",
".",
"Dir",
".",
"BlockPointer",
",",
"false",
")",
"\n",
"rootPath",
".",
"Path",
"[",
"0",
"]",
".",
"BlockPointer",
"=",
"md",
".",
"data",
".",
"Dir",
".",
"BlockPointer",
"\n",
"rootNode",
":=",
"fbo",
".",
"nodeCache",
".",
"Get",
"(",
"md",
".",
"data",
".",
"Dir",
".",
"BlockPointer",
".",
"Ref",
"(",
")",
")",
"\n",
"if",
"rootNode",
"!=",
"nil",
"{",
"change",
":=",
"NodeChange",
"{",
"Node",
":",
"rootNode",
"}",
"\n",
"for",
"child",
":=",
"range",
"children",
"[",
"rootPath",
".",
"String",
"(",
")",
"]",
"{",
"change",
".",
"DirUpdated",
"=",
"append",
"(",
"change",
".",
"DirUpdated",
",",
"child",
".",
"Name",
")",
"\n",
"}",
"\n",
"changes",
"=",
"append",
"(",
"changes",
",",
"change",
")",
"\n",
"affectedNodeIDs",
"=",
"append",
"(",
"affectedNodeIDs",
",",
"rootNode",
".",
"GetID",
"(",
")",
")",
"\n",
"}",
"\n\n",
"childChanges",
",",
"childAffectedNodeIDs",
",",
"err",
":=",
"fbo",
".",
"fastForwardDirAndChildrenLocked",
"(",
"ctx",
",",
"lState",
",",
"rootPath",
",",
"children",
",",
"md",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"nil",
",",
"err",
"\n",
"}",
"\n",
"changes",
"=",
"append",
"(",
"changes",
",",
"childChanges",
"...",
")",
"\n",
"affectedNodeIDs",
"=",
"append",
"(",
"affectedNodeIDs",
",",
"childAffectedNodeIDs",
"...",
")",
"\n\n",
"// Unlink any children that remain.",
"for",
"_",
",",
"childPNs",
":=",
"range",
"children",
"{",
"for",
"child",
":=",
"range",
"childPNs",
"{",
"fbo",
".",
"unlinkDuringFastForwardLocked",
"(",
"ctx",
",",
"lState",
",",
"md",
",",
"child",
".",
"BlockPointer",
".",
"Ref",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"changes",
",",
"affectedNodeIDs",
",",
"nil",
"\n",
"}"
] | // FastForwardAllNodes attempts to update the block pointers
// associated with nodes in the cache by searching for their paths in
// the current version of the TLF. If it can't find a corresponding
// node, it assumes it's been deleted and unlinks it. Returns the set
// of node changes that resulted. If there are no nodes, it returns a
// nil error because there's nothing to be done. | [
"FastForwardAllNodes",
"attempts",
"to",
"update",
"the",
"block",
"pointers",
"associated",
"with",
"nodes",
"in",
"the",
"cache",
"by",
"searching",
"for",
"their",
"paths",
"in",
"the",
"current",
"version",
"of",
"the",
"TLF",
".",
"If",
"it",
"can",
"t",
"find",
"a",
"corresponding",
"node",
"it",
"assumes",
"it",
"s",
"been",
"deleted",
"and",
"unlinks",
"it",
".",
"Returns",
"the",
"set",
"of",
"node",
"changes",
"that",
"resulted",
".",
"If",
"there",
"are",
"no",
"nodes",
"it",
"returns",
"a",
"nil",
"error",
"because",
"there",
"s",
"nothing",
"to",
"be",
"done",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3518-L3580 |
159,307 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | GetInvalidationChangesForNode | func (fbo *folderBlockOps) GetInvalidationChangesForNode(
ctx context.Context, lState *kbfssync.LockState, node Node) (
changes []NodeChange, affectedNodeIDs []NodeID, err error) {
if fbo.nodeCache == nil {
// Nothing needs to be done!
return nil, nil, nil
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.vlog.CLogf(
ctx, libkb.VLog1, "About to get all children for node %p", node)
childNodes := fbo.nodeCache.AllNodeChildren(node)
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Found %d children for node %p", len(childNodes),
node)
return fbo.getInvalidationChangesForNodes(
ctx, lState, append(childNodes, node))
} | go | func (fbo *folderBlockOps) GetInvalidationChangesForNode(
ctx context.Context, lState *kbfssync.LockState, node Node) (
changes []NodeChange, affectedNodeIDs []NodeID, err error) {
if fbo.nodeCache == nil {
// Nothing needs to be done!
return nil, nil, nil
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
fbo.vlog.CLogf(
ctx, libkb.VLog1, "About to get all children for node %p", node)
childNodes := fbo.nodeCache.AllNodeChildren(node)
fbo.vlog.CLogf(
ctx, libkb.VLog1, "Found %d children for node %p", len(childNodes),
node)
return fbo.getInvalidationChangesForNodes(
ctx, lState, append(childNodes, node))
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"GetInvalidationChangesForNode",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"node",
"Node",
")",
"(",
"changes",
"[",
"]",
"NodeChange",
",",
"affectedNodeIDs",
"[",
"]",
"NodeID",
",",
"err",
"error",
")",
"{",
"if",
"fbo",
".",
"nodeCache",
"==",
"nil",
"{",
"// Nothing needs to be done!",
"return",
"nil",
",",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"fbo",
".",
"blockLock",
".",
"Lock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"Unlock",
"(",
"lState",
")",
"\n",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"node",
")",
"\n",
"childNodes",
":=",
"fbo",
".",
"nodeCache",
".",
"AllNodeChildren",
"(",
"node",
")",
"\n",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"len",
"(",
"childNodes",
")",
",",
"node",
")",
"\n",
"return",
"fbo",
".",
"getInvalidationChangesForNodes",
"(",
"ctx",
",",
"lState",
",",
"append",
"(",
"childNodes",
",",
"node",
")",
")",
"\n",
"}"
] | // GetInvalidationChangesForNode returns the list of invalidation
// notifications for all the nodes rooted at the given node. | [
"GetInvalidationChangesForNode",
"returns",
"the",
"list",
"of",
"invalidation",
"notifications",
"for",
"all",
"the",
"nodes",
"rooted",
"at",
"the",
"given",
"node",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3618-L3636 |
159,308 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | GetInvalidationChangesForAll | func (fbo *folderBlockOps) GetInvalidationChangesForAll(
ctx context.Context, lState *kbfssync.LockState) (
changes []NodeChange, affectedNodeIDs []NodeID, err error) {
if fbo.nodeCache == nil {
// Nothing needs to be done!
return nil, nil, nil
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
childNodes := fbo.nodeCache.AllNodes()
fbo.vlog.CLogf(ctx, libkb.VLog1, "Found %d nodes", len(childNodes))
return fbo.getInvalidationChangesForNodes(ctx, lState, childNodes)
} | go | func (fbo *folderBlockOps) GetInvalidationChangesForAll(
ctx context.Context, lState *kbfssync.LockState) (
changes []NodeChange, affectedNodeIDs []NodeID, err error) {
if fbo.nodeCache == nil {
// Nothing needs to be done!
return nil, nil, nil
}
fbo.blockLock.Lock(lState)
defer fbo.blockLock.Unlock(lState)
childNodes := fbo.nodeCache.AllNodes()
fbo.vlog.CLogf(ctx, libkb.VLog1, "Found %d nodes", len(childNodes))
return fbo.getInvalidationChangesForNodes(ctx, lState, childNodes)
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"GetInvalidationChangesForAll",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
")",
"(",
"changes",
"[",
"]",
"NodeChange",
",",
"affectedNodeIDs",
"[",
"]",
"NodeID",
",",
"err",
"error",
")",
"{",
"if",
"fbo",
".",
"nodeCache",
"==",
"nil",
"{",
"// Nothing needs to be done!",
"return",
"nil",
",",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"fbo",
".",
"blockLock",
".",
"Lock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"Unlock",
"(",
"lState",
")",
"\n",
"childNodes",
":=",
"fbo",
".",
"nodeCache",
".",
"AllNodes",
"(",
")",
"\n",
"fbo",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"len",
"(",
"childNodes",
")",
")",
"\n",
"return",
"fbo",
".",
"getInvalidationChangesForNodes",
"(",
"ctx",
",",
"lState",
",",
"childNodes",
")",
"\n",
"}"
] | // GetInvalidationChangesForAll returns the list of invalidation
// notifications for the entire TLF. | [
"GetInvalidationChangesForAll",
"returns",
"the",
"list",
"of",
"invalidation",
"notifications",
"for",
"the",
"entire",
"TLF",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3640-L3653 |
159,309 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | MarkNode | func (fbo *folderBlockOps) MarkNode(
ctx context.Context, lState *kbfssync.LockState, node Node, kmd libkey.KeyMetadata,
tag string, cacheType DiskBlockCacheType) error {
dbc := fbo.config.DiskBlockCache()
if dbc == nil {
return nil
}
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return err
}
p := fbo.nodeCache.PathFromNode(node)
err = dbc.Mark(ctx, p.TailPointer().ID, tag, cacheType)
if err != nil {
return err
}
var infos []data.BlockInfo
if node.EntryType() == data.Dir {
dd := fbo.newDirDataLocked(lState, p, chargedTo, kmd)
infos, err = dd.GetIndirectDirBlockInfos(ctx)
} else {
fd := fbo.newFileData(lState, p, chargedTo, kmd)
infos, err = fd.GetIndirectFileBlockInfos(ctx)
}
if err != nil {
return err
}
for _, info := range infos {
err = dbc.Mark(ctx, info.BlockPointer.ID, tag, cacheType)
switch errors.Cause(err).(type) {
case nil:
case data.NoSuchBlockError:
default:
return err
}
}
return nil
} | go | func (fbo *folderBlockOps) MarkNode(
ctx context.Context, lState *kbfssync.LockState, node Node, kmd libkey.KeyMetadata,
tag string, cacheType DiskBlockCacheType) error {
dbc := fbo.config.DiskBlockCache()
if dbc == nil {
return nil
}
fbo.blockLock.RLock(lState)
defer fbo.blockLock.RUnlock(lState)
chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
if err != nil {
return err
}
p := fbo.nodeCache.PathFromNode(node)
err = dbc.Mark(ctx, p.TailPointer().ID, tag, cacheType)
if err != nil {
return err
}
var infos []data.BlockInfo
if node.EntryType() == data.Dir {
dd := fbo.newDirDataLocked(lState, p, chargedTo, kmd)
infos, err = dd.GetIndirectDirBlockInfos(ctx)
} else {
fd := fbo.newFileData(lState, p, chargedTo, kmd)
infos, err = fd.GetIndirectFileBlockInfos(ctx)
}
if err != nil {
return err
}
for _, info := range infos {
err = dbc.Mark(ctx, info.BlockPointer.ID, tag, cacheType)
switch errors.Cause(err).(type) {
case nil:
case data.NoSuchBlockError:
default:
return err
}
}
return nil
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"MarkNode",
"(",
"ctx",
"context",
".",
"Context",
",",
"lState",
"*",
"kbfssync",
".",
"LockState",
",",
"node",
"Node",
",",
"kmd",
"libkey",
".",
"KeyMetadata",
",",
"tag",
"string",
",",
"cacheType",
"DiskBlockCacheType",
")",
"error",
"{",
"dbc",
":=",
"fbo",
".",
"config",
".",
"DiskBlockCache",
"(",
")",
"\n",
"if",
"dbc",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"fbo",
".",
"blockLock",
".",
"RLock",
"(",
"lState",
")",
"\n",
"defer",
"fbo",
".",
"blockLock",
".",
"RUnlock",
"(",
"lState",
")",
"\n\n",
"chargedTo",
",",
"err",
":=",
"fbo",
".",
"getChargedToLocked",
"(",
"ctx",
",",
"lState",
",",
"kmd",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"p",
":=",
"fbo",
".",
"nodeCache",
".",
"PathFromNode",
"(",
"node",
")",
"\n",
"err",
"=",
"dbc",
".",
"Mark",
"(",
"ctx",
",",
"p",
".",
"TailPointer",
"(",
")",
".",
"ID",
",",
"tag",
",",
"cacheType",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"var",
"infos",
"[",
"]",
"data",
".",
"BlockInfo",
"\n",
"if",
"node",
".",
"EntryType",
"(",
")",
"==",
"data",
".",
"Dir",
"{",
"dd",
":=",
"fbo",
".",
"newDirDataLocked",
"(",
"lState",
",",
"p",
",",
"chargedTo",
",",
"kmd",
")",
"\n",
"infos",
",",
"err",
"=",
"dd",
".",
"GetIndirectDirBlockInfos",
"(",
"ctx",
")",
"\n",
"}",
"else",
"{",
"fd",
":=",
"fbo",
".",
"newFileData",
"(",
"lState",
",",
"p",
",",
"chargedTo",
",",
"kmd",
")",
"\n",
"infos",
",",
"err",
"=",
"fd",
".",
"GetIndirectFileBlockInfos",
"(",
"ctx",
")",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"info",
":=",
"range",
"infos",
"{",
"err",
"=",
"dbc",
".",
"Mark",
"(",
"ctx",
",",
"info",
".",
"BlockPointer",
".",
"ID",
",",
"tag",
",",
"cacheType",
")",
"\n",
"switch",
"errors",
".",
"Cause",
"(",
"err",
")",
".",
"(",
"type",
")",
"{",
"case",
"nil",
":",
"case",
"data",
".",
"NoSuchBlockError",
":",
"default",
":",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // MarkNode marks all the blocks in the node's block tree with the
// given tag. | [
"MarkNode",
"marks",
"all",
"the",
"blocks",
"in",
"the",
"node",
"s",
"block",
"tree",
"with",
"the",
"given",
"tag",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3657-L3699 |
159,310 | keybase/client | go/kbfs/libkbfs/folder_block_ops.go | populateChainPaths | func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
log logger.Logger, chains *crChains, includeCreates bool) error {
_, err := chains.getPaths(
ctx, fbo, log, fbo.nodeCache, includeCreates,
fbo.config.Mode().IsTestMode())
return err
} | go | func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
log logger.Logger, chains *crChains, includeCreates bool) error {
_, err := chains.getPaths(
ctx, fbo, log, fbo.nodeCache, includeCreates,
fbo.config.Mode().IsTestMode())
return err
} | [
"func",
"(",
"fbo",
"*",
"folderBlockOps",
")",
"populateChainPaths",
"(",
"ctx",
"context",
".",
"Context",
",",
"log",
"logger",
".",
"Logger",
",",
"chains",
"*",
"crChains",
",",
"includeCreates",
"bool",
")",
"error",
"{",
"_",
",",
"err",
":=",
"chains",
".",
"getPaths",
"(",
"ctx",
",",
"fbo",
",",
"log",
",",
"fbo",
".",
"nodeCache",
",",
"includeCreates",
",",
"fbo",
".",
"config",
".",
"Mode",
"(",
")",
".",
"IsTestMode",
"(",
")",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // populateChainPaths updates all the paths in all the ops tracked by
// `chains`, using the main nodeCache. | [
"populateChainPaths",
"updates",
"all",
"the",
"paths",
"in",
"all",
"the",
"ops",
"tracked",
"by",
"chains",
"using",
"the",
"main",
"nodeCache",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_ops.go#L3707-L3713 |
159,311 | keybase/client | go/chat/localizer.go | checkRekeyError | func (s *localizerPipeline) checkRekeyError(ctx context.Context, fromErr error, conversationRemote chat1.Conversation, unverifiedTLFName string) *chat1.ConversationErrorLocal {
if fromErr == nil {
return nil
}
convErr, err2 := s.checkRekeyErrorInner(ctx, fromErr, conversationRemote, unverifiedTLFName)
if err2 != nil {
errMsg := fmt.Sprintf("failed to get rekey info: convID: %s: %s",
conversationRemote.Metadata.ConversationID, err2.Error())
return chat1.NewConversationErrorLocal(
errMsg, conversationRemote, unverifiedTLFName, chat1.ConversationErrorType_TRANSIENT, nil)
}
if convErr != nil {
return convErr
}
return nil
} | go | func (s *localizerPipeline) checkRekeyError(ctx context.Context, fromErr error, conversationRemote chat1.Conversation, unverifiedTLFName string) *chat1.ConversationErrorLocal {
if fromErr == nil {
return nil
}
convErr, err2 := s.checkRekeyErrorInner(ctx, fromErr, conversationRemote, unverifiedTLFName)
if err2 != nil {
errMsg := fmt.Sprintf("failed to get rekey info: convID: %s: %s",
conversationRemote.Metadata.ConversationID, err2.Error())
return chat1.NewConversationErrorLocal(
errMsg, conversationRemote, unverifiedTLFName, chat1.ConversationErrorType_TRANSIENT, nil)
}
if convErr != nil {
return convErr
}
return nil
} | [
"func",
"(",
"s",
"*",
"localizerPipeline",
")",
"checkRekeyError",
"(",
"ctx",
"context",
".",
"Context",
",",
"fromErr",
"error",
",",
"conversationRemote",
"chat1",
".",
"Conversation",
",",
"unverifiedTLFName",
"string",
")",
"*",
"chat1",
".",
"ConversationErrorLocal",
"{",
"if",
"fromErr",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"convErr",
",",
"err2",
":=",
"s",
".",
"checkRekeyErrorInner",
"(",
"ctx",
",",
"fromErr",
",",
"conversationRemote",
",",
"unverifiedTLFName",
")",
"\n",
"if",
"err2",
"!=",
"nil",
"{",
"errMsg",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"conversationRemote",
".",
"Metadata",
".",
"ConversationID",
",",
"err2",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"chat1",
".",
"NewConversationErrorLocal",
"(",
"errMsg",
",",
"conversationRemote",
",",
"unverifiedTLFName",
",",
"chat1",
".",
"ConversationErrorType_TRANSIENT",
",",
"nil",
")",
"\n",
"}",
"\n",
"if",
"convErr",
"!=",
"nil",
"{",
"return",
"convErr",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Checks fromErr to see if it is a rekey error.
// Returns a ConversationErrorLocal if it is a rekey error.
// Returns nil otherwise. | [
"Checks",
"fromErr",
"to",
"see",
"if",
"it",
"is",
"a",
"rekey",
"error",
".",
"Returns",
"a",
"ConversationErrorLocal",
"if",
"it",
"is",
"a",
"rekey",
"error",
".",
"Returns",
"nil",
"otherwise",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/localizer.go#L916-L931 |
159,312 | keybase/client | go/kbfs/libpages/root.go | setRoot | func setRoot(root *Root, str string) error {
switch {
case strings.HasPrefix(str, privatePrefix):
root.TlfType = tlf.Private
setRootTlfNameAndPath(root, str[len(privatePrefix):])
return nil
case strings.HasPrefix(str, publicPrefix):
root.TlfType = tlf.Public
setRootTlfNameAndPath(root, str[len(publicPrefix):])
return nil
case strings.HasPrefix(str, teamPrefix):
root.TlfType = tlf.SingleTeam
setRootTlfNameAndPath(root, str[len(teamPrefix):])
return nil
default:
return ErrInvalidKeybasePagesRecord{}
}
} | go | func setRoot(root *Root, str string) error {
switch {
case strings.HasPrefix(str, privatePrefix):
root.TlfType = tlf.Private
setRootTlfNameAndPath(root, str[len(privatePrefix):])
return nil
case strings.HasPrefix(str, publicPrefix):
root.TlfType = tlf.Public
setRootTlfNameAndPath(root, str[len(publicPrefix):])
return nil
case strings.HasPrefix(str, teamPrefix):
root.TlfType = tlf.SingleTeam
setRootTlfNameAndPath(root, str[len(teamPrefix):])
return nil
default:
return ErrInvalidKeybasePagesRecord{}
}
} | [
"func",
"setRoot",
"(",
"root",
"*",
"Root",
",",
"str",
"string",
")",
"error",
"{",
"switch",
"{",
"case",
"strings",
".",
"HasPrefix",
"(",
"str",
",",
"privatePrefix",
")",
":",
"root",
".",
"TlfType",
"=",
"tlf",
".",
"Private",
"\n",
"setRootTlfNameAndPath",
"(",
"root",
",",
"str",
"[",
"len",
"(",
"privatePrefix",
")",
":",
"]",
")",
"\n",
"return",
"nil",
"\n",
"case",
"strings",
".",
"HasPrefix",
"(",
"str",
",",
"publicPrefix",
")",
":",
"root",
".",
"TlfType",
"=",
"tlf",
".",
"Public",
"\n",
"setRootTlfNameAndPath",
"(",
"root",
",",
"str",
"[",
"len",
"(",
"publicPrefix",
")",
":",
"]",
")",
"\n",
"return",
"nil",
"\n",
"case",
"strings",
".",
"HasPrefix",
"(",
"str",
",",
"teamPrefix",
")",
":",
"root",
".",
"TlfType",
"=",
"tlf",
".",
"SingleTeam",
"\n",
"setRootTlfNameAndPath",
"(",
"root",
",",
"str",
"[",
"len",
"(",
"teamPrefix",
")",
":",
"]",
")",
"\n",
"return",
"nil",
"\n",
"default",
":",
"return",
"ErrInvalidKeybasePagesRecord",
"{",
"}",
"\n",
"}",
"\n",
"}"
] | // str is everything after either gitPrefix or kbfsPrefix. | [
"str",
"is",
"everything",
"after",
"either",
"gitPrefix",
"or",
"kbfsPrefix",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libpages/root.go#L195-L212 |
159,313 | keybase/client | go/kbfs/libpages/root.go | ParseRoot | func ParseRoot(str string) (Root, error) {
str = strings.TrimSpace(str)
switch {
case strings.HasPrefix(str, gitPrefix):
root := Root{Type: GitRoot}
if err := setRoot(&root, str[len(gitPrefix):]); err != nil {
return Root{}, err
}
return root, nil
case strings.HasPrefix(str, kbfsPrefix):
root := Root{Type: KBFSRoot}
if err := setRoot(&root, str[len(kbfsPrefix):]); err != nil {
return Root{}, err
}
return root, nil
default:
return Root{}, ErrInvalidKeybasePagesRecord{}
}
} | go | func ParseRoot(str string) (Root, error) {
str = strings.TrimSpace(str)
switch {
case strings.HasPrefix(str, gitPrefix):
root := Root{Type: GitRoot}
if err := setRoot(&root, str[len(gitPrefix):]); err != nil {
return Root{}, err
}
return root, nil
case strings.HasPrefix(str, kbfsPrefix):
root := Root{Type: KBFSRoot}
if err := setRoot(&root, str[len(kbfsPrefix):]); err != nil {
return Root{}, err
}
return root, nil
default:
return Root{}, ErrInvalidKeybasePagesRecord{}
}
} | [
"func",
"ParseRoot",
"(",
"str",
"string",
")",
"(",
"Root",
",",
"error",
")",
"{",
"str",
"=",
"strings",
".",
"TrimSpace",
"(",
"str",
")",
"\n",
"switch",
"{",
"case",
"strings",
".",
"HasPrefix",
"(",
"str",
",",
"gitPrefix",
")",
":",
"root",
":=",
"Root",
"{",
"Type",
":",
"GitRoot",
"}",
"\n",
"if",
"err",
":=",
"setRoot",
"(",
"&",
"root",
",",
"str",
"[",
"len",
"(",
"gitPrefix",
")",
":",
"]",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"Root",
"{",
"}",
",",
"err",
"\n",
"}",
"\n",
"return",
"root",
",",
"nil",
"\n",
"case",
"strings",
".",
"HasPrefix",
"(",
"str",
",",
"kbfsPrefix",
")",
":",
"root",
":=",
"Root",
"{",
"Type",
":",
"KBFSRoot",
"}",
"\n",
"if",
"err",
":=",
"setRoot",
"(",
"&",
"root",
",",
"str",
"[",
"len",
"(",
"kbfsPrefix",
")",
":",
"]",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"Root",
"{",
"}",
",",
"err",
"\n",
"}",
"\n",
"return",
"root",
",",
"nil",
"\n\n",
"default",
":",
"return",
"Root",
"{",
"}",
",",
"ErrInvalidKeybasePagesRecord",
"{",
"}",
"\n",
"}",
"\n",
"}"
] | // ParseRoot parses a kbp= TXT record from a domain into a Root object. | [
"ParseRoot",
"parses",
"a",
"kbp",
"=",
"TXT",
"record",
"from",
"a",
"domain",
"into",
"a",
"Root",
"object",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libpages/root.go#L215-L234 |
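The two parsers in the records above share one dispatch shape: trim the record string, match a known prefix, and hand the remainder to a narrower parser. Below is a minimal self-contained sketch of that pattern; the prefix constants and the result struct are hypothetical stand-ins, since the real gitPrefix/kbfsPrefix/privatePrefix values from libpages are not reproduced in these records.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// Hypothetical prefixes; the real constants live in libpages and are not shown here.
const (
	gitPrefix  = "git."
	kbfsPrefix = "kbfs."
)

type rootType int

const (
	gitRoot rootType = iota
	kbfsRoot
)

type root struct {
	Type rootType
	Rest string // whatever follows the prefix; the real code parses TLF type, name, and path
}

var errInvalidRecord = errors.New("invalid keybase pages record")

// parseRoot mirrors the trim-then-dispatch shape of ParseRoot/setRoot above.
func parseRoot(str string) (root, error) {
	str = strings.TrimSpace(str)
	switch {
	case strings.HasPrefix(str, gitPrefix):
		return root{Type: gitRoot, Rest: str[len(gitPrefix):]}, nil
	case strings.HasPrefix(str, kbfsPrefix):
		return root{Type: kbfsRoot, Rest: str[len(kbfsPrefix):]}, nil
	default:
		return root{}, errInvalidRecord
	}
}

func main() {
	r, err := parseRoot("  kbfs.private/alice/site  ")
	fmt.Println(r, err)
}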
159,314 | keybase/client | go/teams/resolve.go | ResolveIDToName | func ResolveIDToName(ctx context.Context, g *libkb.GlobalContext, id keybase1.TeamID) (name keybase1.TeamName, err error) {
return resolveIDToName(ctx, g, id, false)
} | go | func ResolveIDToName(ctx context.Context, g *libkb.GlobalContext, id keybase1.TeamID) (name keybase1.TeamName, err error) {
return resolveIDToName(ctx, g, id, false)
} | [
"func",
"ResolveIDToName",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"id",
"keybase1",
".",
"TeamID",
")",
"(",
"name",
"keybase1",
".",
"TeamName",
",",
"err",
"error",
")",
"{",
"return",
"resolveIDToName",
"(",
"ctx",
",",
"g",
",",
"id",
",",
"false",
")",
"\n",
"}"
] | // ResolveIDToName takes a team ID and resolves it to a name.
// It can use server-assist but always cryptographically checks the result. | [
"ResolveIDToName",
"takes",
"a",
"team",
"ID",
"and",
"resolves",
"it",
"to",
"a",
"name",
".",
"It",
"can",
"use",
"server",
"-",
"assist",
"but",
"always",
"cryptographically",
"checks",
"the",
"result",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L18-L20 |
159,315 | keybase/client | go/teams/resolve.go | ResolveIDToNameForceRefresh | func ResolveIDToNameForceRefresh(ctx context.Context, g *libkb.GlobalContext, id keybase1.TeamID) (name keybase1.TeamName, err error) {
return resolveIDToName(ctx, g, id, true)
} | go | func ResolveIDToNameForceRefresh(ctx context.Context, g *libkb.GlobalContext, id keybase1.TeamID) (name keybase1.TeamName, err error) {
return resolveIDToName(ctx, g, id, true)
} | [
"func",
"ResolveIDToNameForceRefresh",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"id",
"keybase1",
".",
"TeamID",
")",
"(",
"name",
"keybase1",
".",
"TeamName",
",",
"err",
"error",
")",
"{",
"return",
"resolveIDToName",
"(",
"ctx",
",",
"g",
",",
"id",
",",
"true",
")",
"\n",
"}"
] | // ResolveIDToNameForceRefresh is like ResolveIDToName but forces a refresh of
// the FTL cache. | [
"ResolveIDToNameForceRefresh",
"is",
"like",
"ResolveIDToName",
"but",
"forces",
"a",
"refresh",
"of",
"the",
"FTL",
"cache",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L24-L26 |
159,316 | keybase/client | go/teams/resolve.go | ResolveNameToID | func ResolveNameToID(ctx context.Context, g *libkb.GlobalContext, name keybase1.TeamName) (id keybase1.TeamID, err error) {
return resolveNameToID(ctx, g, name, false)
} | go | func ResolveNameToID(ctx context.Context, g *libkb.GlobalContext, name keybase1.TeamName) (id keybase1.TeamID, err error) {
return resolveNameToID(ctx, g, name, false)
} | [
"func",
"ResolveNameToID",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"name",
"keybase1",
".",
"TeamName",
")",
"(",
"id",
"keybase1",
".",
"TeamID",
",",
"err",
"error",
")",
"{",
"return",
"resolveNameToID",
"(",
"ctx",
",",
"g",
",",
"name",
",",
"false",
")",
"\n",
"}"
] | // ResolveNameToID takes a team name and resolves it to a team ID.
// It can use server-assist but always cryptographically checks the result. | [
"ResolveNameToID",
"takes",
"a",
"team",
"name",
"and",
"resolves",
"it",
"to",
"a",
"team",
"ID",
".",
"It",
"can",
"use",
"server",
"-",
"assist",
"but",
"always",
"cryptographically",
"checks",
"the",
"result",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L44-L46 |
159,317 | keybase/client | go/teams/resolve.go | ResolveNameToIDForceRefresh | func ResolveNameToIDForceRefresh(ctx context.Context, g *libkb.GlobalContext, name keybase1.TeamName) (id keybase1.TeamID, err error) {
return resolveNameToID(ctx, g, name, true)
} | go | func ResolveNameToIDForceRefresh(ctx context.Context, g *libkb.GlobalContext, name keybase1.TeamName) (id keybase1.TeamID, err error) {
return resolveNameToID(ctx, g, name, true)
} | [
"func",
"ResolveNameToIDForceRefresh",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"name",
"keybase1",
".",
"TeamName",
")",
"(",
"id",
"keybase1",
".",
"TeamID",
",",
"err",
"error",
")",
"{",
"return",
"resolveNameToID",
"(",
"ctx",
",",
"g",
",",
"name",
",",
"true",
")",
"\n",
"}"
] | // ResolveNameToIDForceRefresh is just like ResolveNameToID but it forces a refresh. | [
"ResolveNameToIDForceRefresh",
"is",
"just",
"like",
"ResolveNameToID",
"but",
"it",
"forces",
"a",
"refresh",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L49-L51 |
159,318 | keybase/client | go/teams/resolve.go | ResolveImplicitTeamDisplayName | func ResolveImplicitTeamDisplayName(ctx context.Context, g *libkb.GlobalContext,
name string, public bool) (res keybase1.ImplicitTeamDisplayName, err error) {
defer g.CTraceTimed(ctx, fmt.Sprintf("ResolveImplicitTeamDisplayName(%v, public:%v)", name, public), func() error { return err })()
split1 := strings.SplitN(name, " ", 2) // split1: [assertions, ?conflict]
assertions := split1[0]
var suffix string
if len(split1) > 1 {
suffix = split1[1]
}
writerAssertions, readerAssertions, err := externals.ParseAssertionsWithReaders(g, assertions)
if err != nil {
return res, err
}
res = keybase1.ImplicitTeamDisplayName{
IsPublic: public,
}
if len(suffix) > 0 {
res.ConflictInfo, err = libkb.ParseImplicitTeamDisplayNameSuffix(suffix)
if err != nil {
return res, err
}
}
var resolvedAssertions []libkb.ResolvedAssertion
if err = ResolveImplicitTeamSetUntrusted(ctx, g, writerAssertions, &res.Writers, &resolvedAssertions); err != nil {
return res, err
}
if err = ResolveImplicitTeamSetUntrusted(ctx, g, readerAssertions, &res.Readers, &resolvedAssertions); err != nil {
return res, err
}
deduplicateImplicitTeamDisplayName(&res)
// errgroup collects errors and returns the first non-nil.
// subctx is canceled when the group finishes.
group, subctx := errgroup.WithContext(ctx)
// Identify everyone who resolved in parallel, checking that they match their resolved UID and original assertions.
for _, resolvedAssertion := range resolvedAssertions {
resolvedAssertion := resolvedAssertion // https://golang.org/doc/faq#closures_and_goroutines
group.Go(func() error {
return verifyResolveResult(subctx, g, resolvedAssertion)
})
}
err = group.Wait()
return res, err
} | go | func ResolveImplicitTeamDisplayName(ctx context.Context, g *libkb.GlobalContext,
name string, public bool) (res keybase1.ImplicitTeamDisplayName, err error) {
defer g.CTraceTimed(ctx, fmt.Sprintf("ResolveImplicitTeamDisplayName(%v, public:%v)", name, public), func() error { return err })()
split1 := strings.SplitN(name, " ", 2) // split1: [assertions, ?conflict]
assertions := split1[0]
var suffix string
if len(split1) > 1 {
suffix = split1[1]
}
writerAssertions, readerAssertions, err := externals.ParseAssertionsWithReaders(g, assertions)
if err != nil {
return res, err
}
res = keybase1.ImplicitTeamDisplayName{
IsPublic: public,
}
if len(suffix) > 0 {
res.ConflictInfo, err = libkb.ParseImplicitTeamDisplayNameSuffix(suffix)
if err != nil {
return res, err
}
}
var resolvedAssertions []libkb.ResolvedAssertion
if err = ResolveImplicitTeamSetUntrusted(ctx, g, writerAssertions, &res.Writers, &resolvedAssertions); err != nil {
return res, err
}
if err = ResolveImplicitTeamSetUntrusted(ctx, g, readerAssertions, &res.Readers, &resolvedAssertions); err != nil {
return res, err
}
deduplicateImplicitTeamDisplayName(&res)
// errgroup collects errors and returns the first non-nil.
// subctx is canceled when the group finishes.
group, subctx := errgroup.WithContext(ctx)
// Identify everyone who resolved in parallel, checking that they match their resolved UID and original assertions.
for _, resolvedAssertion := range resolvedAssertions {
resolvedAssertion := resolvedAssertion // https://golang.org/doc/faq#closures_and_goroutines
group.Go(func() error {
return verifyResolveResult(subctx, g, resolvedAssertion)
})
}
err = group.Wait()
return res, err
} | [
"func",
"ResolveImplicitTeamDisplayName",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"name",
"string",
",",
"public",
"bool",
")",
"(",
"res",
"keybase1",
".",
"ImplicitTeamDisplayName",
",",
"err",
"error",
")",
"{",
"defer",
"g",
".",
"CTraceTimed",
"(",
"ctx",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"name",
",",
"public",
")",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n\n",
"split1",
":=",
"strings",
".",
"SplitN",
"(",
"name",
",",
"\"",
"\"",
",",
"2",
")",
"// split1: [assertions, ?conflict]",
"\n",
"assertions",
":=",
"split1",
"[",
"0",
"]",
"\n",
"var",
"suffix",
"string",
"\n",
"if",
"len",
"(",
"split1",
")",
">",
"1",
"{",
"suffix",
"=",
"split1",
"[",
"1",
"]",
"\n",
"}",
"\n\n",
"writerAssertions",
",",
"readerAssertions",
",",
"err",
":=",
"externals",
".",
"ParseAssertionsWithReaders",
"(",
"g",
",",
"assertions",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"res",
",",
"err",
"\n",
"}",
"\n\n",
"res",
"=",
"keybase1",
".",
"ImplicitTeamDisplayName",
"{",
"IsPublic",
":",
"public",
",",
"}",
"\n",
"if",
"len",
"(",
"suffix",
")",
">",
"0",
"{",
"res",
".",
"ConflictInfo",
",",
"err",
"=",
"libkb",
".",
"ParseImplicitTeamDisplayNameSuffix",
"(",
"suffix",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"res",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"var",
"resolvedAssertions",
"[",
"]",
"libkb",
".",
"ResolvedAssertion",
"\n",
"if",
"err",
"=",
"ResolveImplicitTeamSetUntrusted",
"(",
"ctx",
",",
"g",
",",
"writerAssertions",
",",
"&",
"res",
".",
"Writers",
",",
"&",
"resolvedAssertions",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"res",
",",
"err",
"\n",
"}",
"\n",
"if",
"err",
"=",
"ResolveImplicitTeamSetUntrusted",
"(",
"ctx",
",",
"g",
",",
"readerAssertions",
",",
"&",
"res",
".",
"Readers",
",",
"&",
"resolvedAssertions",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"res",
",",
"err",
"\n",
"}",
"\n\n",
"deduplicateImplicitTeamDisplayName",
"(",
"&",
"res",
")",
"\n\n",
"// errgroup collects errors and returns the first non-nil.",
"// subctx is canceled when the group finishes.",
"group",
",",
"subctx",
":=",
"errgroup",
".",
"WithContext",
"(",
"ctx",
")",
"\n\n",
"// Identify everyone who resolved in parallel, checking that they match their resolved UID and original assertions.",
"for",
"_",
",",
"resolvedAssertion",
":=",
"range",
"resolvedAssertions",
"{",
"resolvedAssertion",
":=",
"resolvedAssertion",
"// https://golang.org/doc/faq#closures_and_goroutines",
"\n",
"group",
".",
"Go",
"(",
"func",
"(",
")",
"error",
"{",
"return",
"verifyResolveResult",
"(",
"subctx",
",",
"g",
",",
"resolvedAssertion",
")",
"\n",
"}",
")",
"\n",
"}",
"\n\n",
"err",
"=",
"group",
".",
"Wait",
"(",
")",
"\n",
"return",
"res",
",",
"err",
"\n",
"}"
] | // Resolve assertions in an implicit team display name and verify the result.
// Resolve an implicit team name with assertions like "alice,bob+bob@twitter#char (conflicted copy 2017-03-04 #1)"
// Into "alice,bob#char (conflicted copy 2017-03-04 #1)"
// The input can contain compound assertions, but if compound assertions are left unresolved, an error will be returned. | [
"Resolve",
"assertions",
"in",
"an",
"implicit",
"team",
"display",
"name",
"and",
"verify",
"the",
"result",
".",
"Resolve",
"an",
"implicit",
"team",
"name",
"with",
"assertions",
"like",
"alice",
"bob",
"+",
"bob"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L75-L126 |
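ResolveImplicitTeamDisplayName in the record above fans the per-assertion identify calls out through golang.org/x/sync/errgroup. A standalone sketch of that fan-out pattern follows; verifyOne is a placeholder for the real per-assertion check (verifyResolveResult), which is an assumption made only to keep the example runnable.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// verifyOne stands in for the real per-assertion verification.
func verifyOne(ctx context.Context, assertion string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil // pretend the assertion checked out
	}
}

func verifyAll(ctx context.Context, assertions []string) error {
	// subctx is canceled as soon as any goroutine returns an error.
	group, subctx := errgroup.WithContext(ctx)
	for _, a := range assertions {
		a := a // capture the loop variable per iteration
		group.Go(func() error {
			return verifyOne(subctx, a)
		})
	}
	// Wait blocks for all goroutines and returns the first non-nil error, if any.
	return group.Wait()
}

func main() {
	err := verifyAll(context.Background(), []string{"alice", "bob+bob@twitter"})
	fmt.Println(err)
}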
159,319 | keybase/client | go/teams/resolve.go | shouldPreventTeamCreation | func shouldPreventTeamCreation(err error) bool {
if resErr, ok := err.(libkb.ResolutionError); ok {
switch resErr.Kind {
case libkb.ResolutionErrorRateLimited, libkb.ResolutionErrorInvalidInput, libkb.ResolutionErrorRequestFailed:
return true
}
}
return false
} | go | func shouldPreventTeamCreation(err error) bool {
if resErr, ok := err.(libkb.ResolutionError); ok {
switch resErr.Kind {
case libkb.ResolutionErrorRateLimited, libkb.ResolutionErrorInvalidInput, libkb.ResolutionErrorRequestFailed:
return true
}
}
return false
} | [
"func",
"shouldPreventTeamCreation",
"(",
"err",
"error",
")",
"bool",
"{",
"if",
"resErr",
",",
"ok",
":=",
"err",
".",
"(",
"libkb",
".",
"ResolutionError",
")",
";",
"ok",
"{",
"switch",
"resErr",
".",
"Kind",
"{",
"case",
"libkb",
".",
"ResolutionErrorRateLimited",
",",
"libkb",
".",
"ResolutionErrorInvalidInput",
",",
"libkb",
".",
"ResolutionErrorRequestFailed",
":",
"return",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"false",
"\n",
"}"
] | // preventTeamCreationOnError checks if an error coming from resolver should
// prevent us from creating a team. We don't want a team where we don't know if
// SBS user is resolvable but we just were unable to get the answer. | [
"preventTeamCreationOnError",
"checks",
"if",
"an",
"error",
"coming",
"from",
"resolver",
"should",
"prevent",
"us",
"from",
"creating",
"a",
"team",
".",
"We",
"don",
"t",
"want",
"a",
"team",
"where",
"we",
"don",
"t",
"know",
"if",
"SBS",
"user",
"is",
"resolvable",
"but",
"we",
"just",
"were",
"unable",
"to",
"get",
"the",
"answer",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L131-L139 |
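shouldPreventTeamCreation above treats only certain resolution failures (rate limiting, invalid input, failed request) as reasons to stop team creation. The same type-assert-then-switch shape, sketched with a hypothetical error type since libkb.ResolutionError and its Kind constants are not reproduced in this record:

package main

import (
	"errors"
	"fmt"
)

// resolutionError is a stand-in for libkb.ResolutionError.
type resolutionError struct {
	Kind string
	Msg  string
}

func (e resolutionError) Error() string { return e.Msg }

// shouldBail mirrors the shape of shouldPreventTeamCreation: only some kinds
// of resolution failure are severe enough to make the caller stop.
func shouldBail(err error) bool {
	if resErr, ok := err.(resolutionError); ok {
		switch resErr.Kind {
		case "rate_limited", "invalid_input", "request_failed":
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(shouldBail(resolutionError{Kind: "rate_limited", Msg: "slow down"})) // true
	fmt.Println(shouldBail(resolutionError{Kind: "not_found", Msg: "no such user"})) // false
	fmt.Println(shouldBail(errors.New("unrelated failure")))                         // false
}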
159,320 | keybase/client | go/teams/resolve.go | ResolveImplicitTeamSetUntrusted | func ResolveImplicitTeamSetUntrusted(ctx context.Context, g *libkb.GlobalContext,
sourceAssertions []libkb.AssertionExpression, resSet *keybase1.ImplicitTeamUserSet, resolvedAssertions *[]libkb.ResolvedAssertion) error {
m := libkb.NewMetaContext(ctx, g)
for _, expr := range sourceAssertions {
u, resolveRes, err := g.Resolver.ResolveUser(m, expr.String())
if err != nil {
// Resolution failed. Could still be an SBS assertion.
if shouldPreventTeamCreation(err) {
// but if we are not sure, better to bail out
return err
}
sa, err := expr.ToSocialAssertion()
if err != nil {
// Could not convert to a social assertion.
// This could be because it is a compound assertion, which we do not support when SBS.
// Or it could be because it's a team assertion or something weird like that.
return libkb.ResolutionError{Input: expr.String(), Msg: "unknown user assertion",
Kind: libkb.ResolutionErrorNotFound}
}
resSet.UnresolvedUsers = append(resSet.UnresolvedUsers, sa)
} else {
// Resolution succeeded
resSet.KeybaseUsers = append(resSet.KeybaseUsers, u.Username)
// Append the resolvee and assertion to resolvedAssertions, in case we identify later.
*resolvedAssertions = append(*resolvedAssertions, libkb.ResolvedAssertion{
UID: u.Uid,
Assertion: expr,
ResolveResult: resolveRes,
})
}
}
return nil
} | go | func ResolveImplicitTeamSetUntrusted(ctx context.Context, g *libkb.GlobalContext,
sourceAssertions []libkb.AssertionExpression, resSet *keybase1.ImplicitTeamUserSet, resolvedAssertions *[]libkb.ResolvedAssertion) error {
m := libkb.NewMetaContext(ctx, g)
for _, expr := range sourceAssertions {
u, resolveRes, err := g.Resolver.ResolveUser(m, expr.String())
if err != nil {
// Resolution failed. Could still be an SBS assertion.
if shouldPreventTeamCreation(err) {
// but if we are not sure, better to bail out
return err
}
sa, err := expr.ToSocialAssertion()
if err != nil {
// Could not convert to a social assertion.
// This could be because it is a compound assertion, which we do not support when SBS.
// Or it could be because it's a team assertion or something weird like that.
return libkb.ResolutionError{Input: expr.String(), Msg: "unknown user assertion",
Kind: libkb.ResolutionErrorNotFound}
}
resSet.UnresolvedUsers = append(resSet.UnresolvedUsers, sa)
} else {
// Resolution succeeded
resSet.KeybaseUsers = append(resSet.KeybaseUsers, u.Username)
// Append the resolvee and assertion to resolvedAssertions, in case we identify later.
*resolvedAssertions = append(*resolvedAssertions, libkb.ResolvedAssertion{
UID: u.Uid,
Assertion: expr,
ResolveResult: resolveRes,
})
}
}
return nil
} | [
"func",
"ResolveImplicitTeamSetUntrusted",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"sourceAssertions",
"[",
"]",
"libkb",
".",
"AssertionExpression",
",",
"resSet",
"*",
"keybase1",
".",
"ImplicitTeamUserSet",
",",
"resolvedAssertions",
"*",
"[",
"]",
"libkb",
".",
"ResolvedAssertion",
")",
"error",
"{",
"m",
":=",
"libkb",
".",
"NewMetaContext",
"(",
"ctx",
",",
"g",
")",
"\n\n",
"for",
"_",
",",
"expr",
":=",
"range",
"sourceAssertions",
"{",
"u",
",",
"resolveRes",
",",
"err",
":=",
"g",
".",
"Resolver",
".",
"ResolveUser",
"(",
"m",
",",
"expr",
".",
"String",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"// Resolution failed. Could still be an SBS assertion.",
"if",
"shouldPreventTeamCreation",
"(",
"err",
")",
"{",
"// but if we are not sure, better to bail out",
"return",
"err",
"\n",
"}",
"\n",
"sa",
",",
"err",
":=",
"expr",
".",
"ToSocialAssertion",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"// Could not convert to a social assertion.",
"// This could be because it is a compound assertion, which we do not support when SBS.",
"// Or it could be because it's a team assertion or something weird like that.",
"return",
"libkb",
".",
"ResolutionError",
"{",
"Input",
":",
"expr",
".",
"String",
"(",
")",
",",
"Msg",
":",
"\"",
"\"",
",",
"Kind",
":",
"libkb",
".",
"ResolutionErrorNotFound",
"}",
"\n",
"}",
"\n",
"resSet",
".",
"UnresolvedUsers",
"=",
"append",
"(",
"resSet",
".",
"UnresolvedUsers",
",",
"sa",
")",
"\n",
"}",
"else",
"{",
"// Resolution succeeded",
"resSet",
".",
"KeybaseUsers",
"=",
"append",
"(",
"resSet",
".",
"KeybaseUsers",
",",
"u",
".",
"Username",
")",
"\n",
"// Append the resolvee and assertion to resolvedAssertions, in case we identify later.",
"*",
"resolvedAssertions",
"=",
"append",
"(",
"*",
"resolvedAssertions",
",",
"libkb",
".",
"ResolvedAssertion",
"{",
"UID",
":",
"u",
".",
"Uid",
",",
"Assertion",
":",
"expr",
",",
"ResolveResult",
":",
"resolveRes",
",",
"}",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Try to resolve implicit team members.
// Modifies the arguments `resSet` and appends to `resolvedAssertions`.
// For each assertion in `sourceAssertions`, try to resolve them.
// If they resolve, add the username to `resSet` and the assertion to `resolvedAssertions`.
// If they don't resolve, add the SocialAssertion to `resSet`, but nothing to `resolvedAssertions`. | [
"Try",
"to",
"resolve",
"implicit",
"team",
"members",
".",
"Modifies",
"the",
"arguments",
"resSet",
"and",
"appends",
"to",
"resolvedAssertions",
".",
"For",
"each",
"assertion",
"in",
"sourceAssertions",
"try",
"to",
"resolve",
"them",
".",
"If",
"they",
"resolve",
"add",
"the",
"username",
"to",
"resSet",
"and",
"the",
"assertion",
"to",
"resolvedAssertions",
".",
"If",
"they",
"don",
"t",
"resolve",
"add",
"the",
"SocialAssertion",
"to",
"resSet",
"but",
"nothing",
"to",
"resolvedAssertions",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L146-L180 |
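ResolveImplicitTeamSetUntrusted above partitions its input into assertions that resolved to Keybase users and social assertions that stayed unresolved, bailing out only on the fatal error kinds shown earlier. A toy partitioning sketch with a fake resolver (the real one is server-assisted and returns a full ResolveResult):

package main

import (
	"fmt"
	"strings"
)

// resolve is a stand-in for the server-assisted resolver: plain usernames
// resolve, anything with a service suffix does not.
func resolve(assertion string) (username string, ok bool) {
	if strings.Contains(assertion, "@") {
		return "", false
	}
	return assertion, true
}

// partition mirrors the split performed by ResolveImplicitTeamSetUntrusted:
// resolved assertions become Keybase users, the rest stay as social assertions.
func partition(assertions []string) (keybaseUsers, unresolved []string) {
	for _, a := range assertions {
		if u, ok := resolve(a); ok {
			keybaseUsers = append(keybaseUsers, u)
		} else {
			unresolved = append(unresolved, a)
		}
	}
	return keybaseUsers, unresolved
}

func main() {
	users, pending := partition([]string{"alice", "bob@twitter", "carol"})
	fmt.Println(users, pending) // [alice carol] [bob@twitter]
}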
159,321 | keybase/client | go/teams/resolve.go | verifyResolveResult | func verifyResolveResult(ctx context.Context, g *libkb.GlobalContext, resolvedAssertion libkb.ResolvedAssertion) (err error) {
defer g.CTrace(ctx, fmt.Sprintf("verifyResolveResult ID user [%s] %s", resolvedAssertion.UID, resolvedAssertion.Assertion.String()),
func() error { return err })()
if resolvedAssertion.ResolveResult.WasKBAssertion() {
// The resolver does not use server-trust for these sorts of assertions.
// So early out to avoid the performance cost of a full identify.
return nil
}
if resolvedAssertion.ResolveResult.IsServerTrust() {
g.Log.CDebugf(ctx, "Trusting the server on assertion: %q (server trust - no way for clients to verify)", resolvedAssertion.Assertion.String())
return nil
}
id2arg := keybase1.Identify2Arg{
Uid: resolvedAssertion.UID,
UserAssertion: resolvedAssertion.Assertion.String(),
CanSuppressUI: true,
// Use CHAT_GUI to avoid tracker popups and DO externals checks.
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_GUI,
}
uis := libkb.UIs{
// Send a nil IdentifyUI, this IdentifyBehavior should not use it anyway.
IdentifyUI: nil,
}
eng := engine.NewIdentify2WithUID(g, &id2arg)
m := libkb.NewMetaContext(ctx, g).WithUIs(uis)
err = engine.RunEngine2(m, eng)
if err != nil {
idRes, _ := eng.Result(m)
m.Debug("identify failed (IDres %v, TrackBreaks %v): %v", idRes != nil, idRes != nil && idRes.TrackBreaks != nil, err)
}
return err
} | go | func verifyResolveResult(ctx context.Context, g *libkb.GlobalContext, resolvedAssertion libkb.ResolvedAssertion) (err error) {
defer g.CTrace(ctx, fmt.Sprintf("verifyResolveResult ID user [%s] %s", resolvedAssertion.UID, resolvedAssertion.Assertion.String()),
func() error { return err })()
if resolvedAssertion.ResolveResult.WasKBAssertion() {
// The resolver does not use server-trust for these sorts of assertions.
// So early out to avoid the performance cost of a full identify.
return nil
}
if resolvedAssertion.ResolveResult.IsServerTrust() {
g.Log.CDebugf(ctx, "Trusting the server on assertion: %q (server trust - no way for clients to verify)", resolvedAssertion.Assertion.String())
return nil
}
id2arg := keybase1.Identify2Arg{
Uid: resolvedAssertion.UID,
UserAssertion: resolvedAssertion.Assertion.String(),
CanSuppressUI: true,
// Use CHAT_GUI to avoid tracker popups and DO externals checks.
IdentifyBehavior: keybase1.TLFIdentifyBehavior_CHAT_GUI,
}
uis := libkb.UIs{
// Send a nil IdentifyUI, this IdentifyBehavior should not use it anyway.
IdentifyUI: nil,
}
eng := engine.NewIdentify2WithUID(g, &id2arg)
m := libkb.NewMetaContext(ctx, g).WithUIs(uis)
err = engine.RunEngine2(m, eng)
if err != nil {
idRes, _ := eng.Result(m)
m.Debug("identify failed (IDres %v, TrackBreaks %v): %v", idRes != nil, idRes != nil && idRes.TrackBreaks != nil, err)
}
return err
} | [
"func",
"verifyResolveResult",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"resolvedAssertion",
"libkb",
".",
"ResolvedAssertion",
")",
"(",
"err",
"error",
")",
"{",
"defer",
"g",
".",
"CTrace",
"(",
"ctx",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"resolvedAssertion",
".",
"UID",
",",
"resolvedAssertion",
".",
"Assertion",
".",
"String",
"(",
")",
")",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n\n",
"if",
"resolvedAssertion",
".",
"ResolveResult",
".",
"WasKBAssertion",
"(",
")",
"{",
"// The resolver does not use server-trust for these sorts of assertions.",
"// So early out to avoid the performance cost of a full identify.",
"return",
"nil",
"\n",
"}",
"\n\n",
"if",
"resolvedAssertion",
".",
"ResolveResult",
".",
"IsServerTrust",
"(",
")",
"{",
"g",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"resolvedAssertion",
".",
"Assertion",
".",
"String",
"(",
")",
")",
"\n",
"return",
"nil",
"\n",
"}",
"\n\n",
"id2arg",
":=",
"keybase1",
".",
"Identify2Arg",
"{",
"Uid",
":",
"resolvedAssertion",
".",
"UID",
",",
"UserAssertion",
":",
"resolvedAssertion",
".",
"Assertion",
".",
"String",
"(",
")",
",",
"CanSuppressUI",
":",
"true",
",",
"// Use CHAT_GUI to avoid tracker popups and DO externals checks.",
"IdentifyBehavior",
":",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_GUI",
",",
"}",
"\n\n",
"uis",
":=",
"libkb",
".",
"UIs",
"{",
"// Send a nil IdentifyUI, this IdentifyBehavior should not use it anyway.",
"IdentifyUI",
":",
"nil",
",",
"}",
"\n\n",
"eng",
":=",
"engine",
".",
"NewIdentify2WithUID",
"(",
"g",
",",
"&",
"id2arg",
")",
"\n",
"m",
":=",
"libkb",
".",
"NewMetaContext",
"(",
"ctx",
",",
"g",
")",
".",
"WithUIs",
"(",
"uis",
")",
"\n",
"err",
"=",
"engine",
".",
"RunEngine2",
"(",
"m",
",",
"eng",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"idRes",
",",
"_",
":=",
"eng",
".",
"Result",
"(",
"m",
")",
"\n",
"m",
".",
"Debug",
"(",
"\"",
"\"",
",",
"idRes",
"!=",
"nil",
",",
"idRes",
"!=",
"nil",
"&&",
"idRes",
".",
"TrackBreaks",
"!=",
"nil",
",",
"err",
")",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}"
] | // Verify using Identify that a UID matches an assertion. | [
"Verify",
"using",
"Identify",
"that",
"a",
"UID",
"matches",
"an",
"assertion",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/resolve.go#L183-L220 |
159,322 | keybase/client | go/teams/loader2.go | checkStubbed | func (l *TeamLoader) checkStubbed(ctx context.Context, arg load2ArgT, link *ChainLinkUnpacked) error {
if !link.isStubbed() {
return nil
}
if l.seqnosContains(arg.needSeqnos, link.Seqno()) {
return NewStubbedErrorWithNote(link, "Need seqno")
}
if arg.needAdmin || !link.outerLink.LinkType.TeamAllowStubWithAdminFlag(arg.needAdmin) {
return NewStubbedErrorWithNote(link, "Need admin privilege for this action")
}
return nil
} | go | func (l *TeamLoader) checkStubbed(ctx context.Context, arg load2ArgT, link *ChainLinkUnpacked) error {
if !link.isStubbed() {
return nil
}
if l.seqnosContains(arg.needSeqnos, link.Seqno()) {
return NewStubbedErrorWithNote(link, "Need seqno")
}
if arg.needAdmin || !link.outerLink.LinkType.TeamAllowStubWithAdminFlag(arg.needAdmin) {
return NewStubbedErrorWithNote(link, "Need admin privilege for this action")
}
return nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"checkStubbed",
"(",
"ctx",
"context",
".",
"Context",
",",
"arg",
"load2ArgT",
",",
"link",
"*",
"ChainLinkUnpacked",
")",
"error",
"{",
"if",
"!",
"link",
".",
"isStubbed",
"(",
")",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"l",
".",
"seqnosContains",
"(",
"arg",
".",
"needSeqnos",
",",
"link",
".",
"Seqno",
"(",
")",
")",
"{",
"return",
"NewStubbedErrorWithNote",
"(",
"link",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"arg",
".",
"needAdmin",
"||",
"!",
"link",
".",
"outerLink",
".",
"LinkType",
".",
"TeamAllowStubWithAdminFlag",
"(",
"arg",
".",
"needAdmin",
")",
"{",
"return",
"NewStubbedErrorWithNote",
"(",
"link",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // checkStubbed checks if it's OK if a link is stubbed. | [
"checkStubbed",
"checks",
"if",
"it",
"s",
"OK",
"if",
"a",
"link",
"is",
"stubbed",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L105-L116 |
159,323 | keybase/client | go/teams/loader2.go | loadUserAndKeyFromLinkInnerNoVerify | func (l *TeamLoader) loadUserAndKeyFromLinkInnerNoVerify(ctx context.Context,
link *ChainLinkUnpacked) (signerUV keybase1.UserVersion, err error) {
if !ShouldSuppressLogging(ctx) {
defer l.G().CTraceTimed(ctx, fmt.Sprintf("TeamLoader#loadUserAndKeyFromLinkInnerNoVerify(%d)", int(link.inner.Seqno)), func() error { return err })()
}
keySection := link.inner.Body.Key
if keySection == nil {
return signerUV, libkb.NoUIDError{}
}
// Use the UID from the link body and EldestSeqno from the server-trust API response.
if link.source.EldestSeqno == 0 {
// We should never hit this case
return signerUV, fmt.Errorf("missing server hint for team sigchain link signer")
}
return NewUserVersion(keySection.UID, link.source.EldestSeqno), nil
} | go | func (l *TeamLoader) loadUserAndKeyFromLinkInnerNoVerify(ctx context.Context,
link *ChainLinkUnpacked) (signerUV keybase1.UserVersion, err error) {
if !ShouldSuppressLogging(ctx) {
defer l.G().CTraceTimed(ctx, fmt.Sprintf("TeamLoader#loadUserAndKeyFromLinkInnerNoVerify(%d)", int(link.inner.Seqno)), func() error { return err })()
}
keySection := link.inner.Body.Key
if keySection == nil {
return signerUV, libkb.NoUIDError{}
}
// Use the UID from the link body and EldestSeqno from the server-trust API response.
if link.source.EldestSeqno == 0 {
// We should never hit this case
return signerUV, fmt.Errorf("missing server hint for team sigchain link signer")
}
return NewUserVersion(keySection.UID, link.source.EldestSeqno), nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"loadUserAndKeyFromLinkInnerNoVerify",
"(",
"ctx",
"context",
".",
"Context",
",",
"link",
"*",
"ChainLinkUnpacked",
")",
"(",
"signerUV",
"keybase1",
".",
"UserVersion",
",",
"err",
"error",
")",
"{",
"if",
"!",
"ShouldSuppressLogging",
"(",
"ctx",
")",
"{",
"defer",
"l",
".",
"G",
"(",
")",
".",
"CTraceTimed",
"(",
"ctx",
",",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"int",
"(",
"link",
".",
"inner",
".",
"Seqno",
")",
")",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n",
"}",
"\n",
"keySection",
":=",
"link",
".",
"inner",
".",
"Body",
".",
"Key",
"\n",
"if",
"keySection",
"==",
"nil",
"{",
"return",
"signerUV",
",",
"libkb",
".",
"NoUIDError",
"{",
"}",
"\n",
"}",
"\n",
"// Use the UID from the link body and EldestSeqno from the server-trust API response.",
"if",
"link",
".",
"source",
".",
"EldestSeqno",
"==",
"0",
"{",
"// We should never hit this case",
"return",
"signerUV",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"NewUserVersion",
"(",
"keySection",
".",
"UID",
",",
"link",
".",
"source",
".",
"EldestSeqno",
")",
",",
"nil",
"\n",
"}"
] | // Get the UV from a link but using server-trust and without verifying anything. | [
"Get",
"the",
"UV",
"from",
"a",
"link",
"but",
"using",
"server",
"-",
"trust",
"and",
"without",
"verifying",
"anything",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L138-L153 |
159,324 | keybase/client | go/teams/loader2.go | verifyExplicitPermission | func (l *TeamLoader) verifyExplicitPermission(ctx context.Context, state *keybase1.TeamData,
link *ChainLinkUnpacked, uv keybase1.UserVersion, atOrAbove keybase1.TeamRole) error {
return (TeamSigChainState{state.Chain}).AssertWasRoleOrAboveAt(uv, atOrAbove, link.SigChainLocation().Sub1())
} | go | func (l *TeamLoader) verifyExplicitPermission(ctx context.Context, state *keybase1.TeamData,
link *ChainLinkUnpacked, uv keybase1.UserVersion, atOrAbove keybase1.TeamRole) error {
return (TeamSigChainState{state.Chain}).AssertWasRoleOrAboveAt(uv, atOrAbove, link.SigChainLocation().Sub1())
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"verifyExplicitPermission",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"link",
"*",
"ChainLinkUnpacked",
",",
"uv",
"keybase1",
".",
"UserVersion",
",",
"atOrAbove",
"keybase1",
".",
"TeamRole",
")",
"error",
"{",
"return",
"(",
"TeamSigChainState",
"{",
"state",
".",
"Chain",
"}",
")",
".",
"AssertWasRoleOrAboveAt",
"(",
"uv",
",",
"atOrAbove",
",",
"link",
".",
"SigChainLocation",
"(",
")",
".",
"Sub1",
"(",
")",
")",
"\n",
"}"
] | // Verify that the user had the explicit on-chain role just before this `link`. | [
"Verify",
"that",
"the",
"user",
"had",
"the",
"explicit",
"on",
"-",
"chain",
"role",
"just",
"before",
"this",
"link",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L308-L311 |
159,325 | keybase/client | go/teams/loader2.go | walkUpToAdmin | func (l *TeamLoader) walkUpToAdmin(
ctx context.Context, team *keybase1.TeamData, me keybase1.UserVersion, readSubteamID keybase1.TeamID,
uv keybase1.UserVersion, admin SCTeamAdmin, parentsCache parentChainCache) (*TeamSigChainState, error) {
target, err := admin.TeamID.ToTeamID()
if err != nil {
return nil, err
}
if t, ok := parentsCache[target]; ok {
return &TeamSigChainState{inner: t.Chain}, nil
}
for team != nil && !team.Chain.Id.Eq(target) {
parent := team.Chain.ParentID
if parent == nil {
return nil, NewAdminNotFoundError(admin)
}
if t, ok := parentsCache[*parent]; ok {
team = t
continue
}
arg := load2ArgT{
teamID: *parent,
reason: "walkUpToAdmin",
me: me,
// Get the latest so that the linkmap is up to date for the proof order checker.
// But do it only once (hence the `parentsCache`) per team.
forceRepoll: true,
readSubteamID: &readSubteamID,
}
if target.Eq(*parent) {
arg.needSeqnos = []keybase1.Seqno{admin.Seqno}
}
load2Res, err := l.load2(ctx, arg)
if err != nil {
return nil, err
}
team = &load2Res.team
parentsCache[*parent] = team
}
if team == nil {
return nil, fmt.Errorf("teamloader fault: nil team after admin walk")
}
return &TeamSigChainState{inner: team.Chain}, nil
} | go | func (l *TeamLoader) walkUpToAdmin(
ctx context.Context, team *keybase1.TeamData, me keybase1.UserVersion, readSubteamID keybase1.TeamID,
uv keybase1.UserVersion, admin SCTeamAdmin, parentsCache parentChainCache) (*TeamSigChainState, error) {
target, err := admin.TeamID.ToTeamID()
if err != nil {
return nil, err
}
if t, ok := parentsCache[target]; ok {
return &TeamSigChainState{inner: t.Chain}, nil
}
for team != nil && !team.Chain.Id.Eq(target) {
parent := team.Chain.ParentID
if parent == nil {
return nil, NewAdminNotFoundError(admin)
}
if t, ok := parentsCache[*parent]; ok {
team = t
continue
}
arg := load2ArgT{
teamID: *parent,
reason: "walkUpToAdmin",
me: me,
// Get the latest so that the linkmap is up to date for the proof order checker.
// But do it only once (hence the `parentsCache`) per team.
forceRepoll: true,
readSubteamID: &readSubteamID,
}
if target.Eq(*parent) {
arg.needSeqnos = []keybase1.Seqno{admin.Seqno}
}
load2Res, err := l.load2(ctx, arg)
if err != nil {
return nil, err
}
team = &load2Res.team
parentsCache[*parent] = team
}
if team == nil {
return nil, fmt.Errorf("teamloader fault: nil team after admin walk")
}
return &TeamSigChainState{inner: team.Chain}, nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"walkUpToAdmin",
"(",
"ctx",
"context",
".",
"Context",
",",
"team",
"*",
"keybase1",
".",
"TeamData",
",",
"me",
"keybase1",
".",
"UserVersion",
",",
"readSubteamID",
"keybase1",
".",
"TeamID",
",",
"uv",
"keybase1",
".",
"UserVersion",
",",
"admin",
"SCTeamAdmin",
",",
"parentsCache",
"parentChainCache",
")",
"(",
"*",
"TeamSigChainState",
",",
"error",
")",
"{",
"target",
",",
"err",
":=",
"admin",
".",
"TeamID",
".",
"ToTeamID",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"t",
",",
"ok",
":=",
"parentsCache",
"[",
"target",
"]",
";",
"ok",
"{",
"return",
"&",
"TeamSigChainState",
"{",
"inner",
":",
"t",
".",
"Chain",
"}",
",",
"nil",
"\n",
"}",
"\n\n",
"for",
"team",
"!=",
"nil",
"&&",
"!",
"team",
".",
"Chain",
".",
"Id",
".",
"Eq",
"(",
"target",
")",
"{",
"parent",
":=",
"team",
".",
"Chain",
".",
"ParentID",
"\n",
"if",
"parent",
"==",
"nil",
"{",
"return",
"nil",
",",
"NewAdminNotFoundError",
"(",
"admin",
")",
"\n",
"}",
"\n",
"if",
"t",
",",
"ok",
":=",
"parentsCache",
"[",
"*",
"parent",
"]",
";",
"ok",
"{",
"team",
"=",
"t",
"\n",
"continue",
"\n",
"}",
"\n",
"arg",
":=",
"load2ArgT",
"{",
"teamID",
":",
"*",
"parent",
",",
"reason",
":",
"\"",
"\"",
",",
"me",
":",
"me",
",",
"// Get the latest so that the linkmap is up to date for the proof order checker.",
"// But do it only once (hence the `parentsCache`) per team.",
"forceRepoll",
":",
"true",
",",
"readSubteamID",
":",
"&",
"readSubteamID",
",",
"}",
"\n",
"if",
"target",
".",
"Eq",
"(",
"*",
"parent",
")",
"{",
"arg",
".",
"needSeqnos",
"=",
"[",
"]",
"keybase1",
".",
"Seqno",
"{",
"admin",
".",
"Seqno",
"}",
"\n",
"}",
"\n",
"load2Res",
",",
"err",
":=",
"l",
".",
"load2",
"(",
"ctx",
",",
"arg",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"team",
"=",
"&",
"load2Res",
".",
"team",
"\n",
"parentsCache",
"[",
"*",
"parent",
"]",
"=",
"team",
"\n",
"}",
"\n",
"if",
"team",
"==",
"nil",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"&",
"TeamSigChainState",
"{",
"inner",
":",
"team",
".",
"Chain",
"}",
",",
"nil",
"\n",
"}"
] | // Does not return a full TeamData because it might get a subteam-reader version. | [
"Does",
"not",
"return",
"a",
"full",
"TeamData",
"because",
"it",
"might",
"get",
"a",
"subteam",
"-",
"reader",
"version",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L316-L361 |
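walkUpToAdmin above climbs parent pointers until it reaches the team named in the admin permission, loading each parent at most once via parentsCache. The same memoized walk, reduced to a standalone sketch over an in-memory map instead of the real load2 call:

package main

import (
	"errors"
	"fmt"
)

type teamID string

type team struct {
	ID     teamID
	Parent *teamID // nil at the root team
}

// loadTeam stands in for the real loader; here it just reads a fixture map.
func loadTeam(all map[teamID]*team, id teamID) (*team, error) {
	if t, ok := all[id]; ok {
		return t, nil
	}
	return nil, fmt.Errorf("no such team %q", id)
}

// walkUp starts at `start` and follows Parent pointers until it reaches `target`,
// caching every team it loads so repeated walks stay cheap.
func walkUp(all map[teamID]*team, cache map[teamID]*team, start *team, target teamID) (*team, error) {
	if t, ok := cache[target]; ok {
		return t, nil
	}
	for start != nil && start.ID != target {
		if start.Parent == nil {
			return nil, errors.New("admin team not found above start")
		}
		parent := *start.Parent
		if t, ok := cache[parent]; ok {
			start = t
			continue
		}
		t, err := loadTeam(all, parent)
		if err != nil {
			return nil, err
		}
		cache[parent] = t
		start = t
	}
	if start == nil {
		return nil, errors.New("nil team after walk")
	}
	return start, nil
}

func main() {
	rootID, midID, leafID := teamID("root"), teamID("root.mid"), teamID("root.mid.leaf")
	all := map[teamID]*team{
		rootID: {ID: rootID},
		midID:  {ID: midID, Parent: &rootID},
		leafID: {ID: leafID, Parent: &midID},
	}
	cache := map[teamID]*team{}
	t, err := walkUp(all, cache, all[leafID], rootID)
	fmt.Println(t.ID, err)
}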
159,326 | keybase/client | go/teams/loader2.go | verifyAdminPermissions | func (l *TeamLoader) verifyAdminPermissions(ctx context.Context,
state *keybase1.TeamData, me keybase1.UserVersion, link *ChainLinkUnpacked, readSubteamID keybase1.TeamID,
uv keybase1.UserVersion, proofSet *proofSetT, parentsCache parentChainCache) (SignerX, error) {
signer := SignerX{signer: uv}
explicitAdmin := link.inner.TeamAdmin()
teamChain := TeamSigChainState{inner: state.Chain}
// In the simple case, we don't ask for explicit adminship, so we have to be admins of
// the current chain at or before the signature in question.
if explicitAdmin == nil {
err := teamChain.AssertWasAdminAt(uv, link.SigChainLocation().Sub1())
return signer, err
}
// The more complicated case is that there's an explicit admin permission given, perhaps
// of a parent team.
adminTeam, err := l.walkUpToAdmin(ctx, state, me, readSubteamID, uv, *explicitAdmin, parentsCache)
if err != nil {
return signer, err
}
adminBookends, err := adminTeam.assertBecameAdminAt(uv, explicitAdmin.SigChainLocation())
if err != nil {
return signer, err
}
// This was an implicit admin action if the team from which admin-power was derived (adminTeam)
// is not the link's team (state).
if !adminTeam.GetID().Eq(teamChain.GetID()) {
signer.implicitAdmin = true
}
l.addProofsForAdminPermission(ctx, state.Chain.Id, link, adminBookends, proofSet)
return signer, nil
} | go | func (l *TeamLoader) verifyAdminPermissions(ctx context.Context,
state *keybase1.TeamData, me keybase1.UserVersion, link *ChainLinkUnpacked, readSubteamID keybase1.TeamID,
uv keybase1.UserVersion, proofSet *proofSetT, parentsCache parentChainCache) (SignerX, error) {
signer := SignerX{signer: uv}
explicitAdmin := link.inner.TeamAdmin()
teamChain := TeamSigChainState{inner: state.Chain}
// In the simple case, we don't ask for explicit adminship, so we have to be admins of
// the current chain at or before the signature in question.
if explicitAdmin == nil {
err := teamChain.AssertWasAdminAt(uv, link.SigChainLocation().Sub1())
return signer, err
}
// The more complicated case is that there's an explicit admin permission given, perhaps
// of a parent team.
adminTeam, err := l.walkUpToAdmin(ctx, state, me, readSubteamID, uv, *explicitAdmin, parentsCache)
if err != nil {
return signer, err
}
adminBookends, err := adminTeam.assertBecameAdminAt(uv, explicitAdmin.SigChainLocation())
if err != nil {
return signer, err
}
// This was an implicit admin action if the team from which admin-power was derived (adminTeam)
// is not the link's team (state).
if !adminTeam.GetID().Eq(teamChain.GetID()) {
signer.implicitAdmin = true
}
l.addProofsForAdminPermission(ctx, state.Chain.Id, link, adminBookends, proofSet)
return signer, nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"verifyAdminPermissions",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"me",
"keybase1",
".",
"UserVersion",
",",
"link",
"*",
"ChainLinkUnpacked",
",",
"readSubteamID",
"keybase1",
".",
"TeamID",
",",
"uv",
"keybase1",
".",
"UserVersion",
",",
"proofSet",
"*",
"proofSetT",
",",
"parentsCache",
"parentChainCache",
")",
"(",
"SignerX",
",",
"error",
")",
"{",
"signer",
":=",
"SignerX",
"{",
"signer",
":",
"uv",
"}",
"\n",
"explicitAdmin",
":=",
"link",
".",
"inner",
".",
"TeamAdmin",
"(",
")",
"\n",
"teamChain",
":=",
"TeamSigChainState",
"{",
"inner",
":",
"state",
".",
"Chain",
"}",
"\n\n",
"// In the simple case, we don't ask for explicit adminship, so we have to be admins of",
"// the current chain at or before the signature in question.",
"if",
"explicitAdmin",
"==",
"nil",
"{",
"err",
":=",
"teamChain",
".",
"AssertWasAdminAt",
"(",
"uv",
",",
"link",
".",
"SigChainLocation",
"(",
")",
".",
"Sub1",
"(",
")",
")",
"\n",
"return",
"signer",
",",
"err",
"\n",
"}",
"\n\n",
"// The more complicated case is that there's an explicit admin permission given, perhaps",
"// of a parent team.",
"adminTeam",
",",
"err",
":=",
"l",
".",
"walkUpToAdmin",
"(",
"ctx",
",",
"state",
",",
"me",
",",
"readSubteamID",
",",
"uv",
",",
"*",
"explicitAdmin",
",",
"parentsCache",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"signer",
",",
"err",
"\n",
"}",
"\n",
"adminBookends",
",",
"err",
":=",
"adminTeam",
".",
"assertBecameAdminAt",
"(",
"uv",
",",
"explicitAdmin",
".",
"SigChainLocation",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"signer",
",",
"err",
"\n",
"}",
"\n\n",
"// This was an implicit admin action if the team from which admin-power was derived (adminTeam)",
"// is not the link's team (state).",
"if",
"!",
"adminTeam",
".",
"GetID",
"(",
")",
".",
"Eq",
"(",
"teamChain",
".",
"GetID",
"(",
")",
")",
"{",
"signer",
".",
"implicitAdmin",
"=",
"true",
"\n",
"}",
"\n\n",
"l",
".",
"addProofsForAdminPermission",
"(",
"ctx",
",",
"state",
".",
"Chain",
".",
"Id",
",",
"link",
",",
"adminBookends",
",",
"proofSet",
")",
"\n",
"return",
"signer",
",",
"nil",
"\n",
"}"
] | // Verify that a user has admin permissions.
// Because this uses the proofSet, if it is called may return success and fail later. | [
"Verify",
"that",
"a",
"user",
"has",
"admin",
"permissions",
".",
"Because",
"this",
"uses",
"the",
"proofSet",
"if",
"it",
"is",
"called",
"may",
"return",
"success",
"and",
"fail",
"later",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L375-L409 |
159,327 | keybase/client | go/teams/loader2.go | applyNewLink | func (l *TeamLoader) applyNewLink(ctx context.Context,
state *keybase1.TeamData, link *ChainLinkUnpacked,
signer *SignerX, me keybase1.UserVersion) (*keybase1.TeamData, error) {
ctx, tbs := l.G().CTimeBuckets(ctx)
defer tbs.Record("TeamLoader.applyNewLink")()
if !ShouldSuppressLogging(ctx) {
l.G().Log.CDebugf(ctx, "TeamLoader applying link seqno:%v", link.Seqno())
}
var chainState *TeamSigChainState
var newState *keybase1.TeamData
if state == nil {
newState = &keybase1.TeamData{
// Name is left blank until calculateName updates it.
// It shall not be blank by the time it is returned from load2.
Name: keybase1.TeamName{},
PerTeamKeySeedsUnverified: make(map[keybase1.PerTeamKeyGeneration]keybase1.PerTeamKeySeedItem),
ReaderKeyMasks: make(map[keybase1.TeamApplication]map[keybase1.PerTeamKeyGeneration]keybase1.MaskB64),
}
} else {
chainState = &TeamSigChainState{inner: state.Chain}
newState = state
state = nil
}
newChainState, err := AppendChainLink(ctx, l.G(), me, chainState, link, signer)
if err != nil {
return nil, err
}
newState.Chain = newChainState.inner
return newState, nil
} | go | func (l *TeamLoader) applyNewLink(ctx context.Context,
state *keybase1.TeamData, link *ChainLinkUnpacked,
signer *SignerX, me keybase1.UserVersion) (*keybase1.TeamData, error) {
ctx, tbs := l.G().CTimeBuckets(ctx)
defer tbs.Record("TeamLoader.applyNewLink")()
if !ShouldSuppressLogging(ctx) {
l.G().Log.CDebugf(ctx, "TeamLoader applying link seqno:%v", link.Seqno())
}
var chainState *TeamSigChainState
var newState *keybase1.TeamData
if state == nil {
newState = &keybase1.TeamData{
// Name is left blank until calculateName updates it.
// It shall not be blank by the time it is returned from load2.
Name: keybase1.TeamName{},
PerTeamKeySeedsUnverified: make(map[keybase1.PerTeamKeyGeneration]keybase1.PerTeamKeySeedItem),
ReaderKeyMasks: make(map[keybase1.TeamApplication]map[keybase1.PerTeamKeyGeneration]keybase1.MaskB64),
}
} else {
chainState = &TeamSigChainState{inner: state.Chain}
newState = state
state = nil
}
newChainState, err := AppendChainLink(ctx, l.G(), me, chainState, link, signer)
if err != nil {
return nil, err
}
newState.Chain = newChainState.inner
return newState, nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"applyNewLink",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"link",
"*",
"ChainLinkUnpacked",
",",
"signer",
"*",
"SignerX",
",",
"me",
"keybase1",
".",
"UserVersion",
")",
"(",
"*",
"keybase1",
".",
"TeamData",
",",
"error",
")",
"{",
"ctx",
",",
"tbs",
":=",
"l",
".",
"G",
"(",
")",
".",
"CTimeBuckets",
"(",
"ctx",
")",
"\n",
"defer",
"tbs",
".",
"Record",
"(",
"\"",
"\"",
")",
"(",
")",
"\n\n",
"if",
"!",
"ShouldSuppressLogging",
"(",
"ctx",
")",
"{",
"l",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"link",
".",
"Seqno",
"(",
")",
")",
"\n",
"}",
"\n\n",
"var",
"chainState",
"*",
"TeamSigChainState",
"\n",
"var",
"newState",
"*",
"keybase1",
".",
"TeamData",
"\n",
"if",
"state",
"==",
"nil",
"{",
"newState",
"=",
"&",
"keybase1",
".",
"TeamData",
"{",
"// Name is left blank until calculateName updates it.",
"// It shall not be blank by the time it is returned from load2.",
"Name",
":",
"keybase1",
".",
"TeamName",
"{",
"}",
",",
"PerTeamKeySeedsUnverified",
":",
"make",
"(",
"map",
"[",
"keybase1",
".",
"PerTeamKeyGeneration",
"]",
"keybase1",
".",
"PerTeamKeySeedItem",
")",
",",
"ReaderKeyMasks",
":",
"make",
"(",
"map",
"[",
"keybase1",
".",
"TeamApplication",
"]",
"map",
"[",
"keybase1",
".",
"PerTeamKeyGeneration",
"]",
"keybase1",
".",
"MaskB64",
")",
",",
"}",
"\n",
"}",
"else",
"{",
"chainState",
"=",
"&",
"TeamSigChainState",
"{",
"inner",
":",
"state",
".",
"Chain",
"}",
"\n",
"newState",
"=",
"state",
"\n",
"state",
"=",
"nil",
"\n",
"}",
"\n\n",
"newChainState",
",",
"err",
":=",
"AppendChainLink",
"(",
"ctx",
",",
"l",
".",
"G",
"(",
")",
",",
"me",
",",
"chainState",
",",
"link",
",",
"signer",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"newState",
".",
"Chain",
"=",
"newChainState",
".",
"inner",
"\n\n",
"return",
"newState",
",",
"nil",
"\n",
"}"
] | // Apply a new link to the sigchain state.
// `state` is moved into this function. There must exist no live references into it from now on.
// `signer` may be nil iff link is stubbed. | [
"Apply",
"a",
"new",
"link",
"to",
"the",
"sigchain",
"state",
".",
"state",
"is",
"moved",
"into",
"this",
"function",
".",
"There",
"must",
"exist",
"no",
"live",
"references",
"into",
"it",
"from",
"now",
"on",
".",
"signer",
"may",
"be",
"nil",
"iff",
"link",
"is",
"stubbed",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L476-L509 |
159,328 | keybase/client | go/teams/loader2.go | inflateLink | func (l *TeamLoader) inflateLink(ctx context.Context,
state *keybase1.TeamData, link *ChainLinkUnpacked,
signer SignerX, me keybase1.UserVersion) (
*keybase1.TeamData, error) {
l.G().Log.CDebugf(ctx, "TeamLoader inflating link seqno:%v", link.Seqno())
if state == nil {
// The only reason state would be nil is if this is link 1.
// But link 1 can't be stubbed.
return nil, NewInflateErrorWithNote(link, "no prior state")
}
newState := state.DeepCopy() // Clone the state and chain so that our parameters don't get consumed.
newChainState, err := InflateLink(ctx, l.G(), me, TeamSigChainState{inner: newState.Chain}, link, signer)
if err != nil {
return nil, err
}
newState.Chain = newChainState.inner
return &newState, nil
} | go | func (l *TeamLoader) inflateLink(ctx context.Context,
state *keybase1.TeamData, link *ChainLinkUnpacked,
signer SignerX, me keybase1.UserVersion) (
*keybase1.TeamData, error) {
l.G().Log.CDebugf(ctx, "TeamLoader inflating link seqno:%v", link.Seqno())
if state == nil {
// The only reason state would be nil is if this is link 1.
// But link 1 can't be stubbed.
return nil, NewInflateErrorWithNote(link, "no prior state")
}
newState := state.DeepCopy() // Clone the state and chain so that our parameters don't get consumed.
newChainState, err := InflateLink(ctx, l.G(), me, TeamSigChainState{inner: newState.Chain}, link, signer)
if err != nil {
return nil, err
}
newState.Chain = newChainState.inner
return &newState, nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"inflateLink",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"link",
"*",
"ChainLinkUnpacked",
",",
"signer",
"SignerX",
",",
"me",
"keybase1",
".",
"UserVersion",
")",
"(",
"*",
"keybase1",
".",
"TeamData",
",",
"error",
")",
"{",
"l",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"link",
".",
"Seqno",
"(",
")",
")",
"\n\n",
"if",
"state",
"==",
"nil",
"{",
"// The only reason state would be nil is if this is link 1.",
"// But link 1 can't be stubbed.",
"return",
"nil",
",",
"NewInflateErrorWithNote",
"(",
"link",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"newState",
":=",
"state",
".",
"DeepCopy",
"(",
")",
"// Clone the state and chain so that our parameters don't get consumed.",
"\n",
"newChainState",
",",
"err",
":=",
"InflateLink",
"(",
"ctx",
",",
"l",
".",
"G",
"(",
")",
",",
"me",
",",
"TeamSigChainState",
"{",
"inner",
":",
"newState",
".",
"Chain",
"}",
",",
"link",
",",
"signer",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"newState",
".",
"Chain",
"=",
"newChainState",
".",
"inner",
"\n\n",
"return",
"&",
"newState",
",",
"nil",
"\n",
"}"
] | // Inflate a link that was stubbed with its non-stubbed data. | [
"Inflate",
"a",
"link",
"that",
"was",
"stubbed",
"with",
"its",
"non",
"-",
"stubbed",
"data",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L512-L533 |
159,329 | keybase/client | go/teams/loader2.go | checkParentChildOperations | func (l *TeamLoader) checkParentChildOperations(ctx context.Context,
me keybase1.UserVersion, loadingTeamID keybase1.TeamID, parentID *keybase1.TeamID, readSubteamID keybase1.TeamID,
parentChildOperations []*parentChildOperation, proofSet *proofSetT) error {
if len(parentChildOperations) == 0 {
return nil
}
if parentID == nil {
return fmt.Errorf("cannot check parent-child operations with no parent")
}
var needParentSeqnos []keybase1.Seqno
for _, pco := range parentChildOperations {
needParentSeqnos = append(needParentSeqnos, pco.parentSeqno)
}
parent, err := l.load2(ctx, load2ArgT{
teamID: *parentID,
reason: "checkParentChildOperations-parent",
needAdmin: false,
needKeyGeneration: 0,
needApplicationsAtGenerations: nil,
needApplicationsAtGenerationsWithKBFS: nil,
wantMembers: nil,
wantMembersRole: keybase1.TeamRole_NONE,
forceFullReload: false,
forceRepoll: false,
staleOK: true, // stale is fine, as long as get those seqnos.
needSeqnos: needParentSeqnos,
readSubteamID: &readSubteamID,
me: me,
})
if err != nil {
return fmt.Errorf("error loading parent: %v", err)
}
parentChain := TeamSigChainState{inner: parent.team.Chain}
for _, pco := range parentChildOperations {
err = l.checkOneParentChildOperation(ctx, pco, loadingTeamID, &parentChain)
if err != nil {
return err
}
}
// Give a more up-to-date linkmap to the ordering checker for the parent.
// Without this it could fail if the parent is new.
// Because the team linkmap in the proof objects is stale.
proofSet.SetTeamLinkMap(ctx, parentChain.inner.Id, parentChain.inner.LinkIDs)
return nil
} | go | func (l *TeamLoader) checkParentChildOperations(ctx context.Context,
me keybase1.UserVersion, loadingTeamID keybase1.TeamID, parentID *keybase1.TeamID, readSubteamID keybase1.TeamID,
parentChildOperations []*parentChildOperation, proofSet *proofSetT) error {
if len(parentChildOperations) == 0 {
return nil
}
if parentID == nil {
return fmt.Errorf("cannot check parent-child operations with no parent")
}
var needParentSeqnos []keybase1.Seqno
for _, pco := range parentChildOperations {
needParentSeqnos = append(needParentSeqnos, pco.parentSeqno)
}
parent, err := l.load2(ctx, load2ArgT{
teamID: *parentID,
reason: "checkParentChildOperations-parent",
needAdmin: false,
needKeyGeneration: 0,
needApplicationsAtGenerations: nil,
needApplicationsAtGenerationsWithKBFS: nil,
wantMembers: nil,
wantMembersRole: keybase1.TeamRole_NONE,
forceFullReload: false,
forceRepoll: false,
staleOK: true, // stale is fine, as long as get those seqnos.
needSeqnos: needParentSeqnos,
readSubteamID: &readSubteamID,
me: me,
})
if err != nil {
return fmt.Errorf("error loading parent: %v", err)
}
parentChain := TeamSigChainState{inner: parent.team.Chain}
for _, pco := range parentChildOperations {
err = l.checkOneParentChildOperation(ctx, pco, loadingTeamID, &parentChain)
if err != nil {
return err
}
}
// Give a more up-to-date linkmap to the ordering checker for the parent.
// Without this it could fail if the parent is new.
// Because the team linkmap in the proof objects is stale.
proofSet.SetTeamLinkMap(ctx, parentChain.inner.Id, parentChain.inner.LinkIDs)
return nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"checkParentChildOperations",
"(",
"ctx",
"context",
".",
"Context",
",",
"me",
"keybase1",
".",
"UserVersion",
",",
"loadingTeamID",
"keybase1",
".",
"TeamID",
",",
"parentID",
"*",
"keybase1",
".",
"TeamID",
",",
"readSubteamID",
"keybase1",
".",
"TeamID",
",",
"parentChildOperations",
"[",
"]",
"*",
"parentChildOperation",
",",
"proofSet",
"*",
"proofSetT",
")",
"error",
"{",
"if",
"len",
"(",
"parentChildOperations",
")",
"==",
"0",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"parentID",
"==",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"var",
"needParentSeqnos",
"[",
"]",
"keybase1",
".",
"Seqno",
"\n",
"for",
"_",
",",
"pco",
":=",
"range",
"parentChildOperations",
"{",
"needParentSeqnos",
"=",
"append",
"(",
"needParentSeqnos",
",",
"pco",
".",
"parentSeqno",
")",
"\n",
"}",
"\n\n",
"parent",
",",
"err",
":=",
"l",
".",
"load2",
"(",
"ctx",
",",
"load2ArgT",
"{",
"teamID",
":",
"*",
"parentID",
",",
"reason",
":",
"\"",
"\"",
",",
"needAdmin",
":",
"false",
",",
"needKeyGeneration",
":",
"0",
",",
"needApplicationsAtGenerations",
":",
"nil",
",",
"needApplicationsAtGenerationsWithKBFS",
":",
"nil",
",",
"wantMembers",
":",
"nil",
",",
"wantMembersRole",
":",
"keybase1",
".",
"TeamRole_NONE",
",",
"forceFullReload",
":",
"false",
",",
"forceRepoll",
":",
"false",
",",
"staleOK",
":",
"true",
",",
"// stale is fine, as long as get those seqnos.",
"needSeqnos",
":",
"needParentSeqnos",
",",
"readSubteamID",
":",
"&",
"readSubteamID",
",",
"me",
":",
"me",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"parentChain",
":=",
"TeamSigChainState",
"{",
"inner",
":",
"parent",
".",
"team",
".",
"Chain",
"}",
"\n\n",
"for",
"_",
",",
"pco",
":=",
"range",
"parentChildOperations",
"{",
"err",
"=",
"l",
".",
"checkOneParentChildOperation",
"(",
"ctx",
",",
"pco",
",",
"loadingTeamID",
",",
"&",
"parentChain",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Give a more up-to-date linkmap to the ordering checker for the parent.",
"// Without this it could fail if the parent is new.",
"// Because the team linkmap in the proof objects is stale.",
"proofSet",
".",
"SetTeamLinkMap",
"(",
"ctx",
",",
"parentChain",
".",
"inner",
".",
"Id",
",",
"parentChain",
".",
"inner",
".",
"LinkIDs",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // Check that the parent-child operations appear in the parent sigchains. | [
"Check",
"that",
"the",
"parent",
"-",
"child",
"operations",
"appear",
"in",
"the",
"parent",
"sigchains",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L536-L591 |
159,330 | keybase/client | go/teams/loader2.go | checkProofs | func (l *TeamLoader) checkProofs(ctx context.Context,
state *keybase1.TeamData, proofSet *proofSetT) error {
if state == nil {
return fmt.Errorf("teamloader fault: nil team for proof ordering check")
}
// Give the most up-to-date linkmap to the ordering checker.
// Without this it would fail in some cases when the team is on the left.
// Because the team linkmap in the proof objects is stale.
proofSet.SetTeamLinkMap(ctx, state.Chain.Id, state.Chain.LinkIDs)
if !proofSet.checkRequired() {
return nil
}
return proofSet.check(ctx, l.world, teamEnv.ProofSetParallel)
} | go | func (l *TeamLoader) checkProofs(ctx context.Context,
state *keybase1.TeamData, proofSet *proofSetT) error {
if state == nil {
return fmt.Errorf("teamloader fault: nil team for proof ordering check")
}
// Give the most up-to-date linkmap to the ordering checker.
// Without this it would fail in some cases when the team is on the left.
// Because the team linkmap in the proof objects is stale.
proofSet.SetTeamLinkMap(ctx, state.Chain.Id, state.Chain.LinkIDs)
if !proofSet.checkRequired() {
return nil
}
return proofSet.check(ctx, l.world, teamEnv.ProofSetParallel)
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"checkProofs",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"proofSet",
"*",
"proofSetT",
")",
"error",
"{",
"if",
"state",
"==",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"// Give the most up-to-date linkmap to the ordering checker.",
"// Without this it would fail in some cases when the team is on the left.",
"// Because the team linkmap in the proof objects is stale.",
"proofSet",
".",
"SetTeamLinkMap",
"(",
"ctx",
",",
"state",
".",
"Chain",
".",
"Id",
",",
"state",
".",
"Chain",
".",
"LinkIDs",
")",
"\n",
"if",
"!",
"proofSet",
".",
"checkRequired",
"(",
")",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"return",
"proofSet",
".",
"check",
"(",
"ctx",
",",
"l",
".",
"world",
",",
"teamEnv",
".",
"ProofSetParallel",
")",
"\n",
"}"
] | // Check all the proofs and ordering constraints in proofSet | [
"Check",
"all",
"the",
"proofs",
"and",
"ordering",
"constraints",
"in",
"proofSet"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L606-L620 |
159,331 | keybase/client | go/teams/loader2.go | addKBFSCryptKeys | func (l *TeamLoader) addKBFSCryptKeys(ctx context.Context, state *keybase1.TeamData,
upgrades []keybase1.TeamGetLegacyTLFUpgrade) error {
m := make(map[keybase1.TeamApplication][]keybase1.CryptKey)
for _, upgrade := range upgrades {
key, err := ApplicationKeyAtGeneration(libkb.NewMetaContext(ctx, l.G()), state, upgrade.AppType,
keybase1.PerTeamKeyGeneration(upgrade.TeamGeneration))
if err != nil {
return err
}
chainInfo, ok := state.Chain.TlfLegacyUpgrade[upgrade.AppType]
if !ok {
return errors.New("legacy tlf upgrade payload present without chain link")
}
if chainInfo.TeamGeneration != upgrade.TeamGeneration {
return fmt.Errorf("legacy tlf upgrade team generation mismatch: %d != %d",
chainInfo.TeamGeneration, upgrade.TeamGeneration)
}
cryptKeys, err := l.unboxKBFSCryptKeys(ctx, key, chainInfo.KeysetHash, upgrade.EncryptedKeyset)
if err != nil {
return err
}
if chainInfo.LegacyGeneration != cryptKeys[len(cryptKeys)-1].KeyGeneration {
return fmt.Errorf("legacy tlf upgrade legacy generation mismatch: %d != %d",
chainInfo.LegacyGeneration, cryptKeys[len(cryptKeys)-1].KeyGeneration)
}
m[upgrade.AppType] = cryptKeys
}
state.TlfCryptKeys = m
return nil
} | go | func (l *TeamLoader) addKBFSCryptKeys(ctx context.Context, state *keybase1.TeamData,
upgrades []keybase1.TeamGetLegacyTLFUpgrade) error {
m := make(map[keybase1.TeamApplication][]keybase1.CryptKey)
for _, upgrade := range upgrades {
key, err := ApplicationKeyAtGeneration(libkb.NewMetaContext(ctx, l.G()), state, upgrade.AppType,
keybase1.PerTeamKeyGeneration(upgrade.TeamGeneration))
if err != nil {
return err
}
chainInfo, ok := state.Chain.TlfLegacyUpgrade[upgrade.AppType]
if !ok {
return errors.New("legacy tlf upgrade payload present without chain link")
}
if chainInfo.TeamGeneration != upgrade.TeamGeneration {
return fmt.Errorf("legacy tlf upgrade team generation mismatch: %d != %d",
chainInfo.TeamGeneration, upgrade.TeamGeneration)
}
cryptKeys, err := l.unboxKBFSCryptKeys(ctx, key, chainInfo.KeysetHash, upgrade.EncryptedKeyset)
if err != nil {
return err
}
if chainInfo.LegacyGeneration != cryptKeys[len(cryptKeys)-1].KeyGeneration {
return fmt.Errorf("legacy tlf upgrade legacy generation mismatch: %d != %d",
chainInfo.LegacyGeneration, cryptKeys[len(cryptKeys)-1].KeyGeneration)
}
m[upgrade.AppType] = cryptKeys
}
state.TlfCryptKeys = m
return nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"addKBFSCryptKeys",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"upgrades",
"[",
"]",
"keybase1",
".",
"TeamGetLegacyTLFUpgrade",
")",
"error",
"{",
"m",
":=",
"make",
"(",
"map",
"[",
"keybase1",
".",
"TeamApplication",
"]",
"[",
"]",
"keybase1",
".",
"CryptKey",
")",
"\n",
"for",
"_",
",",
"upgrade",
":=",
"range",
"upgrades",
"{",
"key",
",",
"err",
":=",
"ApplicationKeyAtGeneration",
"(",
"libkb",
".",
"NewMetaContext",
"(",
"ctx",
",",
"l",
".",
"G",
"(",
")",
")",
",",
"state",
",",
"upgrade",
".",
"AppType",
",",
"keybase1",
".",
"PerTeamKeyGeneration",
"(",
"upgrade",
".",
"TeamGeneration",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"chainInfo",
",",
"ok",
":=",
"state",
".",
"Chain",
".",
"TlfLegacyUpgrade",
"[",
"upgrade",
".",
"AppType",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"chainInfo",
".",
"TeamGeneration",
"!=",
"upgrade",
".",
"TeamGeneration",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"chainInfo",
".",
"TeamGeneration",
",",
"upgrade",
".",
"TeamGeneration",
")",
"\n",
"}",
"\n\n",
"cryptKeys",
",",
"err",
":=",
"l",
".",
"unboxKBFSCryptKeys",
"(",
"ctx",
",",
"key",
",",
"chainInfo",
".",
"KeysetHash",
",",
"upgrade",
".",
"EncryptedKeyset",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"chainInfo",
".",
"LegacyGeneration",
"!=",
"cryptKeys",
"[",
"len",
"(",
"cryptKeys",
")",
"-",
"1",
"]",
".",
"KeyGeneration",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"chainInfo",
".",
"LegacyGeneration",
",",
"cryptKeys",
"[",
"len",
"(",
"cryptKeys",
")",
"-",
"1",
"]",
".",
"KeyGeneration",
")",
"\n",
"}",
"\n\n",
"m",
"[",
"upgrade",
".",
"AppType",
"]",
"=",
"cryptKeys",
"\n",
"}",
"\n",
"state",
".",
"TlfCryptKeys",
"=",
"m",
"\n",
"return",
"nil",
"\n",
"}"
] | // AddKBFSCryptKeys mutates `state` | [
"AddKBFSCryptKeys",
"mutates",
"state"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L666-L698 |
159,332 | keybase/client | go/teams/loader2.go | checkReaderKeyMaskCoverage | func (l *TeamLoader) checkReaderKeyMaskCoverage(ctx context.Context,
state *keybase1.TeamData, gen keybase1.PerTeamKeyGeneration) error {
for _, app := range keybase1.TeamApplicationMap {
if app == keybase1.TeamApplication_STELLAR_RELAY {
// TODO CORE-7718 Allow clients to be missing these RKMs for now.
// Will need a team cache bust to repair.
continue
}
if _, ok := state.ReaderKeyMasks[app]; !ok {
return fmt.Errorf("missing reader key mask for gen:%v app:%v", gen, app)
}
if _, ok := state.ReaderKeyMasks[app][gen]; !ok {
return fmt.Errorf("missing reader key mask for gen:%v app:%v", gen, app)
}
}
return nil
} | go | func (l *TeamLoader) checkReaderKeyMaskCoverage(ctx context.Context,
state *keybase1.TeamData, gen keybase1.PerTeamKeyGeneration) error {
for _, app := range keybase1.TeamApplicationMap {
if app == keybase1.TeamApplication_STELLAR_RELAY {
// TODO CORE-7718 Allow clients to be missing these RKMs for now.
// Will need a team cache bust to repair.
continue
}
if _, ok := state.ReaderKeyMasks[app]; !ok {
return fmt.Errorf("missing reader key mask for gen:%v app:%v", gen, app)
}
if _, ok := state.ReaderKeyMasks[app][gen]; !ok {
return fmt.Errorf("missing reader key mask for gen:%v app:%v", gen, app)
}
}
return nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"checkReaderKeyMaskCoverage",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"gen",
"keybase1",
".",
"PerTeamKeyGeneration",
")",
"error",
"{",
"for",
"_",
",",
"app",
":=",
"range",
"keybase1",
".",
"TeamApplicationMap",
"{",
"if",
"app",
"==",
"keybase1",
".",
"TeamApplication_STELLAR_RELAY",
"{",
"// TODO CORE-7718 Allow clients to be missing these RKMs for now.",
"// Will need a team cache bust to repair.",
"continue",
"\n",
"}",
"\n",
"if",
"_",
",",
"ok",
":=",
"state",
".",
"ReaderKeyMasks",
"[",
"app",
"]",
";",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"gen",
",",
"app",
")",
"\n",
"}",
"\n",
"if",
"_",
",",
"ok",
":=",
"state",
".",
"ReaderKeyMasks",
"[",
"app",
"]",
"[",
"gen",
"]",
";",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"gen",
",",
"app",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // Check that the RKMs for a generation are covered for all apps. | [
"Check",
"that",
"the",
"RKMs",
"for",
"a",
"generation",
"are",
"covered",
"for",
"all",
"apps",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L815-L833 |
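Note: the checkReaderKeyMaskCoverage record above reduces to the nested comma-ok map lookup idiom. The sketch below is illustrative only and not part of the keybase corpus; the app, gen, and coverageError names are made up for the example.

package main

import "fmt"

type app string
type gen int

// coverageError reports the first (app, gen) pair missing from a nested
// map, using the same two-step comma-ok checks as the record above.
func coverageError(masks map[app]map[gen][]byte, apps []app, g gen) error {
	for _, a := range apps {
		byGen, ok := masks[a]
		if !ok {
			return fmt.Errorf("missing reader key mask for gen:%v app:%v", g, a)
		}
		if _, ok := byGen[g]; !ok {
			return fmt.Errorf("missing reader key mask for gen:%v app:%v", g, a)
		}
	}
	return nil
}

func main() {
	masks := map[app]map[gen][]byte{
		"chat": {1: []byte("mask")},
	}
	fmt.Println(coverageError(masks, []app{"chat"}, 1))         // <nil>
	fmt.Println(coverageError(masks, []app{"chat", "kbfs"}, 1)) // missing ... app:kbfs
}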
159,333 | keybase/client | go/teams/loader2.go | checkNeededSeqnos | func (l *TeamLoader) checkNeededSeqnos(ctx context.Context,
state *keybase1.TeamData, needSeqnos []keybase1.Seqno) error {
if len(needSeqnos) == 0 {
return nil
}
if state == nil {
return fmt.Errorf("nil team does not contain needed seqnos")
}
for _, seqno := range needSeqnos {
if (TeamSigChainState{inner: state.Chain}).HasStubbedSeqno(seqno) {
return fmt.Errorf("needed seqno is stubbed: %v", seqno)
}
}
return nil
} | go | func (l *TeamLoader) checkNeededSeqnos(ctx context.Context,
state *keybase1.TeamData, needSeqnos []keybase1.Seqno) error {
if len(needSeqnos) == 0 {
return nil
}
if state == nil {
return fmt.Errorf("nil team does not contain needed seqnos")
}
for _, seqno := range needSeqnos {
if (TeamSigChainState{inner: state.Chain}).HasStubbedSeqno(seqno) {
return fmt.Errorf("needed seqno is stubbed: %v", seqno)
}
}
return nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"checkNeededSeqnos",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"needSeqnos",
"[",
"]",
"keybase1",
".",
"Seqno",
")",
"error",
"{",
"if",
"len",
"(",
"needSeqnos",
")",
"==",
"0",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"state",
"==",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"seqno",
":=",
"range",
"needSeqnos",
"{",
"if",
"(",
"TeamSigChainState",
"{",
"inner",
":",
"state",
".",
"Chain",
"}",
")",
".",
"HasStubbedSeqno",
"(",
"seqno",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"seqno",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Whether the snapshot has fully loaded, non-stubbed, all of the links. | [
"Whether",
"the",
"snapshot",
"has",
"fully",
"loaded",
"non",
"-",
"stubbed",
"all",
"of",
"the",
"links",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L906-L922 |
159,334 | keybase/client | go/teams/loader2.go | calculateName | func (l *TeamLoader) calculateName(ctx context.Context,
state *keybase1.TeamData, me keybase1.UserVersion, readSubteamID keybase1.TeamID, staleOK bool) (newName keybase1.TeamName, err error) {
chain := TeamSigChainState{inner: state.Chain}
if !chain.IsSubteam() {
return chain.inner.RootAncestor, nil
}
// Load the parent. The parent load will recalculate its own name,
// so this name recalculation is recursive.
parent, err := l.load2(ctx, load2ArgT{
teamID: *chain.GetParentID(),
reason: "calculateName",
staleOK: staleOK,
readSubteamID: &readSubteamID,
me: me,
})
if err != nil {
return newName, err
}
// Swap out the parent name as the base of this name.
// Check that the root ancestor name and depth still match the subteam chain.
newName, err = parent.team.Name.Append(string(chain.LatestLastNamePart()))
if err != nil {
return newName, fmt.Errorf("invalid new subteam name: %v", err)
}
if !newName.RootAncestorName().Eq(chain.inner.RootAncestor) {
return newName, fmt.Errorf("subteam changed root ancestor: %v -> %v",
chain.inner.RootAncestor, newName.RootAncestorName())
}
if newName.Depth() != chain.inner.NameDepth {
return newName, fmt.Errorf("subteam changed depth: %v -> %v", chain.inner.NameDepth, newName.Depth())
}
return newName, nil
} | go | func (l *TeamLoader) calculateName(ctx context.Context,
state *keybase1.TeamData, me keybase1.UserVersion, readSubteamID keybase1.TeamID, staleOK bool) (newName keybase1.TeamName, err error) {
chain := TeamSigChainState{inner: state.Chain}
if !chain.IsSubteam() {
return chain.inner.RootAncestor, nil
}
// Load the parent. The parent load will recalculate its own name,
// so this name recalculation is recursive.
parent, err := l.load2(ctx, load2ArgT{
teamID: *chain.GetParentID(),
reason: "calculateName",
staleOK: staleOK,
readSubteamID: &readSubteamID,
me: me,
})
if err != nil {
return newName, err
}
// Swap out the parent name as the base of this name.
// Check that the root ancestor name and depth still match the subteam chain.
newName, err = parent.team.Name.Append(string(chain.LatestLastNamePart()))
if err != nil {
return newName, fmt.Errorf("invalid new subteam name: %v", err)
}
if !newName.RootAncestorName().Eq(chain.inner.RootAncestor) {
return newName, fmt.Errorf("subteam changed root ancestor: %v -> %v",
chain.inner.RootAncestor, newName.RootAncestorName())
}
if newName.Depth() != chain.inner.NameDepth {
return newName, fmt.Errorf("subteam changed depth: %v -> %v", chain.inner.NameDepth, newName.Depth())
}
return newName, nil
} | [
"func",
"(",
"l",
"*",
"TeamLoader",
")",
"calculateName",
"(",
"ctx",
"context",
".",
"Context",
",",
"state",
"*",
"keybase1",
".",
"TeamData",
",",
"me",
"keybase1",
".",
"UserVersion",
",",
"readSubteamID",
"keybase1",
".",
"TeamID",
",",
"staleOK",
"bool",
")",
"(",
"newName",
"keybase1",
".",
"TeamName",
",",
"err",
"error",
")",
"{",
"chain",
":=",
"TeamSigChainState",
"{",
"inner",
":",
"state",
".",
"Chain",
"}",
"\n",
"if",
"!",
"chain",
".",
"IsSubteam",
"(",
")",
"{",
"return",
"chain",
".",
"inner",
".",
"RootAncestor",
",",
"nil",
"\n",
"}",
"\n\n",
"// Load the parent. The parent load will recalculate its own name,",
"// so this name recalculation is recursive.",
"parent",
",",
"err",
":=",
"l",
".",
"load2",
"(",
"ctx",
",",
"load2ArgT",
"{",
"teamID",
":",
"*",
"chain",
".",
"GetParentID",
"(",
")",
",",
"reason",
":",
"\"",
"\"",
",",
"staleOK",
":",
"staleOK",
",",
"readSubteamID",
":",
"&",
"readSubteamID",
",",
"me",
":",
"me",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"newName",
",",
"err",
"\n",
"}",
"\n\n",
"// Swap out the parent name as the base of this name.",
"// Check that the root ancestor name and depth still match the subteam chain.",
"newName",
",",
"err",
"=",
"parent",
".",
"team",
".",
"Name",
".",
"Append",
"(",
"string",
"(",
"chain",
".",
"LatestLastNamePart",
"(",
")",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"newName",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"!",
"newName",
".",
"RootAncestorName",
"(",
")",
".",
"Eq",
"(",
"chain",
".",
"inner",
".",
"RootAncestor",
")",
"{",
"return",
"newName",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"chain",
".",
"inner",
".",
"RootAncestor",
",",
"newName",
".",
"RootAncestorName",
"(",
")",
")",
"\n",
"}",
"\n\n",
"if",
"newName",
".",
"Depth",
"(",
")",
"!=",
"chain",
".",
"inner",
".",
"NameDepth",
"{",
"return",
"newName",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"chain",
".",
"inner",
".",
"NameDepth",
",",
"newName",
".",
"Depth",
"(",
")",
")",
"\n",
"}",
"\n\n",
"return",
"newName",
",",
"nil",
"\n",
"}"
] | // Calculates the latest name of the team.
// The last part will be as up to date as the sigchain in state.
// The mid-team parts can be as old as the cache time, unless staleOK is false in which case they will be fetched. | [
"Calculates",
"the",
"latest",
"name",
"of",
"the",
"team",
".",
"The",
"last",
"part",
"will",
"be",
"as",
"up",
"to",
"date",
"as",
"the",
"sigchain",
"in",
"state",
".",
"The",
"mid",
"-",
"team",
"parts",
"can",
"be",
"as",
"old",
"as",
"the",
"cache",
"time",
"unless",
"staleOK",
"is",
"false",
"in",
"which",
"case",
"they",
"will",
"be",
"fetched",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/teams/loader2.go#L927-L966 |
159,335 | keybase/client | go/service/cryptocurrency.go | RegisterAddress | func (h *CryptocurrencyHandler) RegisterAddress(nctx context.Context, arg keybase1.RegisterAddressArg) (keybase1.RegisterAddressRes, error) {
uis := libkb.UIs{
LogUI: h.getLogUI(arg.SessionID),
SecretUI: h.getSecretUI(arg.SessionID, h.G()),
SessionID: arg.SessionID,
}
eng := engine.NewCryptocurrencyEngine(h.G(), arg)
m := libkb.NewMetaContext(nctx, h.G()).WithUIs(uis)
err := engine.RunEngine2(m, eng)
res := eng.Result()
return res, err
} | go | func (h *CryptocurrencyHandler) RegisterAddress(nctx context.Context, arg keybase1.RegisterAddressArg) (keybase1.RegisterAddressRes, error) {
uis := libkb.UIs{
LogUI: h.getLogUI(arg.SessionID),
SecretUI: h.getSecretUI(arg.SessionID, h.G()),
SessionID: arg.SessionID,
}
eng := engine.NewCryptocurrencyEngine(h.G(), arg)
m := libkb.NewMetaContext(nctx, h.G()).WithUIs(uis)
err := engine.RunEngine2(m, eng)
res := eng.Result()
return res, err
} | [
"func",
"(",
"h",
"*",
"CryptocurrencyHandler",
")",
"RegisterAddress",
"(",
"nctx",
"context",
".",
"Context",
",",
"arg",
"keybase1",
".",
"RegisterAddressArg",
")",
"(",
"keybase1",
".",
"RegisterAddressRes",
",",
"error",
")",
"{",
"uis",
":=",
"libkb",
".",
"UIs",
"{",
"LogUI",
":",
"h",
".",
"getLogUI",
"(",
"arg",
".",
"SessionID",
")",
",",
"SecretUI",
":",
"h",
".",
"getSecretUI",
"(",
"arg",
".",
"SessionID",
",",
"h",
".",
"G",
"(",
")",
")",
",",
"SessionID",
":",
"arg",
".",
"SessionID",
",",
"}",
"\n",
"eng",
":=",
"engine",
".",
"NewCryptocurrencyEngine",
"(",
"h",
".",
"G",
"(",
")",
",",
"arg",
")",
"\n",
"m",
":=",
"libkb",
".",
"NewMetaContext",
"(",
"nctx",
",",
"h",
".",
"G",
"(",
")",
")",
".",
"WithUIs",
"(",
"uis",
")",
"\n",
"err",
":=",
"engine",
".",
"RunEngine2",
"(",
"m",
",",
"eng",
")",
"\n",
"res",
":=",
"eng",
".",
"Result",
"(",
")",
"\n",
"return",
"res",
",",
"err",
"\n",
"}"
] | // BTC creates a BTCEngine and runs it. | [
"BTC",
"creates",
"a",
"BTCEngine",
"and",
"runs",
"it",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/service/cryptocurrency.go#L27-L38 |
159,336 | keybase/client | go/service/notify.go | NewNotifyCtlHandler | func NewNotifyCtlHandler(xp rpc.Transporter, id libkb.ConnectionID, g *libkb.GlobalContext) *NotifyCtlHandler {
return &NotifyCtlHandler{
Contextified: libkb.NewContextified(g),
BaseHandler: NewBaseHandler(g, xp),
id: id,
}
} | go | func NewNotifyCtlHandler(xp rpc.Transporter, id libkb.ConnectionID, g *libkb.GlobalContext) *NotifyCtlHandler {
return &NotifyCtlHandler{
Contextified: libkb.NewContextified(g),
BaseHandler: NewBaseHandler(g, xp),
id: id,
}
} | [
"func",
"NewNotifyCtlHandler",
"(",
"xp",
"rpc",
".",
"Transporter",
",",
"id",
"libkb",
".",
"ConnectionID",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
")",
"*",
"NotifyCtlHandler",
"{",
"return",
"&",
"NotifyCtlHandler",
"{",
"Contextified",
":",
"libkb",
".",
"NewContextified",
"(",
"g",
")",
",",
"BaseHandler",
":",
"NewBaseHandler",
"(",
"g",
",",
"xp",
")",
",",
"id",
":",
"id",
",",
"}",
"\n",
"}"
] | // NewNotifyCtlHandler creates a new handler for setting up notification
// channels | [
"NewNotifyCtlHandler",
"creates",
"a",
"new",
"handler",
"for",
"setting",
"up",
"notification",
"channels"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/service/notify.go#L22-L28 |
159,337 | keybase/client | go/kbfs/libdokan/tlf.go | CanDeleteDirectory | func (tlf *TLF) CanDeleteDirectory(ctx context.Context, fi *dokan.FileInfo) (err error) {
return nil
} | go | func (tlf *TLF) CanDeleteDirectory(ctx context.Context, fi *dokan.FileInfo) (err error) {
return nil
} | [
"func",
"(",
"tlf",
"*",
"TLF",
")",
"CanDeleteDirectory",
"(",
"ctx",
"context",
".",
"Context",
",",
"fi",
"*",
"dokan",
".",
"FileInfo",
")",
"(",
"err",
"error",
")",
"{",
"return",
"nil",
"\n",
"}"
] | // CanDeleteDirectory - return just nil because tlfs
// can always be removed from favorites. | [
"CanDeleteDirectory",
"-",
"return",
"just",
"nil",
"because",
"tlfs",
"can",
"always",
"be",
"removed",
"from",
"favorites",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libdokan/tlf.go#L256-L258 |
159,338 | keybase/client | go/kbfs/libdokan/tlf.go | Cleanup | func (tlf *TLF) Cleanup(ctx context.Context, fi *dokan.FileInfo) {
var err error
if fi != nil && fi.IsDeleteOnClose() {
tlf.folder.handleMu.Lock()
fav := tlf.folder.h.ToFavorite()
tlf.folder.handleMu.Unlock()
tlf.folder.fs.vlog.CLogf(
ctx, libkb.VLog1, "TLF Removing favorite %q", fav.Name)
defer func() {
tlf.folder.reportErr(ctx, libkbfs.WriteMode, err)
}()
err = tlf.folder.fs.config.KBFSOps().DeleteFavorite(ctx, fav)
}
if tlf.refcount.Decrease() {
dir := tlf.getStoredDir()
if dir == nil {
return
}
dir.Cleanup(ctx, fi)
}
} | go | func (tlf *TLF) Cleanup(ctx context.Context, fi *dokan.FileInfo) {
var err error
if fi != nil && fi.IsDeleteOnClose() {
tlf.folder.handleMu.Lock()
fav := tlf.folder.h.ToFavorite()
tlf.folder.handleMu.Unlock()
tlf.folder.fs.vlog.CLogf(
ctx, libkb.VLog1, "TLF Removing favorite %q", fav.Name)
defer func() {
tlf.folder.reportErr(ctx, libkbfs.WriteMode, err)
}()
err = tlf.folder.fs.config.KBFSOps().DeleteFavorite(ctx, fav)
}
if tlf.refcount.Decrease() {
dir := tlf.getStoredDir()
if dir == nil {
return
}
dir.Cleanup(ctx, fi)
}
} | [
"func",
"(",
"tlf",
"*",
"TLF",
")",
"Cleanup",
"(",
"ctx",
"context",
".",
"Context",
",",
"fi",
"*",
"dokan",
".",
"FileInfo",
")",
"{",
"var",
"err",
"error",
"\n",
"if",
"fi",
"!=",
"nil",
"&&",
"fi",
".",
"IsDeleteOnClose",
"(",
")",
"{",
"tlf",
".",
"folder",
".",
"handleMu",
".",
"Lock",
"(",
")",
"\n",
"fav",
":=",
"tlf",
".",
"folder",
".",
"h",
".",
"ToFavorite",
"(",
")",
"\n",
"tlf",
".",
"folder",
".",
"handleMu",
".",
"Unlock",
"(",
")",
"\n",
"tlf",
".",
"folder",
".",
"fs",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"fav",
".",
"Name",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"tlf",
".",
"folder",
".",
"reportErr",
"(",
"ctx",
",",
"libkbfs",
".",
"WriteMode",
",",
"err",
")",
"\n",
"}",
"(",
")",
"\n",
"err",
"=",
"tlf",
".",
"folder",
".",
"fs",
".",
"config",
".",
"KBFSOps",
"(",
")",
".",
"DeleteFavorite",
"(",
"ctx",
",",
"fav",
")",
"\n",
"}",
"\n\n",
"if",
"tlf",
".",
"refcount",
".",
"Decrease",
"(",
")",
"{",
"dir",
":=",
"tlf",
".",
"getStoredDir",
"(",
")",
"\n",
"if",
"dir",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"dir",
".",
"Cleanup",
"(",
"ctx",
",",
"fi",
")",
"\n",
"}",
"\n",
"}"
] | // Cleanup - forget references, perform deletions etc. | [
"Cleanup",
"-",
"forget",
"references",
"perform",
"deletions",
"etc",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libdokan/tlf.go#L261-L282 |
159,339 | keybase/client | go/chat/unfurl/scrape_generic_scoring.go | getAppleTouchFaviconScore | func getAppleTouchFaviconScore(domain string, e *colly.HTMLElement) int {
return (getDefaultScore(domain, e) + 1) * getFaviconMultiplier(e)
} | go | func getAppleTouchFaviconScore(domain string, e *colly.HTMLElement) int {
return (getDefaultScore(domain, e) + 1) * getFaviconMultiplier(e)
} | [
"func",
"getAppleTouchFaviconScore",
"(",
"domain",
"string",
",",
"e",
"*",
"colly",
".",
"HTMLElement",
")",
"int",
"{",
"return",
"(",
"getDefaultScore",
"(",
"domain",
",",
"e",
")",
"+",
"1",
")",
"*",
"getFaviconMultiplier",
"(",
"e",
")",
"\n",
"}"
] | // Favor apple-touch-icon over other favicons, try to get the highest
// resolution. | [
"Favor",
"apple",
"-",
"touch",
"-",
"icon",
"over",
"other",
"favicons",
"try",
"to",
"get",
"the",
"highest",
"resolution",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/unfurl/scrape_generic_scoring.go#L75-L77 |
159,340 | keybase/client | go/kbfs/libkbfs/folder_branch_status.go | setRootMetadata | func (fbsk *folderBranchStatusKeeper) setRootMetadata(md ImmutableRootMetadata) {
fbsk.dataMutex.Lock()
defer fbsk.dataMutex.Unlock()
if fbsk.md.MdID() == md.MdID() {
return
}
fbsk.md = md
fbsk.signalChangeLocked()
} | go | func (fbsk *folderBranchStatusKeeper) setRootMetadata(md ImmutableRootMetadata) {
fbsk.dataMutex.Lock()
defer fbsk.dataMutex.Unlock()
if fbsk.md.MdID() == md.MdID() {
return
}
fbsk.md = md
fbsk.signalChangeLocked()
} | [
"func",
"(",
"fbsk",
"*",
"folderBranchStatusKeeper",
")",
"setRootMetadata",
"(",
"md",
"ImmutableRootMetadata",
")",
"{",
"fbsk",
".",
"dataMutex",
".",
"Lock",
"(",
")",
"\n",
"defer",
"fbsk",
".",
"dataMutex",
".",
"Unlock",
"(",
")",
"\n",
"if",
"fbsk",
".",
"md",
".",
"MdID",
"(",
")",
"==",
"md",
".",
"MdID",
"(",
")",
"{",
"return",
"\n",
"}",
"\n",
"fbsk",
".",
"md",
"=",
"md",
"\n",
"fbsk",
".",
"signalChangeLocked",
"(",
")",
"\n",
"}"
] | // setRootMetadata sets the current head metadata for the
// corresponding folder-branch. | [
"setRootMetadata",
"sets",
"the",
"current",
"head",
"metadata",
"for",
"the",
"corresponding",
"folder",
"-",
"branch",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_branch_status.go#L127-L135 |
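Note: the setRootMetadata record above follows a common Go pattern — take a mutex, skip no-op updates, then signal observers that something changed. The following self-contained sketch shows one way to express that pattern with a channel that is closed and replaced on each change; the statusKeeper type and its string value are illustrative, not keybase identifiers (the real code signals through its own signalChangeLocked helper).

package main

import (
	"fmt"
	"sync"
)

// statusKeeper holds a value plus a channel that is closed (and replaced)
// whenever the value changes.
type statusKeeper struct {
	mu       sync.Mutex
	value    string
	changeCh chan struct{}
}

func newStatusKeeper() *statusKeeper {
	return &statusKeeper{changeCh: make(chan struct{})}
}

// set updates the value and signals waiters, skipping no-op updates.
func (s *statusKeeper) set(v string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.value == v {
		return // nothing changed; don't wake waiters
	}
	s.value = v
	close(s.changeCh)                // signal everyone waiting on the old channel
	s.changeCh = make(chan struct{}) // fresh channel for the next change
}

// get returns the current value and a channel that closes on the next change.
func (s *statusKeeper) get() (string, <-chan struct{}) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.value, s.changeCh
}

func main() {
	sk := newStatusKeeper()
	_, ch := sk.get()
	go sk.set("new-head")
	<-ch // unblocks once set() closes the old channel
	v, _ := sk.get()
	fmt.Println(v) // new-head
}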
159,341 | keybase/client | go/kbfs/libkbfs/folder_branch_status.go | getStatus | func (fbsk *folderBranchStatusKeeper) getStatus(ctx context.Context,
blocks *folderBlockOps) (FolderBranchStatus, <-chan StatusUpdate, error) {
fbs, ch, tlfID, err := fbsk.getStatusWithoutJournaling(ctx)
if err != nil {
return FolderBranchStatus{}, nil, err
}
if tlfID == tlf.NullID {
return fbs, ch, nil
}
// Fetch journal info without holding any locks, to avoid possible
// deadlocks with folderBlockOps.
// TODO: Ideally, the journal would push status
// updates to this object instead, so we can notify
// listeners.
jManager, err := GetJournalManager(fbsk.config)
if err != nil {
return fbs, ch, nil
}
var jStatus TLFJournalStatus
if blocks != nil {
jStatus, err =
jManager.JournalStatusWithPaths(ctx, tlfID, blocks)
} else {
jStatus, err =
jManager.JournalStatus(tlfID)
}
if err != nil {
log := fbsk.config.MakeLogger("")
log.CWarningf(ctx, "Error getting journal status for %s: %v",
tlfID, err)
} else {
fbs.Journal = &jStatus
}
return fbs, ch, nil
} | go | func (fbsk *folderBranchStatusKeeper) getStatus(ctx context.Context,
blocks *folderBlockOps) (FolderBranchStatus, <-chan StatusUpdate, error) {
fbs, ch, tlfID, err := fbsk.getStatusWithoutJournaling(ctx)
if err != nil {
return FolderBranchStatus{}, nil, err
}
if tlfID == tlf.NullID {
return fbs, ch, nil
}
// Fetch journal info without holding any locks, to avoid possible
// deadlocks with folderBlockOps.
// TODO: Ideally, the journal would push status
// updates to this object instead, so we can notify
// listeners.
jManager, err := GetJournalManager(fbsk.config)
if err != nil {
return fbs, ch, nil
}
var jStatus TLFJournalStatus
if blocks != nil {
jStatus, err =
jManager.JournalStatusWithPaths(ctx, tlfID, blocks)
} else {
jStatus, err =
jManager.JournalStatus(tlfID)
}
if err != nil {
log := fbsk.config.MakeLogger("")
log.CWarningf(ctx, "Error getting journal status for %s: %v",
tlfID, err)
} else {
fbs.Journal = &jStatus
}
return fbs, ch, nil
} | [
"func",
"(",
"fbsk",
"*",
"folderBranchStatusKeeper",
")",
"getStatus",
"(",
"ctx",
"context",
".",
"Context",
",",
"blocks",
"*",
"folderBlockOps",
")",
"(",
"FolderBranchStatus",
",",
"<-",
"chan",
"StatusUpdate",
",",
"error",
")",
"{",
"fbs",
",",
"ch",
",",
"tlfID",
",",
"err",
":=",
"fbsk",
".",
"getStatusWithoutJournaling",
"(",
"ctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"FolderBranchStatus",
"{",
"}",
",",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"tlfID",
"==",
"tlf",
".",
"NullID",
"{",
"return",
"fbs",
",",
"ch",
",",
"nil",
"\n",
"}",
"\n\n",
"// Fetch journal info without holding any locks, to avoid possible",
"// deadlocks with folderBlockOps.",
"// TODO: Ideally, the journal would push status",
"// updates to this object instead, so we can notify",
"// listeners.",
"jManager",
",",
"err",
":=",
"GetJournalManager",
"(",
"fbsk",
".",
"config",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fbs",
",",
"ch",
",",
"nil",
"\n",
"}",
"\n\n",
"var",
"jStatus",
"TLFJournalStatus",
"\n",
"if",
"blocks",
"!=",
"nil",
"{",
"jStatus",
",",
"err",
"=",
"jManager",
".",
"JournalStatusWithPaths",
"(",
"ctx",
",",
"tlfID",
",",
"blocks",
")",
"\n",
"}",
"else",
"{",
"jStatus",
",",
"err",
"=",
"jManager",
".",
"JournalStatus",
"(",
"tlfID",
")",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
":=",
"fbsk",
".",
"config",
".",
"MakeLogger",
"(",
"\"",
"\"",
")",
"\n",
"log",
".",
"CWarningf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"tlfID",
",",
"err",
")",
"\n",
"}",
"else",
"{",
"fbs",
".",
"Journal",
"=",
"&",
"jStatus",
"\n",
"}",
"\n",
"return",
"fbs",
",",
"ch",
",",
"nil",
"\n",
"}"
] | // getStatus returns a FolderBranchStatus-representation of the
// current status. If blocks != nil, the paths of any unflushed files
// in the journals will be included in the status. The returned
// channel is closed whenever the status changes, except for journal
// status changes. | [
"getStatus",
"returns",
"a",
"FolderBranchStatus",
"-",
"representation",
"of",
"the",
"current",
"status",
".",
"If",
"blocks",
"!",
"=",
"nil",
"the",
"paths",
"of",
"any",
"unflushed",
"files",
"in",
"the",
"journals",
"will",
"be",
"included",
"in",
"the",
"status",
".",
"The",
"returned",
"channel",
"is",
"closed",
"whenever",
"the",
"status",
"changes",
"except",
"for",
"journal",
"status",
"changes",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_branch_status.go#L305-L342 |
159,342 | keybase/client | go/kbfs/data/block_tree.go | processGetBlocksTask | func (bt *blockTree) processGetBlocksTask(ctx context.Context,
wg *sync.WaitGroup, wp *workerpool.WorkerPool,
job getBlocksForOffsetRangeTask,
results chan<- getBlocksForOffsetRangeResult) {
defer wg.Done()
select {
case <-ctx.Done():
results <- getBlocksForOffsetRangeResult{err: ctx.Err()}
return
default:
}
// We may have been passed just a pointer and need to fetch the block here.
var pblock BlockWithPtrs
if job.pblock == nil {
var err error
pblock, _, err = bt.getter(ctx, bt.kmd, job.ptr, bt.file, BlockReadParallel)
if err != nil {
results <- getBlocksForOffsetRangeResult{
firstBlock: job.firstBlock,
err: err,
}
return
}
} else {
pblock = job.pblock
}
if !pblock.IsIndirect() {
// Return this block, under the assumption that the
// caller already checked the range for this block.
if job.getDirect {
results <- getBlocksForOffsetRangeResult{
pathFromRoot: job.pathPrefix,
ptr: job.ptr,
block: pblock,
nextBlockOffset: nil,
firstBlock: job.firstBlock,
err: nil,
}
}
return
}
// Search all of the in-range child blocks, and their child
// blocks, etc, in parallel.
childIsFirstBlock := job.firstBlock
for i := 0; i < pblock.NumIndirectPtrs(); i++ {
info, iptrOff := pblock.IndirectPtr(i)
// Some byte of this block is included in the left side of the
// range if `job.startOff` is less than the largest byte offset in
// the block.
inRangeLeft := true
if i < pblock.NumIndirectPtrs()-1 {
_, off := pblock.IndirectPtr(i + 1)
inRangeLeft = job.startOff.Less(off)
}
if !inRangeLeft {
continue
}
// Some byte of this block is included in the right side of
// the range if `job.endOff` is bigger than the smallest byte
// offset in the block (or if we're explicitly reading all the
// data to the end).
inRangeRight := job.endOff == nil || iptrOff.Less(job.endOff)
if !inRangeRight {
// This block is the first one past the offset range
// amount the children.
results <- getBlocksForOffsetRangeResult{nextBlockOffset: iptrOff}
return
}
childPtr := info.BlockPointer
childIndex := i
childPath := make([]ParentBlockAndChildIndex, len(job.pathPrefix)+1)
copy(childPath, job.pathPrefix)
childPath[len(childPath)-1] = ParentBlockAndChildIndex{
pblock: pblock,
childIndex: childIndex,
}
// We only need to fetch direct blocks if we've been asked
// to do so. If the direct type of the pointer is
// unknown, we can assume all the children are direct
// blocks, since there weren't multiple levels of
// indirection before the introduction of the flag.
if job.getDirect || childPtr.DirectType == IndirectBlock {
subTask := job.subTask(childPtr, childPath, childIsFirstBlock)
// Enqueue the subTask with the WorkerPool.
wg.Add(1)
wp.Submit(func() {
bt.processGetBlocksTask(ctx, wg, wp, subTask, results)
})
} else {
results <- getBlocksForOffsetRangeResult{
pathFromRoot: childPath,
firstBlock: childIsFirstBlock,
}
}
childIsFirstBlock = false
}
} | go | func (bt *blockTree) processGetBlocksTask(ctx context.Context,
wg *sync.WaitGroup, wp *workerpool.WorkerPool,
job getBlocksForOffsetRangeTask,
results chan<- getBlocksForOffsetRangeResult) {
defer wg.Done()
select {
case <-ctx.Done():
results <- getBlocksForOffsetRangeResult{err: ctx.Err()}
return
default:
}
// We may have been passed just a pointer and need to fetch the block here.
var pblock BlockWithPtrs
if job.pblock == nil {
var err error
pblock, _, err = bt.getter(ctx, bt.kmd, job.ptr, bt.file, BlockReadParallel)
if err != nil {
results <- getBlocksForOffsetRangeResult{
firstBlock: job.firstBlock,
err: err,
}
return
}
} else {
pblock = job.pblock
}
if !pblock.IsIndirect() {
// Return this block, under the assumption that the
// caller already checked the range for this block.
if job.getDirect {
results <- getBlocksForOffsetRangeResult{
pathFromRoot: job.pathPrefix,
ptr: job.ptr,
block: pblock,
nextBlockOffset: nil,
firstBlock: job.firstBlock,
err: nil,
}
}
return
}
// Search all of the in-range child blocks, and their child
// blocks, etc, in parallel.
childIsFirstBlock := job.firstBlock
for i := 0; i < pblock.NumIndirectPtrs(); i++ {
info, iptrOff := pblock.IndirectPtr(i)
// Some byte of this block is included in the left side of the
// range if `job.startOff` is less than the largest byte offset in
// the block.
inRangeLeft := true
if i < pblock.NumIndirectPtrs()-1 {
_, off := pblock.IndirectPtr(i + 1)
inRangeLeft = job.startOff.Less(off)
}
if !inRangeLeft {
continue
}
// Some byte of this block is included in the right side of
// the range if `job.endOff` is bigger than the smallest byte
// offset in the block (or if we're explicitly reading all the
// data to the end).
inRangeRight := job.endOff == nil || iptrOff.Less(job.endOff)
if !inRangeRight {
// This block is the first one past the offset range
// amount the children.
results <- getBlocksForOffsetRangeResult{nextBlockOffset: iptrOff}
return
}
childPtr := info.BlockPointer
childIndex := i
childPath := make([]ParentBlockAndChildIndex, len(job.pathPrefix)+1)
copy(childPath, job.pathPrefix)
childPath[len(childPath)-1] = ParentBlockAndChildIndex{
pblock: pblock,
childIndex: childIndex,
}
// We only need to fetch direct blocks if we've been asked
// to do so. If the direct type of the pointer is
// unknown, we can assume all the children are direct
// blocks, since there weren't multiple levels of
// indirection before the introduction of the flag.
if job.getDirect || childPtr.DirectType == IndirectBlock {
subTask := job.subTask(childPtr, childPath, childIsFirstBlock)
// Enqueue the subTask with the WorkerPool.
wg.Add(1)
wp.Submit(func() {
bt.processGetBlocksTask(ctx, wg, wp, subTask, results)
})
} else {
results <- getBlocksForOffsetRangeResult{
pathFromRoot: childPath,
firstBlock: childIsFirstBlock,
}
}
childIsFirstBlock = false
}
} | [
"func",
"(",
"bt",
"*",
"blockTree",
")",
"processGetBlocksTask",
"(",
"ctx",
"context",
".",
"Context",
",",
"wg",
"*",
"sync",
".",
"WaitGroup",
",",
"wp",
"*",
"workerpool",
".",
"WorkerPool",
",",
"job",
"getBlocksForOffsetRangeTask",
",",
"results",
"chan",
"<-",
"getBlocksForOffsetRangeResult",
")",
"{",
"defer",
"wg",
".",
"Done",
"(",
")",
"\n\n",
"select",
"{",
"case",
"<-",
"ctx",
".",
"Done",
"(",
")",
":",
"results",
"<-",
"getBlocksForOffsetRangeResult",
"{",
"err",
":",
"ctx",
".",
"Err",
"(",
")",
"}",
"\n",
"return",
"\n",
"default",
":",
"}",
"\n\n",
"// We may have been passed just a pointer and need to fetch the block here.",
"var",
"pblock",
"BlockWithPtrs",
"\n",
"if",
"job",
".",
"pblock",
"==",
"nil",
"{",
"var",
"err",
"error",
"\n",
"pblock",
",",
"_",
",",
"err",
"=",
"bt",
".",
"getter",
"(",
"ctx",
",",
"bt",
".",
"kmd",
",",
"job",
".",
"ptr",
",",
"bt",
".",
"file",
",",
"BlockReadParallel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"results",
"<-",
"getBlocksForOffsetRangeResult",
"{",
"firstBlock",
":",
"job",
".",
"firstBlock",
",",
"err",
":",
"err",
",",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"else",
"{",
"pblock",
"=",
"job",
".",
"pblock",
"\n",
"}",
"\n\n",
"if",
"!",
"pblock",
".",
"IsIndirect",
"(",
")",
"{",
"// Return this block, under the assumption that the",
"// caller already checked the range for this block.",
"if",
"job",
".",
"getDirect",
"{",
"results",
"<-",
"getBlocksForOffsetRangeResult",
"{",
"pathFromRoot",
":",
"job",
".",
"pathPrefix",
",",
"ptr",
":",
"job",
".",
"ptr",
",",
"block",
":",
"pblock",
",",
"nextBlockOffset",
":",
"nil",
",",
"firstBlock",
":",
"job",
".",
"firstBlock",
",",
"err",
":",
"nil",
",",
"}",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n\n",
"// Search all of the in-range child blocks, and their child",
"// blocks, etc, in parallel.",
"childIsFirstBlock",
":=",
"job",
".",
"firstBlock",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"pblock",
".",
"NumIndirectPtrs",
"(",
")",
";",
"i",
"++",
"{",
"info",
",",
"iptrOff",
":=",
"pblock",
".",
"IndirectPtr",
"(",
"i",
")",
"\n",
"// Some byte of this block is included in the left side of the",
"// range if `job.startOff` is less than the largest byte offset in",
"// the block.",
"inRangeLeft",
":=",
"true",
"\n",
"if",
"i",
"<",
"pblock",
".",
"NumIndirectPtrs",
"(",
")",
"-",
"1",
"{",
"_",
",",
"off",
":=",
"pblock",
".",
"IndirectPtr",
"(",
"i",
"+",
"1",
")",
"\n",
"inRangeLeft",
"=",
"job",
".",
"startOff",
".",
"Less",
"(",
"off",
")",
"\n",
"}",
"\n",
"if",
"!",
"inRangeLeft",
"{",
"continue",
"\n",
"}",
"\n",
"// Some byte of this block is included in the right side of",
"// the range if `job.endOff` is bigger than the smallest byte",
"// offset in the block (or if we're explicitly reading all the",
"// data to the end).",
"inRangeRight",
":=",
"job",
".",
"endOff",
"==",
"nil",
"||",
"iptrOff",
".",
"Less",
"(",
"job",
".",
"endOff",
")",
"\n",
"if",
"!",
"inRangeRight",
"{",
"// This block is the first one past the offset range",
"// amount the children.",
"results",
"<-",
"getBlocksForOffsetRangeResult",
"{",
"nextBlockOffset",
":",
"iptrOff",
"}",
"\n",
"return",
"\n",
"}",
"\n\n",
"childPtr",
":=",
"info",
".",
"BlockPointer",
"\n",
"childIndex",
":=",
"i",
"\n\n",
"childPath",
":=",
"make",
"(",
"[",
"]",
"ParentBlockAndChildIndex",
",",
"len",
"(",
"job",
".",
"pathPrefix",
")",
"+",
"1",
")",
"\n",
"copy",
"(",
"childPath",
",",
"job",
".",
"pathPrefix",
")",
"\n",
"childPath",
"[",
"len",
"(",
"childPath",
")",
"-",
"1",
"]",
"=",
"ParentBlockAndChildIndex",
"{",
"pblock",
":",
"pblock",
",",
"childIndex",
":",
"childIndex",
",",
"}",
"\n\n",
"// We only need to fetch direct blocks if we've been asked",
"// to do so. If the direct type of the pointer is",
"// unknown, we can assume all the children are direct",
"// blocks, since there weren't multiple levels of",
"// indirection before the introduction of the flag.",
"if",
"job",
".",
"getDirect",
"||",
"childPtr",
".",
"DirectType",
"==",
"IndirectBlock",
"{",
"subTask",
":=",
"job",
".",
"subTask",
"(",
"childPtr",
",",
"childPath",
",",
"childIsFirstBlock",
")",
"\n\n",
"// Enqueue the subTask with the WorkerPool.",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"wp",
".",
"Submit",
"(",
"func",
"(",
")",
"{",
"bt",
".",
"processGetBlocksTask",
"(",
"ctx",
",",
"wg",
",",
"wp",
",",
"subTask",
",",
"results",
")",
"\n",
"}",
")",
"\n",
"}",
"else",
"{",
"results",
"<-",
"getBlocksForOffsetRangeResult",
"{",
"pathFromRoot",
":",
"childPath",
",",
"firstBlock",
":",
"childIsFirstBlock",
",",
"}",
"\n",
"}",
"\n",
"childIsFirstBlock",
"=",
"false",
"\n",
"}",
"\n",
"}"
] | // processGetBlocksTask examines the block it is passed, enqueueing any children
// in range into wp, and passing data back through results. | [
"processGetBlocksTask",
"examines",
"the",
"block",
"it",
"is",
"passed",
"enqueueing",
"any",
"children",
"in",
"range",
"into",
"wp",
"and",
"passing",
"data",
"back",
"through",
"results",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/block_tree.go#L327-L431 |
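Note: the processGetBlocksTask record above fans a recursive tree walk out over a bounded worker pool, pairing a sync.WaitGroup with pool submission so the caller knows when every enqueued subtask has finished. Below is a self-contained sketch of that shape, assuming the github.com/gammazero/workerpool package (the package name used by the record's code); the node type, walk function, and numbers are illustrative only.

package main

import (
	"fmt"
	"sync"

	"github.com/gammazero/workerpool"
)

// node is a toy stand-in for an indirect block: leaves carry data,
// inner nodes only carry children.
type node struct {
	data     int
	children []*node
}

// walk submits one task per child so a bounded pool can fan out over the
// tree, mirroring the WaitGroup + Submit pattern in the record above.
func walk(wg *sync.WaitGroup, wp *workerpool.WorkerPool, n *node, results chan<- int) {
	defer wg.Done()
	if len(n.children) == 0 {
		results <- n.data // leaf: report a result
		return
	}
	for _, c := range n.children {
		c := c
		wg.Add(1)
		wp.Submit(func() { walk(wg, wp, c, results) })
	}
}

func main() {
	root := &node{children: []*node{
		{data: 1}, {data: 2},
		{children: []*node{{data: 3}}},
	}}

	wp := workerpool.New(4)         // at most 4 tasks run concurrently
	results := make(chan int, 8)    // buffered so leaves never block the pool

	var wg sync.WaitGroup
	wg.Add(1)
	wp.Submit(func() { walk(&wg, wp, root, results) })

	wg.Wait()      // all submitted tasks have finished
	close(results) // safe: no more writers
	wp.StopWait()

	sum := 0
	for r := range results {
		sum += r
	}
	fmt.Println(sum) // 6
}

The WaitGroup is what makes completion detectable: pool submission alone only bounds concurrency, it does not tell the caller when the recursively enqueued work has drained.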
159,343 | keybase/client | go/kbfs/data/block_tree.go | markParentsDirty | func (bt *blockTree) markParentsDirty(
ctx context.Context, parentBlocks []ParentBlockAndChildIndex) (
dirtyPtrs []BlockPointer, unrefs []BlockInfo, err error) {
parentPtr := bt.rootBlockPointer()
for _, pb := range parentBlocks {
dirtyPtrs = append(dirtyPtrs, parentPtr)
childInfo, _ := pb.childIPtr()
// Remember the size of each newly-dirtied child.
if childInfo.EncodedSize != 0 {
unrefs = append(unrefs, childInfo)
pb.clearEncodedSize()
}
if err := bt.cacher(ctx, parentPtr, pb.pblock); err != nil {
return nil, unrefs, err
}
parentPtr = childInfo.BlockPointer
}
return dirtyPtrs, unrefs, nil
} | go | func (bt *blockTree) markParentsDirty(
ctx context.Context, parentBlocks []ParentBlockAndChildIndex) (
dirtyPtrs []BlockPointer, unrefs []BlockInfo, err error) {
parentPtr := bt.rootBlockPointer()
for _, pb := range parentBlocks {
dirtyPtrs = append(dirtyPtrs, parentPtr)
childInfo, _ := pb.childIPtr()
// Remember the size of each newly-dirtied child.
if childInfo.EncodedSize != 0 {
unrefs = append(unrefs, childInfo)
pb.clearEncodedSize()
}
if err := bt.cacher(ctx, parentPtr, pb.pblock); err != nil {
return nil, unrefs, err
}
parentPtr = childInfo.BlockPointer
}
return dirtyPtrs, unrefs, nil
} | [
"func",
"(",
"bt",
"*",
"blockTree",
")",
"markParentsDirty",
"(",
"ctx",
"context",
".",
"Context",
",",
"parentBlocks",
"[",
"]",
"ParentBlockAndChildIndex",
")",
"(",
"dirtyPtrs",
"[",
"]",
"BlockPointer",
",",
"unrefs",
"[",
"]",
"BlockInfo",
",",
"err",
"error",
")",
"{",
"parentPtr",
":=",
"bt",
".",
"rootBlockPointer",
"(",
")",
"\n",
"for",
"_",
",",
"pb",
":=",
"range",
"parentBlocks",
"{",
"dirtyPtrs",
"=",
"append",
"(",
"dirtyPtrs",
",",
"parentPtr",
")",
"\n",
"childInfo",
",",
"_",
":=",
"pb",
".",
"childIPtr",
"(",
")",
"\n\n",
"// Remember the size of each newly-dirtied child.",
"if",
"childInfo",
".",
"EncodedSize",
"!=",
"0",
"{",
"unrefs",
"=",
"append",
"(",
"unrefs",
",",
"childInfo",
")",
"\n",
"pb",
".",
"clearEncodedSize",
"(",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"bt",
".",
"cacher",
"(",
"ctx",
",",
"parentPtr",
",",
"pb",
".",
"pblock",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"unrefs",
",",
"err",
"\n",
"}",
"\n",
"parentPtr",
"=",
"childInfo",
".",
"BlockPointer",
"\n",
"}",
"\n",
"return",
"dirtyPtrs",
",",
"unrefs",
",",
"nil",
"\n",
"}"
] | // markParentsDirty caches all the blocks in `parentBlocks` as dirty,
// and returns the dirtied block pointers as well as any block infos
// with non-zero encoded sizes that will now need to be unreferenced. | [
"markParentsDirty",
"caches",
"all",
"the",
"blocks",
"in",
"parentBlocks",
"as",
"dirty",
"and",
"returns",
"the",
"dirtied",
"block",
"pointers",
"as",
"well",
"as",
"any",
"block",
"infos",
"with",
"non",
"-",
"zero",
"encoded",
"sizes",
"that",
"will",
"now",
"need",
"to",
"be",
"unreferenced",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/block_tree.go#L992-L1011 |
159,344 | keybase/client | go/kbfs/data/block_tree.go | readyHelper | func (bt *blockTree) readyHelper(
ctx context.Context, id tlf.ID, bcache BlockCache,
rp ReadyProvider, bps BlockPutState,
pathsFromRoot [][]ParentBlockAndChildIndex, makeSync makeSyncFunc) (
map[BlockInfo]BlockPointer, error) {
oldPtrs := make(map[BlockInfo]BlockPointer)
donePtrs := make(map[BlockPointer]bool)
// lock protects `bps`, `oldPtrs`, and `donePtrs` while
// parallelizing block readies below.
var lock sync.Mutex
// Starting from the leaf level, ready each block at each level,
// and put the new BlockInfo into the parent block at the level
// above. At each level, only ready each block once. Don't ready
// the root block though; the folderUpdatePrepper code will do
// that.
for level := len(pathsFromRoot[0]) - 1; level > 0; level-- {
eg, groupCtx := errgroup.WithContext(ctx)
indices := make(chan int, len(pathsFromRoot))
numWorkers := len(pathsFromRoot)
if numWorkers > maxParallelReadies {
numWorkers = maxParallelReadies
}
worker := func() error {
for i := range indices {
err := bt.readyWorker(
groupCtx, id, bcache, rp, bps, pathsFromRoot, makeSync,
i, level, &lock, oldPtrs, donePtrs)
if err != nil {
return err
}
}
return nil
}
for i := 0; i < numWorkers; i++ {
eg.Go(worker)
}
for i := 0; i < len(pathsFromRoot); i++ {
indices <- i
}
close(indices)
err := eg.Wait()
if err != nil {
return nil, err
}
}
return oldPtrs, nil
} | go | func (bt *blockTree) readyHelper(
ctx context.Context, id tlf.ID, bcache BlockCache,
rp ReadyProvider, bps BlockPutState,
pathsFromRoot [][]ParentBlockAndChildIndex, makeSync makeSyncFunc) (
map[BlockInfo]BlockPointer, error) {
oldPtrs := make(map[BlockInfo]BlockPointer)
donePtrs := make(map[BlockPointer]bool)
// lock protects `bps`, `oldPtrs`, and `donePtrs` while
// parallelizing block readies below.
var lock sync.Mutex
// Starting from the leaf level, ready each block at each level,
// and put the new BlockInfo into the parent block at the level
// above. At each level, only ready each block once. Don't ready
// the root block though; the folderUpdatePrepper code will do
// that.
for level := len(pathsFromRoot[0]) - 1; level > 0; level-- {
eg, groupCtx := errgroup.WithContext(ctx)
indices := make(chan int, len(pathsFromRoot))
numWorkers := len(pathsFromRoot)
if numWorkers > maxParallelReadies {
numWorkers = maxParallelReadies
}
worker := func() error {
for i := range indices {
err := bt.readyWorker(
groupCtx, id, bcache, rp, bps, pathsFromRoot, makeSync,
i, level, &lock, oldPtrs, donePtrs)
if err != nil {
return err
}
}
return nil
}
for i := 0; i < numWorkers; i++ {
eg.Go(worker)
}
for i := 0; i < len(pathsFromRoot); i++ {
indices <- i
}
close(indices)
err := eg.Wait()
if err != nil {
return nil, err
}
}
return oldPtrs, nil
} | [
"func",
"(",
"bt",
"*",
"blockTree",
")",
"readyHelper",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"tlf",
".",
"ID",
",",
"bcache",
"BlockCache",
",",
"rp",
"ReadyProvider",
",",
"bps",
"BlockPutState",
",",
"pathsFromRoot",
"[",
"]",
"[",
"]",
"ParentBlockAndChildIndex",
",",
"makeSync",
"makeSyncFunc",
")",
"(",
"map",
"[",
"BlockInfo",
"]",
"BlockPointer",
",",
"error",
")",
"{",
"oldPtrs",
":=",
"make",
"(",
"map",
"[",
"BlockInfo",
"]",
"BlockPointer",
")",
"\n",
"donePtrs",
":=",
"make",
"(",
"map",
"[",
"BlockPointer",
"]",
"bool",
")",
"\n\n",
"// lock protects `bps`, `oldPtrs`, and `donePtrs` while",
"// parallelizing block readies below.",
"var",
"lock",
"sync",
".",
"Mutex",
"\n\n",
"// Starting from the leaf level, ready each block at each level,",
"// and put the new BlockInfo into the parent block at the level",
"// above. At each level, only ready each block once. Don't ready",
"// the root block though; the folderUpdatePrepper code will do",
"// that.",
"for",
"level",
":=",
"len",
"(",
"pathsFromRoot",
"[",
"0",
"]",
")",
"-",
"1",
";",
"level",
">",
"0",
";",
"level",
"--",
"{",
"eg",
",",
"groupCtx",
":=",
"errgroup",
".",
"WithContext",
"(",
"ctx",
")",
"\n",
"indices",
":=",
"make",
"(",
"chan",
"int",
",",
"len",
"(",
"pathsFromRoot",
")",
")",
"\n",
"numWorkers",
":=",
"len",
"(",
"pathsFromRoot",
")",
"\n",
"if",
"numWorkers",
">",
"maxParallelReadies",
"{",
"numWorkers",
"=",
"maxParallelReadies",
"\n",
"}",
"\n\n",
"worker",
":=",
"func",
"(",
")",
"error",
"{",
"for",
"i",
":=",
"range",
"indices",
"{",
"err",
":=",
"bt",
".",
"readyWorker",
"(",
"groupCtx",
",",
"id",
",",
"bcache",
",",
"rp",
",",
"bps",
",",
"pathsFromRoot",
",",
"makeSync",
",",
"i",
",",
"level",
",",
"&",
"lock",
",",
"oldPtrs",
",",
"donePtrs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"numWorkers",
";",
"i",
"++",
"{",
"eg",
".",
"Go",
"(",
"worker",
")",
"\n",
"}",
"\n\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"pathsFromRoot",
")",
";",
"i",
"++",
"{",
"indices",
"<-",
"i",
"\n",
"}",
"\n",
"close",
"(",
"indices",
")",
"\n",
"err",
":=",
"eg",
".",
"Wait",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"oldPtrs",
",",
"nil",
"\n",
"}"
] | // readyHelper takes a set of paths from a root down to a child block,
// and readies all the blocks represented in those paths. If the
// caller wants leaf blocks readied, then the last element of each
// slice in `pathsFromRoot` should contain a leaf block, with a child
// index of -1. It's assumed that all slices in `pathsFromRoot` have
// the same size. This function returns a map pointing from the new
// block info from any readied block to its corresponding old block
// pointer. | [
"readyHelper",
"takes",
"a",
"set",
"of",
"paths",
"from",
"a",
"root",
"down",
"to",
"a",
"child",
"block",
"and",
"readies",
"all",
"the",
"blocks",
"represented",
"in",
"those",
"paths",
".",
"If",
"the",
"caller",
"wants",
"leaf",
"blocks",
"readied",
"then",
"the",
"last",
"element",
"of",
"each",
"slice",
"in",
"pathsFromRoot",
"should",
"contain",
"a",
"leaf",
"block",
"with",
"a",
"child",
"index",
"of",
"-",
"1",
".",
"It",
"s",
"assumed",
"that",
"all",
"slices",
"in",
"pathsFromRoot",
"have",
"the",
"same",
"size",
".",
"This",
"function",
"returns",
"a",
"map",
"pointing",
"from",
"the",
"new",
"block",
"info",
"from",
"any",
"readied",
"block",
"to",
"its",
"corresponding",
"old",
"block",
"pointer",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/block_tree.go#L1081-L1131 |
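Note: the per-level loop in the readyHelper record above is the classic bounded-worker errgroup pattern — a buffered channel of indices, a fixed number of workers, and cancellation of the shared context on the first error. The sketch below assumes golang.org/x/sync/errgroup is the errgroup package in use; processAll and work are illustrative names, not keybase identifiers.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// processAll runs work(i) for every index 0..n-1 with at most maxWorkers
// goroutines, stopping early if any call fails — the same shape as the
// per-level loop in readyHelper.
func processAll(ctx context.Context, n, maxWorkers int, work func(context.Context, int) error) error {
	eg, gctx := errgroup.WithContext(ctx)
	indices := make(chan int, n) // buffered so the feed loop never blocks

	if maxWorkers > n {
		maxWorkers = n
	}
	for w := 0; w < maxWorkers; w++ {
		eg.Go(func() error {
			for i := range indices {
				if err := work(gctx, i); err != nil {
					return err // cancels gctx for the other workers
				}
			}
			return nil
		})
	}
	for i := 0; i < n; i++ {
		indices <- i
	}
	close(indices)
	return eg.Wait()
}

func main() {
	err := processAll(context.Background(), 10, 3, func(_ context.Context, i int) error {
		fmt.Println("processed", i)
		return nil
	})
	fmt.Println("err:", err)
}

Buffering the index channel to the full job count is the detail that keeps the producer from deadlocking when a worker exits early with an error.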
159,345 | keybase/client | go/kbfs/data/block_tree.go | ready | func (bt *blockTree) ready(
ctx context.Context, id tlf.ID, bcache BlockCache,
dirtyBcache IsDirtyProvider, rp ReadyProvider, bps BlockPutState,
topBlock BlockWithPtrs, makeSync makeSyncFunc) (
map[BlockInfo]BlockPointer, error) {
if !topBlock.IsIndirect() {
return nil, nil
}
// This will contain paths to all dirty leaf paths. The final
// entry index in each path will be the leaf node block itself
// (with a -1 child index).
var dirtyLeafPaths [][]ParentBlockAndChildIndex
// Gather all the paths to all dirty leaf blocks first.
off := topBlock.FirstOffset()
for off != nil {
_, parentBlocks, block, nextBlockOff, _, err :=
bt.getNextDirtyBlockAtOffset(
ctx, topBlock, off, BlockWrite, dirtyBcache)
if err != nil {
return nil, err
}
if block == nil {
// No more dirty blocks.
break
}
off = nextBlockOff // Will be `nil` if there are no more blocks.
// Make sure there's only one copy of each pblock among all
// the paths, so `readyHelper` can update the blocks in place
// along any path, and they will all be updated.
for _, p := range dirtyLeafPaths {
for i := range parentBlocks {
if i == 0 || p[i-1].childBlockPtr() ==
parentBlocks[i-1].childBlockPtr() {
parentBlocks[i].pblock = p[i].pblock
}
}
}
dirtyLeafPaths = append(dirtyLeafPaths,
append(parentBlocks, ParentBlockAndChildIndex{block, -1}))
}
// No dirty blocks means nothing to do.
if len(dirtyLeafPaths) == 0 {
return nil, nil
}
return bt.readyHelper(ctx, id, bcache, rp, bps, dirtyLeafPaths, makeSync)
} | go | func (bt *blockTree) ready(
ctx context.Context, id tlf.ID, bcache BlockCache,
dirtyBcache IsDirtyProvider, rp ReadyProvider, bps BlockPutState,
topBlock BlockWithPtrs, makeSync makeSyncFunc) (
map[BlockInfo]BlockPointer, error) {
if !topBlock.IsIndirect() {
return nil, nil
}
// This will contain paths to all dirty leaf paths. The final
// entry index in each path will be the leaf node block itself
// (with a -1 child index).
var dirtyLeafPaths [][]ParentBlockAndChildIndex
// Gather all the paths to all dirty leaf blocks first.
off := topBlock.FirstOffset()
for off != nil {
_, parentBlocks, block, nextBlockOff, _, err :=
bt.getNextDirtyBlockAtOffset(
ctx, topBlock, off, BlockWrite, dirtyBcache)
if err != nil {
return nil, err
}
if block == nil {
// No more dirty blocks.
break
}
off = nextBlockOff // Will be `nil` if there are no more blocks.
// Make sure there's only one copy of each pblock among all
// the paths, so `readyHelper` can update the blocks in place
// along any path, and they will all be updated.
for _, p := range dirtyLeafPaths {
for i := range parentBlocks {
if i == 0 || p[i-1].childBlockPtr() ==
parentBlocks[i-1].childBlockPtr() {
parentBlocks[i].pblock = p[i].pblock
}
}
}
dirtyLeafPaths = append(dirtyLeafPaths,
append(parentBlocks, ParentBlockAndChildIndex{block, -1}))
}
// No dirty blocks means nothing to do.
if len(dirtyLeafPaths) == 0 {
return nil, nil
}
return bt.readyHelper(ctx, id, bcache, rp, bps, dirtyLeafPaths, makeSync)
} | [
"func",
"(",
"bt",
"*",
"blockTree",
")",
"ready",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"tlf",
".",
"ID",
",",
"bcache",
"BlockCache",
",",
"dirtyBcache",
"IsDirtyProvider",
",",
"rp",
"ReadyProvider",
",",
"bps",
"BlockPutState",
",",
"topBlock",
"BlockWithPtrs",
",",
"makeSync",
"makeSyncFunc",
")",
"(",
"map",
"[",
"BlockInfo",
"]",
"BlockPointer",
",",
"error",
")",
"{",
"if",
"!",
"topBlock",
".",
"IsIndirect",
"(",
")",
"{",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"// This will contain paths to all dirty leaf paths. The final",
"// entry index in each path will be the leaf node block itself",
"// (with a -1 child index).",
"var",
"dirtyLeafPaths",
"[",
"]",
"[",
"]",
"ParentBlockAndChildIndex",
"\n\n",
"// Gather all the paths to all dirty leaf blocks first.",
"off",
":=",
"topBlock",
".",
"FirstOffset",
"(",
")",
"\n",
"for",
"off",
"!=",
"nil",
"{",
"_",
",",
"parentBlocks",
",",
"block",
",",
"nextBlockOff",
",",
"_",
",",
"err",
":=",
"bt",
".",
"getNextDirtyBlockAtOffset",
"(",
"ctx",
",",
"topBlock",
",",
"off",
",",
"BlockWrite",
",",
"dirtyBcache",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"block",
"==",
"nil",
"{",
"// No more dirty blocks.",
"break",
"\n",
"}",
"\n",
"off",
"=",
"nextBlockOff",
"// Will be `nil` if there are no more blocks.",
"\n\n",
"// Make sure there's only one copy of each pblock among all",
"// the paths, so `readyHelper` can update the blocks in place",
"// along any path, and they will all be updated.",
"for",
"_",
",",
"p",
":=",
"range",
"dirtyLeafPaths",
"{",
"for",
"i",
":=",
"range",
"parentBlocks",
"{",
"if",
"i",
"==",
"0",
"||",
"p",
"[",
"i",
"-",
"1",
"]",
".",
"childBlockPtr",
"(",
")",
"==",
"parentBlocks",
"[",
"i",
"-",
"1",
"]",
".",
"childBlockPtr",
"(",
")",
"{",
"parentBlocks",
"[",
"i",
"]",
".",
"pblock",
"=",
"p",
"[",
"i",
"]",
".",
"pblock",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"dirtyLeafPaths",
"=",
"append",
"(",
"dirtyLeafPaths",
",",
"append",
"(",
"parentBlocks",
",",
"ParentBlockAndChildIndex",
"{",
"block",
",",
"-",
"1",
"}",
")",
")",
"\n",
"}",
"\n\n",
"// No dirty blocks means nothing to do.",
"if",
"len",
"(",
"dirtyLeafPaths",
")",
"==",
"0",
"{",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"return",
"bt",
".",
"readyHelper",
"(",
"ctx",
",",
"id",
",",
"bcache",
",",
"rp",
",",
"bps",
",",
"dirtyLeafPaths",
",",
"makeSync",
")",
"\n",
"}"
] | // ready, if given an indirect top-block, readies all the dirty child
// blocks, and updates their block IDs in their parent block's list of
// indirect pointers. It returns a map pointing from the new block
// info from any readied block to its corresponding old block pointer. | [
"ready",
"if",
"given",
"an",
"indirect",
"top",
"-",
"block",
"readies",
"all",
"the",
"dirty",
"child",
"blocks",
"and",
"updates",
"their",
"block",
"IDs",
"in",
"their",
"parent",
"block",
"s",
"list",
"of",
"indirect",
"pointers",
".",
"It",
"returns",
"a",
"map",
"pointing",
"from",
"the",
"new",
"block",
"info",
"from",
"any",
"readied",
"block",
"to",
"its",
"corresponding",
"old",
"block",
"pointer",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/block_tree.go#L1137-L1189 |
159,346 | keybase/client | go/kbfs/libkbfs/rekey_fsm.go | NewRekeyRequestWithPaperPromptEvent | func NewRekeyRequestWithPaperPromptEvent() RekeyEvent {
e := NewRekeyRequestEvent()
d := rekeyWithPromptWaitTimeDefault
e.request.promptPaper = true
e.request.timeout = &d
return e
} | go | func NewRekeyRequestWithPaperPromptEvent() RekeyEvent {
e := NewRekeyRequestEvent()
d := rekeyWithPromptWaitTimeDefault
e.request.promptPaper = true
e.request.timeout = &d
return e
} | [
"func",
"NewRekeyRequestWithPaperPromptEvent",
"(",
")",
"RekeyEvent",
"{",
"e",
":=",
"NewRekeyRequestEvent",
"(",
")",
"\n",
"d",
":=",
"rekeyWithPromptWaitTimeDefault",
"\n",
"e",
".",
"request",
".",
"promptPaper",
"=",
"true",
"\n",
"e",
".",
"request",
".",
"timeout",
"=",
"&",
"d",
"\n",
"return",
"e",
"\n",
"}"
] | // NewRekeyRequestWithPaperPromptEvent creates a non-delayed rekey request
// Event that causes a paper prompt. | [
"NewRekeyRequestWithPaperPromptEvent",
"creates",
"a",
"non",
"-",
"delayed",
"rekey",
"request",
"Event",
"that",
"causes",
"a",
"paper",
"prompt",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_fsm.go#L158-L164 |
159,347 | keybase/client | go/kbfs/libkbfs/rekey_fsm.go | NewRekeyFSM | func NewRekeyFSM(fbo *folderBranchOps) RekeyFSM {
fsm := &rekeyFSM{
reqs: make(chan RekeyEvent, fbo.config.Mode().RekeyQueueSize()),
shutdownCh: make(chan struct{}),
fbo: fbo,
log: fbo.config.MakeLogger("RekeyFSM"),
listeners: make(map[rekeyEventType][]rekeyFSMListener),
}
fsm.current = newRekeyStateIdle(fsm)
if fbo.bType == standard {
go fsm.loop()
}
return fsm
} | go | func NewRekeyFSM(fbo *folderBranchOps) RekeyFSM {
fsm := &rekeyFSM{
reqs: make(chan RekeyEvent, fbo.config.Mode().RekeyQueueSize()),
shutdownCh: make(chan struct{}),
fbo: fbo,
log: fbo.config.MakeLogger("RekeyFSM"),
listeners: make(map[rekeyEventType][]rekeyFSMListener),
}
fsm.current = newRekeyStateIdle(fsm)
if fbo.bType == standard {
go fsm.loop()
}
return fsm
} | [
"func",
"NewRekeyFSM",
"(",
"fbo",
"*",
"folderBranchOps",
")",
"RekeyFSM",
"{",
"fsm",
":=",
"&",
"rekeyFSM",
"{",
"reqs",
":",
"make",
"(",
"chan",
"RekeyEvent",
",",
"fbo",
".",
"config",
".",
"Mode",
"(",
")",
".",
"RekeyQueueSize",
"(",
")",
")",
",",
"shutdownCh",
":",
"make",
"(",
"chan",
"struct",
"{",
"}",
")",
",",
"fbo",
":",
"fbo",
",",
"log",
":",
"fbo",
".",
"config",
".",
"MakeLogger",
"(",
"\"",
"\"",
")",
",",
"listeners",
":",
"make",
"(",
"map",
"[",
"rekeyEventType",
"]",
"[",
"]",
"rekeyFSMListener",
")",
",",
"}",
"\n",
"fsm",
".",
"current",
"=",
"newRekeyStateIdle",
"(",
"fsm",
")",
"\n",
"if",
"fbo",
".",
"bType",
"==",
"standard",
"{",
"go",
"fsm",
".",
"loop",
"(",
")",
"\n",
"}",
"\n",
"return",
"fsm",
"\n",
"}"
] | // NewRekeyFSM creates a new rekey FSM. | [
"NewRekeyFSM",
"creates",
"a",
"new",
"rekey",
"FSM",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_fsm.go#L457-L471 |
159,348 | keybase/client | go/kbfs/libkbfs/rekey_fsm.go | Event | func (m *rekeyFSM) Event(event RekeyEvent) {
select {
case m.reqs <- event:
case <-m.shutdownCh:
}
} | go | func (m *rekeyFSM) Event(event RekeyEvent) {
select {
case m.reqs <- event:
case <-m.shutdownCh:
}
} | [
"func",
"(",
"m",
"*",
"rekeyFSM",
")",
"Event",
"(",
"event",
"RekeyEvent",
")",
"{",
"select",
"{",
"case",
"m",
".",
"reqs",
"<-",
"event",
":",
"case",
"<-",
"m",
".",
"shutdownCh",
":",
"}",
"\n",
"}"
] | // Event implements RekeyFSM interface for rekeyFSM. | [
"Event",
"implements",
"RekeyFSM",
"interface",
"for",
"rekeyFSM",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_fsm.go#L502-L507 |
159,349 | keybase/client | go/kbfs/libkbfs/rekey_fsm.go | listenOnEvent | func (m *rekeyFSM) listenOnEvent(
event rekeyEventType, callback func(RekeyEvent), repeatedly bool) {
m.muListeners.Lock()
defer m.muListeners.Unlock()
m.listeners[event] = append(m.listeners[event], rekeyFSMListener{
onEvent: callback,
repeatedly: repeatedly,
})
} | go | func (m *rekeyFSM) listenOnEvent(
event rekeyEventType, callback func(RekeyEvent), repeatedly bool) {
m.muListeners.Lock()
defer m.muListeners.Unlock()
m.listeners[event] = append(m.listeners[event], rekeyFSMListener{
onEvent: callback,
repeatedly: repeatedly,
})
} | [
"func",
"(",
"m",
"*",
"rekeyFSM",
")",
"listenOnEvent",
"(",
"event",
"rekeyEventType",
",",
"callback",
"func",
"(",
"RekeyEvent",
")",
",",
"repeatedly",
"bool",
")",
"{",
"m",
".",
"muListeners",
".",
"Lock",
"(",
")",
"\n",
"defer",
"m",
".",
"muListeners",
".",
"Unlock",
"(",
")",
"\n",
"m",
".",
"listeners",
"[",
"event",
"]",
"=",
"append",
"(",
"m",
".",
"listeners",
"[",
"event",
"]",
",",
"rekeyFSMListener",
"{",
"onEvent",
":",
"callback",
",",
"repeatedly",
":",
"repeatedly",
",",
"}",
")",
"\n",
"}"
] | // listenOnEvent implements RekeyFSM interface for rekeyFSM. | [
"listenOnEvent",
"implements",
"RekeyFSM",
"interface",
"for",
"rekeyFSM",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_fsm.go#L534-L542 |
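The constructor, Event, and listenOnEvent above are the in-package surface of the rekey FSM. A minimal sketch of wiring them together, assuming it runs inside the libkbfs package with an initialized *folderBranchOps in hand (called fbo here); the one-shot listener plus channel is the same pattern RequestRekeyAndWaitForOneFinishEvent uses in the entry below.

// Illustrative only: drive the FSM by hand. fbo is assumed to be a
// configured *folderBranchOps; the listener fires once because
// repeatedly is false.
fsm := NewRekeyFSM(fbo)
done := make(chan struct{})
fsm.listenOnEvent(rekeyFinishedEvent, func(e RekeyEvent) {
    // e.finished carries the result and error of this rekey attempt.
    close(done)
}, false)
fsm.Event(NewRekeyRequestEvent())
<-done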
159,350 | keybase/client | go/kbfs/libkbfs/rekey_fsm.go | RequestRekeyAndWaitForOneFinishEvent | func RequestRekeyAndWaitForOneFinishEvent(ctx context.Context,
ops KBFSOps, tlfID tlf.ID) (res RekeyResult, err error) {
fsm := getRekeyFSM(ctx, ops, tlfID)
rekeyWaiter := make(chan struct{})
fsm.listenOnEvent(rekeyFinishedEvent, func(e RekeyEvent) {
res = e.finished.RekeyResult
err = e.finished.err
close(rekeyWaiter)
}, false)
fsm.Event(newRekeyRequestEventWithContext(ctx))
<-rekeyWaiter
return res, err
} | go | func RequestRekeyAndWaitForOneFinishEvent(ctx context.Context,
ops KBFSOps, tlfID tlf.ID) (res RekeyResult, err error) {
fsm := getRekeyFSM(ctx, ops, tlfID)
rekeyWaiter := make(chan struct{})
fsm.listenOnEvent(rekeyFinishedEvent, func(e RekeyEvent) {
res = e.finished.RekeyResult
err = e.finished.err
close(rekeyWaiter)
}, false)
fsm.Event(newRekeyRequestEventWithContext(ctx))
<-rekeyWaiter
return res, err
} | [
"func",
"RequestRekeyAndWaitForOneFinishEvent",
"(",
"ctx",
"context",
".",
"Context",
",",
"ops",
"KBFSOps",
",",
"tlfID",
"tlf",
".",
"ID",
")",
"(",
"res",
"RekeyResult",
",",
"err",
"error",
")",
"{",
"fsm",
":=",
"getRekeyFSM",
"(",
"ctx",
",",
"ops",
",",
"tlfID",
")",
"\n",
"rekeyWaiter",
":=",
"make",
"(",
"chan",
"struct",
"{",
"}",
")",
"\n",
"fsm",
".",
"listenOnEvent",
"(",
"rekeyFinishedEvent",
",",
"func",
"(",
"e",
"RekeyEvent",
")",
"{",
"res",
"=",
"e",
".",
"finished",
".",
"RekeyResult",
"\n",
"err",
"=",
"e",
".",
"finished",
".",
"err",
"\n",
"close",
"(",
"rekeyWaiter",
")",
"\n",
"}",
",",
"false",
")",
"\n",
"fsm",
".",
"Event",
"(",
"newRekeyRequestEventWithContext",
"(",
"ctx",
")",
")",
"\n",
"<-",
"rekeyWaiter",
"\n",
"return",
"res",
",",
"err",
"\n",
"}"
] | // RequestRekeyAndWaitForOneFinishEvent sends a rekey request to the FSM
// associated with tlfID, and waits for exactly one rekeyFinished event. This can
// be useful for waiting for a rekey result in tests.
//
// Note that the supplied ctx is injected to the rekey task, so canceling ctx
// would actually cancel the rekey.
//
// Currently this is only used in tests and RekeyFile. Normal rekey activities
// should go through the FSM asynchronously. | [
"RequestRekeyAndWaitForOneFinishEvent",
"sends",
"a",
"rekey",
"request",
"to",
"the",
"FSM",
"associated",
"with",
"tlfID",
"and",
"wait",
"for",
"exact",
"one",
"rekeyFinished",
"event",
".",
"This",
"can",
"be",
"useful",
"for",
"waiting",
"for",
"a",
"rekey",
"result",
"in",
"tests",
".",
"Note",
"that",
"the",
"supplied",
"ctx",
"is",
"injected",
"to",
"the",
"rekey",
"task",
"so",
"canceling",
"ctx",
"would",
"actually",
"cancel",
"the",
"rekey",
".",
"Currently",
"this",
"is",
"only",
"used",
"in",
"tests",
"and",
"RekeyFile",
".",
"Normal",
"rekey",
"activities",
"should",
"go",
"through",
"the",
"FSM",
"asychronously",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_fsm.go#L566-L578 |
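A sketch of how a test might use the helper above to force a synchronous rekey; ops and id are placeholders for a configured KBFSOps implementation and the target folder's tlf.ID, and the returned RekeyResult is left for the caller to inspect.

// Hypothetical test helper: request a rekey and block until the FSM
// reports a finished event for it.
func rekeySynchronously(ctx context.Context, ops KBFSOps, id tlf.ID) error {
    res, err := RequestRekeyAndWaitForOneFinishEvent(ctx, ops, id)
    if err != nil {
        return err
    }
    _ = res // inspect the RekeyResult fields as needed
    return nil
}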
159,351 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerError) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerError
s.Name = "SERVER_ERROR"
s.Desc = e.Error()
return
} | go | func (e ServerError) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerError
s.Name = "SERVER_ERROR"
s.Desc = e.Error()
return
} | [
"func",
"(",
"e",
"ServerError",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerError",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Error",
"(",
")",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerError. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerError",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L83-L88 |
159,352 | keybase/client | go/kbfs/kbfsmd/server_errors.go | Error | func (e ServerError) Error() string {
if e.Err != nil {
return e.Err.Error()
}
return "ServerError"
} | go | func (e ServerError) Error() string {
if e.Err != nil {
return e.Err.Error()
}
return "ServerError"
} | [
"func",
"(",
"e",
"ServerError",
")",
"Error",
"(",
")",
"string",
"{",
"if",
"e",
".",
"Err",
"!=",
"nil",
"{",
"return",
"e",
".",
"Err",
".",
"Error",
"(",
")",
"\n",
"}",
"\n",
"return",
"\"",
"\"",
"\n",
"}"
] | // Error implements the Error interface for ServerError. | [
"Error",
"implements",
"the",
"Error",
"interface",
"for",
"ServerError",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L91-L96 |
159,353 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorBadRequest) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorBadRequest
s.Name = "BAD_REQUEST"
s.Desc = e.Reason
return
} | go | func (e ServerErrorBadRequest) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorBadRequest
s.Name = "BAD_REQUEST"
s.Desc = e.Reason
return
} | [
"func",
"(",
"e",
"ServerErrorBadRequest",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorBadRequest",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Reason",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerErrorBadRequest. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorBadRequest",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L104-L109 |
159,354 | keybase/client | go/kbfs/kbfsmd/server_errors.go | Error | func (e ServerErrorConflictRevision) Error() string {
if e.Desc == "" {
return fmt.Sprintf("Conflict: expected revision %d, actual %d", e.Expected, e.Actual)
}
return "MDServerConflictRevision{" + e.Desc + "}"
} | go | func (e ServerErrorConflictRevision) Error() string {
if e.Desc == "" {
return fmt.Sprintf("Conflict: expected revision %d, actual %d", e.Expected, e.Actual)
}
return "MDServerConflictRevision{" + e.Desc + "}"
} | [
"func",
"(",
"e",
"ServerErrorConflictRevision",
")",
"Error",
"(",
")",
"string",
"{",
"if",
"e",
".",
"Desc",
"==",
"\"",
"\"",
"{",
"return",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"e",
".",
"Expected",
",",
"e",
".",
"Actual",
")",
"\n",
"}",
"\n",
"return",
"\"",
"\"",
"+",
"e",
".",
"Desc",
"+",
"\"",
"\"",
"\n",
"}"
] | // Error implements the Error interface for ServerErrorConflictRevision. | [
"Error",
"implements",
"the",
"Error",
"interface",
"for",
"ServerErrorConflictRevision",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L124-L129 |
159,355 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorConflictRevision) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConflictRevision
s.Name = "CONFLICT_REVISION"
s.Desc = e.Error()
return
} | go | func (e ServerErrorConflictRevision) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConflictRevision
s.Name = "CONFLICT_REVISION"
s.Desc = e.Error()
return
} | [
"func",
"(",
"e",
"ServerErrorConflictRevision",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorConflictRevision",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Error",
"(",
")",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerErrorConflictRevision. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorConflictRevision",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L132-L137 |
159,356 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorConflictPrevRoot) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConflictPrevRoot
s.Name = "CONFLICT_PREV_ROOT"
s.Desc = e.Error()
return
} | go | func (e ServerErrorConflictPrevRoot) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConflictPrevRoot
s.Name = "CONFLICT_PREV_ROOT"
s.Desc = e.Error()
return
} | [
"func",
"(",
"e",
"ServerErrorConflictPrevRoot",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorConflictPrevRoot",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Error",
"(",
")",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerErrorConflictPrevRoot. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorConflictPrevRoot",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L155-L160 |
159,357 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorConflictDiskUsage) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConflictDiskUsage
s.Name = "CONFLICT_DISK_USAGE"
s.Desc = e.Error()
return
} | go | func (e ServerErrorConflictDiskUsage) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConflictDiskUsage
s.Name = "CONFLICT_DISK_USAGE"
s.Desc = e.Error()
return
} | [
"func",
"(",
"e",
"ServerErrorConflictDiskUsage",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorConflictDiskUsage",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Error",
"(",
")",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerErrorConflictDiskUsage. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorConflictDiskUsage",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L170-L175 |
159,358 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorLocked) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorLocked
s.Name = "LOCKED"
s.Desc = e.Error()
return
} | go | func (e ServerErrorLocked) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorLocked
s.Name = "LOCKED"
s.Desc = e.Error()
return
} | [
"func",
"(",
"e",
"ServerErrorLocked",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorLocked",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Error",
"(",
")",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerErrorLocked. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorLocked",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L195-L200 |
159,359 | keybase/client | go/kbfs/kbfsmd/server_errors.go | Error | func (e ServerErrorUnauthorized) Error() string {
msg := "MDServer Unauthorized"
if e.Err != nil {
msg += ": " + e.Err.Error()
}
return msg
} | go | func (e ServerErrorUnauthorized) Error() string {
msg := "MDServer Unauthorized"
if e.Err != nil {
msg += ": " + e.Err.Error()
}
return msg
} | [
"func",
"(",
"e",
"ServerErrorUnauthorized",
")",
"Error",
"(",
")",
"string",
"{",
"msg",
":=",
"\"",
"\"",
"\n",
"if",
"e",
".",
"Err",
"!=",
"nil",
"{",
"msg",
"+=",
"\"",
"\"",
"+",
"e",
".",
"Err",
".",
"Error",
"(",
")",
"\n",
"}",
"\n",
"return",
"msg",
"\n",
"}"
] | // Error implements the Error interface for ServerErrorUnauthorized. | [
"Error",
"implements",
"the",
"Error",
"interface",
"for",
"ServerErrorUnauthorized",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L208-L214 |
159,360 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorWriteAccess) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorWriteAccess
s.Name = "WRITE_ACCESS"
s.Desc = e.Error()
return
} | go | func (e ServerErrorWriteAccess) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorWriteAccess
s.Name = "WRITE_ACCESS"
s.Desc = e.Error()
return
} | [
"func",
"(",
"e",
"ServerErrorWriteAccess",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorWriteAccess",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Error",
"(",
")",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerErrorWriteAccess. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorWriteAccess",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L234-L239 |
159,361 | keybase/client | go/kbfs/kbfsmd/server_errors.go | Error | func (e ServerErrorThrottle) Error() string {
if e.SuggestedRetryIn == nil {
return fmt.Sprintf("ServerErrorThrottle{%s}", e.Err.Error())
}
return fmt.Sprintf("ServerErrorThrottle[%s]{%s}", *e.SuggestedRetryIn, e.Err.Error())
} | go | func (e ServerErrorThrottle) Error() string {
if e.SuggestedRetryIn == nil {
return fmt.Sprintf("ServerErrorThrottle{%s}", e.Err.Error())
}
return fmt.Sprintf("ServerErrorThrottle[%s]{%s}", *e.SuggestedRetryIn, e.Err.Error())
} | [
"func",
"(",
"e",
"ServerErrorThrottle",
")",
"Error",
"(",
")",
"string",
"{",
"if",
"e",
".",
"SuggestedRetryIn",
"==",
"nil",
"{",
"return",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"e",
".",
"Err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"return",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"*",
"e",
".",
"SuggestedRetryIn",
",",
"e",
".",
"Err",
".",
"Error",
"(",
")",
")",
"\n",
"}"
] | // Error implements the Error interface for ServerErrorThrottle. | [
"Error",
"implements",
"the",
"Error",
"interface",
"for",
"ServerErrorThrottle",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L248-L253 |
159,362 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorConditionFailed) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConditionFailed
s.Name = "CONDITION_FAILED"
s.Desc = e.Err.Error()
s.Fields = []keybase1.StringKVPair{
keybase1.StringKVPair{
Key: "ShouldThrottle",
Value: strconv.FormatBool(e.ShouldThrottle),
},
}
return
} | go | func (e ServerErrorConditionFailed) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorConditionFailed
s.Name = "CONDITION_FAILED"
s.Desc = e.Err.Error()
s.Fields = []keybase1.StringKVPair{
keybase1.StringKVPair{
Key: "ShouldThrottle",
Value: strconv.FormatBool(e.ShouldThrottle),
},
}
return
} | [
"func",
"(",
"e",
"ServerErrorConditionFailed",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorConditionFailed",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Err",
".",
"Error",
"(",
")",
"\n",
"s",
".",
"Fields",
"=",
"[",
"]",
"keybase1",
".",
"StringKVPair",
"{",
"keybase1",
".",
"StringKVPair",
"{",
"Key",
":",
"\"",
"\"",
",",
"Value",
":",
"strconv",
".",
"FormatBool",
"(",
"e",
".",
"ShouldThrottle",
")",
",",
"}",
",",
"}",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for ServerErrorConditionFailed. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorConditionFailed",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L282-L293 |
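On the receiving side, ShouldThrottle survives only as a stringified bool in the exported status fields, so a caller has to parse it back out. A hedged sketch of that decoding; the field name and encoding match the ToStatus implementation above, and strconv.ParseBool is the inverse of the FormatBool call it uses.

// Illustrative: recover ShouldThrottle from a status produced by
// ServerErrorConditionFailed.ToStatus.
func shouldThrottleFromStatus(s keybase1.Status) bool {
    for _, f := range s.Fields {
        if f.Key == "ShouldThrottle" {
            if b, err := strconv.ParseBool(f.Value); err == nil {
                return b
            }
        }
    }
    return false
}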
159,363 | keybase/client | go/kbfs/kbfsmd/server_errors.go | Error | func (e ServerErrorTooManyFoldersCreated) Error() string {
return fmt.Sprintf("Too many folders created. Created: %d, limit: %d",
e.Created, e.Limit)
} | go | func (e ServerErrorTooManyFoldersCreated) Error() string {
return fmt.Sprintf("Too many folders created. Created: %d, limit: %d",
e.Created, e.Limit)
} | [
"func",
"(",
"e",
"ServerErrorTooManyFoldersCreated",
")",
"Error",
"(",
")",
"string",
"{",
"return",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"e",
".",
"Created",
",",
"e",
".",
"Limit",
")",
"\n",
"}"
] | // Error implements the Error interface for ServerErrorTooManyFoldersCreated. | [
"Error",
"implements",
"the",
"Error",
"interface",
"for",
"ServerErrorTooManyFoldersCreated",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L328-L331 |
159,364 | keybase/client | go/kbfs/kbfsmd/server_errors.go | ToStatus | func (e ServerErrorCannotReadFinalizedTLF) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorCannotReadFinalizedTLF
s.Name = "CANNOT_READ_FINALIZED_TLF"
s.Desc = e.Error()
return
} | go | func (e ServerErrorCannotReadFinalizedTLF) ToStatus() (s keybase1.Status) {
s.Code = StatusCodeServerErrorCannotReadFinalizedTLF
s.Name = "CANNOT_READ_FINALIZED_TLF"
s.Desc = e.Error()
return
} | [
"func",
"(",
"e",
"ServerErrorCannotReadFinalizedTLF",
")",
"ToStatus",
"(",
")",
"(",
"s",
"keybase1",
".",
"Status",
")",
"{",
"s",
".",
"Code",
"=",
"StatusCodeServerErrorCannotReadFinalizedTLF",
"\n",
"s",
".",
"Name",
"=",
"\"",
"\"",
"\n",
"s",
".",
"Desc",
"=",
"e",
".",
"Error",
"(",
")",
"\n",
"return",
"\n",
"}"
] | // ToStatus implements the ExportableError interface for
// ServerErrorCannotReadFinalizedTLF. | [
"ToStatus",
"implements",
"the",
"ExportableError",
"interface",
"for",
"ServerErrorCannotReadFinalizedTLF",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/server_errors.go#L357-L362 |
159,365 | keybase/client | go/engine/scankeys.go | NewScanKeys | func NewScanKeys(m libkb.MetaContext) (sk *ScanKeys, err error) {
sk = &ScanKeys{
keyOwners: make(map[uint64]*libkb.User),
MetaContextified: libkb.NewMetaContextified(m),
}
defer m.Trace("NewScanKeys", func() error { return err })()
var loggedIn bool
loggedIn, err = isLoggedInWithError(m)
if err != nil {
return nil, err
}
if !loggedIn {
return sk, nil
}
sk.me, err = libkb.LoadMe(libkb.NewLoadUserArgWithMetaContext(m))
if err != nil {
return nil, fmt.Errorf("loadme error: %s", err)
}
// if user provided, then load their local keys, and their synced secret key:
synced, err := sk.me.GetSyncedSecretKey(m)
if err != nil {
return nil, fmt.Errorf("getsyncedsecret err: %s", err)
}
ring, err := m.ActiveDevice().Keyring(m)
if err != nil {
return nil, err
}
err = sk.coalesceBlocks(m, ring, synced)
if err != nil {
return nil, err
}
return sk, nil
} | go | func NewScanKeys(m libkb.MetaContext) (sk *ScanKeys, err error) {
sk = &ScanKeys{
keyOwners: make(map[uint64]*libkb.User),
MetaContextified: libkb.NewMetaContextified(m),
}
defer m.Trace("NewScanKeys", func() error { return err })()
var loggedIn bool
loggedIn, err = isLoggedInWithError(m)
if err != nil {
return nil, err
}
if !loggedIn {
return sk, nil
}
sk.me, err = libkb.LoadMe(libkb.NewLoadUserArgWithMetaContext(m))
if err != nil {
return nil, fmt.Errorf("loadme error: %s", err)
}
// if user provided, then load their local keys, and their synced secret key:
synced, err := sk.me.GetSyncedSecretKey(m)
if err != nil {
return nil, fmt.Errorf("getsyncedsecret err: %s", err)
}
ring, err := m.ActiveDevice().Keyring(m)
if err != nil {
return nil, err
}
err = sk.coalesceBlocks(m, ring, synced)
if err != nil {
return nil, err
}
return sk, nil
} | [
"func",
"NewScanKeys",
"(",
"m",
"libkb",
".",
"MetaContext",
")",
"(",
"sk",
"*",
"ScanKeys",
",",
"err",
"error",
")",
"{",
"sk",
"=",
"&",
"ScanKeys",
"{",
"keyOwners",
":",
"make",
"(",
"map",
"[",
"uint64",
"]",
"*",
"libkb",
".",
"User",
")",
",",
"MetaContextified",
":",
"libkb",
".",
"NewMetaContextified",
"(",
"m",
")",
",",
"}",
"\n\n",
"defer",
"m",
".",
"Trace",
"(",
"\"",
"\"",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n\n",
"var",
"loggedIn",
"bool",
"\n",
"loggedIn",
",",
"err",
"=",
"isLoggedInWithError",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"!",
"loggedIn",
"{",
"return",
"sk",
",",
"nil",
"\n",
"}",
"\n\n",
"sk",
".",
"me",
",",
"err",
"=",
"libkb",
".",
"LoadMe",
"(",
"libkb",
".",
"NewLoadUserArgWithMetaContext",
"(",
"m",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"// if user provided, then load their local keys, and their synced secret key:",
"synced",
",",
"err",
":=",
"sk",
".",
"me",
".",
"GetSyncedSecretKey",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"ring",
",",
"err",
":=",
"m",
".",
"ActiveDevice",
"(",
")",
".",
"Keyring",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"err",
"=",
"sk",
".",
"coalesceBlocks",
"(",
"m",
",",
"ring",
",",
"synced",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"sk",
",",
"nil",
"\n",
"}"
] | // NewScanKeys creates a ScanKeys type. If there is a login
// session, it will load the pgp keys for that user. | [
"NewScanKeys",
"creates",
"a",
"ScanKeys",
"type",
".",
"If",
"there",
"is",
"a",
"login",
"session",
"it",
"will",
"load",
"the",
"pgp",
"keys",
"for",
"that",
"user",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/scankeys.go#L40-L77 |
159,366 | keybase/client | go/engine/scankeys.go | KeysById | func (s *ScanKeys) KeysById(id uint64, fp []byte) []openpgp.Key {
m := s.M()
primaries := s.unlockByID(m, id)
memres := primaries.KeysById(id, fp)
m.Debug("ScanKeys:KeysById(%016x) => %d keys match in memory", id, len(memres))
if len(memres) > 0 {
m.Debug("ScanKeys:KeysById(%016x) => owner == me (%s)", id, s.me.GetName())
s.Lock()
s.keyOwners[id] = s.me
s.Unlock()
return memres
}
// KeysById is only used for decryption, so getting public keys from
// API server via s.scan(id) is pointless, so just returning nil.
return nil
} | go | func (s *ScanKeys) KeysById(id uint64, fp []byte) []openpgp.Key {
m := s.M()
primaries := s.unlockByID(m, id)
memres := primaries.KeysById(id, fp)
m.Debug("ScanKeys:KeysById(%016x) => %d keys match in memory", id, len(memres))
if len(memres) > 0 {
m.Debug("ScanKeys:KeysById(%016x) => owner == me (%s)", id, s.me.GetName())
s.Lock()
s.keyOwners[id] = s.me
s.Unlock()
return memres
}
// KeysById is only used for decryption, so getting public keys from
// API server via s.scan(id) is pointless, so just returning nil.
return nil
} | [
"func",
"(",
"s",
"*",
"ScanKeys",
")",
"KeysById",
"(",
"id",
"uint64",
",",
"fp",
"[",
"]",
"byte",
")",
"[",
"]",
"openpgp",
".",
"Key",
"{",
"m",
":=",
"s",
".",
"M",
"(",
")",
"\n",
"primaries",
":=",
"s",
".",
"unlockByID",
"(",
"m",
",",
"id",
")",
"\n",
"memres",
":=",
"primaries",
".",
"KeysById",
"(",
"id",
",",
"fp",
")",
"\n",
"m",
".",
"Debug",
"(",
"\"",
"\"",
",",
"id",
",",
"len",
"(",
"memres",
")",
")",
"\n",
"if",
"len",
"(",
"memres",
")",
">",
"0",
"{",
"m",
".",
"Debug",
"(",
"\"",
"\"",
",",
"id",
",",
"s",
".",
"me",
".",
"GetName",
"(",
")",
")",
"\n",
"s",
".",
"Lock",
"(",
")",
"\n",
"s",
".",
"keyOwners",
"[",
"id",
"]",
"=",
"s",
".",
"me",
"\n",
"s",
".",
"Unlock",
"(",
")",
"\n",
"return",
"memres",
"\n",
"}",
"\n\n",
"// KeysById is only used for decryption, so getting public keys from",
"// API server via s.scan(id) is pointless, so just returning nil.",
"return",
"nil",
"\n",
"}"
] | // KeysById returns the set of keys that have the given key id.
// It is only called during decryption by openpgp. | [
"KeysById",
"returns",
"the",
"set",
"of",
"keys",
"that",
"have",
"the",
"given",
"key",
"id",
".",
"It",
"is",
"only",
"called",
"during",
"decryption",
"by",
"openpgp",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/scankeys.go#L100-L116 |
159,367 | keybase/client | go/engine/scankeys.go | DecryptionKeys | func (s *ScanKeys) DecryptionKeys() []openpgp.Key {
m := s.M()
m.Debug("ScanKeys#DecryptionKeys() => %d keys available", s.Count())
all := s.unlockAll(m)
return all.DecryptionKeys()
} | go | func (s *ScanKeys) DecryptionKeys() []openpgp.Key {
m := s.M()
m.Debug("ScanKeys#DecryptionKeys() => %d keys available", s.Count())
all := s.unlockAll(m)
return all.DecryptionKeys()
} | [
"func",
"(",
"s",
"*",
"ScanKeys",
")",
"DecryptionKeys",
"(",
")",
"[",
"]",
"openpgp",
".",
"Key",
"{",
"m",
":=",
"s",
".",
"M",
"(",
")",
"\n",
"m",
".",
"Debug",
"(",
"\"",
"\"",
",",
"s",
".",
"Count",
"(",
")",
")",
"\n",
"all",
":=",
"s",
".",
"unlockAll",
"(",
"m",
")",
"\n",
"return",
"all",
".",
"DecryptionKeys",
"(",
")",
"\n",
"}"
] | // DecryptionKeys returns all private keys that are valid for
// decryption. It is only used if there is no key id in the
// message. | [
"DecryptionKeys",
"returns",
"all",
"private",
"keys",
"that",
"are",
"valid",
"for",
"decryption",
".",
"It",
"is",
"only",
"used",
"if",
"there",
"is",
"no",
"key",
"id",
"in",
"the",
"message",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/scankeys.go#L162-L167 |
159,368 | keybase/client | go/engine/scankeys.go | KeyOwner | func (s *ScanKeys) KeyOwner(keyID uint64) *libkb.User {
s.Lock()
defer s.Unlock()
return s.keyOwners[keyID]
} | go | func (s *ScanKeys) KeyOwner(keyID uint64) *libkb.User {
s.Lock()
defer s.Unlock()
return s.keyOwners[keyID]
} | [
"func",
"(",
"s",
"*",
"ScanKeys",
")",
"KeyOwner",
"(",
"keyID",
"uint64",
")",
"*",
"libkb",
".",
"User",
"{",
"s",
".",
"Lock",
"(",
")",
"\n",
"defer",
"s",
".",
"Unlock",
"(",
")",
"\n\n",
"return",
"s",
".",
"keyOwners",
"[",
"keyID",
"]",
"\n",
"}"
] | // KeyOwner returns the owner of the keys found by ScanKeys that were
// used in KeysById or KeysByIdUsage, indexed by keyID. | [
"KeyOwner",
"returns",
"the",
"owner",
"of",
"the",
"keys",
"found",
"by",
"ScanKeys",
"that",
"were",
"used",
"in",
"KeysById",
"or",
"KeysByIdUsage",
"indexed",
"by",
"keyID",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/scankeys.go#L171-L176 |
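Together with KeysByIdUsage (not shown here), the methods above let ScanKeys stand in for an openpgp keyring during PGP decryption, with KeyOwner identifying the matching key's owner afterwards. A sketch of that intended use, assuming it runs inside the engine package with a logged-in MetaContext m and a ciphertext reader r; the ReadMessage call follows the standard golang.org/x/crypto/openpgp signature, which the vendored fork is assumed to share.

// Illustrative decryption flow using ScanKeys as the keyring.
sk, err := NewScanKeys(m)
if err != nil {
    return err
}
md, err := openpgp.ReadMessage(r, sk, nil, nil)
if err != nil {
    return err
}
plaintext, err := ioutil.ReadAll(md.UnverifiedBody)
if err != nil {
    return err
}
// After KeysById has matched a key, sk.KeyOwner(keyID) reports which
// user owned it.
_ = plaintext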
159,369 | keybase/client | go/engine/scankeys.go | coalesceBlocks | func (s *ScanKeys) coalesceBlocks(m libkb.MetaContext, ring *libkb.SKBKeyringFile, synced *libkb.SKB) (err error) {
defer m.Trace("ScanKeys#coalesceBlocks", func() error { return err })()
// We want keys in this order: first local keyring keys that are LKSec, and
// then server synced keys that are triplesec. In ScanKeys.KeysById, this
// allows us to prompt for passphrase once and get both passphrase stream
// cache and triplesec cache the moment first LKSec key is processed by
// SKB.UnlockSecretKey.
// If they were in different order and we got triplesec bundle first, we
// would prompt for passphrase to get triplesec stream, and then prompt
// again to get passphrase stream to unlock LKSec bundle, prompting twice
// in total (assuming someone has both a server-synced bundle and local
// one).
for _, b := range ring.Blocks {
if !libkb.IsPGPAlgo(b.Type) {
continue
}
// make sure uid set on each block:
b.SetUID(s.me.GetUID())
s.skbs = append(s.skbs, b)
}
if synced != nil {
s.skbs = append(s.skbs, synced)
}
return nil
} | go | func (s *ScanKeys) coalesceBlocks(m libkb.MetaContext, ring *libkb.SKBKeyringFile, synced *libkb.SKB) (err error) {
defer m.Trace("ScanKeys#coalesceBlocks", func() error { return err })()
// We want keys in this order: first local keyring keys that are LKSec, and
// then server synced keys that are triplesec. In ScanKeys.KeysById, this
// allows us to prompt for passphrase once and get both passphrase stream
// cache and triplesec cache the moment first LKSec key is processed by
// SKB.UnlockSecretKey.
// If they were in different order and we got triplesec bundle first, we
// would prompt for passphrase to get triplesec stream, and then prompt
// again to get passphrase stream to unlock LKSec bundle, prompting twice
// in total (assuming someone has both a server-synced bundle and local
// one).
for _, b := range ring.Blocks {
if !libkb.IsPGPAlgo(b.Type) {
continue
}
// make sure uid set on each block:
b.SetUID(s.me.GetUID())
s.skbs = append(s.skbs, b)
}
if synced != nil {
s.skbs = append(s.skbs, synced)
}
return nil
} | [
"func",
"(",
"s",
"*",
"ScanKeys",
")",
"coalesceBlocks",
"(",
"m",
"libkb",
".",
"MetaContext",
",",
"ring",
"*",
"libkb",
".",
"SKBKeyringFile",
",",
"synced",
"*",
"libkb",
".",
"SKB",
")",
"(",
"err",
"error",
")",
"{",
"defer",
"m",
".",
"Trace",
"(",
"\"",
"\"",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n\n",
"// We want keys in this order: first local keyring keys that are LKSec, and",
"// then server synced keys that are triplesec. In ScanKeys.KeysById, this",
"// allows us to prompt for passphrase once and get both passphrase stream",
"// cache and triplesec cache the moment first LKSec key is processed by",
"// SKB.UnlockSecretKey.",
"// If they were in different order and we got triplesec bundle first, we",
"// would prompt for passphrase to get triplesec stream, and then prompt",
"// again to get passphrase stream to unlock LKSec bundle, prompting twice",
"// in total (assuming someone has both a server-synced bundle and local",
"// one).",
"for",
"_",
",",
"b",
":=",
"range",
"ring",
".",
"Blocks",
"{",
"if",
"!",
"libkb",
".",
"IsPGPAlgo",
"(",
"b",
".",
"Type",
")",
"{",
"continue",
"\n",
"}",
"\n",
"// make sure uid set on each block:",
"b",
".",
"SetUID",
"(",
"s",
".",
"me",
".",
"GetUID",
"(",
")",
")",
"\n",
"s",
".",
"skbs",
"=",
"append",
"(",
"s",
".",
"skbs",
",",
"b",
")",
"\n",
"}",
"\n\n",
"if",
"synced",
"!=",
"nil",
"{",
"s",
".",
"skbs",
"=",
"append",
"(",
"s",
".",
"skbs",
",",
"synced",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // coalesceBlocks puts the synced pgp key block and all the pgp key
// blocks in ring into s.skbs. | [
"coalesceBlocks",
"puts",
"the",
"synced",
"pgp",
"key",
"block",
"and",
"all",
"the",
"pgp",
"key",
"blocks",
"in",
"ring",
"into",
"s",
".",
"skbs",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/scankeys.go#L198-L227 |
159,370 | keybase/client | go/engine/scankeys.go | scan | func (s *ScanKeys) scan(m libkb.MetaContext, id uint64) (openpgp.EntityList, error) {
// lookup the user on the api server by the key id.
username, uid, err := s.apiLookup(m, id)
if err != nil {
return nil, err
}
m.Debug("key id %016x => %s, %s", id, id, username, uid)
if len(username) == 0 || len(uid) == 0 {
return nil, libkb.NoKeyError{}
}
// use PGPKeyfinder engine to get the pgp keys for the user
arg := &PGPKeyfinderArg{Usernames: []string{username}}
eng := NewPGPKeyfinder(m.G(), arg)
if err := RunEngine2(m, eng); err != nil {
return nil, err
}
uplus := eng.UsersPlusKeys()
if len(uplus) != 1 {
m.Warning("error getting user plus pgp key from %s", username)
return nil, err
}
// user found is the owner of the keys
m.Debug("scan(%016x) => owner of key = (%s)", id, uplus[0].User.GetName())
s.Lock()
s.keyOwners[id] = uplus[0].User
s.Unlock()
// convert the bundles to an openpgp entity list
// (which implements the openpgp.KeyRing interface)
var list openpgp.EntityList
for _, k := range uplus[0].Keys {
list = append(list, k.Entity)
}
return list, nil
} | go | func (s *ScanKeys) scan(m libkb.MetaContext, id uint64) (openpgp.EntityList, error) {
// lookup the user on the api server by the key id.
username, uid, err := s.apiLookup(m, id)
if err != nil {
return nil, err
}
m.Debug("key id %016x => %s, %s", id, id, username, uid)
if len(username) == 0 || len(uid) == 0 {
return nil, libkb.NoKeyError{}
}
// use PGPKeyfinder engine to get the pgp keys for the user
arg := &PGPKeyfinderArg{Usernames: []string{username}}
eng := NewPGPKeyfinder(m.G(), arg)
if err := RunEngine2(m, eng); err != nil {
return nil, err
}
uplus := eng.UsersPlusKeys()
if len(uplus) != 1 {
m.Warning("error getting user plus pgp key from %s", username)
return nil, err
}
// user found is the owner of the keys
m.Debug("scan(%016x) => owner of key = (%s)", id, uplus[0].User.GetName())
s.Lock()
s.keyOwners[id] = uplus[0].User
s.Unlock()
// convert the bundles to an openpgp entity list
// (which implements the openpgp.KeyRing interface)
var list openpgp.EntityList
for _, k := range uplus[0].Keys {
list = append(list, k.Entity)
}
return list, nil
} | [
"func",
"(",
"s",
"*",
"ScanKeys",
")",
"scan",
"(",
"m",
"libkb",
".",
"MetaContext",
",",
"id",
"uint64",
")",
"(",
"openpgp",
".",
"EntityList",
",",
"error",
")",
"{",
"// lookup the user on the api server by the key id.",
"username",
",",
"uid",
",",
"err",
":=",
"s",
".",
"apiLookup",
"(",
"m",
",",
"id",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"m",
".",
"Debug",
"(",
"\"",
"\"",
",",
"id",
",",
"id",
",",
"username",
",",
"uid",
")",
"\n",
"if",
"len",
"(",
"username",
")",
"==",
"0",
"||",
"len",
"(",
"uid",
")",
"==",
"0",
"{",
"return",
"nil",
",",
"libkb",
".",
"NoKeyError",
"{",
"}",
"\n",
"}",
"\n\n",
"// use PGPKeyfinder engine to get the pgp keys for the user",
"arg",
":=",
"&",
"PGPKeyfinderArg",
"{",
"Usernames",
":",
"[",
"]",
"string",
"{",
"username",
"}",
"}",
"\n",
"eng",
":=",
"NewPGPKeyfinder",
"(",
"m",
".",
"G",
"(",
")",
",",
"arg",
")",
"\n",
"if",
"err",
":=",
"RunEngine2",
"(",
"m",
",",
"eng",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"uplus",
":=",
"eng",
".",
"UsersPlusKeys",
"(",
")",
"\n",
"if",
"len",
"(",
"uplus",
")",
"!=",
"1",
"{",
"m",
".",
"Warning",
"(",
"\"",
"\"",
",",
"username",
")",
"\n",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"// user found is the owner of the keys",
"m",
".",
"Debug",
"(",
"\"",
"\"",
",",
"id",
",",
"uplus",
"[",
"0",
"]",
".",
"User",
".",
"GetName",
"(",
")",
")",
"\n",
"s",
".",
"Lock",
"(",
")",
"\n",
"s",
".",
"keyOwners",
"[",
"id",
"]",
"=",
"uplus",
"[",
"0",
"]",
".",
"User",
"\n",
"s",
".",
"Unlock",
"(",
")",
"\n\n",
"// convert the bundles to an openpgp entity list",
"// (which implements the openpgp.KeyRing interface)",
"var",
"list",
"openpgp",
".",
"EntityList",
"\n",
"for",
"_",
",",
"k",
":=",
"range",
"uplus",
"[",
"0",
"]",
".",
"Keys",
"{",
"list",
"=",
"append",
"(",
"list",
",",
"k",
".",
"Entity",
")",
"\n",
"}",
"\n",
"return",
"list",
",",
"nil",
"\n",
"}"
] | // scan finds the user on the api server for the key id. Then it
// uses PGPKeyfinder to find the public pgp keys for the user. | [
"scan",
"finds",
"the",
"user",
"on",
"the",
"api",
"server",
"for",
"the",
"key",
"id",
".",
"Then",
"it",
"uses",
"PGPKeyfinder",
"to",
"find",
"the",
"public",
"pgp",
"keys",
"for",
"the",
"user",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/scankeys.go#L231-L266 |
159,371 | keybase/client | go/engine/scankeys.go | apiLookup | func (s *ScanKeys) apiLookup(m libkb.MetaContext, id uint64) (username string, uid keybase1.UID, err error) {
return libkb.PGPLookup(m, id)
} | go | func (s *ScanKeys) apiLookup(m libkb.MetaContext, id uint64) (username string, uid keybase1.UID, err error) {
return libkb.PGPLookup(m, id)
} | [
"func",
"(",
"s",
"*",
"ScanKeys",
")",
"apiLookup",
"(",
"m",
"libkb",
".",
"MetaContext",
",",
"id",
"uint64",
")",
"(",
"username",
"string",
",",
"uid",
"keybase1",
".",
"UID",
",",
"err",
"error",
")",
"{",
"return",
"libkb",
".",
"PGPLookup",
"(",
"m",
",",
"id",
")",
"\n",
"}"
] | // apiLookup gets the username and uid from the api server for the
// key id. | [
"apiLookup",
"gets",
"the",
"username",
"and",
"uid",
"from",
"the",
"api",
"server",
"for",
"the",
"key",
"id",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/scankeys.go#L270-L272 |
159,372 | keybase/client | go/client/cmd_device_add.go | NewCmdDeviceAdd | func NewCmdDeviceAdd(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
return cli.Command{
Name: "add",
Usage: "Authorize a new device",
Description: cmdDevAddDesc,
Action: func(c *cli.Context) {
cl.ChooseCommand(&CmdDeviceAdd{Contextified: libkb.NewContextified(g)}, "add", c)
},
}
} | go | func NewCmdDeviceAdd(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
return cli.Command{
Name: "add",
Usage: "Authorize a new device",
Description: cmdDevAddDesc,
Action: func(c *cli.Context) {
cl.ChooseCommand(&CmdDeviceAdd{Contextified: libkb.NewContextified(g)}, "add", c)
},
}
} | [
"func",
"NewCmdDeviceAdd",
"(",
"cl",
"*",
"libcmdline",
".",
"CommandLine",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
")",
"cli",
".",
"Command",
"{",
"return",
"cli",
".",
"Command",
"{",
"Name",
":",
"\"",
"\"",
",",
"Usage",
":",
"\"",
"\"",
",",
"Description",
":",
"cmdDevAddDesc",
",",
"Action",
":",
"func",
"(",
"c",
"*",
"cli",
".",
"Context",
")",
"{",
"cl",
".",
"ChooseCommand",
"(",
"&",
"CmdDeviceAdd",
"{",
"Contextified",
":",
"libkb",
".",
"NewContextified",
"(",
"g",
")",
"}",
",",
"\"",
"\"",
",",
"c",
")",
"\n",
"}",
",",
"}",
"\n",
"}"
] | // NewCmdDeviceAdd creates a new cli.Command. | [
"NewCmdDeviceAdd",
"creates",
"a",
"new",
"cli",
".",
"Command",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/cmd_device_add.go#L28-L37 |
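Client subcommands generally follow the same constructor shape as NewCmdDeviceAdd: return a cli.Command whose Action hands a Contextified command struct to ChooseCommand. A hypothetical variant purely for illustration; CmdDeviceDemo and the "demo" name do not exist in the repo.

// Hypothetical subcommand registration mirroring NewCmdDeviceAdd.
func NewCmdDeviceDemo(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
    return cli.Command{
        Name:  "demo",
        Usage: "Demonstrate the device subcommand pattern",
        Action: func(c *cli.Context) {
            cl.ChooseCommand(&CmdDeviceDemo{Contextified: libkb.NewContextified(g)}, "demo", c)
        },
    }
}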
159,373 | keybase/client | go/service/handler.go | GetPassphrase | func (u *SecretUI) GetPassphrase(pinentry keybase1.GUIEntryArg, terminal *keybase1.SecretEntryArg) (keybase1.GetPassphraseRes, error) {
u.G().Log.Debug("SecretUI:GetPassphrase, sessionID = %d", u.sessionID)
return u.cli.GetPassphrase(context.TODO(), keybase1.GetPassphraseArg{SessionID: u.sessionID, Pinentry: pinentry, Terminal: terminal})
} | go | func (u *SecretUI) GetPassphrase(pinentry keybase1.GUIEntryArg, terminal *keybase1.SecretEntryArg) (keybase1.GetPassphraseRes, error) {
u.G().Log.Debug("SecretUI:GetPassphrase, sessionID = %d", u.sessionID)
return u.cli.GetPassphrase(context.TODO(), keybase1.GetPassphraseArg{SessionID: u.sessionID, Pinentry: pinentry, Terminal: terminal})
} | [
"func",
"(",
"u",
"*",
"SecretUI",
")",
"GetPassphrase",
"(",
"pinentry",
"keybase1",
".",
"GUIEntryArg",
",",
"terminal",
"*",
"keybase1",
".",
"SecretEntryArg",
")",
"(",
"keybase1",
".",
"GetPassphraseRes",
",",
"error",
")",
"{",
"u",
".",
"G",
"(",
")",
".",
"Log",
".",
"Debug",
"(",
"\"",
"\"",
",",
"u",
".",
"sessionID",
")",
"\n",
"return",
"u",
".",
"cli",
".",
"GetPassphrase",
"(",
"context",
".",
"TODO",
"(",
")",
",",
"keybase1",
".",
"GetPassphraseArg",
"{",
"SessionID",
":",
"u",
".",
"sessionID",
",",
"Pinentry",
":",
"pinentry",
",",
"Terminal",
":",
"terminal",
"}",
")",
"\n",
"}"
] | // GetPassphrase gets the current keybase passphrase from delegated pinentry. | [
"GetPassphrase",
"gets",
"the",
"current",
"keybase",
"passphrase",
"from",
"delegated",
"pinentry",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/service/handler.go#L72-L75 |
159,374 | keybase/client | go/service/handler.go | DelegateRekeyUI | func (r *RekeyUI) DelegateRekeyUI(ctx context.Context) (int, error) {
r.G().Log.Warning("service RekeyUI.DelegateRekeyUI() called to get session id after RekeyUI object created")
return r.cli.DelegateRekeyUI(ctx)
} | go | func (r *RekeyUI) DelegateRekeyUI(ctx context.Context) (int, error) {
r.G().Log.Warning("service RekeyUI.DelegateRekeyUI() called to get session id after RekeyUI object created")
return r.cli.DelegateRekeyUI(ctx)
} | [
"func",
"(",
"r",
"*",
"RekeyUI",
")",
"DelegateRekeyUI",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"int",
",",
"error",
")",
"{",
"r",
".",
"G",
"(",
")",
".",
"Log",
".",
"Warning",
"(",
"\"",
"\"",
")",
"\n",
"return",
"r",
".",
"cli",
".",
"DelegateRekeyUI",
"(",
"ctx",
")",
"\n",
"}"
] | // DelegateRekeyUI shouldn't be called on this object since it
// should already have a sessionID. | [
"DelegateRekeyUI",
"shouldn",
"t",
"be",
"called",
"on",
"this",
"object",
"since",
"it",
"should",
"already",
"have",
"a",
"sessionID",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/service/handler.go#L162-L165 |
159,375 | keybase/client | go/kbfs/libkbfs/block_journal.go | getSingleContext | func (e blockJournalEntry) getSingleContext() (
kbfsblock.ID, kbfsblock.Context, error) {
switch e.Op {
case blockPutOp, addRefOp:
if len(e.Contexts) != 1 {
return kbfsblock.ID{}, kbfsblock.Context{}, errors.Errorf(
"Op %s doesn't have exactly one context: %v",
e.Op, e.Contexts)
}
for id, idContexts := range e.Contexts {
if len(idContexts) != 1 {
return kbfsblock.ID{}, kbfsblock.Context{}, errors.Errorf(
"Op %s doesn't have exactly one context for id=%s: %v",
e.Op, id, idContexts)
}
return id, idContexts[0], nil
}
}
return kbfsblock.ID{}, kbfsblock.Context{}, errors.Errorf(
"getSingleContext() erroneously called on op %s", e.Op)
} | go | func (e blockJournalEntry) getSingleContext() (
kbfsblock.ID, kbfsblock.Context, error) {
switch e.Op {
case blockPutOp, addRefOp:
if len(e.Contexts) != 1 {
return kbfsblock.ID{}, kbfsblock.Context{}, errors.Errorf(
"Op %s doesn't have exactly one context: %v",
e.Op, e.Contexts)
}
for id, idContexts := range e.Contexts {
if len(idContexts) != 1 {
return kbfsblock.ID{}, kbfsblock.Context{}, errors.Errorf(
"Op %s doesn't have exactly one context for id=%s: %v",
e.Op, id, idContexts)
}
return id, idContexts[0], nil
}
}
return kbfsblock.ID{}, kbfsblock.Context{}, errors.Errorf(
"getSingleContext() erroneously called on op %s", e.Op)
} | [
"func",
"(",
"e",
"blockJournalEntry",
")",
"getSingleContext",
"(",
")",
"(",
"kbfsblock",
".",
"ID",
",",
"kbfsblock",
".",
"Context",
",",
"error",
")",
"{",
"switch",
"e",
".",
"Op",
"{",
"case",
"blockPutOp",
",",
"addRefOp",
":",
"if",
"len",
"(",
"e",
".",
"Contexts",
")",
"!=",
"1",
"{",
"return",
"kbfsblock",
".",
"ID",
"{",
"}",
",",
"kbfsblock",
".",
"Context",
"{",
"}",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"e",
".",
"Op",
",",
"e",
".",
"Contexts",
")",
"\n",
"}",
"\n",
"for",
"id",
",",
"idContexts",
":=",
"range",
"e",
".",
"Contexts",
"{",
"if",
"len",
"(",
"idContexts",
")",
"!=",
"1",
"{",
"return",
"kbfsblock",
".",
"ID",
"{",
"}",
",",
"kbfsblock",
".",
"Context",
"{",
"}",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"e",
".",
"Op",
",",
"id",
",",
"idContexts",
")",
"\n",
"}",
"\n",
"return",
"id",
",",
"idContexts",
"[",
"0",
"]",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"kbfsblock",
".",
"ID",
"{",
"}",
",",
"kbfsblock",
".",
"Context",
"{",
"}",
",",
"errors",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"e",
".",
"Op",
")",
"\n",
"}"
] | // Get the single context stored in this entry. Only applicable to
// blockPutOp and addRefOp. | [
"Get",
"the",
"single",
"context",
"stored",
"in",
"this",
"entry",
".",
"Only",
"applicable",
"to",
"blockPutOp",
"and",
"addRefOp",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L143-L164 |
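A small sketch of the invariant getSingleContext enforces, assuming it runs inside the libkbfs package with a block ID and context already in hand: blockPutOp and addRefOp entries are expected to carry exactly one context for exactly one ID, and anything else is an error.

// Illustrative: a single-context addRefOp entry round-trips through
// getSingleContext; id and bctx are placeholders.
entry := blockJournalEntry{
    Op:       addRefOp,
    Contexts: kbfsblock.ContextMap{id: {bctx}},
}
gotID, gotCtx, err := entry.getSingleContext()
if err != nil {
    return err
}
_, _ = gotID, gotCtx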
159,376 | keybase/client | go/kbfs/libkbfs/block_journal.go | makeBlockJournal | func makeBlockJournal(
ctx context.Context, codec kbfscodec.Codec, dir string,
log logger.Logger, vlog *libkb.VDebugLog) (*blockJournal, error) {
journalPath := blockJournalDir(dir)
deferLog := log.CloneWithAddedDepth(1)
j, err := makeDiskJournal(
codec, journalPath, reflect.TypeOf(blockJournalEntry{}))
if err != nil {
return nil, err
}
gcJournalPath := deferredGCBlockJournalDir(dir)
gcj, err := makeDiskJournal(
codec, gcJournalPath, reflect.TypeOf(blockJournalEntry{}))
if err != nil {
return nil, err
}
storeDir := blockJournalStoreDir(dir)
s := makeBlockDiskStore(codec, storeDir)
journal := &blockJournal{
codec: codec,
dir: dir,
log: traceLogger{log},
vlog: vlog,
deferLog: traceLogger{deferLog},
j: j,
deferredGC: gcj,
s: s,
}
// Get initial aggregate info.
err = kbfscodec.DeserializeFromFile(
codec, aggregateInfoPath(dir), &journal.aggregateInfo)
if !ioutil.IsNotExist(err) && err != nil {
return nil, err
}
return journal, nil
} | go | func makeBlockJournal(
ctx context.Context, codec kbfscodec.Codec, dir string,
log logger.Logger, vlog *libkb.VDebugLog) (*blockJournal, error) {
journalPath := blockJournalDir(dir)
deferLog := log.CloneWithAddedDepth(1)
j, err := makeDiskJournal(
codec, journalPath, reflect.TypeOf(blockJournalEntry{}))
if err != nil {
return nil, err
}
gcJournalPath := deferredGCBlockJournalDir(dir)
gcj, err := makeDiskJournal(
codec, gcJournalPath, reflect.TypeOf(blockJournalEntry{}))
if err != nil {
return nil, err
}
storeDir := blockJournalStoreDir(dir)
s := makeBlockDiskStore(codec, storeDir)
journal := &blockJournal{
codec: codec,
dir: dir,
log: traceLogger{log},
vlog: vlog,
deferLog: traceLogger{deferLog},
j: j,
deferredGC: gcj,
s: s,
}
// Get initial aggregate info.
err = kbfscodec.DeserializeFromFile(
codec, aggregateInfoPath(dir), &journal.aggregateInfo)
if !ioutil.IsNotExist(err) && err != nil {
return nil, err
}
return journal, nil
} | [
"func",
"makeBlockJournal",
"(",
"ctx",
"context",
".",
"Context",
",",
"codec",
"kbfscodec",
".",
"Codec",
",",
"dir",
"string",
",",
"log",
"logger",
".",
"Logger",
",",
"vlog",
"*",
"libkb",
".",
"VDebugLog",
")",
"(",
"*",
"blockJournal",
",",
"error",
")",
"{",
"journalPath",
":=",
"blockJournalDir",
"(",
"dir",
")",
"\n",
"deferLog",
":=",
"log",
".",
"CloneWithAddedDepth",
"(",
"1",
")",
"\n",
"j",
",",
"err",
":=",
"makeDiskJournal",
"(",
"codec",
",",
"journalPath",
",",
"reflect",
".",
"TypeOf",
"(",
"blockJournalEntry",
"{",
"}",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"gcJournalPath",
":=",
"deferredGCBlockJournalDir",
"(",
"dir",
")",
"\n",
"gcj",
",",
"err",
":=",
"makeDiskJournal",
"(",
"codec",
",",
"gcJournalPath",
",",
"reflect",
".",
"TypeOf",
"(",
"blockJournalEntry",
"{",
"}",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"storeDir",
":=",
"blockJournalStoreDir",
"(",
"dir",
")",
"\n",
"s",
":=",
"makeBlockDiskStore",
"(",
"codec",
",",
"storeDir",
")",
"\n",
"journal",
":=",
"&",
"blockJournal",
"{",
"codec",
":",
"codec",
",",
"dir",
":",
"dir",
",",
"log",
":",
"traceLogger",
"{",
"log",
"}",
",",
"vlog",
":",
"vlog",
",",
"deferLog",
":",
"traceLogger",
"{",
"deferLog",
"}",
",",
"j",
":",
"j",
",",
"deferredGC",
":",
"gcj",
",",
"s",
":",
"s",
",",
"}",
"\n\n",
"// Get initial aggregate info.",
"err",
"=",
"kbfscodec",
".",
"DeserializeFromFile",
"(",
"codec",
",",
"aggregateInfoPath",
"(",
"dir",
")",
",",
"&",
"journal",
".",
"aggregateInfo",
")",
"\n",
"if",
"!",
"ioutil",
".",
"IsNotExist",
"(",
"err",
")",
"&&",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"journal",
",",
"nil",
"\n",
"}"
] | // makeBlockJournal returns a new blockJournal for the given
// directory. Any existing journal entries are read. | [
"makeBlockJournal",
"returns",
"a",
"new",
"blockJournal",
"for",
"the",
"given",
"directory",
".",
"Any",
"existing",
"journal",
"entries",
"are",
"read",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L180-L219 |
159,377 | keybase/client | go/kbfs/libkbfs/block_journal.go | putBlockData | func (j *blockJournal) putBlockData(
ctx context.Context, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) (
putData bool, err error) {
j.vlog.CLogf(
ctx, libkb.VLog1,
"Putting %d bytes of data for block %s with context %v",
len(buf), id, context)
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Put for block %s with context %v failed with %+v",
id, context, err)
}
}()
putData, err = j.s.put(ctx, true, id, context, buf, serverHalf)
if err != nil {
return false, err
}
return putData, nil
} | go | func (j *blockJournal) putBlockData(
ctx context.Context, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) (
putData bool, err error) {
j.vlog.CLogf(
ctx, libkb.VLog1,
"Putting %d bytes of data for block %s with context %v",
len(buf), id, context)
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Put for block %s with context %v failed with %+v",
id, context, err)
}
}()
putData, err = j.s.put(ctx, true, id, context, buf, serverHalf)
if err != nil {
return false, err
}
return putData, nil
} | [
"func",
"(",
"j",
"*",
"blockJournal",
")",
"putBlockData",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"kbfsblock",
".",
"ID",
",",
"context",
"kbfsblock",
".",
"Context",
",",
"buf",
"[",
"]",
"byte",
",",
"serverHalf",
"kbfscrypto",
".",
"BlockCryptKeyServerHalf",
")",
"(",
"putData",
"bool",
",",
"err",
"error",
")",
"{",
"j",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"len",
"(",
"buf",
")",
",",
"id",
",",
"context",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"if",
"err",
"!=",
"nil",
"{",
"j",
".",
"deferLog",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"id",
",",
"context",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"putData",
",",
"err",
"=",
"j",
".",
"s",
".",
"put",
"(",
"ctx",
",",
"true",
",",
"id",
",",
"context",
",",
"buf",
",",
"serverHalf",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"false",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"putData",
",",
"nil",
"\n",
"}"
] | // putBlockData puts the given block data. If err is non-nil, putData will
// always be false. | [
"putBlockData",
"puts",
"the",
"given",
"block",
"data",
".",
"If",
"err",
"is",
"non",
"-",
"nil",
"putData",
"will",
"always",
"be",
"false",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L419-L441 |
159,378 | keybase/client | go/kbfs/libkbfs/block_journal.go | appendBlock | func (j *blockJournal) appendBlock(
ctx context.Context, id kbfsblock.ID, context kbfsblock.Context,
bufLenToAdd int64) error {
j.vlog.CLogf(ctx, libkb.VLog1, "Appending block %s to journal", id)
if bufLenToAdd > 0 {
var putFiles int64 = filesPerBlockMax
err := j.accumulateBlock(bufLenToAdd, putFiles)
if err != nil {
return err
}
}
next, err := j.next()
if err != nil {
return err
}
err = j.s.addReference(ctx, id, context, next.String())
if err != nil {
return err
}
_, err = j.appendJournalEntry(ctx, blockJournalEntry{
Op: blockPutOp,
Contexts: kbfsblock.ContextMap{id: {context}},
})
return err
} | go | func (j *blockJournal) appendBlock(
ctx context.Context, id kbfsblock.ID, context kbfsblock.Context,
bufLenToAdd int64) error {
j.vlog.CLogf(ctx, libkb.VLog1, "Appending block %s to journal", id)
if bufLenToAdd > 0 {
var putFiles int64 = filesPerBlockMax
err := j.accumulateBlock(bufLenToAdd, putFiles)
if err != nil {
return err
}
}
next, err := j.next()
if err != nil {
return err
}
err = j.s.addReference(ctx, id, context, next.String())
if err != nil {
return err
}
_, err = j.appendJournalEntry(ctx, blockJournalEntry{
Op: blockPutOp,
Contexts: kbfsblock.ContextMap{id: {context}},
})
return err
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // appendBlock appends an entry for the previously-put block to the
// journal, and records the size for the put block. | [ "appendBlock", "appends", "an", "entry", "for", "the", "previously", "-", "put", "block", "to", "the", "journal", "and", "records", "the", "size", "for", "the", "put", "block", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L445-L473 |
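A minimal sketch, not one of the dataset rows, of how the two blockJournal methods above might be combined inside the same package, assuming an existing *blockJournal j and already-encrypted block contents; the helper name putAndAppend and the choice to count bytes only when new data was stored are illustrative assumptions.
func putAndAppend(ctx context.Context, j *blockJournal, id kbfsblock.ID,
	bctx kbfsblock.Context, buf []byte,
	serverHalf kbfscrypto.BlockCryptKeyServerHalf) error {
	// Write the block bytes into the journal's block store first.
	putData, err := j.putBlockData(ctx, id, bctx, buf, serverHalf)
	if err != nil {
		return err
	}
	// Then record the put in the journal; only charge the byte count
	// when the store actually accepted new data.
	var bufLen int64
	if putData {
		bufLen = int64(len(buf))
	}
	return j.appendBlock(ctx, id, bctx, bufLen)
}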
159,379 | keybase/client | go/kbfs/libkbfs/block_journal.go | removeReferences | func (j *blockJournal) removeReferences(
ctx context.Context, contexts kbfsblock.ContextMap) (
liveCounts map[kbfsblock.ID]int, err error) {
j.vlog.CLogf(ctx, libkb.VLog1, "Removing references for %v", contexts)
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Removing references for %+v", contexts, err)
}
}()
// Add the journal entry first, so that if we crash before
// removing the refs, we have at worst un-GCed blocks.
_, err = j.appendJournalEntry(ctx, blockJournalEntry{
Op: removeRefsOp,
Contexts: contexts,
})
if err != nil {
return nil, err
}
liveCounts = make(map[kbfsblock.ID]int)
for id, idContexts := range contexts {
// Remove the references unconditionally here (i.e.,
// with an empty tag), since j.s should reflect the
// most recent state.
liveCount, err := j.s.removeReferences(ctx, id, idContexts, "")
if err != nil {
return nil, err
}
liveCounts[id] = liveCount
}
return liveCounts, nil
} | go | func (j *blockJournal) removeReferences(
ctx context.Context, contexts kbfsblock.ContextMap) (
liveCounts map[kbfsblock.ID]int, err error) {
j.vlog.CLogf(ctx, libkb.VLog1, "Removing references for %v", contexts)
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Removing references for %+v", contexts, err)
}
}()
// Add the journal entry first, so that if we crash before
// removing the refs, we have at worst un-GCed blocks.
_, err = j.appendJournalEntry(ctx, blockJournalEntry{
Op: removeRefsOp,
Contexts: contexts,
})
if err != nil {
return nil, err
}
liveCounts = make(map[kbfsblock.ID]int)
for id, idContexts := range contexts {
// Remove the references unconditionally here (i.e.,
// with an empty tag), since j.s should reflect the
// most recent state.
liveCount, err := j.s.removeReferences(ctx, id, idContexts, "")
if err != nil {
return nil, err
}
liveCounts[id] = liveCount
}
return liveCounts, nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // removeReferences removes references for the given contexts from
// their respective IDs. | [ "removeReferences", "removes", "references", "for", "the", "given", "contexts", "from", "their", "respective", "IDs", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L543-L579 |
159,380 | keybase/client | go/kbfs/libkbfs/block_journal.go | flushNonBPSBlockJournalEntry | func flushNonBPSBlockJournalEntry(
ctx context.Context, log logger.Logger,
bserver BlockServer, tlfID tlf.ID, entry blockJournalEntry) error {
log.CDebugf(ctx, "Flushing other block op %v", entry)
switch entry.Op {
case removeRefsOp:
_, err := bserver.RemoveBlockReferences(
ctx, tlfID, entry.Contexts)
if err != nil {
return err
}
case archiveRefsOp:
err := bserver.ArchiveBlockReferences(
ctx, tlfID, entry.Contexts)
if err != nil {
return err
}
case blockPutOp:
if !entry.Ignore {
return errors.New("Trying to flush unignored blockPut as other")
}
// Otherwise nothing to do.
case mdRevMarkerOp:
// Nothing to do.
default:
return errors.Errorf("Unknown op %s", entry.Op)
}
return nil
} | go | func flushNonBPSBlockJournalEntry(
ctx context.Context, log logger.Logger,
bserver BlockServer, tlfID tlf.ID, entry blockJournalEntry) error {
log.CDebugf(ctx, "Flushing other block op %v", entry)
switch entry.Op {
case removeRefsOp:
_, err := bserver.RemoveBlockReferences(
ctx, tlfID, entry.Contexts)
if err != nil {
return err
}
case archiveRefsOp:
err := bserver.ArchiveBlockReferences(
ctx, tlfID, entry.Contexts)
if err != nil {
return err
}
case blockPutOp:
if !entry.Ignore {
return errors.New("Trying to flush unignored blockPut as other")
}
// Otherwise nothing to do.
case mdRevMarkerOp:
// Nothing to do.
default:
return errors.Errorf("Unknown op %s", entry.Op)
}
return nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // flushNonBPSBlockJournalEntry flushes journal entries that can't be
// parallelized via a blockPutState. | [ "flushNonBPSBlockJournalEntry", "flushes", "journal", "entries", "that", "can", "t", "be", "parallelized", "via", "a", "blockPutState", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L775-L809 |
159,381 | keybase/client | go/kbfs/libkbfs/block_journal.go | getDeferredGCRange | func (j *blockJournal) getDeferredGCRange() (
len int, earliest, latest journalOrdinal, err error) {
earliest, err = j.deferredGC.readEarliestOrdinal()
if ioutil.IsNotExist(err) {
return 0, 0, 0, nil
} else if err != nil {
return 0, 0, 0, err
}
latest, err = j.deferredGC.readLatestOrdinal()
if ioutil.IsNotExist(err) {
return 0, 0, 0, nil
} else if err != nil {
return 0, 0, 0, err
}
return int(latest - earliest + 1), earliest, latest, nil
} | go | func (j *blockJournal) getDeferredGCRange() (
len int, earliest, latest journalOrdinal, err error) {
earliest, err = j.deferredGC.readEarliestOrdinal()
if ioutil.IsNotExist(err) {
return 0, 0, 0, nil
} else if err != nil {
return 0, 0, 0, err
}
latest, err = j.deferredGC.readLatestOrdinal()
if ioutil.IsNotExist(err) {
return 0, 0, 0, nil
} else if err != nil {
return 0, 0, 0, err
}
return int(latest - earliest + 1), earliest, latest, nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // getDeferredRange gets the earliest and latest revision of the
// deferred GC journal. If the returned length is 0, there's no need
// for further GC. | [ "getDeferredRange", "gets", "the", "earliest", "and", "latest", "revision", "of", "the", "deferred", "GC", "journal", ".", "If", "the", "returned", "length", "is", "0", "there", "s", "no", "need", "for", "further", "GC", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L1062-L1079 |
159,382 | keybase/client | go/kbfs/libkbfs/block_journal.go | clearDeferredGCRange | func (j *blockJournal) clearDeferredGCRange(
ctx context.Context, removedBytes, removedFiles int64,
earliest, latest journalOrdinal) (
clearedJournal bool, aggregateInfo blockAggregateInfo,
err error) {
for i := earliest; i <= latest; i++ {
_, err := j.deferredGC.removeEarliest()
if err != nil {
return false, blockAggregateInfo{}, err
}
}
// If we crash before calling this, the journal bytes/files
// counts will be inaccurate. But this will be resolved when
// the journal goes empty in the clause above.
j.unstoreBlocks(removedBytes, removedFiles)
aggregateInfo = j.aggregateInfo
if j.empty() {
j.log.CDebugf(ctx, "Block journal is now empty")
j.aggregateInfo = blockAggregateInfo{}
err = j.s.clear()
if err != nil {
return false, blockAggregateInfo{}, err
}
for _, dir := range j.blockJournalFiles() {
j.log.CDebugf(ctx, "Removing all files in %s", dir)
err := ioutil.RemoveAll(dir)
if err != nil {
return false, blockAggregateInfo{}, err
}
}
clearedJournal = true
}
return clearedJournal, aggregateInfo, nil
} | go | func (j *blockJournal) clearDeferredGCRange(
ctx context.Context, removedBytes, removedFiles int64,
earliest, latest journalOrdinal) (
clearedJournal bool, aggregateInfo blockAggregateInfo,
err error) {
for i := earliest; i <= latest; i++ {
_, err := j.deferredGC.removeEarliest()
if err != nil {
return false, blockAggregateInfo{}, err
}
}
// If we crash before calling this, the journal bytes/files
// counts will be inaccurate. But this will be resolved when
// the journal goes empty in the clause above.
j.unstoreBlocks(removedBytes, removedFiles)
aggregateInfo = j.aggregateInfo
if j.empty() {
j.log.CDebugf(ctx, "Block journal is now empty")
j.aggregateInfo = blockAggregateInfo{}
err = j.s.clear()
if err != nil {
return false, blockAggregateInfo{}, err
}
for _, dir := range j.blockJournalFiles() {
j.log.CDebugf(ctx, "Removing all files in %s", dir)
err := ioutil.RemoveAll(dir)
if err != nil {
return false, blockAggregateInfo{}, err
}
}
clearedJournal = true
}
return clearedJournal, aggregateInfo, nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // clearDeferredGCRange removes the given range from the deferred
// journal. If the journal goes completely empty, it then nukes the
// journal directories. | [ "clearDeferredGCRange", "removes", "the", "given", "range", "from", "the", "deferred", "journal", ".", "If", "the", "journal", "goes", "completely", "empty", "it", "then", "nukes", "the", "journal", "directories", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/block_journal.go#L1143-L1184 |
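An illustrative sketch, not a dataset row, of a deferred-GC pass that chains the two methods above, assuming an existing *blockJournal j in the same package; removedBytes and removedFiles would come from the actual on-disk block removals, which are elided here.
func gcDeferredBlocks(ctx context.Context, j *blockJournal,
	removedBytes, removedFiles int64) error {
	// See how many journal entries are waiting for deferred GC.
	length, earliest, latest, err := j.getDeferredGCRange()
	if err != nil {
		return err
	}
	if length == 0 {
		// Nothing deferred, so no GC is needed.
		return nil
	}
	// Drop the processed range; if the journal is now empty this also
	// clears its on-disk state.
	_, _, err = j.clearDeferredGCRange(
		ctx, removedBytes, removedFiles, earliest, latest)
	return err
}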
159,383 | keybase/client | go/protocol/keybase1/device.go | DeviceList | func (c DeviceClient) DeviceList(ctx context.Context, sessionID int) (res []Device, err error) {
__arg := DeviceListArg{SessionID: sessionID}
err = c.Cli.Call(ctx, "keybase.1.device.deviceList", []interface{}{__arg}, &res)
return
} | go | func (c DeviceClient) DeviceList(ctx context.Context, sessionID int) (res []Device, err error) {
__arg := DeviceListArg{SessionID: sessionID}
err = c.Cli.Call(ctx, "keybase.1.device.deviceList", []interface{}{__arg}, &res)
return
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // List devices for the user. | [ "List", "devices", "for", "the", "user", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/device.go#L204-L208 |
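A hedged usage sketch, not a dataset row, for the generated DeviceClient wrapper above, assuming an already-connected rpc.GenericClient obtained from the local Keybase service; the session ID of 0 is only a placeholder.
func listDevices(ctx context.Context, cli rpc.GenericClient) ([]keybase1.Device, error) {
	dc := keybase1.DeviceClient{Cli: cli}
	// Calls keybase.1.device.deviceList over the framed-msgpack RPC
	// connection and returns the user's devices.
	return dc.DeviceList(ctx, 0)
}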
159,384 | keybase/client | go/protocol/keybase1/device.go | DeviceHistoryList | func (c DeviceClient) DeviceHistoryList(ctx context.Context, sessionID int) (res []DeviceDetail, err error) {
__arg := DeviceHistoryListArg{SessionID: sessionID}
err = c.Cli.Call(ctx, "keybase.1.device.deviceHistoryList", []interface{}{__arg}, &res)
return
} | go | func (c DeviceClient) DeviceHistoryList(ctx context.Context, sessionID int) (res []DeviceDetail, err error) {
__arg := DeviceHistoryListArg{SessionID: sessionID}
err = c.Cli.Call(ctx, "keybase.1.device.deviceHistoryList", []interface{}{__arg}, &res)
return
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // List all devices with detailed history and status information. | [ "List", "all", "devices", "with", "detailed", "history", "and", "status", "information", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/device.go#L211-L215 |
159,385 | keybase/client | go/protocol/keybase1/device.go | DeviceAdd | func (c DeviceClient) DeviceAdd(ctx context.Context, sessionID int) (err error) {
__arg := DeviceAddArg{SessionID: sessionID}
err = c.Cli.Call(ctx, "keybase.1.device.deviceAdd", []interface{}{__arg}, nil)
return
} | go | func (c DeviceClient) DeviceAdd(ctx context.Context, sessionID int) (err error) {
__arg := DeviceAddArg{SessionID: sessionID}
err = c.Cli.Call(ctx, "keybase.1.device.deviceAdd", []interface{}{__arg}, nil)
return
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Starts the process of adding a new device using an existing
// device. It is called on the existing device.
// This is for kex2. | [ "Starts", "the", "process", "of", "adding", "a", "new", "device", "using", "an", "existing", "device", ".", "It", "is", "called", "on", "the", "existing", "device", ".", "This", "is", "for", "kex2", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/device.go#L220-L224 |
159,386 | keybase/client | go/protocol/keybase1/device.go | CheckDeviceNameFormat | func (c DeviceClient) CheckDeviceNameFormat(ctx context.Context, __arg CheckDeviceNameFormatArg) (res bool, err error) {
err = c.Cli.Call(ctx, "keybase.1.device.checkDeviceNameFormat", []interface{}{__arg}, &res)
return
} | go | func (c DeviceClient) CheckDeviceNameFormat(ctx context.Context, __arg CheckDeviceNameFormatArg) (res bool, err error) {
err = c.Cli.Call(ctx, "keybase.1.device.checkDeviceNameFormat", []interface{}{__arg}, &res)
return
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Checks the device name format. | [ "Checks", "the", "device", "name", "format", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/device.go#L227-L230 |
159,387 | keybase/client | go/protocol/keybase1/device.go | DismissDeviceChangeNotifications | func (c DeviceClient) DismissDeviceChangeNotifications(ctx context.Context) (err error) {
err = c.Cli.Call(ctx, "keybase.1.device.dismissDeviceChangeNotifications", []interface{}{DismissDeviceChangeNotificationsArg{}}, nil)
return
} | go | func (c DeviceClient) DismissDeviceChangeNotifications(ctx context.Context) (err error) {
err = c.Cli.Call(ctx, "keybase.1.device.dismissDeviceChangeNotifications", []interface{}{DismissDeviceChangeNotificationsArg{}}, nil)
return
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Dismisses the notifications for a new or revoked device
// assuming this is not that device. | [ "Dismisses", "the", "notifications", "for", "a", "new", "or", "revoked", "device", "assuming", "this", "is", "not", "that", "device", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/device.go#L234-L237 |
159,388 | keybase/client | go/protocol/keybase1/device.go | CheckDeviceNameForUser | func (c DeviceClient) CheckDeviceNameForUser(ctx context.Context, __arg CheckDeviceNameForUserArg) (err error) {
err = c.Cli.Call(ctx, "keybase.1.device.checkDeviceNameForUser", []interface{}{__arg}, nil)
return
} | go | func (c DeviceClient) CheckDeviceNameForUser(ctx context.Context, __arg CheckDeviceNameForUserArg) (err error) {
err = c.Cli.Call(ctx, "keybase.1.device.checkDeviceNameForUser", []interface{}{__arg}, nil)
return
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Checks a given device against all of user's past devices,
// including those that predate a reset. It will also check a device name
// for proper formatting. Return null error on success, and a non-null
// error otherwise. | [ "Checks", "a", "given", "device", "against", "all", "of", "user", "s", "past", "devices", "including", "those", "that", "predate", "a", "reset", ".", "It", "will", "also", "check", "a", "device", "name", "for", "proper", "formatting", ".", "Return", "null", "error", "on", "success", "and", "a", "non", "-", "null", "error", "otherwise", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/device.go#L243-L246 |
159,389 | keybase/client | go/kbfs/libkbfs/readonly_node.go | Readonly | func (rn ReadonlyNode) Readonly(ctx context.Context) bool {
return ctx.Value(CtxReadWriteKey) == nil
} | go | func (rn ReadonlyNode) Readonly(ctx context.Context) bool {
return ctx.Value(CtxReadWriteKey) == nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Readonly implements the Node interface for ReadonlyNode. | [ "Readonly", "implements", "the", "Node", "interface", "for", "ReadonlyNode", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/readonly_node.go#L30-L32 |
159,390 | keybase/client | go/kbfs/libkbfs/readonly_node.go | WrapChild | func (rn ReadonlyNode) WrapChild(child Node) Node {
return &ReadonlyNode{rn.Node.WrapChild(child)}
} | go | func (rn ReadonlyNode) WrapChild(child Node) Node {
return &ReadonlyNode{rn.Node.WrapChild(child)}
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // WrapChild implements the Node interface for ReadonlyNode. | [ "WrapChild", "implements", "the", "Node", "interface", "for", "ReadonlyNode", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/readonly_node.go#L35-L37 |
159,391 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | NewDiskBlockCacheRemote | func NewDiskBlockCacheRemote(kbCtx Context, config diskBlockCacheRemoteConfig) (
*DiskBlockCacheRemote, error) {
conn, xp, _, err := kbCtx.GetKBFSSocket(true)
if err != nil {
return nil, err
}
cli := rpc.NewClient(xp, KBFSErrorUnwrapper{},
libkb.LogTagsFromContext)
client := kbgitkbfs.DiskBlockCacheClient{Cli: cli}
statuses, err := lru.New(diskBlockCacheRemoteStatusCacheCapacity)
if err != nil {
return nil, err
}
return &DiskBlockCacheRemote{
conn: conn,
client: client,
log: traceLogger{config.MakeLogger("DBR")},
statuses: statuses,
}, nil
} | go | func NewDiskBlockCacheRemote(kbCtx Context, config diskBlockCacheRemoteConfig) (
*DiskBlockCacheRemote, error) {
conn, xp, _, err := kbCtx.GetKBFSSocket(true)
if err != nil {
return nil, err
}
cli := rpc.NewClient(xp, KBFSErrorUnwrapper{},
libkb.LogTagsFromContext)
client := kbgitkbfs.DiskBlockCacheClient{Cli: cli}
statuses, err := lru.New(diskBlockCacheRemoteStatusCacheCapacity)
if err != nil {
return nil, err
}
return &DiskBlockCacheRemote{
conn: conn,
client: client,
log: traceLogger{config.MakeLogger("DBR")},
statuses: statuses,
}, nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // NewDiskBlockCacheRemote creates a new remote disk cache client. | [ "NewDiskBlockCacheRemote", "creates", "a", "new", "remote", "disk", "cache", "client", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L50-L71 |
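A minimal construction sketch, not a dataset row, assuming it lives in the libkbfs package with kbCtx and config already in hand; both parameter types are internal to that package.
func newRemoteBlockCache(kbCtx Context, config diskBlockCacheRemoteConfig) (
	*DiskBlockCacheRemote, error) {
	// Dials the locally running KBFS service over its socket and wraps
	// the resulting RPC transport; this fails if the socket is not up.
	return NewDiskBlockCacheRemote(kbCtx, config)
}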
159,392 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | Get | func (dbcr *DiskBlockCacheRemote) Get(ctx context.Context, tlfID tlf.ID,
blockID kbfsblock.ID, _ DiskBlockCacheType) (buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf,
prefetchStatus PrefetchStatus, err error) {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Get %s", blockID)
defer func() {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Get %s done (err=%+v)", blockID, err)
}()
res, err := dbcr.client.GetBlock(ctx, kbgitkbfs.GetBlockArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
})
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, NoPrefetch, err
}
err = serverHalf.UnmarshalBinary(res.ServerHalf)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, NoPrefetch, err
}
prefetchStatus = PrefetchStatusFromProtocol(res.PrefetchStatus)
dbcr.statuses.Add(blockID, prefetchStatus)
return res.Buf, serverHalf, prefetchStatus, nil
} | go | func (dbcr *DiskBlockCacheRemote) Get(ctx context.Context, tlfID tlf.ID,
blockID kbfsblock.ID, _ DiskBlockCacheType) (buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf,
prefetchStatus PrefetchStatus, err error) {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Get %s", blockID)
defer func() {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Get %s done (err=%+v)", blockID, err)
}()
res, err := dbcr.client.GetBlock(ctx, kbgitkbfs.GetBlockArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
})
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, NoPrefetch, err
}
err = serverHalf.UnmarshalBinary(res.ServerHalf)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, NoPrefetch, err
}
prefetchStatus = PrefetchStatusFromProtocol(res.PrefetchStatus)
dbcr.statuses.Add(blockID, prefetchStatus)
return res.Buf, serverHalf, prefetchStatus, nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Get implements the DiskBlockCache interface for DiskBlockCacheRemote. | [ "Get", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L74-L98 |
159,393 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | GetPrefetchStatus | func (dbcr *DiskBlockCacheRemote) GetPrefetchStatus(
ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID,
cacheType DiskBlockCacheType) (
prefetchStatus PrefetchStatus, err error) {
if tmp, ok := dbcr.statuses.Get(blockID); ok {
prefetchStatus := tmp.(PrefetchStatus)
return prefetchStatus, nil
}
dbcr.log.LazyTrace(
ctx, "DiskBlockCacheRemote: GetPrefetchStatus %s", blockID)
defer func() {
dbcr.log.LazyTrace(
ctx, "DiskBlockCacheRemote: GetPrefetchStatus %s done (err=%+v)",
blockID, err)
}()
res, err := dbcr.client.GetPrefetchStatus(
ctx, kbgitkbfs.GetPrefetchStatusArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
})
if err != nil {
return NoPrefetch, err
}
return PrefetchStatusFromProtocol(res), nil
} | go | func (dbcr *DiskBlockCacheRemote) GetPrefetchStatus(
ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID,
cacheType DiskBlockCacheType) (
prefetchStatus PrefetchStatus, err error) {
if tmp, ok := dbcr.statuses.Get(blockID); ok {
prefetchStatus := tmp.(PrefetchStatus)
return prefetchStatus, nil
}
dbcr.log.LazyTrace(
ctx, "DiskBlockCacheRemote: GetPrefetchStatus %s", blockID)
defer func() {
dbcr.log.LazyTrace(
ctx, "DiskBlockCacheRemote: GetPrefetchStatus %s done (err=%+v)",
blockID, err)
}()
res, err := dbcr.client.GetPrefetchStatus(
ctx, kbgitkbfs.GetPrefetchStatusArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
})
if err != nil {
return NoPrefetch, err
}
return PrefetchStatusFromProtocol(res), nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // GetPrefetchStatus implements the DiskBlockCache interface for
// DiskBlockCacheRemote. | [ "GetPrefetchStatus", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L102-L129 |
159,394 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | Put | func (dbcr *DiskBlockCacheRemote) Put(ctx context.Context, tlfID tlf.ID,
blockID kbfsblock.ID, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf,
_ DiskBlockCacheType) (err error) {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Put %s", blockID)
defer func() {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Put %s done (err=%+v)", blockID, err)
}()
return dbcr.client.PutBlock(ctx, kbgitkbfs.PutBlockArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
Buf: buf,
ServerHalf: serverHalf.Bytes(),
})
} | go | func (dbcr *DiskBlockCacheRemote) Put(ctx context.Context, tlfID tlf.ID,
blockID kbfsblock.ID, buf []byte,
serverHalf kbfscrypto.BlockCryptKeyServerHalf,
_ DiskBlockCacheType) (err error) {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Put %s", blockID)
defer func() {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Put %s done (err=%+v)", blockID, err)
}()
return dbcr.client.PutBlock(ctx, kbgitkbfs.PutBlockArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
Buf: buf,
ServerHalf: serverHalf.Bytes(),
})
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Put implements the DiskBlockCache interface for DiskBlockCacheRemote. | [ "Put", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L132-L147 |
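An illustrative round trip, not a dataset row, against the remote cache using the Put and Get methods above, assuming an existing *DiskBlockCacheRemote cache and already-encrypted block contents; the zero-value cacheType stands in for whichever DiskBlockCacheType a real caller would pass.
func putThenGet(ctx context.Context, cache *DiskBlockCacheRemote,
	tlfID tlf.ID, blockID kbfsblock.ID, buf []byte,
	serverHalf kbfscrypto.BlockCryptKeyServerHalf) ([]byte, error) {
	var cacheType DiskBlockCacheType
	// Store the encrypted block and its server half in the remote cache.
	if err := cache.Put(
		ctx, tlfID, blockID, buf, serverHalf, cacheType); err != nil {
		return nil, err
	}
	// Read it back; the other return values (server half and prefetch
	// status) are ignored in this sketch.
	got, _, _, err := cache.Get(ctx, tlfID, blockID, cacheType)
	return got, err
}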
159,395 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | Delete | func (dbcr *DiskBlockCacheRemote) Delete(
ctx context.Context, blockIDs []kbfsblock.ID,
cacheType DiskBlockCacheType) (
numRemoved int, sizeRemoved int64, err error) {
numBlocks := len(blockIDs)
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Delete %s block(s)",
numBlocks)
defer func() {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Delete %s block(s) "+
"done (err=%+v)", numBlocks, err)
}()
blocks := make([][]byte, 0, len(blockIDs))
for _, b := range blockIDs {
blocks = append(blocks, b.Bytes())
}
res, err := dbcr.client.DeleteBlocks(ctx, blocks)
if err != nil {
return 0, 0, err
}
return res.NumRemoved, res.SizeRemoved, nil
} | go | func (dbcr *DiskBlockCacheRemote) Delete(
ctx context.Context, blockIDs []kbfsblock.ID,
cacheType DiskBlockCacheType) (
numRemoved int, sizeRemoved int64, err error) {
numBlocks := len(blockIDs)
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Delete %s block(s)",
numBlocks)
defer func() {
dbcr.log.LazyTrace(ctx, "DiskBlockCacheRemote: Delete %s block(s) "+
"done (err=%+v)", numBlocks, err)
}()
blocks := make([][]byte, 0, len(blockIDs))
for _, b := range blockIDs {
blocks = append(blocks, b.Bytes())
}
res, err := dbcr.client.DeleteBlocks(ctx, blocks)
if err != nil {
return 0, 0, err
}
return res.NumRemoved, res.SizeRemoved, nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Delete implements the DiskBlockCache interface for DiskBlockCacheRemote. | [ "Delete", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L150-L170 |
159,396 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | UpdateMetadata | func (dbcr *DiskBlockCacheRemote) UpdateMetadata(ctx context.Context,
tlfID tlf.ID, blockID kbfsblock.ID, prefetchStatus PrefetchStatus,
_ DiskBlockCacheType) error {
dbcr.statuses.Add(blockID, prefetchStatus)
return dbcr.client.UpdateBlockMetadata(ctx,
kbgitkbfs.UpdateBlockMetadataArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
PrefetchStatus: prefetchStatus.ToProtocol(),
})
} | go | func (dbcr *DiskBlockCacheRemote) UpdateMetadata(ctx context.Context,
tlfID tlf.ID, blockID kbfsblock.ID, prefetchStatus PrefetchStatus,
_ DiskBlockCacheType) error {
dbcr.statuses.Add(blockID, prefetchStatus)
return dbcr.client.UpdateBlockMetadata(ctx,
kbgitkbfs.UpdateBlockMetadataArg{
TlfID: tlfID.Bytes(),
BlockID: blockID.Bytes(),
PrefetchStatus: prefetchStatus.ToProtocol(),
})
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // UpdateMetadata implements the DiskBlockCache interface for
// DiskBlockCacheRemote. | [ "UpdateMetadata", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L174-L184 |
159,397 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | GetLastUnrefRev | func (dbcr *DiskBlockCacheRemote) GetLastUnrefRev(
_ context.Context, _ tlf.ID, _ DiskBlockCacheType) (
kbfsmd.Revision, error) {
panic("GetLastUnrefRev() not implemented in DiskBlockCacheRemote")
} | go | func (dbcr *DiskBlockCacheRemote) GetLastUnrefRev(
_ context.Context, _ tlf.ID, _ DiskBlockCacheType) (
kbfsmd.Revision, error) {
panic("GetLastUnrefRev() not implemented in DiskBlockCacheRemote")
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // GetLastUnrefRev implements the DiskBlockCache interface for
// DiskBlockCacheRemote. | [ "GetLastUnrefRev", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L195-L199 |
159,398 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | DoesCacheHaveSpace | func (dbcr *DiskBlockCacheRemote) DoesCacheHaveSpace(
_ context.Context, _ DiskBlockCacheType) (bool, error) {
// We won't be kicking off long syncing prefetching via the remote
// cache, so just pretend the cache has space.
return true, nil
} | go | func (dbcr *DiskBlockCacheRemote) DoesCacheHaveSpace(
_ context.Context, _ DiskBlockCacheType) (bool, error) {
// We won't be kicking off long syncing prefetching via the remote
// cache, so just pretend the cache has space.
return true, nil
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // DoesCacheHaveSpace implements the DiskBlockCache interface for
// DiskBlockCacheRemote. | [ "DoesCacheHaveSpace", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L218-L223 |
159,399 | keybase/client | go/kbfs/libkbfs/disk_block_cache_remote.go | Mark | func (dbcr *DiskBlockCacheRemote) Mark(
_ context.Context, _ kbfsblock.ID, _ string, _ DiskBlockCacheType) error {
panic("Mark() not implemented in DiskBlockCacheRemote")
} | go | func (dbcr *DiskBlockCacheRemote) Mark(
_ context.Context, _ kbfsblock.ID, _ string, _ DiskBlockCacheType) error {
panic("Mark() not implemented in DiskBlockCacheRemote")
} | [ …code_tokens elided: the tokenized form of the code column above… ] | // Mark implements the DiskBlockCache interface for DiskBlockCacheRemote. | [ "Mark", "implements", "the", "DiskBlockCache", "interface", "for", "DiskBlockCacheRemote", "."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/disk_block_cache_remote.go#L226-L229 |