Dataset columns:

| column | type | length / value range |
|---|---|---|
| id | int32 | 0 to 167k |
| repo | string | lengths 5 to 54 |
| path | string | lengths 4 to 155 |
| func_name | string | lengths 1 to 118 |
| original_string | string | lengths 52 to 85.5k |
| language | string (1 class) | "go" in the rows shown |
| code | string | lengths 52 to 85.5k |
| code_tokens | sequence | lengths 21 to 1.41k |
| docstring | string | lengths 6 to 2.61k |
| docstring_tokens | sequence | lengths 3 to 215 |
| sha | string | length 40 |
| url | string | lengths 85 to 252 |

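As a rough sketch of how rows with this schema might be consumed, the Go program below decodes records whose JSON keys match the column names above. The struct layout, the JSON tags, the JSON Lines export format, and the file name "data.jsonl" are all illustrative assumptions, not part of the dataset itself.

    package main

    import (
        "bufio"
        "encoding/json"
        "fmt"
        "log"
        "os"
    )

    // Record mirrors the columns in the schema table above.
    type Record struct {
        ID              int32    `json:"id"`
        Repo            string   `json:"repo"`
        Path            string   `json:"path"`
        FuncName        string   `json:"func_name"`
        OriginalString  string   `json:"original_string"`
        Language        string   `json:"language"`
        Code            string   `json:"code"`
        CodeTokens      []string `json:"code_tokens"`
        Docstring       string   `json:"docstring"`
        DocstringTokens []string `json:"docstring_tokens"`
        SHA             string   `json:"sha"`
        URL             string   `json:"url"`
    }

    func main() {
        // "data.jsonl" is a hypothetical export path: one JSON object per line.
        f, err := os.Open("data.jsonl")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        sc := bufio.NewScanner(f)
        // Some rows carry large code blobs (up to ~85.5k chars), so raise the scanner limit.
        sc.Buffer(make([]byte, 0, 1024*1024), 16*1024*1024)
        for sc.Scan() {
            var r Record
            if err := json.Unmarshal(sc.Bytes(), &r); err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%d %s %s: %s (%d code tokens)\n",
                r.ID, r.Repo, r.Path, r.FuncName, len(r.CodeTokens))
        }
        if err := sc.Err(); err != nil {
            log.Fatal(err)
        }
    }
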
id: 160,500
repo: keybase/client
path: go/kbfs/data/dirty_bcache.go
func_name: ShouldForceSync
language: go
docstring: ShouldForceSync implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
code:

    func (d *DirtyBlockCacheStandard) ShouldForceSync(_ tlf.ID) bool {
        d.lock.RLock()
        defer d.lock.RUnlock()
        // TODO: Fill up to likely block boundaries?
        return d.waitBufBytes >= d.syncBufferCap
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/dirty_bcache.go#L644-L649

id: 160,501
repo: keybase/client
path: go/kbfs/data/dirty_bcache.go
func_name: Size
language: go
docstring: Size returns the number of blocks currently in the cache.
code:

    func (d *DirtyBlockCacheStandard) Size() int {
        d.lock.RLock()
        defer d.lock.RUnlock()
        return len(d.cache)
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/dirty_bcache.go#L652-L656

id: 160,502
repo: keybase/client
path: go/kbfs/data/dirty_bcache.go
func_name: Shutdown
language: go
docstring: Shutdown implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
code:

    func (d *DirtyBlockCacheStandard) Shutdown() error {
        func() {
            d.shutdownLock.Lock()
            defer d.shutdownLock.Unlock()
            d.isShutdown = true
            close(d.shutdownChan)
        }()
        d.reqWg.Wait()
        close(d.requestsChan)
        d.lock.Lock()
        defer d.lock.Unlock()
        // Clear out the remaining requests
        for req := range d.requestsChan {
            d.updateWaitBufLocked(req.bytes)
        }
        if d.syncBufBytes != 0 || d.waitBufBytes != 0 || d.ignoreSyncBytes != 0 {
            return fmt.Errorf("Unexpected dirty bytes leftover on shutdown: "+
                "syncBuf=%d, waitBuf=%d, ignore=%d",
                d.syncBufBytes, d.waitBufBytes, d.ignoreSyncBytes)
        }
        return nil
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/dirty_bcache.go#L660-L682

id: 160,503
repo: keybase/client
path: go/client/cmd_update.go
func_name: NewCmdUpdate
language: go
docstring: NewCmdUpdate are commands for supporting the updater
code:

    func NewCmdUpdate(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
        return cli.Command{
            Name:         "update",
            Usage:        "The updater",
            ArgumentHelp: "[arguments...]",
            HideHelp:     true,
            Subcommands: []cli.Command{
                newCmdUpdateCheck(cl, g), // Deprecated
                newCmdUpdateCheckInUse(cl, g),
                newCmdUpdateNotify(cl, g),
            },
        }
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/cmd_update.go#L19-L31

id: 160,504
repo: keybase/client
path: go/client/cmd_update.go
func_name: newCmdUpdateCheckInUse
language: go
docstring: newCmdUpdateCheckInUse is called by updater to see if Keybase is currently in use
code:

    func newCmdUpdateCheckInUse(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
        return cli.Command{
            Name:         "check-in-use",
            ArgumentHelp: "",
            Usage:        "Check if we are in use (safe for restart)",
            Action: func(c *cli.Context) {
                cl.SetLogForward(libcmdline.LogForwardNone)
                cl.SetForkCmd(libcmdline.NoFork)
                cl.ChooseCommand(newCmdUpdateCheckInUseRunner(g), "check-in-use", c)
            },
        }
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/cmd_update.go#L60-L71

id: 160,505
repo: keybase/client
path: go/client/cmd_simplefs_ps.go
func_name: NewCmdSimpleFSPs
language: go
docstring: NewCmdSimpleFSPs creates a new cli.Command.
code:

    func NewCmdSimpleFSPs(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {
        return cli.Command{
            Name:  "ps",
            Usage: "list running operations",
            Action: func(c *cli.Context) {
                cl.ChooseCommand(&CmdSimpleFSPs{Contextified: libkb.NewContextified(g)}, "ps", c)
                cl.SetNoStandalone()
            },
        }
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/client/cmd_simplefs_ps.go#L25-L34

id: 160,506
repo: keybase/client
path: go/kbfs/libkbfs/folder_block_manager.go
func_name: archiveUnrefBlocksNoWait
language: go
docstring: archiveUnrefBlocksNoWait enqueues the MD for archiving without blocking. By the time it returns, the archive group has been incremented so future waits will block on this archive. This method is for internal use within folderBlockManager only.
code:

    func (fbm *folderBlockManager) archiveUnrefBlocksNoWait(md ReadOnlyRootMetadata) {
        // Don't archive for unmerged revisions, because conflict
        // resolution might undo some of the unreferences.
        if md.MergedStatus() != kbfsmd.Merged {
            return
        }
        if err := isArchivableMDOrError(md); err != nil {
            panic(err)
        }
        fbm.archiveGroup.Add(1)
        // Don't block if the channel is full; instead do the send in a
        // background goroutine. We've already done the Add above, so the
        // wait calls should all work just fine.
        select {
        case fbm.archiveChan <- md:
            return
        default:
            go func() { fbm.archiveChan <- md }()
        }
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_manager.go#L400-L422

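The select/default pattern in archiveUnrefBlocksNoWait above is a general Go idiom: attempt a non-blocking channel send, and hand the send off to a background goroutine when the buffer is full so the caller never blocks. The sketch below shows only that idiom with generic placeholder types (a string channel and a plain WaitGroup); it is an illustration under those assumptions, not the kbfs code.

    package main

    import (
        "fmt"
        "sync"
    )

    // enqueueNoWait adds one unit of pending work to wg before attempting the
    // send, so a later wg.Wait() covers the item even if the send happens
    // asynchronously in the fallback goroutine.
    func enqueueNoWait(wg *sync.WaitGroup, ch chan<- string, item string) {
        wg.Add(1)
        select {
        case ch <- item: // fast path: the buffer has room
        default: // slow path: hand the send off so the caller doesn't block
            go func() { ch <- item }()
        }
    }

    func main() {
        var wg sync.WaitGroup
        ch := make(chan string, 1)

        // Consumer drains the channel and marks each item done.
        go func() {
            for item := range ch {
                fmt.Println("processed", item)
                wg.Done()
            }
        }()

        enqueueNoWait(&wg, ch, "a")
        enqueueNoWait(&wg, ch, "b") // may fall back to a goroutine if the buffer is full
        wg.Wait()
        close(ch)
    }
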
id: 160,507
repo: keybase/client
path: go/kbfs/libkbfs/folder_block_manager.go
func_name: doChunkedDowngrades
language: go
docstring: doChunkedDowngrades sends batched archive or delete messages to the block server for the given block pointers. For deletes, it returns a list of block IDs that no longer have any references.
code:

    func (fbm *folderBlockManager) doChunkedDowngrades(ctx context.Context,
        tlfID tlf.ID, ptrs []data.BlockPointer, archive bool) (
        []kbfsblock.ID, error) {
        fbm.log.CDebugf(ctx, "Downgrading %d pointers (archive=%t)",
            len(ptrs), archive)
        bops := fbm.config.BlockOps()
        // Round up to find the number of chunks.
        numChunks := (len(ptrs) + numPointersToDowngradePerChunk - 1) /
            numPointersToDowngradePerChunk
        numWorkers := numChunks
        if numWorkers > maxParallelBlockPuts {
            numWorkers = maxParallelBlockPuts
        }
        chunks := make(chan []data.BlockPointer, numChunks)
        var wg sync.WaitGroup
        defer wg.Wait()
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
        type workerResult struct {
            zeroRefCounts []kbfsblock.ID
            err           error
        }
        chunkResults := make(chan workerResult, numChunks)
        worker := func() {
            defer wg.Done()
            for chunk := range chunks {
                var res workerResult
                fbm.log.CDebugf(ctx, "Downgrading chunk of %d pointers", len(chunk))
                if archive {
                    res.err = bops.Archive(ctx, tlfID, chunk)
                } else {
                    var liveCounts map[kbfsblock.ID]int
                    liveCounts, res.err = bops.Delete(ctx, tlfID, chunk)
                    if res.err == nil {
                        for id, count := range liveCounts {
                            if count == 0 {
                                res.zeroRefCounts = append(res.zeroRefCounts, id)
                            }
                        }
                    }
                }
                chunkResults <- res
                select {
                // return early if the context has been canceled
                case <-ctx.Done():
                    return
                default:
                }
            }
        }
        for i := 0; i < numWorkers; i++ {
            wg.Add(1)
            go worker()
        }
        for start := 0; start < len(ptrs); start += numPointersToDowngradePerChunk {
            end := start + numPointersToDowngradePerChunk
            if end > len(ptrs) {
                end = len(ptrs)
            }
            chunks <- ptrs[start:end]
        }
        close(chunks)
        var zeroRefCounts []kbfsblock.ID
        for i := 0; i < numChunks; i++ {
            result := <-chunkResults
            if result.err != nil {
                // deferred cancel will stop the other workers.
                return nil, result.err
            }
            zeroRefCounts = append(zeroRefCounts, result.zeroRefCounts...)
        }
        return zeroRefCounts, nil
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_manager.go#L454-L533

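doChunkedDowngrades above follows a common bounded worker-pool shape: split a slice into fixed-size chunks, cap the number of workers, and collect one result per chunk. The sketch below reproduces only that shape with made-up names (processChunks, chunkSize, and a doubling stand-in for the real per-chunk work); it is an assumption-laden illustration, not the kbfs implementation, and it omits the cancellation handling the real function has.

    package main

    import (
        "fmt"
        "sync"
    )

    const chunkSize = 3
    const maxWorkers = 4

    func processChunks(items []int) []int {
        // Round up to find the number of chunks, then cap the worker count.
        numChunks := (len(items) + chunkSize - 1) / chunkSize
        numWorkers := numChunks
        if numWorkers > maxWorkers {
            numWorkers = maxWorkers
        }

        chunks := make(chan []int, numChunks)
        results := make(chan []int, numChunks) // buffered so workers never block

        var wg sync.WaitGroup
        for i := 0; i < numWorkers; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for chunk := range chunks {
                    out := make([]int, 0, len(chunk))
                    for _, v := range chunk {
                        out = append(out, v*2) // stand-in for the real per-chunk work
                    }
                    results <- out
                }
            }()
        }

        for start := 0; start < len(items); start += chunkSize {
            end := start + chunkSize
            if end > len(items) {
                end = len(items)
            }
            chunks <- items[start:end]
        }
        close(chunks)
        wg.Wait()
        close(results)

        // Note: chunk order is not preserved across workers.
        var all []int
        for out := range results {
            all = append(all, out...)
        }
        return all
    }

    func main() {
        fmt.Println(processChunks([]int{1, 2, 3, 4, 5, 6, 7}))
    }
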
id: 160,508
repo: keybase/client
path: go/kbfs/libkbfs/folder_block_manager.go
func_name: deleteBlockRefs
language: go
docstring: deleteBlockRefs sends batched delete messages to the block server for the given block pointers. It returns a list of block IDs that no longer have any references.
code:

    func (fbm *folderBlockManager) deleteBlockRefs(ctx context.Context,
        tlfID tlf.ID, ptrs []data.BlockPointer) ([]kbfsblock.ID, error) {
        return fbm.doChunkedDowngrades(ctx, tlfID, ptrs, false)
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_manager.go#L538-L541

id: 160,509
repo: keybase/client
path: go/kbfs/libkbfs/folder_block_manager.go
func_name: getUnrefPointersFromMD
language: go
docstring: getUnrefPointersFromMD returns a slice of BlockPointers that were unreferenced by the given `rmd`. If there are too many pointers to process, given the current mode, then it will return a partial list, plus a non-nil `iter` parameter that can be passed into a subsequent call to get the next set of unreferenced BlockPointers from the same MD. If a nil `iter` is given, pointers are returned from the beginning of the list.
code:

    func (fbm *folderBlockManager) getUnrefPointersFromMD(
        rmd ReadOnlyRootMetadata, includeGC bool, iter *unrefIterator) (
        ptrs []data.BlockPointer, nextIter *unrefIterator) {
        currPtr := 0
        complete := true
        nextPtr := 0
        if iter != nil {
            nextPtr = iter.nextPtr
        }
        ptrMap := make(map[data.BlockPointer]bool)
        max := fbm.config.Mode().MaxBlockPtrsToManageAtOnce()
    opLoop:
        for _, op := range rmd.data.Changes.Ops {
            if _, ok := op.(*GCOp); !includeGC && ok {
                continue
            }
            for _, ptr := range op.Unrefs() {
                currPtr++
                // Skip past any ptrs we've already processed.
                if currPtr <= nextPtr {
                    continue
                }
                // Can be zeroPtr in weird failed sync scenarios.
                // See syncInfo.replaceRemovedBlock for an example
                // of how this can happen.
                if ptr != data.ZeroPtr && !ptrMap[ptr] {
                    ptrMap[ptr] = true
                }
                nextPtr++
                if max >= 0 && len(ptrMap) >= max {
                    complete = false
                    break opLoop
                }
            }
            for _, update := range op.allUpdates() {
                currPtr++
                // Skip past any ptrs we've already processed.
                if currPtr <= nextPtr {
                    continue
                }
                // It's legal for there to be an "update" between
                // two identical pointers (usually because of
                // conflict resolution), so ignore that for quota
                // reclamation purposes.
                if update.Ref != update.Unref && !ptrMap[update.Unref] {
                    ptrMap[update.Unref] = true
                }
                nextPtr++
                if max >= 0 && len(ptrMap) >= max {
                    complete = false
                    break opLoop
                }
            }
        }
        ptrs = make([]data.BlockPointer, 0, len(ptrMap))
        for ptr := range ptrMap {
            ptrs = append(ptrs, ptr)
        }
        if !complete {
            nextIter = &unrefIterator{nextPtr}
        }
        return ptrs, nextIter
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_manager.go#L706-L770

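The contract described in the docstring above (return a partial batch plus a cursor that a later call can resume from) is a generic pagination pattern. The sketch below shows that pattern with hypothetical types (cursor, nextBatch) and plain strings; it is not the kbfs implementation, just the resumable-iteration idea in isolation.

    package main

    import "fmt"

    type cursor struct{ next int }

    // nextBatch returns up to max items starting at the cursor position, plus a
    // non-nil cursor when more items remain; a nil cursor means the caller is done.
    func nextBatch(items []string, c *cursor, max int) ([]string, *cursor) {
        start := 0
        if c != nil {
            start = c.next
        }
        end := start + max
        if end >= len(items) {
            return items[start:], nil
        }
        return items[start:end], &cursor{next: end}
    }

    func main() {
        items := []string{"a", "b", "c", "d", "e"}
        var c *cursor
        for {
            var batch []string
            batch, c = nextBatch(items, c, 2)
            fmt.Println(batch)
            if c == nil {
                break
            }
        }
    }
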
id: 160,510
repo: keybase/client
path: go/kbfs/libkbfs/folder_block_manager.go
func_name: getMostRecentGCRevision
language: go
docstring: getMostRecentGCRevision returns the latest revision that was scrubbed by the previous gc op.
code:

    func (fbm *folderBlockManager) getMostRecentGCRevision(
        ctx context.Context, head ReadOnlyRootMetadata) (
        lastGCRev kbfsmd.Revision, err error) {
        if head.data.LastGCRevision >= kbfsmd.RevisionInitial {
            fbm.log.CDebugf(ctx, "Found last gc revision %d in "+
                "head MD revision %d", head.data.LastGCRevision,
                head.Revision())
            return head.data.LastGCRevision, nil
        }
        // Very old TLFs might not have a filled-in `LastGCRevision`, so
        // we need to walk backwards to find the latest gcOp.
        endRev := head.Revision()
        for {
            startRev := endRev - maxMDsAtATime + 1 // (kbfsmd.Revision is signed)
            if startRev < kbfsmd.RevisionInitial {
                startRev = kbfsmd.RevisionInitial
            }
            rmds, err := getMDRange(
                ctx, fbm.config, fbm.id, kbfsmd.NullBranchID, startRev,
                endRev, kbfsmd.Merged, nil)
            if err != nil {
                return kbfsmd.RevisionUninitialized, err
            }
            numNew := len(rmds)
            for i := len(rmds) - 1; i >= 0; i-- {
                rmd := rmds[i]
                if rmd.data.LastGCRevision >= kbfsmd.RevisionInitial {
                    fbm.log.CDebugf(ctx, "Found last gc revision %d in "+
                        "MD revision %d", rmd.data.LastGCRevision,
                        rmd.Revision())
                    return rmd.data.LastGCRevision, nil
                }
                for j := len(rmd.data.Changes.Ops) - 1; j >= 0; j-- {
                    GCOp, ok := rmd.data.Changes.Ops[j].(*GCOp)
                    if !ok || GCOp.LatestRev == kbfsmd.RevisionUninitialized {
                        continue
                    }
                    fbm.log.CDebugf(ctx, "Found last gc op: %s", GCOp)
                    return GCOp.LatestRev, nil
                }
            }
            if numNew > 0 {
                endRev = rmds[0].Revision() - 1
            }
            if numNew < maxMDsAtATime || endRev < kbfsmd.RevisionInitial {
                // Never been GC'd.
                return kbfsmd.RevisionUninitialized, nil
            }
        }
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_manager.go#L877-L931

id: 160,511
repo: keybase/client
path: go/kbfs/libkbfs/folder_block_manager.go
func_name: getUnreferencedBlocks
language: go
docstring: getUnrefBlocks returns a slice containing all the block pointers that were unreferenced after the earliestRev, up to and including those in latestRev. If the number of pointers is too large, it will shorten the range of the revisions being reclaimed, and return the latest revision represented in the returned slice of pointers.
code:

    func (fbm *folderBlockManager) getUnreferencedBlocks(
        ctx context.Context, earliestRev, mostRecentRev kbfsmd.Revision) (
        ptrs []data.BlockPointer, lastRev kbfsmd.Revision,
        complete bool, err error) {
        fbm.log.CDebugf(ctx, "Getting unreferenced blocks between revisions "+
            "%d and %d", earliestRev, mostRecentRev)
        defer func() {
            if err == nil {
                fbm.log.CDebugf(ctx, "Found %d pointers to clean between "+
                    "revisions %d and %d", len(ptrs), earliestRev, lastRev)
            }
        }()
        // Walk forward, starting from just after earliestRev, until we
        // get enough pointers or until we reach the head or a revision
        // that's not old enough, gathering pointers to GC.
        startRev := earliestRev + 1
    outer:
        for {
            endRev := startRev + maxMDsAtATime
            if endRev > mostRecentRev {
                endRev = mostRecentRev
            }
            rmds, err := getMDRange(
                ctx, fbm.config, fbm.id, kbfsmd.NullBranchID, startRev,
                endRev, kbfsmd.Merged, nil)
            if err != nil {
                return nil, kbfsmd.RevisionUninitialized, false, err
            }
            numNew := len(rmds)
            for _, rmd := range rmds {
                if !fbm.isOldEnough(rmd) {
                    fbm.log.CDebugf(ctx, "Revision %d is too recent; stopping QR",
                        rmd.Revision())
                    complete = true
                    break outer
                }
                lastRev = rmd.Revision()
                // A garbage-collection op *must* contain all pointers in
                // its respective op. If this device can't handle it,
                // error the process and let another device take care of
                // it.
                newPtrs, iter := fbm.getUnrefPointersFromMD(
                    rmd.ReadOnlyRootMetadata, false, &unrefIterator{0})
                if iter != nil {
                    return nil, kbfsmd.RevisionUninitialized, false, errors.New(
                        fmt.Sprintf(
                            "Can't handle the unref'd pointers of revision %d",
                            lastRev))
                }
                ptrs = append(ptrs, newPtrs...)
                // TODO: when can we clean up the MD's unembedded block
                // changes pointer? It's not safe until we know for sure
                // that all existing clients have received the latest
                // update (and also that there are no outstanding staged
                // branches). Let's do that as part of the bigger issue
                // KBFS-793 -- for now we have to leak those blocks.
                if len(ptrs) > fbm.numPointersPerGCThreshold {
                    fbm.log.CDebugf(ctx, "Shortening GC range to [%d:%d]",
                        earliestRev, rmd.Revision())
                    break outer
                }
            }
            if numNew > 0 {
                startRev = rmds[len(rmds)-1].Revision() + 1
            }
            if numNew < maxMDsAtATime || startRev > mostRecentRev {
                complete = true
                break
            }
        }
        return ptrs, lastRev, complete, nil
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/folder_block_manager.go#L938-L1015

id: 160,512
repo: keybase/client
path: go/kbfs/libkey/key_ops.go
func_name: GetTLFCryptKeyServerHalf
language: go
docstring: GetTLFCryptKeyServerHalf is an implementation of the KeyOps interface.
code:

    func (k *KeyOpsStandard) GetTLFCryptKeyServerHalf(
        ctx context.Context, serverHalfID kbfscrypto.TLFCryptKeyServerHalfID,
        key kbfscrypto.CryptPublicKey) (kbfscrypto.TLFCryptKeyServerHalf, error) {
        // get the key half from the server
        serverHalf, err := k.config.KeyServer().GetTLFCryptKeyServerHalf(
            ctx, serverHalfID, key)
        if err != nil {
            return kbfscrypto.TLFCryptKeyServerHalf{}, err
        }
        // get current uid and deviceKID
        session, err := k.config.KBPKI().GetCurrentSession(ctx)
        if err != nil {
            return kbfscrypto.TLFCryptKeyServerHalf{}, err
        }
        // verify we got the expected key
        err = kbfscrypto.VerifyTLFCryptKeyServerHalfID(
            serverHalfID, session.UID, key, serverHalf)
        if err != nil {
            return kbfscrypto.TLFCryptKeyServerHalf{}, err
        }
        return serverHalf, nil
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkey/key_ops.go#L38-L60

id: 160,513
repo: keybase/client
path: go/kbfs/libkey/key_ops.go
func_name: PutTLFCryptKeyServerHalves
language: go
docstring: PutTLFCryptKeyServerHalves is an implementation of the KeyOps interface.
code:

    func (k *KeyOpsStandard) PutTLFCryptKeyServerHalves(
        ctx context.Context,
        keyServerHalves kbfsmd.UserDeviceKeyServerHalves) error {
        // upload the keys
        return k.config.KeyServer().PutTLFCryptKeyServerHalves(ctx, keyServerHalves)
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkey/key_ops.go#L63-L68

id: 160,514
repo: keybase/client
path: go/kbfs/libkey/key_ops.go
func_name: DeleteTLFCryptKeyServerHalf
language: go
docstring: DeleteTLFCryptKeyServerHalf is an implementation of the KeyOps interface.
code:

    func (k *KeyOpsStandard) DeleteTLFCryptKeyServerHalf(
        ctx context.Context, uid keybase1.UID, key kbfscrypto.CryptPublicKey,
        serverHalfID kbfscrypto.TLFCryptKeyServerHalfID) error {
        return k.config.KeyServer().DeleteTLFCryptKeyServerHalf(
            ctx, uid, key, serverHalfID)
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkey/key_ops.go#L71-L76

id: 160,515
repo: keybase/client
path: go/libkb/full_self_cacher.go
func_name: WithSelfForcePoll
language: go
docstring: WithSelfForcePoll is like WithSelf but forces a poll. I.e., it will always go to the server for a merkle check, regardless of when the existing self was cached.
code:

    func (m *CachedFullSelf) WithSelfForcePoll(ctx context.Context, f func(u *User) error) error {
        arg := NewLoadUserArg(m.G()).WithPublicKeyOptional().WithSelf(true).WithNetContext(ctx).WithForcePoll(true)
        return m.WithUser(arg, f)
    }

sha: b352622cd8cc94798cfacbcb56ada203c18e519e
url: https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/full_self_cacher.go#L103-L106

160,516 | keybase/client | go/libkb/full_self_cacher.go | WithUser | func (m *CachedFullSelf) WithUser(arg LoadUserArg, f func(u *User) error) (err error) {
ctx := arg.GetNetContext()
ctx = WithLogTag(ctx, "SELF")
arg = arg.WithNetContext(ctx)
m.G().Log.CDebugf(ctx, "+ CachedFullSelf#WithUser(%+v)", arg)
m.Lock()
defer func() {
m.G().Log.CDebugf(ctx, "- CachedFullSelf#WithUser")
m.Unlock()
}()
var u *User
if m.me != nil && m.isSelfLoad(arg) {
m.maybeClearCache(ctx, &arg)
}
if m.me == nil || !m.isSelfLoad(arg) {
if m.TestDeadlocker != nil {
m.TestDeadlocker()
}
u, err = LoadUser(arg)
if err != nil {
return err
}
// WARNING! You can't call m.G().GetMyUID() if this function is called from
// within the Account/LoginState inner loop. Because m.G().GetMyUID() calls
// back into Account, it will deadlock.
if arg.self || u.GetUID().Equal(m.G().GetMyUID()) {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: cache populate")
m.cacheMe(u)
if ldr := m.G().GetUPAKLoader(); ldr != nil {
if err := ldr.PutUserToCache(ctx, u); err != nil {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: continuing past error putting user to cache: %s", err)
}
}
} else {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: other user")
}
} else {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: cache hit")
u = m.me
}
return f(u)
} | go | func (m *CachedFullSelf) WithUser(arg LoadUserArg, f func(u *User) error) (err error) {
ctx := arg.GetNetContext()
ctx = WithLogTag(ctx, "SELF")
arg = arg.WithNetContext(ctx)
m.G().Log.CDebugf(ctx, "+ CachedFullSelf#WithUser(%+v)", arg)
m.Lock()
defer func() {
m.G().Log.CDebugf(ctx, "- CachedFullSelf#WithUser")
m.Unlock()
}()
var u *User
if m.me != nil && m.isSelfLoad(arg) {
m.maybeClearCache(ctx, &arg)
}
if m.me == nil || !m.isSelfLoad(arg) {
if m.TestDeadlocker != nil {
m.TestDeadlocker()
}
u, err = LoadUser(arg)
if err != nil {
return err
}
// WARNING! You can't call m.G().GetMyUID() if this function is called from
// within the Account/LoginState inner loop. Because m.G().GetMyUID() calls
// back into Account, it will deadlock.
if arg.self || u.GetUID().Equal(m.G().GetMyUID()) {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: cache populate")
m.cacheMe(u)
if ldr := m.G().GetUPAKLoader(); ldr != nil {
if err := ldr.PutUserToCache(ctx, u); err != nil {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: continuing past error putting user to cache: %s", err)
}
}
} else {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: other user")
}
} else {
m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: cache hit")
u = m.me
}
return f(u)
} | [
"func",
"(",
"m",
"*",
"CachedFullSelf",
")",
"WithUser",
"(",
"arg",
"LoadUserArg",
",",
"f",
"func",
"(",
"u",
"*",
"User",
")",
"error",
")",
"(",
"err",
"error",
")",
"{",
"ctx",
":=",
"arg",
".",
"GetNetContext",
"(",
")",
"\n",
"ctx",
"=",
"WithLogTag",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"arg",
"=",
"arg",
".",
"WithNetContext",
"(",
"ctx",
")",
"\n\n",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"arg",
")",
"\n",
"m",
".",
"Lock",
"(",
")",
"\n\n",
"defer",
"func",
"(",
")",
"{",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"m",
".",
"Unlock",
"(",
")",
"\n",
"}",
"(",
")",
"\n\n",
"var",
"u",
"*",
"User",
"\n\n",
"if",
"m",
".",
"me",
"!=",
"nil",
"&&",
"m",
".",
"isSelfLoad",
"(",
"arg",
")",
"{",
"m",
".",
"maybeClearCache",
"(",
"ctx",
",",
"&",
"arg",
")",
"\n",
"}",
"\n\n",
"if",
"m",
".",
"me",
"==",
"nil",
"||",
"!",
"m",
".",
"isSelfLoad",
"(",
"arg",
")",
"{",
"if",
"m",
".",
"TestDeadlocker",
"!=",
"nil",
"{",
"m",
".",
"TestDeadlocker",
"(",
")",
"\n",
"}",
"\n\n",
"u",
",",
"err",
"=",
"LoadUser",
"(",
"arg",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"// WARNING! You can't call m.G().GetMyUID() if this function is called from",
"// within the Account/LoginState inner loop. Because m.G().GetMyUID() calls",
"// back into Account, it will deadlock.",
"if",
"arg",
".",
"self",
"||",
"u",
".",
"GetUID",
"(",
")",
".",
"Equal",
"(",
"m",
".",
"G",
"(",
")",
".",
"GetMyUID",
"(",
")",
")",
"{",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"m",
".",
"cacheMe",
"(",
"u",
")",
"\n",
"if",
"ldr",
":=",
"m",
".",
"G",
"(",
")",
".",
"GetUPAKLoader",
"(",
")",
";",
"ldr",
"!=",
"nil",
"{",
"if",
"err",
":=",
"ldr",
".",
"PutUserToCache",
"(",
"ctx",
",",
"u",
")",
";",
"err",
"!=",
"nil",
"{",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"else",
"{",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"u",
"=",
"m",
".",
"me",
"\n",
"}",
"\n",
"return",
"f",
"(",
"u",
")",
"\n",
"}"
] | // WithUser loads any old user. If it happens to be the self user, then it behaves
// as in WithSelf. Otherwise, it will just load the user, and throw it out when done.
// WithUser supports other so that code doesn't need to change if we're doing the
// operation for the user or someone else. | [
"WithUser",
"loads",
"any",
"old",
"user",
".",
"If",
"it",
"happens",
"to",
"be",
"the",
"self",
"user",
"then",
"it",
"behaves",
"as",
"in",
"WithSelf",
".",
"Otherwise",
"it",
"will",
"just",
"load",
"the",
"user",
"and",
"throw",
"it",
"out",
"when",
"done",
".",
"WithUser",
"supports",
"other",
"so",
"that",
"code",
"doesn",
"t",
"need",
"to",
"change",
"if",
"we",
"re",
"doing",
"the",
"operation",
"for",
"the",
"user",
"or",
"someone",
"else",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/full_self_cacher.go#L153-L202 |
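The WithUser entry above is a cache-or-load routine: take the lock, reuse the cached self user when the request matches, otherwise load, conditionally populate the cache, and run the callback while still holding the lock. A minimal standalone sketch of that shape, standard library only and with hypothetical names (not keybase code):

package main

import (
	"fmt"
	"sync"
)

type user struct{ name string }

type selfCache struct {
	sync.Mutex
	me *user
}

// withUser serves f from the cached user when the request matches, and
// otherwise loads, populates the cache, and runs f, all under the lock,
// mirroring the structure of WithUser above.
func (c *selfCache) withUser(name string, load func(string) (*user, error), f func(*user) error) error {
	c.Lock()
	defer c.Unlock()
	if c.me != nil && c.me.name == name {
		return f(c.me) // cache hit
	}
	u, err := load(name)
	if err != nil {
		return err
	}
	c.me = u // cache populate
	return f(u)
}

func main() {
	c := &selfCache{}
	load := func(n string) (*user, error) { return &user{name: n}, nil }
	_ = c.withUser("alice", load, func(u *user) error { fmt.Println("loaded", u.name); return nil })
	_ = c.withUser("alice", load, func(u *user) error { fmt.Println("cached", u.name); return nil })
}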
160,517 | keybase/client | go/libkb/full_self_cacher.go | HandleUserChanged | func (m *CachedFullSelf) HandleUserChanged(u keybase1.UID) error {
m.Lock()
defer m.Unlock()
if m.me != nil && m.me.GetUID().Equal(u) {
m.G().Log.Debug("| CachedFullSelf#HandleUserChanged: Invalidating me for UID=%s", u)
m.me = nil
} else {
m.G().Log.Debug("| CachedFullSelf#HandleUserChanged: Ignoring cache bust for UID=%s", u)
}
return nil
} | go | func (m *CachedFullSelf) HandleUserChanged(u keybase1.UID) error {
m.Lock()
defer m.Unlock()
if m.me != nil && m.me.GetUID().Equal(u) {
m.G().Log.Debug("| CachedFullSelf#HandleUserChanged: Invalidating me for UID=%s", u)
m.me = nil
} else {
m.G().Log.Debug("| CachedFullSelf#HandleUserChanged: Ignoring cache bust for UID=%s", u)
}
return nil
} | [
"func",
"(",
"m",
"*",
"CachedFullSelf",
")",
"HandleUserChanged",
"(",
"u",
"keybase1",
".",
"UID",
")",
"error",
"{",
"m",
".",
"Lock",
"(",
")",
"\n",
"defer",
"m",
".",
"Unlock",
"(",
")",
"\n",
"if",
"m",
".",
"me",
"!=",
"nil",
"&&",
"m",
".",
"me",
".",
"GetUID",
"(",
")",
".",
"Equal",
"(",
"u",
")",
"{",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"Debug",
"(",
"\"",
"\"",
",",
"u",
")",
"\n",
"m",
".",
"me",
"=",
"nil",
"\n",
"}",
"else",
"{",
"m",
".",
"G",
"(",
")",
".",
"Log",
".",
"Debug",
"(",
"\"",
"\"",
",",
"u",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // HandleUserChanged clears the cached self user if it's the UID of the self user. | [
"HandleUserChanged",
"clears",
"the",
"cached",
"self",
"user",
"if",
"it",
"s",
"the",
"UID",
"of",
"the",
"self",
"user",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/full_self_cacher.go#L251-L261 |
160,518 | keybase/client | go/libkb/full_self_cacher.go | OnLogin | func (m *CachedFullSelf) OnLogin(mctx MetaContext) error {
m.Lock()
defer m.Unlock()
if m.me != nil && !m.me.GetUID().Equal(m.G().GetMyUID()) {
m.me = nil
}
return nil
} | go | func (m *CachedFullSelf) OnLogin(mctx MetaContext) error {
m.Lock()
defer m.Unlock()
if m.me != nil && !m.me.GetUID().Equal(m.G().GetMyUID()) {
m.me = nil
}
return nil
} | [
"func",
"(",
"m",
"*",
"CachedFullSelf",
")",
"OnLogin",
"(",
"mctx",
"MetaContext",
")",
"error",
"{",
"m",
".",
"Lock",
"(",
")",
"\n",
"defer",
"m",
".",
"Unlock",
"(",
")",
"\n",
"if",
"m",
".",
"me",
"!=",
"nil",
"&&",
"!",
"m",
".",
"me",
".",
"GetUID",
"(",
")",
".",
"Equal",
"(",
"m",
".",
"G",
"(",
")",
".",
"GetMyUID",
"(",
")",
")",
"{",
"m",
".",
"me",
"=",
"nil",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // OnLogin clears the cached self user if it differs from what's already cached. | [
"OnLogin",
"clears",
"the",
"cached",
"self",
"user",
"if",
"it",
"differs",
"from",
"what",
"s",
"already",
"cached",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/full_self_cacher.go#L264-L271 |
160,519 | keybase/client | go/kbfs/libfs/fs_notifications.go | QueueNotification | func (f *FSNotifications) QueueNotification(fn func()) {
f.notificationMutex.RLock()
defer f.notificationMutex.RUnlock()
if f.notifications == nil {
f.log.Warning("Ignoring notification, no available channel")
return
}
f.notificationGroup.Add(1)
f.notifications.In() <- fn
} | go | func (f *FSNotifications) QueueNotification(fn func()) {
f.notificationMutex.RLock()
defer f.notificationMutex.RUnlock()
if f.notifications == nil {
f.log.Warning("Ignoring notification, no available channel")
return
}
f.notificationGroup.Add(1)
f.notifications.In() <- fn
} | [
"func",
"(",
"f",
"*",
"FSNotifications",
")",
"QueueNotification",
"(",
"fn",
"func",
"(",
")",
")",
"{",
"f",
".",
"notificationMutex",
".",
"RLock",
"(",
")",
"\n",
"defer",
"f",
".",
"notificationMutex",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"f",
".",
"notifications",
"==",
"nil",
"{",
"f",
".",
"log",
".",
"Warning",
"(",
"\"",
"\"",
")",
"\n",
"return",
"\n",
"}",
"\n",
"f",
".",
"notificationGroup",
".",
"Add",
"(",
"1",
")",
"\n",
"f",
".",
"notifications",
".",
"In",
"(",
")",
"<-",
"fn",
"\n",
"}"
] | // QueueNotification queues a notification, which must be
// goroutine-safe. | [
"QueueNotification",
"queues",
"a",
"notification",
"which",
"must",
"be",
"goroutine",
"-",
"safe",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/fs_notifications.go#L73-L82 |
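QueueNotification above guards the channel handle with a read lock, drops the notification with a warning when no processor has been launched, and counts in-flight work with a WaitGroup. A small self-contained sketch of the same shape, with hypothetical names and a bounded buffer standing in for the real unbounded queue:

package main

import (
	"fmt"
	"sync"
)

type notifier struct {
	mu    sync.RWMutex
	queue chan func()
	wg    sync.WaitGroup
}

// enqueue follows QueueNotification above: take a read lock on the channel
// handle, drop with a warning if no processor was launched, otherwise count
// the work and send it.
func (n *notifier) enqueue(fn func()) {
	n.mu.RLock()
	defer n.mu.RUnlock()
	if n.queue == nil {
		fmt.Println("ignoring notification, no available channel")
		return
	}
	n.wg.Add(1)
	n.queue <- fn
}

func main() {
	n := &notifier{queue: make(chan func(), 16)} // bounded here; the real code uses an unbounded queue
	go func() {
		for fn := range n.queue {
			fn()
			n.wg.Done()
		}
	}()
	n.enqueue(func() { fmt.Println("notified") })
	n.wg.Wait()
}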
160,520 | keybase/client | go/kbfs/libfs/fs_notifications.go | LaunchProcessor | func (f *FSNotifications) LaunchProcessor(ctx context.Context) {
f.notificationMutex.Lock()
defer f.notificationMutex.Unlock()
f.log.CDebugf(ctx, "Launching notifications channel")
// The notifications channel needs to have "infinite" capacity,
// because otherwise we risk a deadlock between libkbfs and
// libfs. The notification processor sends invalidates to the
// kernel. In osxfuse 3.X, the kernel can call back into userland
// during an invalidate (a GetAttr()) call, which in turn takes
// locks within libkbfs. So if libkbfs ever gets blocked while
// trying to enqueue a notification (while it is holding locks),
// we could have a deadlock. Yes, if there are too many
// outstanding notifications we'll run out of memory and crash,
// but otherwise we risk deadlock. Which is worse?
f.notifications = channels.NewInfiniteChannel()
// start the notification processor
go f.processNotifications(ctx)
} | go | func (f *FSNotifications) LaunchProcessor(ctx context.Context) {
f.notificationMutex.Lock()
defer f.notificationMutex.Unlock()
f.log.CDebugf(ctx, "Launching notifications channel")
// The notifications channel needs to have "infinite" capacity,
// because otherwise we risk a deadlock between libkbfs and
// libfs. The notification processor sends invalidates to the
// kernel. In osxfuse 3.X, the kernel can call back into userland
// during an invalidate (a GetAttr()) call, which in turn takes
// locks within libkbfs. So if libkbfs ever gets blocked while
// trying to enqueue a notification (while it is holding locks),
// we could have a deadlock. Yes, if there are too many
// outstanding notifications we'll run out of memory and crash,
// but otherwise we risk deadlock. Which is worse?
f.notifications = channels.NewInfiniteChannel()
// start the notification processor
go f.processNotifications(ctx)
} | [
"func",
"(",
"f",
"*",
"FSNotifications",
")",
"LaunchProcessor",
"(",
"ctx",
"context",
".",
"Context",
")",
"{",
"f",
".",
"notificationMutex",
".",
"Lock",
"(",
")",
"\n",
"defer",
"f",
".",
"notificationMutex",
".",
"Unlock",
"(",
")",
"\n\n",
"f",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"// The notifications channel needs to have \"infinite\" capacity,",
"// because otherwise we risk a deadlock between libkbfs and",
"// libfs. The notification processor sends invalidates to the",
"// kernel. In osxfuse 3.X, the kernel can call back into userland",
"// during an invalidate (a GetAttr()) call, which in turn takes",
"// locks within libkbfs. So if libkbfs ever gets blocked while",
"// trying to enqueue a notification (while it is holding locks),",
"// we could have a deadlock. Yes, if there are too many",
"// outstanding notifications we'll run out of memory and crash,",
"// but otherwise we risk deadlock. Which is worse?",
"f",
".",
"notifications",
"=",
"channels",
".",
"NewInfiniteChannel",
"(",
")",
"\n\n",
"// start the notification processor",
"go",
"f",
".",
"processNotifications",
"(",
"ctx",
")",
"\n",
"}"
] | // LaunchProcessor launches the notification processor. | [
"LaunchProcessor",
"launches",
"the",
"notification",
"processor",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/fs_notifications.go#L85-L104 |
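The comment in LaunchProcessor above explains why the queue must be unbounded: the consumer can call back into code that holds the producer's locks, so a blocking send could deadlock. A standalone sketch of an unbounded buffer between producer and consumer, standard library only (the real code uses channels.NewInfiniteChannel):

package main

import "fmt"

// unbounded decouples senders from the consumer so that a send never blocks,
// which is the property LaunchProcessor gets from an "infinite" channel above.
func unbounded() (chan<- string, <-chan string) {
	in := make(chan string)
	out := make(chan string)
	go func() {
		var buf []string
		src := in
		for src != nil || len(buf) > 0 {
			var send chan string
			var next string
			if len(buf) > 0 {
				send, next = out, buf[0]
			}
			select {
			case v, ok := <-src:
				if !ok {
					src = nil // input closed; keep draining buf
					continue
				}
				buf = append(buf, v)
			case send <- next: // nil channel when buf is empty, so never ready then
				buf = buf[1:]
			}
		}
		close(out)
	}()
	return in, out
}

func main() {
	in, out := unbounded()
	for i := 0; i < 3; i++ {
		in <- fmt.Sprintf("invalidate %d", i) // never blocks, even with no reader yet
	}
	close(in)
	for msg := range out {
		fmt.Println(msg)
	}
}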
160,521 | keybase/client | go/phonenumbers/user.go | VerifyPhoneNumber | func VerifyPhoneNumber(mctx libkb.MetaContext, phoneNumber keybase1.PhoneNumber, code string) error {
payload := make(libkb.JSONPayload)
payload["phone_number"] = phoneNumber
payload["verification_code"] = code
arg := libkb.APIArg{
Endpoint: "user/phone_number_verify",
JSONPayload: payload,
SessionType: libkb.APISessionTypeREQUIRED,
}
_, err := mctx.G().API.PostJSON(mctx, arg)
return err
} | go | func VerifyPhoneNumber(mctx libkb.MetaContext, phoneNumber keybase1.PhoneNumber, code string) error {
payload := make(libkb.JSONPayload)
payload["phone_number"] = phoneNumber
payload["verification_code"] = code
arg := libkb.APIArg{
Endpoint: "user/phone_number_verify",
JSONPayload: payload,
SessionType: libkb.APISessionTypeREQUIRED,
}
_, err := mctx.G().API.PostJSON(mctx, arg)
return err
} | [
"func",
"VerifyPhoneNumber",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"phoneNumber",
"keybase1",
".",
"PhoneNumber",
",",
"code",
"string",
")",
"error",
"{",
"payload",
":=",
"make",
"(",
"libkb",
".",
"JSONPayload",
")",
"\n",
"payload",
"[",
"\"",
"\"",
"]",
"=",
"phoneNumber",
"\n",
"payload",
"[",
"\"",
"\"",
"]",
"=",
"code",
"\n\n",
"arg",
":=",
"libkb",
".",
"APIArg",
"{",
"Endpoint",
":",
"\"",
"\"",
",",
"JSONPayload",
":",
"payload",
",",
"SessionType",
":",
"libkb",
".",
"APISessionTypeREQUIRED",
",",
"}",
"\n\n",
"_",
",",
"err",
":=",
"mctx",
".",
"G",
"(",
")",
".",
"API",
".",
"PostJSON",
"(",
"mctx",
",",
"arg",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // VerifyPhoneNumber calls API to verify previously added phone number using
// verification code. | [
"VerifyPhoneNumber",
"calls",
"API",
"to",
"verify",
"previously",
"added",
"phone",
"number",
"using",
"verification",
"code",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/phonenumbers/user.go#L29-L42 |
160,522 | keybase/client | go/phonenumbers/user.go | GetPhoneNumbers | func GetPhoneNumbers(mctx libkb.MetaContext) ([]keybase1.UserPhoneNumber, error) {
arg := libkb.APIArg{
Endpoint: "user/phone_numbers",
SessionType: libkb.APISessionTypeREQUIRED,
}
var resp phoneNumbersResponse
err := mctx.G().API.GetDecode(mctx, arg, &resp)
if err != nil {
return nil, err
}
return resp.PhoneNumbers, nil
} | go | func GetPhoneNumbers(mctx libkb.MetaContext) ([]keybase1.UserPhoneNumber, error) {
arg := libkb.APIArg{
Endpoint: "user/phone_numbers",
SessionType: libkb.APISessionTypeREQUIRED,
}
var resp phoneNumbersResponse
err := mctx.G().API.GetDecode(mctx, arg, &resp)
if err != nil {
return nil, err
}
return resp.PhoneNumbers, nil
} | [
"func",
"GetPhoneNumbers",
"(",
"mctx",
"libkb",
".",
"MetaContext",
")",
"(",
"[",
"]",
"keybase1",
".",
"UserPhoneNumber",
",",
"error",
")",
"{",
"arg",
":=",
"libkb",
".",
"APIArg",
"{",
"Endpoint",
":",
"\"",
"\"",
",",
"SessionType",
":",
"libkb",
".",
"APISessionTypeREQUIRED",
",",
"}",
"\n",
"var",
"resp",
"phoneNumbersResponse",
"\n",
"err",
":=",
"mctx",
".",
"G",
"(",
")",
".",
"API",
".",
"GetDecode",
"(",
"mctx",
",",
"arg",
",",
"&",
"resp",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"resp",
".",
"PhoneNumbers",
",",
"nil",
"\n",
"}"
] | // GetPhoneNumbers calls API to fetch list of phone numbers attached to
// currently logged user. | [
"GetPhoneNumbers",
"calls",
"API",
"to",
"fetch",
"list",
"of",
"phone",
"numbers",
"attached",
"to",
"currently",
"logged",
"user",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/phonenumbers/user.go#L51-L62 |
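Taken together, the two phonenumbers entries above verify a number and then list the account's numbers. A hedged usage sketch follows; the import paths and the way a libkb.MetaContext is obtained are assumptions based on the repo layout shown in the records, not something the dataset specifies:

// Package phoneexample is an illustrative caller of the two functions above.
package phoneexample

import (
	"fmt"

	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/phonenumbers"
	keybase1 "github.com/keybase/client/go/protocol/keybase1"
)

// ConfirmAndList verifies a previously added phone number with the code the
// user received, then fetches the numbers the server has for this account.
// Constructing the MetaContext is left to the caller.
func ConfirmAndList(mctx libkb.MetaContext, number keybase1.PhoneNumber, code string) error {
	if err := phonenumbers.VerifyPhoneNumber(mctx, number, code); err != nil {
		return fmt.Errorf("verification failed: %w", err)
	}
	nums, err := phonenumbers.GetPhoneNumbers(mctx)
	if err != nil {
		return err
	}
	for _, n := range nums {
		fmt.Printf("%+v\n", n)
	}
	return nil
}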
160,523 | keybase/client | go/protocol/keybase1/paperprovision.go | PaperProvision | func (c PaperprovisionClient) PaperProvision(ctx context.Context, __arg PaperProvisionArg) (err error) {
err = c.Cli.Call(ctx, "keybase.1.paperprovision.paperProvision", []interface{}{__arg}, nil)
return
} | go | func (c PaperprovisionClient) PaperProvision(ctx context.Context, __arg PaperProvisionArg) (err error) {
err = c.Cli.Call(ctx, "keybase.1.paperprovision.paperProvision", []interface{}{__arg}, nil)
return
} | [
"func",
"(",
"c",
"PaperprovisionClient",
")",
"PaperProvision",
"(",
"ctx",
"context",
".",
"Context",
",",
"__arg",
"PaperProvisionArg",
")",
"(",
"err",
"error",
")",
"{",
"err",
"=",
"c",
".",
"Cli",
".",
"Call",
"(",
"ctx",
",",
"\"",
"\"",
",",
"[",
"]",
"interface",
"{",
"}",
"{",
"__arg",
"}",
",",
"nil",
")",
"\n",
"return",
"\n",
"}"
] | // Performs paper provision.
// If the current device isn't provisioned, this function will
// provision it. | [
"Performs",
"paper",
"provision",
".",
"If",
"the",
"current",
"device",
"isn",
"t",
"provisioned",
"this",
"function",
"will",
"provision",
"it",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/paperprovision.go#L55-L58 |
160,524 | keybase/client | go/install/libnativeinstaller/app.go | AppBundleForPath | func AppBundleForPath() (string, error) {
path, err := utils.BinPath()
if err != nil {
return "", err
}
if path == "" {
return "", errors.New("Could not get executable name")
}
paths := strings.SplitN(path, ".app", 2)
// If no match, return ""
if len(paths) <= 1 {
return "", fmt.Errorf("Unable to resolve bundle for valid path: %s", path)
}
appPath := paths[0] + ".app"
if exists, _ := libkb.FileExists(appPath); !exists {
return "", fmt.Errorf("App not found: %s", appPath)
}
return appPath, nil
} | go | func AppBundleForPath() (string, error) {
path, err := utils.BinPath()
if err != nil {
return "", err
}
if path == "" {
return "", errors.New("Could not get executable name")
}
paths := strings.SplitN(path, ".app", 2)
// If no match, return ""
if len(paths) <= 1 {
return "", fmt.Errorf("Unable to resolve bundle for valid path: %s", path)
}
appPath := paths[0] + ".app"
if exists, _ := libkb.FileExists(appPath); !exists {
return "", fmt.Errorf("App not found: %s", appPath)
}
return appPath, nil
} | [
"func",
"AppBundleForPath",
"(",
")",
"(",
"string",
",",
"error",
")",
"{",
"path",
",",
"err",
":=",
"utils",
".",
"BinPath",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"err",
"\n",
"}",
"\n",
"if",
"path",
"==",
"\"",
"\"",
"{",
"return",
"\"",
"\"",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"paths",
":=",
"strings",
".",
"SplitN",
"(",
"path",
",",
"\"",
"\"",
",",
"2",
")",
"\n",
"// If no match, return \"\"",
"if",
"len",
"(",
"paths",
")",
"<=",
"1",
"{",
"return",
"\"",
"\"",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"path",
")",
"\n",
"}",
"\n\n",
"appPath",
":=",
"paths",
"[",
"0",
"]",
"+",
"\"",
"\"",
"\n",
"if",
"exists",
",",
"_",
":=",
"libkb",
".",
"FileExists",
"(",
"appPath",
")",
";",
"!",
"exists",
"{",
"return",
"\"",
"\"",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"appPath",
")",
"\n",
"}",
"\n\n",
"return",
"appPath",
",",
"nil",
"\n",
"}"
] | // AppBundleForPath returns path to app bundle | [
"AppBundleForPath",
"returns",
"path",
"to",
"app",
"bundle"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/install/libnativeinstaller/app.go#L38-L58 |
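The core of AppBundleForPath above is a string split on ".app". A standalone sketch of just that path manipulation, without the on-disk existence check:

package main

import (
	"fmt"
	"strings"
)

// bundleForExecutable trims an executable path back to its enclosing .app
// bundle, the same string manipulation AppBundleForPath performs above.
func bundleForExecutable(path string) (string, error) {
	parts := strings.SplitN(path, ".app", 2)
	if len(parts) <= 1 {
		return "", fmt.Errorf("no .app bundle in path: %s", path)
	}
	return parts[0] + ".app", nil
}

func main() {
	p, err := bundleForExecutable("/Applications/Keybase.app/Contents/MacOS/Keybase")
	fmt.Println(p, err) // /Applications/Keybase.app <nil>
}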
160,525 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | NewReporterKBPKI | func NewReporterKBPKI(config Config, maxErrors, bufSize int) *ReporterKBPKI {
log := config.MakeLogger("")
r := &ReporterKBPKI{
ReporterSimple: NewReporterSimple(config.Clock(), maxErrors),
config: config,
log: log,
vlog: config.MakeVLogger(log),
notifyBuffer: make(chan *keybase1.FSNotification, bufSize),
onlineStatusBuffer: make(chan bool, bufSize),
notifyPathBuffer: make(chan string, 1),
notifySyncBuffer: make(chan *keybase1.FSPathSyncStatus, 1),
notifyOverallSyncBuffer: make(chan keybase1.FolderSyncStatus, 1),
shutdownCh: make(chan struct{}),
}
var ctx context.Context
ctx, r.canceler = context.WithCancel(context.Background())
go r.send(ctx)
return r
} | go | func NewReporterKBPKI(config Config, maxErrors, bufSize int) *ReporterKBPKI {
log := config.MakeLogger("")
r := &ReporterKBPKI{
ReporterSimple: NewReporterSimple(config.Clock(), maxErrors),
config: config,
log: log,
vlog: config.MakeVLogger(log),
notifyBuffer: make(chan *keybase1.FSNotification, bufSize),
onlineStatusBuffer: make(chan bool, bufSize),
notifyPathBuffer: make(chan string, 1),
notifySyncBuffer: make(chan *keybase1.FSPathSyncStatus, 1),
notifyOverallSyncBuffer: make(chan keybase1.FolderSyncStatus, 1),
shutdownCh: make(chan struct{}),
}
var ctx context.Context
ctx, r.canceler = context.WithCancel(context.Background())
go r.send(ctx)
return r
} | [
"func",
"NewReporterKBPKI",
"(",
"config",
"Config",
",",
"maxErrors",
",",
"bufSize",
"int",
")",
"*",
"ReporterKBPKI",
"{",
"log",
":=",
"config",
".",
"MakeLogger",
"(",
"\"",
"\"",
")",
"\n",
"r",
":=",
"&",
"ReporterKBPKI",
"{",
"ReporterSimple",
":",
"NewReporterSimple",
"(",
"config",
".",
"Clock",
"(",
")",
",",
"maxErrors",
")",
",",
"config",
":",
"config",
",",
"log",
":",
"log",
",",
"vlog",
":",
"config",
".",
"MakeVLogger",
"(",
"log",
")",
",",
"notifyBuffer",
":",
"make",
"(",
"chan",
"*",
"keybase1",
".",
"FSNotification",
",",
"bufSize",
")",
",",
"onlineStatusBuffer",
":",
"make",
"(",
"chan",
"bool",
",",
"bufSize",
")",
",",
"notifyPathBuffer",
":",
"make",
"(",
"chan",
"string",
",",
"1",
")",
",",
"notifySyncBuffer",
":",
"make",
"(",
"chan",
"*",
"keybase1",
".",
"FSPathSyncStatus",
",",
"1",
")",
",",
"notifyOverallSyncBuffer",
":",
"make",
"(",
"chan",
"keybase1",
".",
"FolderSyncStatus",
",",
"1",
")",
",",
"shutdownCh",
":",
"make",
"(",
"chan",
"struct",
"{",
"}",
")",
",",
"}",
"\n",
"var",
"ctx",
"context",
".",
"Context",
"\n",
"ctx",
",",
"r",
".",
"canceler",
"=",
"context",
".",
"WithCancel",
"(",
"context",
".",
"Background",
"(",
")",
")",
"\n",
"go",
"r",
".",
"send",
"(",
"ctx",
")",
"\n",
"return",
"r",
"\n",
"}"
] | // NewReporterKBPKI creates a new ReporterKBPKI. | [
"NewReporterKBPKI",
"creates",
"a",
"new",
"ReporterKBPKI",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L99-L117 |
160,526 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | NotifyOverallSyncStatus | func (r *ReporterKBPKI) NotifyOverallSyncStatus(
ctx context.Context, status keybase1.FolderSyncStatus) {
select {
case r.notifyOverallSyncBuffer <- status:
default:
// If this represents a "complete" status, we can't drop it.
// Instead launch a goroutine to make sure it gets sent
// eventually.
if status.PrefetchStatus == keybase1.PrefetchStatus_COMPLETE {
go func() {
select {
case r.notifyOverallSyncBuffer <- status:
case <-r.shutdownCh:
}
}()
} else {
r.vlog.CLogf(
ctx, libkb.VLog1,
"ReporterKBPKI: notify overall sync buffer dropping %+v",
status)
}
}
} | go | func (r *ReporterKBPKI) NotifyOverallSyncStatus(
ctx context.Context, status keybase1.FolderSyncStatus) {
select {
case r.notifyOverallSyncBuffer <- status:
default:
// If this represents a "complete" status, we can't drop it.
// Instead launch a goroutine to make sure it gets sent
// eventually.
if status.PrefetchStatus == keybase1.PrefetchStatus_COMPLETE {
go func() {
select {
case r.notifyOverallSyncBuffer <- status:
case <-r.shutdownCh:
}
}()
} else {
r.vlog.CLogf(
ctx, libkb.VLog1,
"ReporterKBPKI: notify overall sync buffer dropping %+v",
status)
}
}
} | [
"func",
"(",
"r",
"*",
"ReporterKBPKI",
")",
"NotifyOverallSyncStatus",
"(",
"ctx",
"context",
".",
"Context",
",",
"status",
"keybase1",
".",
"FolderSyncStatus",
")",
"{",
"select",
"{",
"case",
"r",
".",
"notifyOverallSyncBuffer",
"<-",
"status",
":",
"default",
":",
"// If this represents a \"complete\" status, we can't drop it.",
"// Instead launch a goroutine to make sure it gets sent",
"// eventually.",
"if",
"status",
".",
"PrefetchStatus",
"==",
"keybase1",
".",
"PrefetchStatus_COMPLETE",
"{",
"go",
"func",
"(",
")",
"{",
"select",
"{",
"case",
"r",
".",
"notifyOverallSyncBuffer",
"<-",
"status",
":",
"case",
"<-",
"r",
".",
"shutdownCh",
":",
"}",
"\n",
"}",
"(",
")",
"\n",
"}",
"else",
"{",
"r",
".",
"vlog",
".",
"CLogf",
"(",
"ctx",
",",
"libkb",
".",
"VLog1",
",",
"\"",
"\"",
",",
"status",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // NotifyOverallSyncStatus implements the Reporter interface for ReporterKBPKI. | [
"NotifyOverallSyncStatus",
"implements",
"the",
"Reporter",
"interface",
"for",
"ReporterKBPKI",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L284-L306 |
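NotifyOverallSyncStatus above tries a non-blocking send and, only for statuses that must not be lost, falls back to a goroutine that waits for buffer space or shutdown. A self-contained sketch of that pattern with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// deliver attempts a non-blocking send; messages marked mustDeliver are
// retried in the background until the buffer drains or shutdown closes,
// which is the shape of NotifyOverallSyncStatus above.
func deliver(buf chan string, shutdown chan struct{}, msg string, mustDeliver bool, wg *sync.WaitGroup) {
	select {
	case buf <- msg:
	default:
		if !mustDeliver {
			fmt.Println("dropping", msg)
			return
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			select {
			case buf <- msg:
			case <-shutdown:
			}
		}()
	}
}

func main() {
	buf := make(chan string, 1)
	shutdown := make(chan struct{})
	var wg sync.WaitGroup
	deliver(buf, shutdown, "status A", false, &wg)
	deliver(buf, shutdown, "status B (complete)", true, &wg) // buffer full: waits in background
	fmt.Println(<-buf)                                       // frees space for the waiter
	fmt.Println(<-buf)
	wg.Wait()
	close(shutdown)
}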
160,527 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | Shutdown | func (r *ReporterKBPKI) Shutdown() {
r.canceler()
close(r.shutdownCh)
close(r.notifyBuffer)
close(r.onlineStatusBuffer)
close(r.notifySyncBuffer)
close(r.notifyOverallSyncBuffer)
} | go | func (r *ReporterKBPKI) Shutdown() {
r.canceler()
close(r.shutdownCh)
close(r.notifyBuffer)
close(r.onlineStatusBuffer)
close(r.notifySyncBuffer)
close(r.notifyOverallSyncBuffer)
} | [
"func",
"(",
"r",
"*",
"ReporterKBPKI",
")",
"Shutdown",
"(",
")",
"{",
"r",
".",
"canceler",
"(",
")",
"\n",
"close",
"(",
"r",
".",
"shutdownCh",
")",
"\n",
"close",
"(",
"r",
".",
"notifyBuffer",
")",
"\n",
"close",
"(",
"r",
".",
"onlineStatusBuffer",
")",
"\n",
"close",
"(",
"r",
".",
"notifySyncBuffer",
")",
"\n",
"close",
"(",
"r",
".",
"notifyOverallSyncBuffer",
")",
"\n",
"}"
] | // Shutdown implements the Reporter interface for ReporterKBPKI. | [
"Shutdown",
"implements",
"the",
"Reporter",
"interface",
"for",
"ReporterKBPKI",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L309-L316 |
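NewReporterKBPKI and Shutdown above bracket a background sender: the constructor wires a cancelable context into a goroutine, and shutdown cancels it and closes the buffers. A minimal standalone sketch of that lifecycle:

package main

import (
	"context"
	"fmt"
	"time"
)

type worker struct {
	cancel context.CancelFunc
	jobs   chan string
}

// newWorker starts the background loop with a cancelable context, as
// NewReporterKBPKI does above; shutdown cancels it and closes the buffer,
// mirroring Shutdown.
func newWorker() *worker {
	ctx, cancel := context.WithCancel(context.Background())
	w := &worker{cancel: cancel, jobs: make(chan string, 4)}
	go w.run(ctx)
	return w
}

func (w *worker) run(ctx context.Context) {
	for {
		select {
		case j, ok := <-w.jobs:
			if !ok {
				return
			}
			fmt.Println("processed", j)
		case <-ctx.Done():
			return
		}
	}
}

func (w *worker) shutdown() {
	w.cancel()
	close(w.jobs)
}

func main() {
	w := newWorker()
	w.jobs <- "notification"
	time.Sleep(20 * time.Millisecond) // give the loop a moment in this sketch
	w.shutdown()
}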
160,528 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | send | func (r *ReporterKBPKI) send(ctx context.Context) {
sendTicker := time.NewTicker(reporterSendInterval)
defer sendTicker.Stop()
for {
select {
case notification, ok := <-r.notifyBuffer:
if !ok {
return
}
nt := notification.NotificationType
st := notification.StatusCode
// Only these notifications are used in frontend:
// https://github.com/keybase/client/blob/0d63795105f64289ba4ef20fbefe56aad91bc7e9/shared/util/kbfs-notifications.js#L142-L154
if nt != keybase1.FSNotificationType_REKEYING &&
nt != keybase1.FSNotificationType_INITIALIZED &&
nt != keybase1.FSNotificationType_CONNECTION &&
st != keybase1.FSStatusCode_ERROR {
continue
}
// Send them right away rather than staging it and waiting for the
// ticker, since each of them can be distinct from each other.
if err := r.config.KeybaseService().Notify(ctx,
notification); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"notification: %s", err)
}
case online, ok := <-r.onlineStatusBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifyOnlineStatusChanged(ctx, online); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"NotifyOnlineStatusChanged: %s", err)
}
case <-sendTicker.C:
select {
case path, ok := <-r.notifyPathBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifyPathUpdated(
ctx, path); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"notification for path: %s", err)
}
default:
}
select {
case status, ok := <-r.notifySyncBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifySyncStatus(ctx,
status); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"sync status: %s", err)
}
default:
}
select {
case status, ok := <-r.notifyOverallSyncBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifyOverallSyncStatus(
ctx, status); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"overall sync status: %s", err)
}
default:
}
case <-ctx.Done():
return
}
}
} | go | func (r *ReporterKBPKI) send(ctx context.Context) {
sendTicker := time.NewTicker(reporterSendInterval)
defer sendTicker.Stop()
for {
select {
case notification, ok := <-r.notifyBuffer:
if !ok {
return
}
nt := notification.NotificationType
st := notification.StatusCode
// Only these notifications are used in frontend:
// https://github.com/keybase/client/blob/0d63795105f64289ba4ef20fbefe56aad91bc7e9/shared/util/kbfs-notifications.js#L142-L154
if nt != keybase1.FSNotificationType_REKEYING &&
nt != keybase1.FSNotificationType_INITIALIZED &&
nt != keybase1.FSNotificationType_CONNECTION &&
st != keybase1.FSStatusCode_ERROR {
continue
}
// Send them right away rather than staging it and waiting for the
// ticker, since each of them can be distinct from each other.
if err := r.config.KeybaseService().Notify(ctx,
notification); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"notification: %s", err)
}
case online, ok := <-r.onlineStatusBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifyOnlineStatusChanged(ctx, online); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"NotifyOnlineStatusChanged: %s", err)
}
case <-sendTicker.C:
select {
case path, ok := <-r.notifyPathBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifyPathUpdated(
ctx, path); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"notification for path: %s", err)
}
default:
}
select {
case status, ok := <-r.notifySyncBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifySyncStatus(ctx,
status); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"sync status: %s", err)
}
default:
}
select {
case status, ok := <-r.notifyOverallSyncBuffer:
if !ok {
return
}
if err := r.config.KeybaseService().NotifyOverallSyncStatus(
ctx, status); err != nil {
r.log.CDebugf(ctx, "ReporterDaemon: error sending "+
"overall sync status: %s", err)
}
default:
}
case <-ctx.Done():
return
}
}
} | [
"func",
"(",
"r",
"*",
"ReporterKBPKI",
")",
"send",
"(",
"ctx",
"context",
".",
"Context",
")",
"{",
"sendTicker",
":=",
"time",
".",
"NewTicker",
"(",
"reporterSendInterval",
")",
"\n",
"defer",
"sendTicker",
".",
"Stop",
"(",
")",
"\n\n",
"for",
"{",
"select",
"{",
"case",
"notification",
",",
"ok",
":=",
"<-",
"r",
".",
"notifyBuffer",
":",
"if",
"!",
"ok",
"{",
"return",
"\n",
"}",
"\n",
"nt",
":=",
"notification",
".",
"NotificationType",
"\n",
"st",
":=",
"notification",
".",
"StatusCode",
"\n",
"// Only these notifications are used in frontend:",
"// https://github.com/keybase/client/blob/0d63795105f64289ba4ef20fbefe56aad91bc7e9/shared/util/kbfs-notifications.js#L142-L154",
"if",
"nt",
"!=",
"keybase1",
".",
"FSNotificationType_REKEYING",
"&&",
"nt",
"!=",
"keybase1",
".",
"FSNotificationType_INITIALIZED",
"&&",
"nt",
"!=",
"keybase1",
".",
"FSNotificationType_CONNECTION",
"&&",
"st",
"!=",
"keybase1",
".",
"FSStatusCode_ERROR",
"{",
"continue",
"\n",
"}",
"\n",
"// Send them right away rather than staging it and waiting for the",
"// ticker, since each of them can be distinct from each other.",
"if",
"err",
":=",
"r",
".",
"config",
".",
"KeybaseService",
"(",
")",
".",
"Notify",
"(",
"ctx",
",",
"notification",
")",
";",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"case",
"online",
",",
"ok",
":=",
"<-",
"r",
".",
"onlineStatusBuffer",
":",
"if",
"!",
"ok",
"{",
"return",
"\n",
"}",
"\n",
"if",
"err",
":=",
"r",
".",
"config",
".",
"KeybaseService",
"(",
")",
".",
"NotifyOnlineStatusChanged",
"(",
"ctx",
",",
"online",
")",
";",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"case",
"<-",
"sendTicker",
".",
"C",
":",
"select",
"{",
"case",
"path",
",",
"ok",
":=",
"<-",
"r",
".",
"notifyPathBuffer",
":",
"if",
"!",
"ok",
"{",
"return",
"\n",
"}",
"\n",
"if",
"err",
":=",
"r",
".",
"config",
".",
"KeybaseService",
"(",
")",
".",
"NotifyPathUpdated",
"(",
"ctx",
",",
"path",
")",
";",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"default",
":",
"}",
"\n\n",
"select",
"{",
"case",
"status",
",",
"ok",
":=",
"<-",
"r",
".",
"notifySyncBuffer",
":",
"if",
"!",
"ok",
"{",
"return",
"\n",
"}",
"\n",
"if",
"err",
":=",
"r",
".",
"config",
".",
"KeybaseService",
"(",
")",
".",
"NotifySyncStatus",
"(",
"ctx",
",",
"status",
")",
";",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"default",
":",
"}",
"\n\n",
"select",
"{",
"case",
"status",
",",
"ok",
":=",
"<-",
"r",
".",
"notifyOverallSyncBuffer",
":",
"if",
"!",
"ok",
"{",
"return",
"\n",
"}",
"\n",
"if",
"err",
":=",
"r",
".",
"config",
".",
"KeybaseService",
"(",
")",
".",
"NotifyOverallSyncStatus",
"(",
"ctx",
",",
"status",
")",
";",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
"+",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"default",
":",
"}",
"\n",
"case",
"<-",
"ctx",
".",
"Done",
"(",
")",
":",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // send takes notifications out of notifyBuffer, notifyPathBuffer, and
// notifySyncBuffer and sends them to the keybase daemon. | [
"send",
"takes",
"notifications",
"out",
"of",
"notifyBuffer",
"notifyPathBuffer",
"and",
"notifySyncBuffer",
"and",
"sends",
"them",
"to",
"the",
"keybase",
"daemon",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L322-L400 |
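The send loop above forwards urgent notifications immediately but drains the coalescing one-slot buffers only on a timer tick, so bursts collapse into a single delivery. A standalone sketch of that structure:

package main

import (
	"fmt"
	"time"
)

// drainLoop forwards urgent items as they arrive and polls the coalesced
// buffer only on a ticker, mirroring the shape of ReporterKBPKI.send above.
func drainLoop(urgent <-chan string, coalesced <-chan string, done <-chan struct{}) {
	tick := time.NewTicker(10 * time.Millisecond)
	defer tick.Stop()
	for {
		select {
		case msg, ok := <-urgent:
			if !ok {
				return
			}
			fmt.Println("now:", msg)
		case <-tick.C:
			select {
			case msg := <-coalesced:
				fmt.Println("tick:", msg)
			default: // nothing pending this tick
			}
		case <-done:
			return
		}
	}
}

func main() {
	urgent := make(chan string, 4)
	coalesced := make(chan string, 1)
	done := make(chan struct{})
	urgent <- "error notification"
	coalesced <- "path updated"
	go drainLoop(urgent, coalesced, done)
	time.Sleep(50 * time.Millisecond)
	close(done)
}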
160,529 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | writeNotification | func writeNotification(file data.Path, finish bool) *keybase1.FSNotification {
n := baseNotification(file, finish)
if file.Tlf.Type() == tlf.Public {
n.NotificationType = keybase1.FSNotificationType_SIGNING
} else {
n.NotificationType = keybase1.FSNotificationType_ENCRYPTING
}
return n
} | go | func writeNotification(file data.Path, finish bool) *keybase1.FSNotification {
n := baseNotification(file, finish)
if file.Tlf.Type() == tlf.Public {
n.NotificationType = keybase1.FSNotificationType_SIGNING
} else {
n.NotificationType = keybase1.FSNotificationType_ENCRYPTING
}
return n
} | [
"func",
"writeNotification",
"(",
"file",
"data",
".",
"Path",
",",
"finish",
"bool",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"n",
":=",
"baseNotification",
"(",
"file",
",",
"finish",
")",
"\n",
"if",
"file",
".",
"Tlf",
".",
"Type",
"(",
")",
"==",
"tlf",
".",
"Public",
"{",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_SIGNING",
"\n",
"}",
"else",
"{",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_ENCRYPTING",
"\n",
"}",
"\n",
"return",
"n",
"\n",
"}"
] | // writeNotification creates FSNotifications from paths for file
// write events. | [
"writeNotification",
"creates",
"FSNotifications",
"from",
"paths",
"for",
"file",
"write",
"events",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L404-L412 |
160,530 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | readNotification | func readNotification(file data.Path, finish bool) *keybase1.FSNotification {
n := baseNotification(file, finish)
if file.Tlf.Type() == tlf.Public {
n.NotificationType = keybase1.FSNotificationType_VERIFYING
} else {
n.NotificationType = keybase1.FSNotificationType_DECRYPTING
}
return n
} | go | func readNotification(file data.Path, finish bool) *keybase1.FSNotification {
n := baseNotification(file, finish)
if file.Tlf.Type() == tlf.Public {
n.NotificationType = keybase1.FSNotificationType_VERIFYING
} else {
n.NotificationType = keybase1.FSNotificationType_DECRYPTING
}
return n
} | [
"func",
"readNotification",
"(",
"file",
"data",
".",
"Path",
",",
"finish",
"bool",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"n",
":=",
"baseNotification",
"(",
"file",
",",
"finish",
")",
"\n",
"if",
"file",
".",
"Tlf",
".",
"Type",
"(",
")",
"==",
"tlf",
".",
"Public",
"{",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_VERIFYING",
"\n",
"}",
"else",
"{",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_DECRYPTING",
"\n",
"}",
"\n",
"return",
"n",
"\n",
"}"
] | // readNotification creates FSNotifications from paths for file
// read events. | [
"readNotification",
"creates",
"FSNotifications",
"from",
"paths",
"for",
"file",
"read",
"events",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L416-L424 |
160,531 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | rekeyNotification | func rekeyNotification(ctx context.Context, config Config, handle *tlfhandle.Handle, finish bool) *keybase1.FSNotification {
code := keybase1.FSStatusCode_START
if finish {
code = keybase1.FSStatusCode_FINISH
}
return &keybase1.FSNotification{
FolderType: handle.Type().FolderType(),
Filename: string(handle.GetCanonicalPath()),
StatusCode: code,
NotificationType: keybase1.FSNotificationType_REKEYING,
}
} | go | func rekeyNotification(ctx context.Context, config Config, handle *tlfhandle.Handle, finish bool) *keybase1.FSNotification {
code := keybase1.FSStatusCode_START
if finish {
code = keybase1.FSStatusCode_FINISH
}
return &keybase1.FSNotification{
FolderType: handle.Type().FolderType(),
Filename: string(handle.GetCanonicalPath()),
StatusCode: code,
NotificationType: keybase1.FSNotificationType_REKEYING,
}
} | [
"func",
"rekeyNotification",
"(",
"ctx",
"context",
".",
"Context",
",",
"config",
"Config",
",",
"handle",
"*",
"tlfhandle",
".",
"Handle",
",",
"finish",
"bool",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"code",
":=",
"keybase1",
".",
"FSStatusCode_START",
"\n",
"if",
"finish",
"{",
"code",
"=",
"keybase1",
".",
"FSStatusCode_FINISH",
"\n",
"}",
"\n\n",
"return",
"&",
"keybase1",
".",
"FSNotification",
"{",
"FolderType",
":",
"handle",
".",
"Type",
"(",
")",
".",
"FolderType",
"(",
")",
",",
"Filename",
":",
"string",
"(",
"handle",
".",
"GetCanonicalPath",
"(",
")",
")",
",",
"StatusCode",
":",
"code",
",",
"NotificationType",
":",
"keybase1",
".",
"FSNotificationType_REKEYING",
",",
"}",
"\n",
"}"
] | // rekeyNotification creates FSNotifications from TlfHandles for rekey
// events. | [
"rekeyNotification",
"creates",
"FSNotifications",
"from",
"TlfHandles",
"for",
"rekey",
"events",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L428-L440 |
160,532 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | fileCreateNotification | func fileCreateNotification(file data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(file, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_CREATED
return n
} | go | func fileCreateNotification(file data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(file, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_CREATED
return n
} | [
"func",
"fileCreateNotification",
"(",
"file",
"data",
".",
"Path",
",",
"writer",
"keybase1",
".",
"UID",
",",
"localTime",
"time",
".",
"Time",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"n",
":=",
"baseFileEditNotification",
"(",
"file",
",",
"writer",
",",
"localTime",
")",
"\n",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_FILE_CREATED",
"\n",
"return",
"n",
"\n",
"}"
] | // fileCreateNotification creates FSNotifications from paths for file
// create events. | [
"fileCreateNotification",
"creates",
"FSNotifications",
"from",
"paths",
"for",
"file",
"create",
"events",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L452-L457 |
160,533 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | fileModifyNotification | func fileModifyNotification(file data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(file, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_MODIFIED
return n
} | go | func fileModifyNotification(file data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(file, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_MODIFIED
return n
} | [
"func",
"fileModifyNotification",
"(",
"file",
"data",
".",
"Path",
",",
"writer",
"keybase1",
".",
"UID",
",",
"localTime",
"time",
".",
"Time",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"n",
":=",
"baseFileEditNotification",
"(",
"file",
",",
"writer",
",",
"localTime",
")",
"\n",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_FILE_MODIFIED",
"\n",
"return",
"n",
"\n",
"}"
] | // fileModifyNotification creates FSNotifications from paths for file
// modification events. | [
"fileModifyNotification",
"creates",
"FSNotifications",
"from",
"paths",
"for",
"file",
"modification",
"events",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L461-L466 |
160,534 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | fileDeleteNotification | func fileDeleteNotification(file data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(file, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_DELETED
return n
} | go | func fileDeleteNotification(file data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(file, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_DELETED
return n
} | [
"func",
"fileDeleteNotification",
"(",
"file",
"data",
".",
"Path",
",",
"writer",
"keybase1",
".",
"UID",
",",
"localTime",
"time",
".",
"Time",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"n",
":=",
"baseFileEditNotification",
"(",
"file",
",",
"writer",
",",
"localTime",
")",
"\n",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_FILE_DELETED",
"\n",
"return",
"n",
"\n",
"}"
] | // fileDeleteNotification creates FSNotifications from paths for file
// delete events. | [
"fileDeleteNotification",
"creates",
"FSNotifications",
"from",
"paths",
"for",
"file",
"delete",
"events",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L470-L475 |
160,535 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | fileRenameNotification | func fileRenameNotification(oldFile data.Path, newFile data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(newFile, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_RENAMED
n.Params = map[string]string{errorParamRenameOldFilename: oldFile.CanonicalPathString()}
return n
} | go | func fileRenameNotification(oldFile data.Path, newFile data.Path, writer keybase1.UID,
localTime time.Time) *keybase1.FSNotification {
n := baseFileEditNotification(newFile, writer, localTime)
n.NotificationType = keybase1.FSNotificationType_FILE_RENAMED
n.Params = map[string]string{errorParamRenameOldFilename: oldFile.CanonicalPathString()}
return n
} | [
"func",
"fileRenameNotification",
"(",
"oldFile",
"data",
".",
"Path",
",",
"newFile",
"data",
".",
"Path",
",",
"writer",
"keybase1",
".",
"UID",
",",
"localTime",
"time",
".",
"Time",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"n",
":=",
"baseFileEditNotification",
"(",
"newFile",
",",
"writer",
",",
"localTime",
")",
"\n",
"n",
".",
"NotificationType",
"=",
"keybase1",
".",
"FSNotificationType_FILE_RENAMED",
"\n",
"n",
".",
"Params",
"=",
"map",
"[",
"string",
"]",
"string",
"{",
"errorParamRenameOldFilename",
":",
"oldFile",
".",
"CanonicalPathString",
"(",
")",
"}",
"\n",
"return",
"n",
"\n",
"}"
] | // fileRenameNotification creates FSNotifications from paths for file
// rename events. | [
"fileRenameNotification",
"creates",
"FSNotifications",
"from",
"paths",
"for",
"file",
"rename",
"events",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L479-L485 |
160,536 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | connectionNotification | func connectionNotification(status keybase1.FSStatusCode) *keybase1.FSNotification {
// TODO finish placeholder
return &keybase1.FSNotification{
NotificationType: keybase1.FSNotificationType_CONNECTION,
StatusCode: status,
}
} | go | func connectionNotification(status keybase1.FSStatusCode) *keybase1.FSNotification {
// TODO finish placeholder
return &keybase1.FSNotification{
NotificationType: keybase1.FSNotificationType_CONNECTION,
StatusCode: status,
}
} | [
"func",
"connectionNotification",
"(",
"status",
"keybase1",
".",
"FSStatusCode",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"// TODO finish placeholder",
"return",
"&",
"keybase1",
".",
"FSNotification",
"{",
"NotificationType",
":",
"keybase1",
".",
"FSNotificationType_CONNECTION",
",",
"StatusCode",
":",
"status",
",",
"}",
"\n",
"}"
] | // connectionNotification creates FSNotifications based on whether
// or not KBFS is online. | [
"connectionNotification",
"creates",
"FSNotifications",
"based",
"on",
"whether",
"or",
"not",
"KBFS",
"is",
"online",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L489-L495 |
160,537 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | baseNotification | func baseNotification(file data.Path, finish bool) *keybase1.FSNotification {
code := keybase1.FSStatusCode_START
if finish {
code = keybase1.FSStatusCode_FINISH
}
return &keybase1.FSNotification{
Filename: file.CanonicalPathString(),
StatusCode: code,
}
} | go | func baseNotification(file data.Path, finish bool) *keybase1.FSNotification {
code := keybase1.FSStatusCode_START
if finish {
code = keybase1.FSStatusCode_FINISH
}
return &keybase1.FSNotification{
Filename: file.CanonicalPathString(),
StatusCode: code,
}
} | [
"func",
"baseNotification",
"(",
"file",
"data",
".",
"Path",
",",
"finish",
"bool",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"code",
":=",
"keybase1",
".",
"FSStatusCode_START",
"\n",
"if",
"finish",
"{",
"code",
"=",
"keybase1",
".",
"FSStatusCode_FINISH",
"\n",
"}",
"\n\n",
"return",
"&",
"keybase1",
".",
"FSNotification",
"{",
"Filename",
":",
"file",
".",
"CanonicalPathString",
"(",
")",
",",
"StatusCode",
":",
"code",
",",
"}",
"\n",
"}"
] | // baseNotification creates a basic FSNotification without a
// NotificationType from a path. | [
"baseNotification",
"creates",
"a",
"basic",
"FSNotification",
"without",
"a",
"NotificationType",
"from",
"a",
"path",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L499-L509 |
160,538 | keybase/client | go/kbfs/libkbfs/reporter_kbpki.go | errorNotification | func errorNotification(err error, errType keybase1.FSErrorType,
tlfName tlf.CanonicalName, t tlf.Type, mode ErrorModeType,
filename string, params map[string]string) *keybase1.FSNotification {
if tlfName != "" {
params[errorParamTlf] = string(tlfName)
}
var nType keybase1.FSNotificationType
switch mode {
case ReadMode:
params[errorParamMode] = errorModeRead
if t == tlf.Public {
nType = keybase1.FSNotificationType_VERIFYING
} else {
nType = keybase1.FSNotificationType_DECRYPTING
}
case WriteMode:
params[errorParamMode] = errorModeWrite
if t == tlf.Public {
nType = keybase1.FSNotificationType_SIGNING
} else {
nType = keybase1.FSNotificationType_ENCRYPTING
}
default:
panic(fmt.Sprintf("Unknown mode: %v", mode))
}
return &keybase1.FSNotification{
FolderType: t.FolderType(),
Filename: filename,
StatusCode: keybase1.FSStatusCode_ERROR,
Status: err.Error(),
ErrorType: errType,
Params: params,
NotificationType: nType,
}
} | go | func errorNotification(err error, errType keybase1.FSErrorType,
tlfName tlf.CanonicalName, t tlf.Type, mode ErrorModeType,
filename string, params map[string]string) *keybase1.FSNotification {
if tlfName != "" {
params[errorParamTlf] = string(tlfName)
}
var nType keybase1.FSNotificationType
switch mode {
case ReadMode:
params[errorParamMode] = errorModeRead
if t == tlf.Public {
nType = keybase1.FSNotificationType_VERIFYING
} else {
nType = keybase1.FSNotificationType_DECRYPTING
}
case WriteMode:
params[errorParamMode] = errorModeWrite
if t == tlf.Public {
nType = keybase1.FSNotificationType_SIGNING
} else {
nType = keybase1.FSNotificationType_ENCRYPTING
}
default:
panic(fmt.Sprintf("Unknown mode: %v", mode))
}
return &keybase1.FSNotification{
FolderType: t.FolderType(),
Filename: filename,
StatusCode: keybase1.FSStatusCode_ERROR,
Status: err.Error(),
ErrorType: errType,
Params: params,
NotificationType: nType,
}
} | [
"func",
"errorNotification",
"(",
"err",
"error",
",",
"errType",
"keybase1",
".",
"FSErrorType",
",",
"tlfName",
"tlf",
".",
"CanonicalName",
",",
"t",
"tlf",
".",
"Type",
",",
"mode",
"ErrorModeType",
",",
"filename",
"string",
",",
"params",
"map",
"[",
"string",
"]",
"string",
")",
"*",
"keybase1",
".",
"FSNotification",
"{",
"if",
"tlfName",
"!=",
"\"",
"\"",
"{",
"params",
"[",
"errorParamTlf",
"]",
"=",
"string",
"(",
"tlfName",
")",
"\n",
"}",
"\n",
"var",
"nType",
"keybase1",
".",
"FSNotificationType",
"\n",
"switch",
"mode",
"{",
"case",
"ReadMode",
":",
"params",
"[",
"errorParamMode",
"]",
"=",
"errorModeRead",
"\n",
"if",
"t",
"==",
"tlf",
".",
"Public",
"{",
"nType",
"=",
"keybase1",
".",
"FSNotificationType_VERIFYING",
"\n",
"}",
"else",
"{",
"nType",
"=",
"keybase1",
".",
"FSNotificationType_DECRYPTING",
"\n",
"}",
"\n",
"case",
"WriteMode",
":",
"params",
"[",
"errorParamMode",
"]",
"=",
"errorModeWrite",
"\n",
"if",
"t",
"==",
"tlf",
".",
"Public",
"{",
"nType",
"=",
"keybase1",
".",
"FSNotificationType_SIGNING",
"\n",
"}",
"else",
"{",
"nType",
"=",
"keybase1",
".",
"FSNotificationType_ENCRYPTING",
"\n",
"}",
"\n",
"default",
":",
"panic",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"mode",
")",
")",
"\n",
"}",
"\n",
"return",
"&",
"keybase1",
".",
"FSNotification",
"{",
"FolderType",
":",
"t",
".",
"FolderType",
"(",
")",
",",
"Filename",
":",
"filename",
",",
"StatusCode",
":",
"keybase1",
".",
"FSStatusCode_ERROR",
",",
"Status",
":",
"err",
".",
"Error",
"(",
")",
",",
"ErrorType",
":",
"errType",
",",
"Params",
":",
"params",
",",
"NotificationType",
":",
"nType",
",",
"}",
"\n",
"}"
] | // errorNotification creates FSNotifications for errors. | [
"errorNotification",
"creates",
"FSNotifications",
"for",
"errors",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/reporter_kbpki.go#L512-L546 |
160,539 | keybase/client | go/chat/keyfinder.go | NewKeyFinder | func NewKeyFinder(g *globals.Context) types.KeyFinder {
return &KeyFinderImpl{
Contextified: globals.NewContextified(g),
DebugLabeler: utils.NewDebugLabeler(g.GetLog(), "KeyFinder", false),
keys: make(map[string]*types.NameInfo),
decKeys: make(map[string]types.CryptKey),
encKeys: make(map[string]encItem),
}
} | go | func NewKeyFinder(g *globals.Context) types.KeyFinder {
return &KeyFinderImpl{
Contextified: globals.NewContextified(g),
DebugLabeler: utils.NewDebugLabeler(g.GetLog(), "KeyFinder", false),
keys: make(map[string]*types.NameInfo),
decKeys: make(map[string]types.CryptKey),
encKeys: make(map[string]encItem),
}
} | [
"func",
"NewKeyFinder",
"(",
"g",
"*",
"globals",
".",
"Context",
")",
"types",
".",
"KeyFinder",
"{",
"return",
"&",
"KeyFinderImpl",
"{",
"Contextified",
":",
"globals",
".",
"NewContextified",
"(",
"g",
")",
",",
"DebugLabeler",
":",
"utils",
".",
"NewDebugLabeler",
"(",
"g",
".",
"GetLog",
"(",
")",
",",
"\"",
"\"",
",",
"false",
")",
",",
"keys",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"*",
"types",
".",
"NameInfo",
")",
",",
"decKeys",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"types",
".",
"CryptKey",
")",
",",
"encKeys",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"encItem",
")",
",",
"}",
"\n",
"}"
] | // NewKeyFinder creates a KeyFinder. | [
"NewKeyFinder",
"creates",
"a",
"KeyFinder",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/keyfinder.go#L34-L42 |
160,540 | keybase/client | go/chat/keyfinder.go | FindForEncryption | func (k *KeyFinderImpl) FindForEncryption(ctx context.Context, tlfName string, tlfID chat1.TLFID,
membersType chat1.ConversationMembersType, public bool) (res types.CryptKey, ni types.NameInfo, err error) {
ckey := k.encCacheKey(tlfName, tlfID, membersType, public)
existing, ok := k.lookupEncKey(ckey)
if ok {
return existing.key, existing.ni, nil
}
defer func() {
if err == nil {
k.writeEncKey(ckey, encItem{
key: res,
ni: ni,
})
}
}()
return k.createNameInfoSource(ctx, membersType).EncryptionKey(ctx, tlfName, tlfID,
membersType, public)
} | go | func (k *KeyFinderImpl) FindForEncryption(ctx context.Context, tlfName string, tlfID chat1.TLFID,
membersType chat1.ConversationMembersType, public bool) (res types.CryptKey, ni types.NameInfo, err error) {
ckey := k.encCacheKey(tlfName, tlfID, membersType, public)
existing, ok := k.lookupEncKey(ckey)
if ok {
return existing.key, existing.ni, nil
}
defer func() {
if err == nil {
k.writeEncKey(ckey, encItem{
key: res,
ni: ni,
})
}
}()
return k.createNameInfoSource(ctx, membersType).EncryptionKey(ctx, tlfName, tlfID,
membersType, public)
} | [
"func",
"(",
"k",
"*",
"KeyFinderImpl",
")",
"FindForEncryption",
"(",
"ctx",
"context",
".",
"Context",
",",
"tlfName",
"string",
",",
"tlfID",
"chat1",
".",
"TLFID",
",",
"membersType",
"chat1",
".",
"ConversationMembersType",
",",
"public",
"bool",
")",
"(",
"res",
"types",
".",
"CryptKey",
",",
"ni",
"types",
".",
"NameInfo",
",",
"err",
"error",
")",
"{",
"ckey",
":=",
"k",
".",
"encCacheKey",
"(",
"tlfName",
",",
"tlfID",
",",
"membersType",
",",
"public",
")",
"\n",
"existing",
",",
"ok",
":=",
"k",
".",
"lookupEncKey",
"(",
"ckey",
")",
"\n",
"if",
"ok",
"{",
"return",
"existing",
".",
"key",
",",
"existing",
".",
"ni",
",",
"nil",
"\n",
"}",
"\n",
"defer",
"func",
"(",
")",
"{",
"if",
"err",
"==",
"nil",
"{",
"k",
".",
"writeEncKey",
"(",
"ckey",
",",
"encItem",
"{",
"key",
":",
"res",
",",
"ni",
":",
"ni",
",",
"}",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"return",
"k",
".",
"createNameInfoSource",
"(",
"ctx",
",",
"membersType",
")",
".",
"EncryptionKey",
"(",
"ctx",
",",
"tlfName",
",",
"tlfID",
",",
"membersType",
",",
"public",
")",
"\n",
"}"
] | // FindForEncryption finds keys up-to-date enough for encrypting.
// Ignores tlfName or teamID based on membersType. | [
"FindForEncryption",
"finds",
"keys",
"up",
"-",
"to",
"-",
"date",
"enough",
"for",
"encrypting",
".",
"Ignores",
"tlfName",
"or",
"teamID",
"based",
"on",
"membersType",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/keyfinder.go#L110-L129 |
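FindForEncryption above (and FindForDecryption just below) checks a cache under a lock and uses a deferred store so that only successful lookups are cached. A self-contained sketch of that shape, with a plain map standing in for the key types:

package main

import (
	"fmt"
	"sync"
)

type keyCache struct {
	mu    sync.Mutex
	cache map[string]string
}

// findKey checks the cache first and, via the deferred store on the named
// results, only caches a value when the lookup succeeded.
func (k *keyCache) findKey(ckey string, fetch func() (string, error)) (res string, err error) {
	k.mu.Lock()
	existing, ok := k.cache[ckey]
	k.mu.Unlock()
	if ok {
		return existing, nil
	}
	defer func() {
		if err == nil {
			k.mu.Lock()
			k.cache[ckey] = res
			k.mu.Unlock()
		}
	}()
	return fetch()
}

func main() {
	kc := &keyCache{cache: make(map[string]string)}
	fetch := func() (string, error) { fmt.Println("fetching"); return "key-material", nil }
	_, _ = kc.findKey("tlf|gen1", fetch) // fetches
	v, _ := kc.findKey("tlf|gen1", fetch) // cache hit, no second fetch
	fmt.Println(v)
}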
160,541 | keybase/client | go/chat/keyfinder.go | FindForDecryption | func (k *KeyFinderImpl) FindForDecryption(ctx context.Context,
tlfName string, tlfID chat1.TLFID,
membersType chat1.ConversationMembersType, public bool,
keyGeneration int, kbfsEncrypted bool) (res types.CryptKey, err error) {
ckey := k.decCacheKey(tlfName, tlfID, membersType, keyGeneration, public, kbfsEncrypted)
existing, ok := k.lookupDecKey(ckey)
if ok {
return existing, nil
}
defer func() {
if err == nil {
k.writeDecKey(ckey, res)
}
}()
return k.createNameInfoSource(ctx, membersType).DecryptionKey(ctx, tlfName, tlfID,
membersType, public, keyGeneration, kbfsEncrypted)
} | go | func (k *KeyFinderImpl) FindForDecryption(ctx context.Context,
tlfName string, tlfID chat1.TLFID,
membersType chat1.ConversationMembersType, public bool,
keyGeneration int, kbfsEncrypted bool) (res types.CryptKey, err error) {
ckey := k.decCacheKey(tlfName, tlfID, membersType, keyGeneration, public, kbfsEncrypted)
existing, ok := k.lookupDecKey(ckey)
if ok {
return existing, nil
}
defer func() {
if err == nil {
k.writeDecKey(ckey, res)
}
}()
return k.createNameInfoSource(ctx, membersType).DecryptionKey(ctx, tlfName, tlfID,
membersType, public, keyGeneration, kbfsEncrypted)
} | [
"func",
"(",
"k",
"*",
"KeyFinderImpl",
")",
"FindForDecryption",
"(",
"ctx",
"context",
".",
"Context",
",",
"tlfName",
"string",
",",
"tlfID",
"chat1",
".",
"TLFID",
",",
"membersType",
"chat1",
".",
"ConversationMembersType",
",",
"public",
"bool",
",",
"keyGeneration",
"int",
",",
"kbfsEncrypted",
"bool",
")",
"(",
"res",
"types",
".",
"CryptKey",
",",
"err",
"error",
")",
"{",
"ckey",
":=",
"k",
".",
"decCacheKey",
"(",
"tlfName",
",",
"tlfID",
",",
"membersType",
",",
"keyGeneration",
",",
"public",
",",
"kbfsEncrypted",
")",
"\n",
"existing",
",",
"ok",
":=",
"k",
".",
"lookupDecKey",
"(",
"ckey",
")",
"\n",
"if",
"ok",
"{",
"return",
"existing",
",",
"nil",
"\n",
"}",
"\n",
"defer",
"func",
"(",
")",
"{",
"if",
"err",
"==",
"nil",
"{",
"k",
".",
"writeDecKey",
"(",
"ckey",
",",
"res",
")",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"return",
"k",
".",
"createNameInfoSource",
"(",
"ctx",
",",
"membersType",
")",
".",
"DecryptionKey",
"(",
"ctx",
",",
"tlfName",
",",
"tlfID",
",",
"membersType",
",",
"public",
",",
"keyGeneration",
",",
"kbfsEncrypted",
")",
"\n",
"}"
] | // FindForDecryption ignores tlfName or teamID based on membersType. | [
"FindForDecryption",
"ignores",
"tlfName",
"or",
"teamID",
"based",
"on",
"membersType",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/keyfinder.go#L132-L148 |
160,542 | keybase/client | go/chat/signencrypt/seeker.go | getChunksFromCache | func (r *decodingReadSeeker) getChunksFromCache(chunks []chunkSpec) (res []byte, ok bool) {
for _, c := range chunks {
if pt, ok := r.chunks.Get(c.index); ok {
res = append(res, pt.([]byte)...)
r.Debug(r.ctx, "getChunksFromCache: added: index: %d len: %v", c.index, len(pt.([]byte)))
} else {
r.Debug(r.ctx, "getChunksFromCache: missed: %v", c.index)
return res, false
}
}
return res, true
} | go | func (r *decodingReadSeeker) getChunksFromCache(chunks []chunkSpec) (res []byte, ok bool) {
for _, c := range chunks {
if pt, ok := r.chunks.Get(c.index); ok {
res = append(res, pt.([]byte)...)
r.Debug(r.ctx, "getChunksFromCache: added: index: %d len: %v", c.index, len(pt.([]byte)))
} else {
r.Debug(r.ctx, "getChunksFromCache: missed: %v", c.index)
return res, false
}
}
return res, true
} | [
"func",
"(",
"r",
"*",
"decodingReadSeeker",
")",
"getChunksFromCache",
"(",
"chunks",
"[",
"]",
"chunkSpec",
")",
"(",
"res",
"[",
"]",
"byte",
",",
"ok",
"bool",
")",
"{",
"for",
"_",
",",
"c",
":=",
"range",
"chunks",
"{",
"if",
"pt",
",",
"ok",
":=",
"r",
".",
"chunks",
".",
"Get",
"(",
"c",
".",
"index",
")",
";",
"ok",
"{",
"res",
"=",
"append",
"(",
"res",
",",
"pt",
".",
"(",
"[",
"]",
"byte",
")",
"...",
")",
"\n",
"r",
".",
"Debug",
"(",
"r",
".",
"ctx",
",",
"\"",
"\"",
",",
"c",
".",
"index",
",",
"len",
"(",
"pt",
".",
"(",
"[",
"]",
"byte",
")",
")",
")",
"\n",
"}",
"else",
"{",
"r",
".",
"Debug",
"(",
"r",
".",
"ctx",
",",
"\"",
"\"",
",",
"c",
".",
"index",
")",
"\n",
"return",
"res",
",",
"false",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"res",
",",
"true",
"\n",
"}"
] | // getChunksFromCache returns the plaintext bytes for a set of chunks iff we have each chunk
// in our cache | [
"getChunksFromCache",
"returns",
"the",
"plaintext",
"bytes",
"for",
"a",
"set",
"of",
"chunks",
"iff",
"we",
"have",
"each",
"chunk",
"in",
"our",
"cache"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/signencrypt/seeker.go#L51-L62 |
160,543 | keybase/client | go/chat/signencrypt/seeker.go | getReadaheadFactor | func (r *decodingReadSeeker) getReadaheadFactor() int64 {
mb := int64(1 << 20)
switch {
case r.size >= 1000*mb:
return 16
case r.size >= 500*mb:
return 8
default:
return 4
}
} | go | func (r *decodingReadSeeker) getReadaheadFactor() int64 {
mb := int64(1 << 20)
switch {
case r.size >= 1000*mb:
return 16
case r.size >= 500*mb:
return 8
default:
return 4
}
} | [
"func",
"(",
"r",
"*",
"decodingReadSeeker",
")",
"getReadaheadFactor",
"(",
")",
"int64",
"{",
"mb",
":=",
"int64",
"(",
"1",
"<<",
"20",
")",
"\n",
"switch",
"{",
"case",
"r",
".",
"size",
">=",
"1000",
"*",
"mb",
":",
"return",
"16",
"\n",
"case",
"r",
".",
"size",
">=",
"500",
"*",
"mb",
":",
"return",
"8",
"\n",
"default",
":",
"return",
"4",
"\n",
"}",
"\n",
"}"
] | // getReadaheadFactor gives the number of chunks we should read at minimum from the source. For larger
// files we try to read more so we don't make too many underlying requests. | [
"getReadaheadFactor",
"gives",
"the",
"number",
"of",
"chunks",
"we",
"should",
"read",
"at",
"minimum",
"from",
"the",
"source",
".",
"For",
"larger",
"files",
"we",
"try",
"to",
"read",
"more",
"so",
"we",
"don",
"t",
"make",
"too",
"many",
"underlying",
"requests",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/signencrypt/seeker.go#L116-L126 |
160,544 | keybase/client | go/ephemeral/device_ek_storage.go | ekLogf | func (s *DeviceEKStorage) ekLogf(mctx libkb.MetaContext, format string, args ...interface{}) {
mctx.Debug(format, args...)
if s.logger != nil {
s.logger.Printf(format, args...)
}
} | go | func (s *DeviceEKStorage) ekLogf(mctx libkb.MetaContext, format string, args ...interface{}) {
mctx.Debug(format, args...)
if s.logger != nil {
s.logger.Printf(format, args...)
}
} | [
"func",
"(",
"s",
"*",
"DeviceEKStorage",
")",
"ekLogf",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"format",
"string",
",",
"args",
"...",
"interface",
"{",
"}",
")",
"{",
"mctx",
".",
"Debug",
"(",
"format",
",",
"args",
"...",
")",
"\n",
"if",
"s",
".",
"logger",
"!=",
"nil",
"{",
"s",
".",
"logger",
".",
"Printf",
"(",
"format",
",",
"args",
"...",
")",
"\n",
"}",
"\n",
"}"
] | // Log sensitive deletion actions to a separate log file so we don't lose the
// logs during normal rotation. | [
"Log",
"sensitive",
"deletion",
"actions",
"to",
"a",
"separate",
"log",
"file",
"so",
"we",
"don",
"t",
"lose",
"the",
"logs",
"during",
"normal",
"rotation",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/ephemeral/device_ek_storage.go#L116-L121 |
160,545 | keybase/client | go/ephemeral/device_ek_storage.go | keyToEldestSeqno | func (s *DeviceEKStorage) keyToEldestSeqno(mctx libkb.MetaContext, key string) keybase1.Seqno {
if !strings.HasPrefix(key, deviceEKPrefix) {
return -1
}
parts := strings.Split(key, "-")
if len(parts) != 4 {
return -1
}
// Make sure this key is for our current user and not a different one.
username := mctx.ActiveDevice().Username(mctx)
if parts[1] != username.String() {
return -1
}
e, err := strconv.ParseUint(parts[2], 10, 64)
if err != nil {
return -1
}
return keybase1.Seqno(e)
} | go | func (s *DeviceEKStorage) keyToEldestSeqno(mctx libkb.MetaContext, key string) keybase1.Seqno {
if !strings.HasPrefix(key, deviceEKPrefix) {
return -1
}
parts := strings.Split(key, "-")
if len(parts) != 4 {
return -1
}
// Make sure this key is for our current user and not a different one.
username := mctx.ActiveDevice().Username(mctx)
if parts[1] != username.String() {
return -1
}
e, err := strconv.ParseUint(parts[2], 10, 64)
if err != nil {
return -1
}
return keybase1.Seqno(e)
} | [
"func",
"(",
"s",
"*",
"DeviceEKStorage",
")",
"keyToEldestSeqno",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"key",
"string",
")",
"keybase1",
".",
"Seqno",
"{",
"if",
"!",
"strings",
".",
"HasPrefix",
"(",
"key",
",",
"deviceEKPrefix",
")",
"{",
"return",
"-",
"1",
"\n",
"}",
"\n",
"parts",
":=",
"strings",
".",
"Split",
"(",
"key",
",",
"\"",
"\"",
")",
"\n",
"if",
"len",
"(",
"parts",
")",
"!=",
"4",
"{",
"return",
"-",
"1",
"\n",
"}",
"\n",
"// Make sure this key is for our current user and not a different one.",
"username",
":=",
"mctx",
".",
"ActiveDevice",
"(",
")",
".",
"Username",
"(",
"mctx",
")",
"\n",
"if",
"parts",
"[",
"1",
"]",
"!=",
"username",
".",
"String",
"(",
")",
"{",
"return",
"-",
"1",
"\n",
"}",
"\n",
"e",
",",
"err",
":=",
"strconv",
".",
"ParseUint",
"(",
"parts",
"[",
"2",
"]",
",",
"10",
",",
"64",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"-",
"1",
"\n",
"}",
"\n",
"return",
"keybase1",
".",
"Seqno",
"(",
"e",
")",
"\n",
"}"
] | // keyToEldestSeqno parses out the `eldestSeqno` from a key of the form
// deviceEKPrefix-username-eldestSeqno-generation.ek. If we have a key for an
// eldestSeqno that is not our current one, we purge it since we don't want the
// ephemeral key to stick around if we've reset. If we are unable to parse out
// the value, or if the key is not valid or not for the logged-in user, we return -1. | [
"keyToEldestSeqno",
"parses",
"out",
"the",
"eldestSeqno",
"from",
"a",
"key",
"of",
"the",
"form",
"deviceEKPrefix",
"-",
"username",
"-",
"eldestSeqno",
"-",
"generation",
".",
"ek",
".",
"If",
"we",
"have",
"a",
"key",
"for",
"a",
"eldestSeqno",
"that",
"is",
"not",
"our",
"current",
"we",
"purge",
"it",
"since",
"we",
"don",
"t",
"want",
"the",
"ephemeral",
"key",
"to",
"stick",
"around",
"if",
"we",
"ve",
"reset",
".",
"If",
"we",
"are",
"unable",
"to",
"parse",
"out",
"the",
"value",
"the",
"key",
"is",
"not",
"valid",
"or",
"not",
"for",
"the",
"logged",
"in",
"user",
"we",
"return",
"-",
"1"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/ephemeral/device_ek_storage.go#L156-L174 |
160,546 | keybase/client | go/ephemeral/device_ek_storage.go | keyToGeneration | func (s *DeviceEKStorage) keyToGeneration(mctx libkb.MetaContext, key string) keybase1.EkGeneration {
prefix, err := s.keyPrefix(mctx)
if err != nil {
mctx.Debug("keyToGeneration: unable to get keyPrefix: %v", err)
return -1
}
if !strings.HasPrefix(key, prefix) || !strings.HasSuffix(key, deviceEKSuffix) {
mctx.Debug("keyToGeneration: key missing prefix: %v or suffix: %s", prefix, deviceEKSuffix)
return -1
}
key = strings.TrimSuffix(key, deviceEKSuffix)
parts := strings.Split(key, prefix)
if len(parts) != 2 {
mctx.Debug("keyToGeneration: unexpected parts: %v, prefix: %v", parts)
return -1
}
g, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
mctx.Debug("keyToGeneration: unable to parseUint: %v", err)
return -1
}
return keybase1.EkGeneration(g)
} | go | func (s *DeviceEKStorage) keyToGeneration(mctx libkb.MetaContext, key string) keybase1.EkGeneration {
prefix, err := s.keyPrefix(mctx)
if err != nil {
mctx.Debug("keyToGeneration: unable to get keyPrefix: %v", err)
return -1
}
if !strings.HasPrefix(key, prefix) || !strings.HasSuffix(key, deviceEKSuffix) {
mctx.Debug("keyToGeneration: key missing prefix: %v or suffix: %s", prefix, deviceEKSuffix)
return -1
}
key = strings.TrimSuffix(key, deviceEKSuffix)
parts := strings.Split(key, prefix)
if len(parts) != 2 {
mctx.Debug("keyToGeneration: unexpected parts: %v, prefix: %v", parts)
return -1
}
g, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
mctx.Debug("keyToGeneration: unable to parseUint: %v", err)
return -1
}
return keybase1.EkGeneration(g)
} | [
"func",
"(",
"s",
"*",
"DeviceEKStorage",
")",
"keyToGeneration",
"(",
"mctx",
"libkb",
".",
"MetaContext",
",",
"key",
"string",
")",
"keybase1",
".",
"EkGeneration",
"{",
"prefix",
",",
"err",
":=",
"s",
".",
"keyPrefix",
"(",
"mctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"-",
"1",
"\n",
"}",
"\n",
"if",
"!",
"strings",
".",
"HasPrefix",
"(",
"key",
",",
"prefix",
")",
"||",
"!",
"strings",
".",
"HasSuffix",
"(",
"key",
",",
"deviceEKSuffix",
")",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"prefix",
",",
"deviceEKSuffix",
")",
"\n",
"return",
"-",
"1",
"\n",
"}",
"\n\n",
"key",
"=",
"strings",
".",
"TrimSuffix",
"(",
"key",
",",
"deviceEKSuffix",
")",
"\n",
"parts",
":=",
"strings",
".",
"Split",
"(",
"key",
",",
"prefix",
")",
"\n",
"if",
"len",
"(",
"parts",
")",
"!=",
"2",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"parts",
")",
"\n",
"return",
"-",
"1",
"\n",
"}",
"\n",
"g",
",",
"err",
":=",
"strconv",
".",
"ParseUint",
"(",
"parts",
"[",
"1",
"]",
",",
"10",
",",
"64",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"-",
"1",
"\n",
"}",
"\n",
"return",
"keybase1",
".",
"EkGeneration",
"(",
"g",
")",
"\n",
"}"
] | // keyToGeneration parses out the `generation` from a key of the form
// deviceEKPrefix-username-eldestSeqno-generation.ek. Unparseable keys return a
// generation of -1 and should be ignored. | [
"keyToEldestSeqno",
"parses",
"out",
"the",
"generation",
"from",
"a",
"key",
"of",
"the",
"form",
"deviceEKPrefix",
"-",
"username",
"-",
"eldestSeqno",
"-",
"generation",
".",
"ek",
".",
"Unparseable",
"keys",
"return",
"a",
"generation",
"of",
"-",
"1",
"and",
"should",
"be",
"ignored",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/ephemeral/device_ek_storage.go#L179-L202 |
160,547 | keybase/client | go/ephemeral/device_ek_storage.go | ListAllForUser | func (s *DeviceEKStorage) ListAllForUser(mctx libkb.MetaContext) (all []string, err error) {
defer mctx.TraceTimed("DeviceEKStorage#ListAllForUser", func() error { return err })()
s.Lock()
defer s.Unlock()
return s.listAllForUser(mctx, mctx.ActiveDevice().Username(mctx))
} | go | func (s *DeviceEKStorage) ListAllForUser(mctx libkb.MetaContext) (all []string, err error) {
defer mctx.TraceTimed("DeviceEKStorage#ListAllForUser", func() error { return err })()
s.Lock()
defer s.Unlock()
return s.listAllForUser(mctx, mctx.ActiveDevice().Username(mctx))
} | [
"func",
"(",
"s",
"*",
"DeviceEKStorage",
")",
"ListAllForUser",
"(",
"mctx",
"libkb",
".",
"MetaContext",
")",
"(",
"all",
"[",
"]",
"string",
",",
"err",
"error",
")",
"{",
"defer",
"mctx",
".",
"TraceTimed",
"(",
"\"",
"\"",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
")",
"(",
")",
"\n\n",
"s",
".",
"Lock",
"(",
")",
"\n",
"defer",
"s",
".",
"Unlock",
"(",
")",
"\n\n",
"return",
"s",
".",
"listAllForUser",
"(",
"mctx",
",",
"mctx",
".",
"ActiveDevice",
"(",
")",
".",
"Username",
"(",
"mctx",
")",
")",
"\n",
"}"
] | // ListAllForUser lists the internal storage names of deviceEKs of the logged-in
// user. This is used for logsend purposes to debug ek state. | [
"ListAllForUser",
"lists",
"the",
"internal",
"storage",
"name",
"of",
"deviceEKs",
"of",
"the",
"logged",
"in",
"user",
".",
"This",
"is",
"used",
"for",
"logsend",
"purposes",
"to",
"debug",
"ek",
"state",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/ephemeral/device_ek_storage.go#L410-L417 |
160,548 | keybase/client | go/kbfs/libfs/util.go | RecursiveDelete | func RecursiveDelete(
ctx context.Context, fs billy.Filesystem, fi os.FileInfo) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if !fi.IsDir() {
// Delete regular files and symlinks directly.
return fs.Remove(fi.Name())
}
subdirFS, err := fs.Chroot(fi.Name())
if err != nil {
return err
}
children, err := subdirFS.ReadDir("/")
if err != nil {
return err
}
for _, childFI := range children {
if childFI.Name() == "." {
continue
}
err := RecursiveDelete(ctx, subdirFS, childFI)
if err != nil {
return err
}
}
return fs.Remove(fi.Name())
} | go | func RecursiveDelete(
ctx context.Context, fs billy.Filesystem, fi os.FileInfo) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if !fi.IsDir() {
// Delete regular files and symlinks directly.
return fs.Remove(fi.Name())
}
subdirFS, err := fs.Chroot(fi.Name())
if err != nil {
return err
}
children, err := subdirFS.ReadDir("/")
if err != nil {
return err
}
for _, childFI := range children {
if childFI.Name() == "." {
continue
}
err := RecursiveDelete(ctx, subdirFS, childFI)
if err != nil {
return err
}
}
return fs.Remove(fi.Name())
} | [
"func",
"RecursiveDelete",
"(",
"ctx",
"context",
".",
"Context",
",",
"fs",
"billy",
".",
"Filesystem",
",",
"fi",
"os",
".",
"FileInfo",
")",
"error",
"{",
"select",
"{",
"case",
"<-",
"ctx",
".",
"Done",
"(",
")",
":",
"return",
"ctx",
".",
"Err",
"(",
")",
"\n",
"default",
":",
"}",
"\n\n",
"if",
"!",
"fi",
".",
"IsDir",
"(",
")",
"{",
"// Delete regular files and symlinks directly.",
"return",
"fs",
".",
"Remove",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n\n",
"subdirFS",
",",
"err",
":=",
"fs",
".",
"Chroot",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"children",
",",
"err",
":=",
"subdirFS",
".",
"ReadDir",
"(",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"for",
"_",
",",
"childFI",
":=",
"range",
"children",
"{",
"if",
"childFI",
".",
"Name",
"(",
")",
"==",
"\"",
"\"",
"{",
"continue",
"\n",
"}",
"\n",
"err",
":=",
"RecursiveDelete",
"(",
"ctx",
",",
"subdirFS",
",",
"childFI",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"fs",
".",
"Remove",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"}"
] | // RecursiveDelete deletes the given entry from the given filesystem.
// If it's a directory, first all the items in the directory are
// deleted recursively. | [
"RecursiveDelete",
"deletes",
"the",
"given",
"entry",
"from",
"the",
"given",
"filesystem",
".",
"If",
"it",
"s",
"a",
"directory",
"first",
"all",
"the",
"items",
"in",
"the",
"directory",
"are",
"deleted",
"recursively",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/util.go#L17-L50 |
160,549 | keybase/client | go/kbnm/main.go | process | func process(h *handler, in nativemessaging.JSONDecoder, out nativemessaging.JSONEncoder) error {
var resp Response
var req Request
// If input fails to parse, we can't guarantee future inputs will
// get into a parseable state so we abort after sending an error
// response.
abortErr := in.Decode(&req)
var err error
if abortErr == nil {
resp.Result, err = h.Handle(&req)
}
if err == io.EOF {
// Closed
return err
} else if err != nil {
resp.Status = "error"
resp.Message = err.Error()
} else {
// Success
resp.Status = "ok"
}
resp.Client = req.Client
err = out.Encode(resp)
if err != nil {
// TODO: Log this somewhere?
fmt.Fprintf(os.Stderr, "error: %s", err)
os.Exit(1)
}
return abortErr
} | go | func process(h *handler, in nativemessaging.JSONDecoder, out nativemessaging.JSONEncoder) error {
var resp Response
var req Request
// If input fails to parse, we can't guarantee future inputs will
// get into a parseable state so we abort after sending an error
// response.
abortErr := in.Decode(&req)
var err error
if abortErr == nil {
resp.Result, err = h.Handle(&req)
}
if err == io.EOF {
// Closed
return err
} else if err != nil {
resp.Status = "error"
resp.Message = err.Error()
} else {
// Success
resp.Status = "ok"
}
resp.Client = req.Client
err = out.Encode(resp)
if err != nil {
// TODO: Log this somewhere?
fmt.Fprintf(os.Stderr, "error: %s", err)
os.Exit(1)
}
return abortErr
} | [
"func",
"process",
"(",
"h",
"*",
"handler",
",",
"in",
"nativemessaging",
".",
"JSONDecoder",
",",
"out",
"nativemessaging",
".",
"JSONEncoder",
")",
"error",
"{",
"var",
"resp",
"Response",
"\n",
"var",
"req",
"Request",
"\n\n",
"// If input fails to parse, we can't guarantee future inputs will",
"// get into a parseable state so we abort after sending an error",
"// response.",
"abortErr",
":=",
"in",
".",
"Decode",
"(",
"&",
"req",
")",
"\n\n",
"var",
"err",
"error",
"\n",
"if",
"abortErr",
"==",
"nil",
"{",
"resp",
".",
"Result",
",",
"err",
"=",
"h",
".",
"Handle",
"(",
"&",
"req",
")",
"\n",
"}",
"\n\n",
"if",
"err",
"==",
"io",
".",
"EOF",
"{",
"// Closed",
"return",
"err",
"\n",
"}",
"else",
"if",
"err",
"!=",
"nil",
"{",
"resp",
".",
"Status",
"=",
"\"",
"\"",
"\n",
"resp",
".",
"Message",
"=",
"err",
".",
"Error",
"(",
")",
"\n",
"}",
"else",
"{",
"// Success",
"resp",
".",
"Status",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"resp",
".",
"Client",
"=",
"req",
".",
"Client",
"\n\n",
"err",
"=",
"out",
".",
"Encode",
"(",
"resp",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"// TODO: Log this somewhere?",
"fmt",
".",
"Fprintf",
"(",
"os",
".",
"Stderr",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"os",
".",
"Exit",
"(",
"1",
")",
"\n",
"}",
"\n\n",
"return",
"abortErr",
"\n",
"}"
] | // process consumes a single message | [
"process",
"consumes",
"a",
"single",
"message"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbnm/main.go#L41-L75 |
160,550 | keybase/client | go/kbfs/libgit/init.go | Params | func Params(kbCtx libkbfs.Context,
storageRoot string, paramsBase *libkbfs.InitParams) (
params libkbfs.InitParams, tempDir string, err error) {
tempDir, err = ioutil.TempDir(storageRoot, libkbfs.GitStorageRootPrefix)
if err != nil {
return libkbfs.InitParams{}, "", err
}
if paramsBase != nil {
params = *paramsBase
} else {
params = libkbfs.DefaultInitParams(kbCtx)
}
params.LogToFile = true
// Set the debug default to true only if the env variable isn't
// explicitly set to a false option.
envDebug := os.Getenv("KBFSGIT_DEBUG")
if envDebug != "0" && envDebug != "false" && envDebug != "no" {
params.Debug = true
}
// This is set to false in docker tests for now, but we need it. So
// override it to true here.
params.EnableJournal = true
params.DiskCacheMode = libkbfs.DiskCacheModeRemote
params.StorageRoot = tempDir
params.Mode = libkbfs.InitSingleOpString
params.TLFJournalBackgroundWorkStatus =
libkbfs.TLFJournalSingleOpBackgroundWorkEnabled
if baddr := os.Getenv(paramKeybaseGitBServerAddr); len(baddr) > 0 {
params.BServerAddr = baddr
}
if mdaddr := os.Getenv(paramKeybaseGitMDServerAddr); len(mdaddr) > 0 {
params.MDServerAddr = mdaddr
}
return params, tempDir, nil
} | go | func Params(kbCtx libkbfs.Context,
storageRoot string, paramsBase *libkbfs.InitParams) (
params libkbfs.InitParams, tempDir string, err error) {
tempDir, err = ioutil.TempDir(storageRoot, libkbfs.GitStorageRootPrefix)
if err != nil {
return libkbfs.InitParams{}, "", err
}
if paramsBase != nil {
params = *paramsBase
} else {
params = libkbfs.DefaultInitParams(kbCtx)
}
params.LogToFile = true
// Set the debug default to true only if the env variable isn't
// explicitly set to a false option.
envDebug := os.Getenv("KBFSGIT_DEBUG")
if envDebug != "0" && envDebug != "false" && envDebug != "no" {
params.Debug = true
}
// This is set to false in docker tests for now, but we need it. So
// override it to true here.
params.EnableJournal = true
params.DiskCacheMode = libkbfs.DiskCacheModeRemote
params.StorageRoot = tempDir
params.Mode = libkbfs.InitSingleOpString
params.TLFJournalBackgroundWorkStatus =
libkbfs.TLFJournalSingleOpBackgroundWorkEnabled
if baddr := os.Getenv(paramKeybaseGitBServerAddr); len(baddr) > 0 {
params.BServerAddr = baddr
}
if mdaddr := os.Getenv(paramKeybaseGitMDServerAddr); len(mdaddr) > 0 {
params.MDServerAddr = mdaddr
}
return params, tempDir, nil
} | [
"func",
"Params",
"(",
"kbCtx",
"libkbfs",
".",
"Context",
",",
"storageRoot",
"string",
",",
"paramsBase",
"*",
"libkbfs",
".",
"InitParams",
")",
"(",
"params",
"libkbfs",
".",
"InitParams",
",",
"tempDir",
"string",
",",
"err",
"error",
")",
"{",
"tempDir",
",",
"err",
"=",
"ioutil",
".",
"TempDir",
"(",
"storageRoot",
",",
"libkbfs",
".",
"GitStorageRootPrefix",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"libkbfs",
".",
"InitParams",
"{",
"}",
",",
"\"",
"\"",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"paramsBase",
"!=",
"nil",
"{",
"params",
"=",
"*",
"paramsBase",
"\n",
"}",
"else",
"{",
"params",
"=",
"libkbfs",
".",
"DefaultInitParams",
"(",
"kbCtx",
")",
"\n",
"}",
"\n",
"params",
".",
"LogToFile",
"=",
"true",
"\n",
"// Set the debug default to true only if the env variable isn't",
"// explicitly set to a false option.",
"envDebug",
":=",
"os",
".",
"Getenv",
"(",
"\"",
"\"",
")",
"\n",
"if",
"envDebug",
"!=",
"\"",
"\"",
"&&",
"envDebug",
"!=",
"\"",
"\"",
"&&",
"envDebug",
"!=",
"\"",
"\"",
"{",
"params",
".",
"Debug",
"=",
"true",
"\n",
"}",
"\n",
"// This is set to false in docker tests for now, but we need it. So",
"// override it to true here.",
"params",
".",
"EnableJournal",
"=",
"true",
"\n",
"params",
".",
"DiskCacheMode",
"=",
"libkbfs",
".",
"DiskCacheModeRemote",
"\n",
"params",
".",
"StorageRoot",
"=",
"tempDir",
"\n",
"params",
".",
"Mode",
"=",
"libkbfs",
".",
"InitSingleOpString",
"\n",
"params",
".",
"TLFJournalBackgroundWorkStatus",
"=",
"libkbfs",
".",
"TLFJournalSingleOpBackgroundWorkEnabled",
"\n\n",
"if",
"baddr",
":=",
"os",
".",
"Getenv",
"(",
"paramKeybaseGitBServerAddr",
")",
";",
"len",
"(",
"baddr",
")",
">",
"0",
"{",
"params",
".",
"BServerAddr",
"=",
"baddr",
"\n",
"}",
"\n",
"if",
"mdaddr",
":=",
"os",
".",
"Getenv",
"(",
"paramKeybaseGitMDServerAddr",
")",
";",
"len",
"(",
"mdaddr",
")",
">",
"0",
"{",
"params",
".",
"MDServerAddr",
"=",
"mdaddr",
"\n",
"}",
"\n\n",
"return",
"params",
",",
"tempDir",
",",
"nil",
"\n",
"}"
] | // Params returns a set of default parameters for git-related
// operations, along with a temp directory that should be cleaned
// after the git work is complete. | [
"Params",
"returns",
"a",
"set",
"of",
"default",
"parameters",
"for",
"git",
"-",
"related",
"operations",
"along",
"with",
"a",
"temp",
"directory",
"that",
"should",
"be",
"cleaned",
"after",
"the",
"git",
"work",
"is",
"complete",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/init.go#L38-L75 |
160,551 | keybase/client | go/kbfs/libgit/init.go | Init | func Init(ctx context.Context, gitKBFSParams libkbfs.InitParams,
kbCtx libkbfs.Context, keybaseServiceCn libkbfs.KeybaseServiceCn,
defaultLogPath string, vlogLevel string) (
context.Context, libkbfs.Config, error) {
log, err := libkbfs.InitLogWithPrefix(
gitKBFSParams, kbCtx, "git", defaultLogPath)
if err != nil {
return ctx, nil, err
}
// Assign a unique ID to each remote-helper instance, since
// they'll all share the same log.
ctx, err = libcontext.NewContextWithCancellationDelayer(
libkbfs.CtxWithRandomIDReplayable(
ctx, ctxGitIDKey, ctxGitOpID, log))
if err != nil {
return ctx, nil, err
}
log.CDebugf(ctx, "Initialized new git config")
config, err := libkbfs.InitWithLogPrefix(
ctx, kbCtx, gitKBFSParams, keybaseServiceCn, nil, log, "git")
if err != nil {
return ctx, nil, err
}
config.SetVLogLevel(vlogLevel)
// Make any blocks written by via this config charged to the git
// quota.
config.SetDefaultBlockType(keybase1.BlockType_GIT)
config.MakeDiskBlockCacheIfNotExists()
return ctx, config, nil
} | go | func Init(ctx context.Context, gitKBFSParams libkbfs.InitParams,
kbCtx libkbfs.Context, keybaseServiceCn libkbfs.KeybaseServiceCn,
defaultLogPath string, vlogLevel string) (
context.Context, libkbfs.Config, error) {
log, err := libkbfs.InitLogWithPrefix(
gitKBFSParams, kbCtx, "git", defaultLogPath)
if err != nil {
return ctx, nil, err
}
// Assign a unique ID to each remote-helper instance, since
// they'll all share the same log.
ctx, err = libcontext.NewContextWithCancellationDelayer(
libkbfs.CtxWithRandomIDReplayable(
ctx, ctxGitIDKey, ctxGitOpID, log))
if err != nil {
return ctx, nil, err
}
log.CDebugf(ctx, "Initialized new git config")
config, err := libkbfs.InitWithLogPrefix(
ctx, kbCtx, gitKBFSParams, keybaseServiceCn, nil, log, "git")
if err != nil {
return ctx, nil, err
}
config.SetVLogLevel(vlogLevel)
// Make any blocks written by via this config charged to the git
// quota.
config.SetDefaultBlockType(keybase1.BlockType_GIT)
config.MakeDiskBlockCacheIfNotExists()
return ctx, config, nil
} | [
"func",
"Init",
"(",
"ctx",
"context",
".",
"Context",
",",
"gitKBFSParams",
"libkbfs",
".",
"InitParams",
",",
"kbCtx",
"libkbfs",
".",
"Context",
",",
"keybaseServiceCn",
"libkbfs",
".",
"KeybaseServiceCn",
",",
"defaultLogPath",
"string",
",",
"vlogLevel",
"string",
")",
"(",
"context",
".",
"Context",
",",
"libkbfs",
".",
"Config",
",",
"error",
")",
"{",
"log",
",",
"err",
":=",
"libkbfs",
".",
"InitLogWithPrefix",
"(",
"gitKBFSParams",
",",
"kbCtx",
",",
"\"",
"\"",
",",
"defaultLogPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"ctx",
",",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"// Assign a unique ID to each remote-helper instance, since",
"// they'll all share the same log.",
"ctx",
",",
"err",
"=",
"libcontext",
".",
"NewContextWithCancellationDelayer",
"(",
"libkbfs",
".",
"CtxWithRandomIDReplayable",
"(",
"ctx",
",",
"ctxGitIDKey",
",",
"ctxGitOpID",
",",
"log",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"ctx",
",",
"nil",
",",
"err",
"\n",
"}",
"\n",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n\n",
"config",
",",
"err",
":=",
"libkbfs",
".",
"InitWithLogPrefix",
"(",
"ctx",
",",
"kbCtx",
",",
"gitKBFSParams",
",",
"keybaseServiceCn",
",",
"nil",
",",
"log",
",",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"ctx",
",",
"nil",
",",
"err",
"\n",
"}",
"\n",
"config",
".",
"SetVLogLevel",
"(",
"vlogLevel",
")",
"\n\n",
"// Make any blocks written by via this config charged to the git",
"// quota.",
"config",
".",
"SetDefaultBlockType",
"(",
"keybase1",
".",
"BlockType_GIT",
")",
"\n\n",
"config",
".",
"MakeDiskBlockCacheIfNotExists",
"(",
")",
"\n\n",
"return",
"ctx",
",",
"config",
",",
"nil",
"\n",
"}"
] | // Init initializes a context and a libkbfs.Config for git operations.
// The config should be shut down when it is done being used. | [
"Init",
"initializes",
"a",
"context",
"and",
"a",
"libkbfs",
".",
"Config",
"for",
"git",
"operations",
".",
"The",
"config",
"should",
"be",
"shutdown",
"when",
"it",
"is",
"done",
"being",
"used",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libgit/init.go#L79-L113 |
160,552 | keybase/client | go/kbfs/libfs/profilelist.go | ProfileGet | func ProfileGet(name string) func(context.Context) ([]byte, time.Time, error) {
p := pprof.Lookup(name)
if p == nil {
return nil
}
// See https://golang.org/pkg/runtime/pprof/#Profile.WriteTo
// for the meaning of debug.
debug := 1
if name == "goroutine" {
debug = 2
}
return profileRead(p, debug)
} | go | func ProfileGet(name string) func(context.Context) ([]byte, time.Time, error) {
p := pprof.Lookup(name)
if p == nil {
return nil
}
// See https://golang.org/pkg/runtime/pprof/#Profile.WriteTo
// for the meaning of debug.
debug := 1
if name == "goroutine" {
debug = 2
}
return profileRead(p, debug)
} | [
"func",
"ProfileGet",
"(",
"name",
"string",
")",
"func",
"(",
"context",
".",
"Context",
")",
"(",
"[",
"]",
"byte",
",",
"time",
".",
"Time",
",",
"error",
")",
"{",
"p",
":=",
"pprof",
".",
"Lookup",
"(",
"name",
")",
"\n",
"if",
"p",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"// See https://golang.org/pkg/runtime/pprof/#Profile.WriteTo",
"// for the meaning of debug.",
"debug",
":=",
"1",
"\n",
"if",
"name",
"==",
"\"",
"\"",
"{",
"debug",
"=",
"2",
"\n",
"}",
"\n",
"return",
"profileRead",
"(",
"p",
",",
"debug",
")",
"\n",
"}"
] | // ProfileGet gets the relevant read function for the profile or nil if it doesn't exist. | [
"ProfileGet",
"gets",
"the",
"relevant",
"read",
"function",
"for",
"the",
"profile",
"or",
"nil",
"if",
"it",
"doesn",
"t",
"exist",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/profilelist.go#L21-L34 |
160,553 | keybase/client | go/kbfs/libfs/profilelist.go | profileRead | func profileRead(p *pprof.Profile, debug int) func(context.Context) ([]byte, time.Time, error) {
return func(_ context.Context) ([]byte, time.Time, error) {
var b bytes.Buffer
err := p.WriteTo(&b, debug)
if err != nil {
return nil, time.Time{}, err
}
return b.Bytes(), time.Now(), nil
}
} | go | func profileRead(p *pprof.Profile, debug int) func(context.Context) ([]byte, time.Time, error) {
return func(_ context.Context) ([]byte, time.Time, error) {
var b bytes.Buffer
err := p.WriteTo(&b, debug)
if err != nil {
return nil, time.Time{}, err
}
return b.Bytes(), time.Now(), nil
}
} | [
"func",
"profileRead",
"(",
"p",
"*",
"pprof",
".",
"Profile",
",",
"debug",
"int",
")",
"func",
"(",
"context",
".",
"Context",
")",
"(",
"[",
"]",
"byte",
",",
"time",
".",
"Time",
",",
"error",
")",
"{",
"return",
"func",
"(",
"_",
"context",
".",
"Context",
")",
"(",
"[",
"]",
"byte",
",",
"time",
".",
"Time",
",",
"error",
")",
"{",
"var",
"b",
"bytes",
".",
"Buffer",
"\n",
"err",
":=",
"p",
".",
"WriteTo",
"(",
"&",
"b",
",",
"debug",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"time",
".",
"Time",
"{",
"}",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"b",
".",
"Bytes",
"(",
")",
",",
"time",
".",
"Now",
"(",
")",
",",
"nil",
"\n",
"}",
"\n",
"}"
] | // profileRead reads from a Profile. | [
"profileRead",
"reads",
"from",
"a",
"Profile",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/profilelist.go#L37-L47 |
160,554 | keybase/client | go/chat/search/utils.go | tokenize | func tokenize(msgText string) tokenMap {
if msgText == "" {
return nil
}
// split the message text up on basic punctuation/spaces
tokens := splitExpr.Split(msgText, -1)
tokenMap := tokenMap{}
for _, token := range tokens {
if len(token) < MinTokenLength {
continue
}
token = strings.ToLower(token)
if _, ok := tokenMap[token]; !ok {
tokenMap[token] = map[string]chat1.EmptyStruct{}
}
// strip separators to raw tokens which we count as an alias to the
// original token
stripped := stripExpr.Split(token, -1)
for _, s := range stripped {
if s == "" {
continue
}
tokenMap[token][s] = chat1.EmptyStruct{}
// add the stem as an alias
stemmed := porterstemmer.StemWithoutLowerCasing([]rune(s))
tokenMap[token][string(stemmed)] = chat1.EmptyStruct{}
// calculate prefixes to alias to the token
for _, prefix := range prefixes(s) {
tokenMap[token][prefix] = chat1.EmptyStruct{}
}
}
// drop the original token from the set of aliases
delete(tokenMap[token], token)
}
return tokenMap
} | go | func tokenize(msgText string) tokenMap {
if msgText == "" {
return nil
}
// split the message text up on basic punctuation/spaces
tokens := splitExpr.Split(msgText, -1)
tokenMap := tokenMap{}
for _, token := range tokens {
if len(token) < MinTokenLength {
continue
}
token = strings.ToLower(token)
if _, ok := tokenMap[token]; !ok {
tokenMap[token] = map[string]chat1.EmptyStruct{}
}
// strip separators to raw tokens which we count as an alias to the
// original token
stripped := stripExpr.Split(token, -1)
for _, s := range stripped {
if s == "" {
continue
}
tokenMap[token][s] = chat1.EmptyStruct{}
// add the stem as an alias
stemmed := porterstemmer.StemWithoutLowerCasing([]rune(s))
tokenMap[token][string(stemmed)] = chat1.EmptyStruct{}
// calculate prefixes to alias to the token
for _, prefix := range prefixes(s) {
tokenMap[token][prefix] = chat1.EmptyStruct{}
}
}
// drop the original token from the set of aliases
delete(tokenMap[token], token)
}
return tokenMap
} | [
"func",
"tokenize",
"(",
"msgText",
"string",
")",
"tokenMap",
"{",
"if",
"msgText",
"==",
"\"",
"\"",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"// split the message text up on basic punctuation/spaces",
"tokens",
":=",
"splitExpr",
".",
"Split",
"(",
"msgText",
",",
"-",
"1",
")",
"\n",
"tokenMap",
":=",
"tokenMap",
"{",
"}",
"\n",
"for",
"_",
",",
"token",
":=",
"range",
"tokens",
"{",
"if",
"len",
"(",
"token",
")",
"<",
"MinTokenLength",
"{",
"continue",
"\n",
"}",
"\n\n",
"token",
"=",
"strings",
".",
"ToLower",
"(",
"token",
")",
"\n",
"if",
"_",
",",
"ok",
":=",
"tokenMap",
"[",
"token",
"]",
";",
"!",
"ok",
"{",
"tokenMap",
"[",
"token",
"]",
"=",
"map",
"[",
"string",
"]",
"chat1",
".",
"EmptyStruct",
"{",
"}",
"\n",
"}",
"\n\n",
"// strip separators to raw tokens which we count as an alias to the",
"// original token",
"stripped",
":=",
"stripExpr",
".",
"Split",
"(",
"token",
",",
"-",
"1",
")",
"\n",
"for",
"_",
",",
"s",
":=",
"range",
"stripped",
"{",
"if",
"s",
"==",
"\"",
"\"",
"{",
"continue",
"\n",
"}",
"\n",
"tokenMap",
"[",
"token",
"]",
"[",
"s",
"]",
"=",
"chat1",
".",
"EmptyStruct",
"{",
"}",
"\n\n",
"// add the stem as an alias",
"stemmed",
":=",
"porterstemmer",
".",
"StemWithoutLowerCasing",
"(",
"[",
"]",
"rune",
"(",
"s",
")",
")",
"\n",
"tokenMap",
"[",
"token",
"]",
"[",
"string",
"(",
"stemmed",
")",
"]",
"=",
"chat1",
".",
"EmptyStruct",
"{",
"}",
"\n\n",
"// calculate prefixes to alias to the token",
"for",
"_",
",",
"prefix",
":=",
"range",
"prefixes",
"(",
"s",
")",
"{",
"tokenMap",
"[",
"token",
"]",
"[",
"prefix",
"]",
"=",
"chat1",
".",
"EmptyStruct",
"{",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"// drop the original token from the set of aliases",
"delete",
"(",
"tokenMap",
"[",
"token",
"]",
",",
"token",
")",
"\n",
"}",
"\n",
"return",
"tokenMap",
"\n",
"}"
] | // tokenize splits the content of the given message on whitespace and
// special characters, returning a map of tokens to aliases normalized to lowercase. | [
"getIndexTokens",
"splits",
"the",
"content",
"of",
"the",
"given",
"message",
"on",
"whitespace",
"and",
"special",
"characters",
"returning",
"a",
"map",
"of",
"tokens",
"to",
"aliases",
"normalized",
"to",
"lowercase",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/search/utils.go#L62-L102 |
160,555 | keybase/client | go/chat/search/utils.go | getUIMsgs | func getUIMsgs(ctx context.Context, g *globals.Context, convID chat1.ConversationID,
uid gregor1.UID, msgs []chat1.MessageUnboxed) (uiMsgs []chat1.UIMessage) {
for i := len(msgs) - 1; i >= 0; i-- {
msg := msgs[i]
uiMsg := utils.PresentMessageUnboxed(ctx, g, msg, uid, convID)
uiMsgs = append(uiMsgs, uiMsg)
}
return uiMsgs
} | go | func getUIMsgs(ctx context.Context, g *globals.Context, convID chat1.ConversationID,
uid gregor1.UID, msgs []chat1.MessageUnboxed) (uiMsgs []chat1.UIMessage) {
for i := len(msgs) - 1; i >= 0; i-- {
msg := msgs[i]
uiMsg := utils.PresentMessageUnboxed(ctx, g, msg, uid, convID)
uiMsgs = append(uiMsgs, uiMsg)
}
return uiMsgs
} | [
"func",
"getUIMsgs",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"globals",
".",
"Context",
",",
"convID",
"chat1",
".",
"ConversationID",
",",
"uid",
"gregor1",
".",
"UID",
",",
"msgs",
"[",
"]",
"chat1",
".",
"MessageUnboxed",
")",
"(",
"uiMsgs",
"[",
"]",
"chat1",
".",
"UIMessage",
")",
"{",
"for",
"i",
":=",
"len",
"(",
"msgs",
")",
"-",
"1",
";",
"i",
">=",
"0",
";",
"i",
"--",
"{",
"msg",
":=",
"msgs",
"[",
"i",
"]",
"\n",
"uiMsg",
":=",
"utils",
".",
"PresentMessageUnboxed",
"(",
"ctx",
",",
"g",
",",
"msg",
",",
"uid",
",",
"convID",
")",
"\n",
"uiMsgs",
"=",
"append",
"(",
"uiMsgs",
",",
"uiMsg",
")",
"\n",
"}",
"\n",
"return",
"uiMsgs",
"\n",
"}"
] | // Order messages ascending by ID for presentation | [
"Order",
"messages",
"ascending",
"by",
"ID",
"for",
"presentation"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/search/utils.go#L145-L153 |
160,556 | keybase/client | go/git/errors.go | HumanizeGitErrors | func HumanizeGitErrors(ctx context.Context, g *libkb.GlobalContext, err error) error {
switch e := err.(type) {
case libkb.RepoAlreadyExistsError:
g.Log.CDebugf(ctx, "replacing error: %v", err)
return fmt.Errorf("A repo named %q already exists.", e.ExistingName)
case libkb.InvalidRepoNameError:
g.Log.CDebugf(ctx, "replacing error: %v", err)
return fmt.Errorf("%q isn't a valid repo name.", e.Name)
default:
return err
}
} | go | func HumanizeGitErrors(ctx context.Context, g *libkb.GlobalContext, err error) error {
switch e := err.(type) {
case libkb.RepoAlreadyExistsError:
g.Log.CDebugf(ctx, "replacing error: %v", err)
return fmt.Errorf("A repo named %q already exists.", e.ExistingName)
case libkb.InvalidRepoNameError:
g.Log.CDebugf(ctx, "replacing error: %v", err)
return fmt.Errorf("%q isn't a valid repo name.", e.Name)
default:
return err
}
} | [
"func",
"HumanizeGitErrors",
"(",
"ctx",
"context",
".",
"Context",
",",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"err",
"error",
")",
"error",
"{",
"switch",
"e",
":=",
"err",
".",
"(",
"type",
")",
"{",
"case",
"libkb",
".",
"RepoAlreadyExistsError",
":",
"g",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"e",
".",
"ExistingName",
")",
"\n",
"case",
"libkb",
".",
"InvalidRepoNameError",
":",
"g",
".",
"Log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"e",
".",
"Name",
")",
"\n",
"default",
":",
"return",
"err",
"\n",
"}",
"\n",
"}"
] | // For errors that we expect, replace them with nice strings for the user. The GUI
// will show these directly. | [
"For",
"errors",
"that",
"expect",
"replace",
"them",
"with",
"nice",
"strings",
"for",
"the",
"user",
".",
"The",
"GUI",
"will",
"show",
"these",
"directly",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/git/errors.go#L12-L23 |
160,557 | keybase/client | go/libkb/passphrase_stream.go | Clone | func (ps *PassphraseStream) Clone() *PassphraseStream {
if ps == nil {
return nil
}
arr := make([]byte, len(ps.stream))
copy(arr, ps.stream)
return &PassphraseStream{
stream: arr,
gen: ps.gen,
}
} | go | func (ps *PassphraseStream) Clone() *PassphraseStream {
if ps == nil {
return nil
}
arr := make([]byte, len(ps.stream))
copy(arr, ps.stream)
return &PassphraseStream{
stream: arr,
gen: ps.gen,
}
} | [
"func",
"(",
"ps",
"*",
"PassphraseStream",
")",
"Clone",
"(",
")",
"*",
"PassphraseStream",
"{",
"if",
"ps",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"arr",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"len",
"(",
"ps",
".",
"stream",
")",
")",
"\n",
"copy",
"(",
"arr",
",",
"ps",
".",
"stream",
")",
"\n",
"return",
"&",
"PassphraseStream",
"{",
"stream",
":",
"arr",
",",
"gen",
":",
"ps",
".",
"gen",
",",
"}",
"\n",
"}"
] | // Clone a passphrase stream and return a copy. | [
"Clone",
"a",
"passphrase",
"stream",
"and",
"return",
"a",
"copy",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/passphrase_stream.go#L136-L146 |
160,558 | keybase/client | go/ephemeral/init.go | NewEphemeralStorageAndInstall | func NewEphemeralStorageAndInstall(mctx libkb.MetaContext) {
mctx.G().SetDeviceEKStorage(NewDeviceEKStorage(mctx))
mctx.G().SetUserEKBoxStorage(NewUserEKBoxStorage())
mctx.G().SetTeamEKBoxStorage(NewTeamEKBoxStorage())
ekLib := NewEKLib(mctx)
mctx.G().SetEKLib(ekLib)
mctx.G().AddLoginHook(ekLib)
mctx.G().AddLogoutHook(ekLib, "ekLib")
mctx.G().AddDbNukeHook(ekLib, "ekLib")
mctx.G().PushShutdownHook(func() error {
mctx.Debug("stopping background eklib loop")
ekLib.Shutdown()
return nil
})
} | go | func NewEphemeralStorageAndInstall(mctx libkb.MetaContext) {
mctx.G().SetDeviceEKStorage(NewDeviceEKStorage(mctx))
mctx.G().SetUserEKBoxStorage(NewUserEKBoxStorage())
mctx.G().SetTeamEKBoxStorage(NewTeamEKBoxStorage())
ekLib := NewEKLib(mctx)
mctx.G().SetEKLib(ekLib)
mctx.G().AddLoginHook(ekLib)
mctx.G().AddLogoutHook(ekLib, "ekLib")
mctx.G().AddDbNukeHook(ekLib, "ekLib")
mctx.G().PushShutdownHook(func() error {
mctx.Debug("stopping background eklib loop")
ekLib.Shutdown()
return nil
})
} | [
"func",
"NewEphemeralStorageAndInstall",
"(",
"mctx",
"libkb",
".",
"MetaContext",
")",
"{",
"mctx",
".",
"G",
"(",
")",
".",
"SetDeviceEKStorage",
"(",
"NewDeviceEKStorage",
"(",
"mctx",
")",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"SetUserEKBoxStorage",
"(",
"NewUserEKBoxStorage",
"(",
")",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"SetTeamEKBoxStorage",
"(",
"NewTeamEKBoxStorage",
"(",
")",
")",
"\n",
"ekLib",
":=",
"NewEKLib",
"(",
"mctx",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"SetEKLib",
"(",
"ekLib",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"AddLoginHook",
"(",
"ekLib",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"AddLogoutHook",
"(",
"ekLib",
",",
"\"",
"\"",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"AddDbNukeHook",
"(",
"ekLib",
",",
"\"",
"\"",
")",
"\n",
"mctx",
".",
"G",
"(",
")",
".",
"PushShutdownHook",
"(",
"func",
"(",
")",
"error",
"{",
"mctx",
".",
"Debug",
"(",
"\"",
"\"",
")",
"\n",
"ekLib",
".",
"Shutdown",
"(",
")",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"}"
] | // Creates an ephemeral key storage and installs it into G. | [
"Creates",
"a",
"ephemeral",
"key",
"storage",
"and",
"installs",
"it",
"into",
"G",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/ephemeral/init.go#L8-L22 |
160,559 | keybase/client | go/kbfs/data/data_types.go | Ref | func (p BlockPointer) Ref() BlockRef {
return BlockRef{
ID: p.ID,
RefNonce: p.RefNonce,
}
} | go | func (p BlockPointer) Ref() BlockRef {
return BlockRef{
ID: p.ID,
RefNonce: p.RefNonce,
}
} | [
"func",
"(",
"p",
"BlockPointer",
")",
"Ref",
"(",
")",
"BlockRef",
"{",
"return",
"BlockRef",
"{",
"ID",
":",
"p",
".",
"ID",
",",
"RefNonce",
":",
"p",
".",
"RefNonce",
",",
"}",
"\n",
"}"
] | // Ref returns the BlockRef equivalent of this pointer. | [
"Ref",
"returns",
"the",
"BlockRef",
"equivalent",
"of",
"this",
"pointer",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/data_types.go#L209-L214 |
160,560 | keybase/client | go/kbfs/data/data_types.go | EntryInfoFromFileInfo | func EntryInfoFromFileInfo(fi os.FileInfo) EntryInfo {
t := File
if fi.IsDir() {
t = Dir
} else if fi.Mode()&os.ModeSymlink != 0 {
t = Sym
} else if fi.Mode()&0100 != 0 {
t = Exec
}
mtime := fi.ModTime().UnixNano()
return EntryInfo{
Type: t,
Size: uint64(fi.Size()), // TODO: deal with negatives?
Mtime: mtime,
Ctime: mtime,
// Leave TeamWriter and PrevRevisions empty
}
} | go | func EntryInfoFromFileInfo(fi os.FileInfo) EntryInfo {
t := File
if fi.IsDir() {
t = Dir
} else if fi.Mode()&os.ModeSymlink != 0 {
t = Sym
} else if fi.Mode()&0100 != 0 {
t = Exec
}
mtime := fi.ModTime().UnixNano()
return EntryInfo{
Type: t,
Size: uint64(fi.Size()), // TODO: deal with negatives?
Mtime: mtime,
Ctime: mtime,
// Leave TeamWriter and PrevRevisions empty
}
} | [
"func",
"EntryInfoFromFileInfo",
"(",
"fi",
"os",
".",
"FileInfo",
")",
"EntryInfo",
"{",
"t",
":=",
"File",
"\n",
"if",
"fi",
".",
"IsDir",
"(",
")",
"{",
"t",
"=",
"Dir",
"\n",
"}",
"else",
"if",
"fi",
".",
"Mode",
"(",
")",
"&",
"os",
".",
"ModeSymlink",
"!=",
"0",
"{",
"t",
"=",
"Sym",
"\n",
"}",
"else",
"if",
"fi",
".",
"Mode",
"(",
")",
"&",
"0100",
"!=",
"0",
"{",
"t",
"=",
"Exec",
"\n",
"}",
"\n",
"mtime",
":=",
"fi",
".",
"ModTime",
"(",
")",
".",
"UnixNano",
"(",
")",
"\n",
"return",
"EntryInfo",
"{",
"Type",
":",
"t",
",",
"Size",
":",
"uint64",
"(",
"fi",
".",
"Size",
"(",
")",
")",
",",
"// TODO: deal with negatives?",
"Mtime",
":",
"mtime",
",",
"Ctime",
":",
"mtime",
",",
"// Leave TeamWriter and PrevRevisions empty",
"}",
"\n",
"}"
] | // EntryInfoFromFileInfo converts an `os.FileInfo` into an
// `EntryInfo`, to the best of our ability to do so. The caller is
// responsible for filling in `EntryInfo.SymPath`, if needed. | [
"EntryInfoFromFileInfo",
"converts",
"an",
"os",
".",
"FileInfo",
"into",
"an",
"EntryInfo",
"to",
"the",
"best",
"of",
"our",
"ability",
"to",
"do",
"so",
".",
"The",
"caller",
"is",
"responsible",
"for",
"filling",
"in",
"EntryInfo",
".",
"SymPath",
"if",
"needed",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/data_types.go#L287-L304 |
160,561 | keybase/client | go/kbfs/data/data_types.go | Eq | func (ei EntryInfo) Eq(other EntryInfo) bool {
eq := ei.Type == other.Type &&
ei.Size == other.Size &&
ei.SymPath == other.SymPath &&
ei.Mtime == other.Mtime &&
ei.Ctime == other.Ctime &&
ei.TeamWriter == other.TeamWriter &&
len(ei.PrevRevisions) == len(other.PrevRevisions)
if !eq {
return false
}
for i, pr := range ei.PrevRevisions {
otherPR := other.PrevRevisions[i]
if pr.Revision != otherPR.Revision || pr.Count != otherPR.Count {
return false
}
}
return true
} | go | func (ei EntryInfo) Eq(other EntryInfo) bool {
eq := ei.Type == other.Type &&
ei.Size == other.Size &&
ei.SymPath == other.SymPath &&
ei.Mtime == other.Mtime &&
ei.Ctime == other.Ctime &&
ei.TeamWriter == other.TeamWriter &&
len(ei.PrevRevisions) == len(other.PrevRevisions)
if !eq {
return false
}
for i, pr := range ei.PrevRevisions {
otherPR := other.PrevRevisions[i]
if pr.Revision != otherPR.Revision || pr.Count != otherPR.Count {
return false
}
}
return true
} | [
"func",
"(",
"ei",
"EntryInfo",
")",
"Eq",
"(",
"other",
"EntryInfo",
")",
"bool",
"{",
"eq",
":=",
"ei",
".",
"Type",
"==",
"other",
".",
"Type",
"&&",
"ei",
".",
"Size",
"==",
"other",
".",
"Size",
"&&",
"ei",
".",
"SymPath",
"==",
"other",
".",
"SymPath",
"&&",
"ei",
".",
"Mtime",
"==",
"other",
".",
"Mtime",
"&&",
"ei",
".",
"Ctime",
"==",
"other",
".",
"Ctime",
"&&",
"ei",
".",
"TeamWriter",
"==",
"other",
".",
"TeamWriter",
"&&",
"len",
"(",
"ei",
".",
"PrevRevisions",
")",
"==",
"len",
"(",
"other",
".",
"PrevRevisions",
")",
"\n",
"if",
"!",
"eq",
"{",
"return",
"false",
"\n",
"}",
"\n",
"for",
"i",
",",
"pr",
":=",
"range",
"ei",
".",
"PrevRevisions",
"{",
"otherPR",
":=",
"other",
".",
"PrevRevisions",
"[",
"i",
"]",
"\n",
"if",
"pr",
".",
"Revision",
"!=",
"otherPR",
".",
"Revision",
"||",
"pr",
".",
"Count",
"!=",
"otherPR",
".",
"Count",
"{",
"return",
"false",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}"
] | // Eq returns true if `other` is equal to `ei`. | [
"Eq",
"returns",
"true",
"if",
"other",
"is",
"equal",
"to",
"ei",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/data_types.go#L307-L325 |
160,562 | keybase/client | go/kbfs/data/data_types.go | MakeRevBranchName | func MakeRevBranchName(rev kbfsmd.Revision) BranchName {
return BranchName(branchRevPrefix + strconv.FormatInt(int64(rev), 10))
} | go | func MakeRevBranchName(rev kbfsmd.Revision) BranchName {
return BranchName(branchRevPrefix + strconv.FormatInt(int64(rev), 10))
} | [
"func",
"MakeRevBranchName",
"(",
"rev",
"kbfsmd",
".",
"Revision",
")",
"BranchName",
"{",
"return",
"BranchName",
"(",
"branchRevPrefix",
"+",
"strconv",
".",
"FormatInt",
"(",
"int64",
"(",
"rev",
")",
",",
"10",
")",
")",
"\n",
"}"
] | // MakeRevBranchName returns a branch name specifying an archive
// branch pinned to the given revision number. | [
"MakeRevBranchName",
"returns",
"a",
"branch",
"name",
"specifying",
"an",
"archive",
"branch",
"pinned",
"to",
"the",
"given",
"revision",
"number",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/data_types.go#L386-L388 |
160,563 | keybase/client | go/kbfs/data/data_types.go | RevisionIfSpecified | func (bn BranchName) RevisionIfSpecified() (kbfsmd.Revision, bool) {
if !bn.IsArchived() {
return kbfsmd.RevisionUninitialized, false
}
i, err := strconv.ParseInt(string(bn[len(branchRevPrefix):]), 10, 64)
if err != nil {
return kbfsmd.RevisionUninitialized, false
}
return kbfsmd.Revision(i), true
} | go | func (bn BranchName) RevisionIfSpecified() (kbfsmd.Revision, bool) {
if !bn.IsArchived() {
return kbfsmd.RevisionUninitialized, false
}
i, err := strconv.ParseInt(string(bn[len(branchRevPrefix):]), 10, 64)
if err != nil {
return kbfsmd.RevisionUninitialized, false
}
return kbfsmd.Revision(i), true
} | [
"func",
"(",
"bn",
"BranchName",
")",
"RevisionIfSpecified",
"(",
")",
"(",
"kbfsmd",
".",
"Revision",
",",
"bool",
")",
"{",
"if",
"!",
"bn",
".",
"IsArchived",
"(",
")",
"{",
"return",
"kbfsmd",
".",
"RevisionUninitialized",
",",
"false",
"\n",
"}",
"\n\n",
"i",
",",
"err",
":=",
"strconv",
".",
"ParseInt",
"(",
"string",
"(",
"bn",
"[",
"len",
"(",
"branchRevPrefix",
")",
":",
"]",
")",
",",
"10",
",",
"64",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"kbfsmd",
".",
"RevisionUninitialized",
",",
"false",
"\n",
"}",
"\n\n",
"return",
"kbfsmd",
".",
"Revision",
"(",
"i",
")",
",",
"true",
"\n",
"}"
] | // RevisionIfSpecified returns a valid revision number and true if
// `bn` is a revision branch. | [
"RevisionIfSpecified",
"returns",
"a",
"valid",
"revision",
"number",
"and",
"true",
"if",
"bn",
"is",
"a",
"revision",
"branch",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/data/data_types.go#L397-L408 |
160,564 | keybase/client | go/kbfs/kbfsmd/key_bundle_cache.go | GetTLFReaderKeyBundle | func (k *KeyBundleCacheStandard) GetTLFReaderKeyBundle(
bundleID TLFReaderKeyBundleID) (*TLFReaderKeyBundleV3, error) {
if entry, ok := k.cache.Get(bundleID); ok {
if rkb, ok := entry.(TLFReaderKeyBundleV3); ok {
return &rkb, nil
}
// Shouldn't be possible.
return nil, errors.New("Invalid key bundle type")
}
return nil, nil
} | go | func (k *KeyBundleCacheStandard) GetTLFReaderKeyBundle(
bundleID TLFReaderKeyBundleID) (*TLFReaderKeyBundleV3, error) {
if entry, ok := k.cache.Get(bundleID); ok {
if rkb, ok := entry.(TLFReaderKeyBundleV3); ok {
return &rkb, nil
}
// Shouldn't be possible.
return nil, errors.New("Invalid key bundle type")
}
return nil, nil
} | [
"func",
"(",
"k",
"*",
"KeyBundleCacheStandard",
")",
"GetTLFReaderKeyBundle",
"(",
"bundleID",
"TLFReaderKeyBundleID",
")",
"(",
"*",
"TLFReaderKeyBundleV3",
",",
"error",
")",
"{",
"if",
"entry",
",",
"ok",
":=",
"k",
".",
"cache",
".",
"Get",
"(",
"bundleID",
")",
";",
"ok",
"{",
"if",
"rkb",
",",
"ok",
":=",
"entry",
".",
"(",
"TLFReaderKeyBundleV3",
")",
";",
"ok",
"{",
"return",
"&",
"rkb",
",",
"nil",
"\n",
"}",
"\n",
"// Shouldn't be possible.",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"nil",
",",
"nil",
"\n",
"}"
] | // GetTLFReaderKeyBundle implements the KeyBundleCache interface for KeyBundleCacheStandard. | [
"GetTLFReaderKeyBundle",
"implements",
"the",
"KeyBundleCache",
"interface",
"for",
"KeyBundleCacheStandard",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/key_bundle_cache.go#L47-L57 |
160,565 | keybase/client | go/kbfs/kbfsmd/key_bundle_cache.go | GetTLFWriterKeyBundle | func (k *KeyBundleCacheStandard) GetTLFWriterKeyBundle(
bundleID TLFWriterKeyBundleID) (*TLFWriterKeyBundleV3, error) {
if entry, ok := k.cache.Get(bundleID); ok {
if wkb, ok := entry.(TLFWriterKeyBundleV3); ok {
return &wkb, nil
}
// Shouldn't be possible.
return nil, errors.New("Invalid key bundle type")
}
return nil, nil
} | go | func (k *KeyBundleCacheStandard) GetTLFWriterKeyBundle(
bundleID TLFWriterKeyBundleID) (*TLFWriterKeyBundleV3, error) {
if entry, ok := k.cache.Get(bundleID); ok {
if wkb, ok := entry.(TLFWriterKeyBundleV3); ok {
return &wkb, nil
}
// Shouldn't be possible.
return nil, errors.New("Invalid key bundle type")
}
return nil, nil
} | [
"func",
"(",
"k",
"*",
"KeyBundleCacheStandard",
")",
"GetTLFWriterKeyBundle",
"(",
"bundleID",
"TLFWriterKeyBundleID",
")",
"(",
"*",
"TLFWriterKeyBundleV3",
",",
"error",
")",
"{",
"if",
"entry",
",",
"ok",
":=",
"k",
".",
"cache",
".",
"Get",
"(",
"bundleID",
")",
";",
"ok",
"{",
"if",
"wkb",
",",
"ok",
":=",
"entry",
".",
"(",
"TLFWriterKeyBundleV3",
")",
";",
"ok",
"{",
"return",
"&",
"wkb",
",",
"nil",
"\n",
"}",
"\n",
"// Shouldn't be possible.",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"nil",
",",
"nil",
"\n",
"}"
] | // GetTLFWriterKeyBundle implements the KeyBundleCache interface for KeyBundleCacheStandard. | [
"GetTLFWriterKeyBundle",
"implements",
"the",
"KeyBundleCache",
"interface",
"for",
"KeyBundleCacheStandard",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/key_bundle_cache.go#L60-L70 |
160,566 | keybase/client | go/kbfs/kbfsmd/key_bundle_cache.go | PutTLFReaderKeyBundle | func (k *KeyBundleCacheStandard) PutTLFReaderKeyBundle(
bundleID TLFReaderKeyBundleID, rkb TLFReaderKeyBundleV3) {
k.cache.Add(bundleID, rkb)
} | go | func (k *KeyBundleCacheStandard) PutTLFReaderKeyBundle(
bundleID TLFReaderKeyBundleID, rkb TLFReaderKeyBundleV3) {
k.cache.Add(bundleID, rkb)
} | [
"func",
"(",
"k",
"*",
"KeyBundleCacheStandard",
")",
"PutTLFReaderKeyBundle",
"(",
"bundleID",
"TLFReaderKeyBundleID",
",",
"rkb",
"TLFReaderKeyBundleV3",
")",
"{",
"k",
".",
"cache",
".",
"Add",
"(",
"bundleID",
",",
"rkb",
")",
"\n",
"}"
] | // PutTLFReaderKeyBundle implements the KeyBundleCache interface for KeyBundleCacheStandard. | [
"PutTLFReaderKeyBundle",
"implements",
"the",
"KeyBundleCache",
"interface",
"for",
"KeyBundleCacheStandard",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/key_bundle_cache.go#L73-L76 |
160,567 | keybase/client | go/kbfs/kbfsmd/key_bundle_cache.go | PutTLFWriterKeyBundle | func (k *KeyBundleCacheStandard) PutTLFWriterKeyBundle(
bundleID TLFWriterKeyBundleID, wkb TLFWriterKeyBundleV3) {
k.cache.Add(bundleID, wkb)
} | go | func (k *KeyBundleCacheStandard) PutTLFWriterKeyBundle(
bundleID TLFWriterKeyBundleID, wkb TLFWriterKeyBundleV3) {
k.cache.Add(bundleID, wkb)
} | [
"func",
"(",
"k",
"*",
"KeyBundleCacheStandard",
")",
"PutTLFWriterKeyBundle",
"(",
"bundleID",
"TLFWriterKeyBundleID",
",",
"wkb",
"TLFWriterKeyBundleV3",
")",
"{",
"k",
".",
"cache",
".",
"Add",
"(",
"bundleID",
",",
"wkb",
")",
"\n",
"}"
] | // PutTLFWriterKeyBundle implements the KeyBundleCache interface for KeyBundleCacheStandard. | [
"PutTLFWriterKeyBundle",
"implements",
"the",
"KeyBundleCache",
"interface",
"for",
"KeyBundleCacheStandard",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsmd/key_bundle_cache.go#L79-L82 |
160,568 | keybase/client | go/kbfs/kbfscodec/codec.go | Update | func Update(c Codec, dstPtr interface{}, src interface{}) error {
buf, err := c.Encode(src)
if err != nil {
return err
}
err = c.Decode(buf, dstPtr)
if err != nil {
return err
}
return nil
} | go | func Update(c Codec, dstPtr interface{}, src interface{}) error {
buf, err := c.Encode(src)
if err != nil {
return err
}
err = c.Decode(buf, dstPtr)
if err != nil {
return err
}
return nil
} | [
"func",
"Update",
"(",
"c",
"Codec",
",",
"dstPtr",
"interface",
"{",
"}",
",",
"src",
"interface",
"{",
"}",
")",
"error",
"{",
"buf",
",",
"err",
":=",
"c",
".",
"Encode",
"(",
"src",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"err",
"=",
"c",
".",
"Decode",
"(",
"buf",
",",
"dstPtr",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Update encodes src into a byte string, and then decodes it into the
// object pointed to by dstPtr. | [
"Update",
"encodes",
"src",
"into",
"a",
"byte",
"string",
"and",
"then",
"decode",
"it",
"into",
"the",
"object",
"pointed",
"to",
"by",
"dstPtr",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfscodec/codec.go#L67-L77 |
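A minimal, self-contained sketch of the encode-then-decode copy that the record above documents. The KBFS `Codec` interface is not reproduced here; `encoding/json` stands in for it, and the `entry` type and `update` helper are purely illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// update copies src into the value pointed to by dstPtr by round-tripping
// through an encoder, mirroring the encode-then-decode idea in the record
// above. encoding/json stands in for the real codec.
func update(dstPtr interface{}, src interface{}) error {
	buf, err := json.Marshal(src) // encode src to bytes
	if err != nil {
		return err
	}
	return json.Unmarshal(buf, dstPtr) // decode bytes into *dstPtr
}

type entry struct {
	Name string
	Size int64
}

func main() {
	src := entry{Name: "a.txt", Size: 42}
	var dst entry
	if err := update(&dst, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {Name:a.txt Size:42}
}
```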
160,569 | keybase/client | go/kbfs/kbfscodec/codec.go | SerializeToFile | func SerializeToFile(c Codec, obj interface{}, path string) error {
err := ioutil.MkdirAll(filepath.Dir(path), 0700)
if err != nil {
return err
}
buf, err := c.Encode(obj)
if err != nil {
return err
}
return ioutil.WriteSerializedFile(path, buf, 0600)
} | go | func SerializeToFile(c Codec, obj interface{}, path string) error {
err := ioutil.MkdirAll(filepath.Dir(path), 0700)
if err != nil {
return err
}
buf, err := c.Encode(obj)
if err != nil {
return err
}
return ioutil.WriteSerializedFile(path, buf, 0600)
} | [
"func",
"SerializeToFile",
"(",
"c",
"Codec",
",",
"obj",
"interface",
"{",
"}",
",",
"path",
"string",
")",
"error",
"{",
"err",
":=",
"ioutil",
".",
"MkdirAll",
"(",
"filepath",
".",
"Dir",
"(",
"path",
")",
",",
"0700",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"buf",
",",
"err",
":=",
"c",
".",
"Encode",
"(",
"obj",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"return",
"ioutil",
".",
"WriteSerializedFile",
"(",
"path",
",",
"buf",
",",
"0600",
")",
"\n",
"}"
] | // SerializeToFile serializes the given object and writes it to the
// given file, making its parent directory first if necessary. | [
"SerializeToFile",
"serializes",
"the",
"given",
"object",
"and",
"writes",
"it",
"to",
"the",
"given",
"file",
"making",
"its",
"parent",
"directory",
"first",
"if",
"necessary",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfscodec/codec.go#L81-L93 |
160,570 | keybase/client | go/kbfs/kbfscodec/codec.go | SerializeToFileIfNotExist | func SerializeToFileIfNotExist(c Codec, obj interface{}, path string) error {
_, err := ioutil.Stat(path)
if ioutil.IsExist(err) {
return nil
} else if ioutil.IsNotExist(err) {
// Continue.
} else if err != nil {
return err
}
return SerializeToFile(c, obj, path)
} | go | func SerializeToFileIfNotExist(c Codec, obj interface{}, path string) error {
_, err := ioutil.Stat(path)
if ioutil.IsExist(err) {
return nil
} else if ioutil.IsNotExist(err) {
// Continue.
} else if err != nil {
return err
}
return SerializeToFile(c, obj, path)
} | [
"func",
"SerializeToFileIfNotExist",
"(",
"c",
"Codec",
",",
"obj",
"interface",
"{",
"}",
",",
"path",
"string",
")",
"error",
"{",
"_",
",",
"err",
":=",
"ioutil",
".",
"Stat",
"(",
"path",
")",
"\n",
"if",
"ioutil",
".",
"IsExist",
"(",
"err",
")",
"{",
"return",
"nil",
"\n",
"}",
"else",
"if",
"ioutil",
".",
"IsNotExist",
"(",
"err",
")",
"{",
"// Continue.",
"}",
"else",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"return",
"SerializeToFile",
"(",
"c",
",",
"obj",
",",
"path",
")",
"\n",
"}"
] | // SerializeToFileIfNotExist is like SerializeToFile, but does nothing
// if the file already exists. | [
"SerializeToFileIfNotExist",
"is",
"like",
"SerializeToFile",
"but",
"does",
"nothing",
"if",
"the",
"file",
"already",
"exists",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfscodec/codec.go#L97-L108 |
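A small stand-alone sketch of the stat-then-write flow the two serialization records above describe, using only the Go standard library (os, encoding/json); the `writeIfNotExist` name and the JSON encoding are assumptions for illustration, not the KBFS implementation.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// writeIfNotExist skips the write when the file already exists, and
// otherwise creates the parent directory and writes the serialized object,
// mirroring SerializeToFileIfNotExist above.
func writeIfNotExist(path string, obj interface{}) error {
	if _, err := os.Stat(path); err == nil {
		return nil // already there; nothing to do
	} else if !os.IsNotExist(err) {
		return err // a real stat error
	}
	if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
		return err
	}
	buf, err := json.Marshal(obj) // stand-in for the codec's Encode
	if err != nil {
		return err
	}
	return os.WriteFile(path, buf, 0600)
}

func main() {
	path := filepath.Join(os.TempDir(), "example", "state.json")
	if err := writeIfNotExist(path, map[string]int{"version": 1}); err != nil {
		panic(err)
	}
	fmt.Println("wrote (or kept)", path)
}
```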
160,571 | keybase/client | go/chat/flipmanager.go | LoadFlip | func (m *FlipManager) LoadFlip(ctx context.Context, uid gregor1.UID, hostConvID chat1.ConversationID,
hostMsgID chat1.MessageID, flipConvID chat1.ConversationID, gameID chat1.FlipGameID) {
defer m.Trace(ctx, func() error { return nil }, "LoadFlip")()
stored, ok := m.games.Get(gameID.String())
if ok {
switch stored.(chat1.UICoinFlipStatus).Phase {
case chat1.UICoinFlipPhase_ERROR:
// do nothing here, just replay if we are storing an error
default:
m.queueDirtyGameID(ctx, gameID, true)
return
}
}
// If we miss the in-memory game storage, attempt to replay the game
job := loadGameJob{
uid: uid,
hostConvID: hostConvID,
hostMsgID: hostMsgID,
flipConvID: flipConvID,
gameID: gameID,
}
select {
case m.loadGameCh <- job:
default:
m.Debug(ctx, "LoadFlip: queue full: gameID: %s hostConvID %s flipConvID: %s", gameID, hostConvID,
flipConvID)
}
} | go | func (m *FlipManager) LoadFlip(ctx context.Context, uid gregor1.UID, hostConvID chat1.ConversationID,
hostMsgID chat1.MessageID, flipConvID chat1.ConversationID, gameID chat1.FlipGameID) {
defer m.Trace(ctx, func() error { return nil }, "LoadFlip")()
stored, ok := m.games.Get(gameID.String())
if ok {
switch stored.(chat1.UICoinFlipStatus).Phase {
case chat1.UICoinFlipPhase_ERROR:
// do nothing here, just replay if we are storing an error
default:
m.queueDirtyGameID(ctx, gameID, true)
return
}
}
// If we miss the in-memory game storage, attempt to replay the game
job := loadGameJob{
uid: uid,
hostConvID: hostConvID,
hostMsgID: hostMsgID,
flipConvID: flipConvID,
gameID: gameID,
}
select {
case m.loadGameCh <- job:
default:
m.Debug(ctx, "LoadFlip: queue full: gameID: %s hostConvID %s flipConvID: %s", gameID, hostConvID,
flipConvID)
}
} | [
"func",
"(",
"m",
"*",
"FlipManager",
")",
"LoadFlip",
"(",
"ctx",
"context",
".",
"Context",
",",
"uid",
"gregor1",
".",
"UID",
",",
"hostConvID",
"chat1",
".",
"ConversationID",
",",
"hostMsgID",
"chat1",
".",
"MessageID",
",",
"flipConvID",
"chat1",
".",
"ConversationID",
",",
"gameID",
"chat1",
".",
"FlipGameID",
")",
"{",
"defer",
"m",
".",
"Trace",
"(",
"ctx",
",",
"func",
"(",
")",
"error",
"{",
"return",
"nil",
"}",
",",
"\"",
"\"",
")",
"(",
")",
"\n",
"stored",
",",
"ok",
":=",
"m",
".",
"games",
".",
"Get",
"(",
"gameID",
".",
"String",
"(",
")",
")",
"\n",
"if",
"ok",
"{",
"switch",
"stored",
".",
"(",
"chat1",
".",
"UICoinFlipStatus",
")",
".",
"Phase",
"{",
"case",
"chat1",
".",
"UICoinFlipPhase_ERROR",
":",
"// do nothing here, just replay if we are storing an error",
"default",
":",
"m",
".",
"queueDirtyGameID",
"(",
"ctx",
",",
"gameID",
",",
"true",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"// If we miss the in-memory game storage, attempt to replay the game",
"job",
":=",
"loadGameJob",
"{",
"uid",
":",
"uid",
",",
"hostConvID",
":",
"hostConvID",
",",
"hostMsgID",
":",
"hostMsgID",
",",
"flipConvID",
":",
"flipConvID",
",",
"gameID",
":",
"gameID",
",",
"}",
"\n",
"select",
"{",
"case",
"m",
".",
"loadGameCh",
"<-",
"job",
":",
"default",
":",
"m",
".",
"Debug",
"(",
"ctx",
",",
"\"",
"\"",
",",
"gameID",
",",
"hostConvID",
",",
"flipConvID",
")",
"\n",
"}",
"\n",
"}"
] | // LoadFlip implements the types.CoinFlipManager interface | [
"LoadFlip",
"implements",
"the",
"types",
".",
"CoinFlipManager",
"interface"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/flipmanager.go#L1380-L1407 |
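The LoadFlip record above relies on a non-blocking channel send (`select` with a `default` branch) so a full replay queue is logged rather than stalling the caller. A minimal sketch of that idiom, with an illustrative `loadJob` type:

```go
package main

import "fmt"

type loadJob struct{ gameID string }

// tryEnqueue never blocks the caller: it reports whether the job was
// accepted, so a full queue can be logged instead of stalling the request.
func tryEnqueue(ch chan<- loadJob, job loadJob) bool {
	select {
	case ch <- job:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan loadJob, 1)
	fmt.Println(tryEnqueue(ch, loadJob{"g1"})) // true: buffered slot free
	fmt.Println(tryEnqueue(ch, loadJob{"g2"})) // false: queue full, dropped
}
```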
160,572 | keybase/client | go/chat/flipmanager.go | CLogf | func (m *FlipManager) CLogf(ctx context.Context, fmt string, args ...interface{}) {
m.Debug(ctx, fmt, args...)
} | go | func (m *FlipManager) CLogf(ctx context.Context, fmt string, args ...interface{}) {
m.Debug(ctx, fmt, args...)
} | [
"func",
"(",
"m",
"*",
"FlipManager",
")",
"CLogf",
"(",
"ctx",
"context",
".",
"Context",
",",
"fmt",
"string",
",",
"args",
"...",
"interface",
"{",
"}",
")",
"{",
"m",
".",
"Debug",
"(",
"ctx",
",",
"fmt",
",",
"args",
"...",
")",
"\n",
"}"
] | // CLogf implements the flip.DealersHelper interface | [
"CLogf",
"implements",
"the",
"flip",
".",
"DealersHelper",
"interface"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/flipmanager.go#L1423-L1425 |
160,573 | keybase/client | go/chat/flipmanager.go | ServerTime | func (m *FlipManager) ServerTime(ctx context.Context) (res time.Time, err error) {
ctx = globals.ChatCtx(ctx, m.G(), keybase1.TLFIdentifyBehavior_CHAT_SKIP, nil, nil)
defer m.Trace(ctx, func() error { return err }, "ServerTime")()
if m.testingServerClock != nil {
return m.testingServerClock.Now(), nil
}
sres, err := m.ri().ServerNow(ctx)
if err != nil {
return res, err
}
return sres.Now.Time(), nil
} | go | func (m *FlipManager) ServerTime(ctx context.Context) (res time.Time, err error) {
ctx = globals.ChatCtx(ctx, m.G(), keybase1.TLFIdentifyBehavior_CHAT_SKIP, nil, nil)
defer m.Trace(ctx, func() error { return err }, "ServerTime")()
if m.testingServerClock != nil {
return m.testingServerClock.Now(), nil
}
sres, err := m.ri().ServerNow(ctx)
if err != nil {
return res, err
}
return sres.Now.Time(), nil
} | [
"func",
"(",
"m",
"*",
"FlipManager",
")",
"ServerTime",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"res",
"time",
".",
"Time",
",",
"err",
"error",
")",
"{",
"ctx",
"=",
"globals",
".",
"ChatCtx",
"(",
"ctx",
",",
"m",
".",
"G",
"(",
")",
",",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_SKIP",
",",
"nil",
",",
"nil",
")",
"\n",
"defer",
"m",
".",
"Trace",
"(",
"ctx",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
",",
"\"",
"\"",
")",
"(",
")",
"\n",
"if",
"m",
".",
"testingServerClock",
"!=",
"nil",
"{",
"return",
"m",
".",
"testingServerClock",
".",
"Now",
"(",
")",
",",
"nil",
"\n",
"}",
"\n",
"sres",
",",
"err",
":=",
"m",
".",
"ri",
"(",
")",
".",
"ServerNow",
"(",
"ctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"res",
",",
"err",
"\n",
"}",
"\n",
"return",
"sres",
".",
"Now",
".",
"Time",
"(",
")",
",",
"nil",
"\n",
"}"
] | // ServerTime implements the flip.DealersHelper interface | [
"ServerTime",
"implements",
"the",
"flip",
".",
"DealersHelper",
"interface"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/flipmanager.go#L1433-L1444 |
160,574 | keybase/client | go/chat/flipmanager.go | SendChat | func (m *FlipManager) SendChat(ctx context.Context, convID chat1.ConversationID, gameID chat1.FlipGameID,
msg flip.GameMessageEncoded) (err error) {
ctx = globals.ChatCtx(ctx, m.G(), keybase1.TLFIdentifyBehavior_CHAT_SKIP, nil, nil)
defer m.Trace(ctx, func() error { return err }, "SendChat: convID: %s", convID)()
uid, err := utils.AssertLoggedInUID(ctx, m.G())
if err != nil {
return err
}
conv, err := utils.GetVerifiedConv(ctx, m.G(), uid, convID, types.InboxSourceDataSourceAll)
if err != nil {
return err
}
outboxID, err := storage.NewOutboxID()
if err != nil {
return err
}
m.registerSentOutboxID(ctx, gameID, outboxID)
return m.sendNonblock(ctx, convID, msg.String(), conv.Info.TlfName, outboxID, gameID,
chat1.TopicType_DEV)
} | go | func (m *FlipManager) SendChat(ctx context.Context, convID chat1.ConversationID, gameID chat1.FlipGameID,
msg flip.GameMessageEncoded) (err error) {
ctx = globals.ChatCtx(ctx, m.G(), keybase1.TLFIdentifyBehavior_CHAT_SKIP, nil, nil)
defer m.Trace(ctx, func() error { return err }, "SendChat: convID: %s", convID)()
uid, err := utils.AssertLoggedInUID(ctx, m.G())
if err != nil {
return err
}
conv, err := utils.GetVerifiedConv(ctx, m.G(), uid, convID, types.InboxSourceDataSourceAll)
if err != nil {
return err
}
outboxID, err := storage.NewOutboxID()
if err != nil {
return err
}
m.registerSentOutboxID(ctx, gameID, outboxID)
return m.sendNonblock(ctx, convID, msg.String(), conv.Info.TlfName, outboxID, gameID,
chat1.TopicType_DEV)
} | [
"func",
"(",
"m",
"*",
"FlipManager",
")",
"SendChat",
"(",
"ctx",
"context",
".",
"Context",
",",
"convID",
"chat1",
".",
"ConversationID",
",",
"gameID",
"chat1",
".",
"FlipGameID",
",",
"msg",
"flip",
".",
"GameMessageEncoded",
")",
"(",
"err",
"error",
")",
"{",
"ctx",
"=",
"globals",
".",
"ChatCtx",
"(",
"ctx",
",",
"m",
".",
"G",
"(",
")",
",",
"keybase1",
".",
"TLFIdentifyBehavior_CHAT_SKIP",
",",
"nil",
",",
"nil",
")",
"\n",
"defer",
"m",
".",
"Trace",
"(",
"ctx",
",",
"func",
"(",
")",
"error",
"{",
"return",
"err",
"}",
",",
"\"",
"\"",
",",
"convID",
")",
"(",
")",
"\n",
"uid",
",",
"err",
":=",
"utils",
".",
"AssertLoggedInUID",
"(",
"ctx",
",",
"m",
".",
"G",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"conv",
",",
"err",
":=",
"utils",
".",
"GetVerifiedConv",
"(",
"ctx",
",",
"m",
".",
"G",
"(",
")",
",",
"uid",
",",
"convID",
",",
"types",
".",
"InboxSourceDataSourceAll",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"outboxID",
",",
"err",
":=",
"storage",
".",
"NewOutboxID",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"m",
".",
"registerSentOutboxID",
"(",
"ctx",
",",
"gameID",
",",
"outboxID",
")",
"\n",
"return",
"m",
".",
"sendNonblock",
"(",
"ctx",
",",
"convID",
",",
"msg",
".",
"String",
"(",
")",
",",
"conv",
".",
"Info",
".",
"TlfName",
",",
"outboxID",
",",
"gameID",
",",
"chat1",
".",
"TopicType_DEV",
")",
"\n",
"}"
] | // SendChat implements the flip.DealersHelper interface | [
"SendChat",
"implements",
"the",
"flip",
".",
"DealersHelper",
"interface"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/flipmanager.go#L1490-L1509 |
160,575 | keybase/client | go/chat/flipmanager.go | Me | func (m *FlipManager) Me() flip.UserDevice {
ad := m.G().ActiveDevice
did := ad.DeviceID()
hdid := make([]byte, libkb.DeviceIDLen)
if err := did.ToBytes(hdid); err != nil {
return flip.UserDevice{}
}
return flip.UserDevice{
U: gregor1.UID(ad.UID().ToBytes()),
D: gregor1.DeviceID(hdid),
}
} | go | func (m *FlipManager) Me() flip.UserDevice {
ad := m.G().ActiveDevice
did := ad.DeviceID()
hdid := make([]byte, libkb.DeviceIDLen)
if err := did.ToBytes(hdid); err != nil {
return flip.UserDevice{}
}
return flip.UserDevice{
U: gregor1.UID(ad.UID().ToBytes()),
D: gregor1.DeviceID(hdid),
}
} | [
"func",
"(",
"m",
"*",
"FlipManager",
")",
"Me",
"(",
")",
"flip",
".",
"UserDevice",
"{",
"ad",
":=",
"m",
".",
"G",
"(",
")",
".",
"ActiveDevice",
"\n",
"did",
":=",
"ad",
".",
"DeviceID",
"(",
")",
"\n",
"hdid",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"libkb",
".",
"DeviceIDLen",
")",
"\n",
"if",
"err",
":=",
"did",
".",
"ToBytes",
"(",
"hdid",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"flip",
".",
"UserDevice",
"{",
"}",
"\n",
"}",
"\n",
"return",
"flip",
".",
"UserDevice",
"{",
"U",
":",
"gregor1",
".",
"UID",
"(",
"ad",
".",
"UID",
"(",
")",
".",
"ToBytes",
"(",
")",
")",
",",
"D",
":",
"gregor1",
".",
"DeviceID",
"(",
"hdid",
")",
",",
"}",
"\n",
"}"
] | // Me implements the flip.DealersHelper interface | [
"Me",
"implements",
"the",
"flip",
".",
"DealersHelper",
"interface"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/chat/flipmanager.go#L1512-L1523 |
160,576 | keybase/client | go/install/fuse_status_darwin.go | KeybaseFuseStatus | func KeybaseFuseStatus(bundleVersion string, log Log) keybase1.FuseStatus {
st := keybase1.FuseStatus{
BundleVersion: bundleVersion,
InstallStatus: keybase1.InstallStatus_UNKNOWN,
InstallAction: keybase1.InstallAction_UNKNOWN,
}
var kextInfo *kext.Info
if _, err := os.Stat(installPath); err == nil {
st.Path = installPath
kextID := "com.github.kbfuse.filesystems.kbfuse"
var loadErr error
kextInfo, loadErr = kext.LoadInfo(kextID)
if loadErr != nil {
st.InstallStatus = keybase1.InstallStatus_ERROR
st.InstallAction = keybase1.InstallAction_REINSTALL
st.Status = keybase1.Status{Code: libkb.SCGeneric, Name: "INSTALL_ERROR", Desc: fmt.Sprintf("Error loading kext info: %s", loadErr)}
return st
}
if kextInfo == nil {
log.Debug("No kext info available (kext not loaded)")
// This means the kext isn't loaded, which is ok, kbfs will call
// load_kbfuse when it starts up.
// We have to get the version from the installed plist.
installedVersion, fivErr := fuseInstallVersion(log)
if fivErr != nil {
st.InstallStatus = keybase1.InstallStatus_ERROR
st.InstallAction = keybase1.InstallAction_REINSTALL
st.Status = keybase1.Status{Code: libkb.SCGeneric, Name: "INSTALL_ERROR", Desc: fmt.Sprintf("Error loading (plist) info: %s", fivErr)}
return st
}
if installedVersion != "" {
kextInfo = &kext.Info{
Version: installedVersion,
Started: false,
}
}
}
// Installed
st.KextID = kextID
}
// If neither is found, we have no install
if st.KextID == "" || kextInfo == nil {
st.InstallStatus = keybase1.InstallStatus_NOT_INSTALLED
st.InstallAction = keybase1.InstallAction_INSTALL
return st
}
// Try to get mount info, it's non-critical if we fail though.
mountInfos, err := mountInfo("kbfuse")
if err != nil {
log.Errorf("Error trying to read mount info: %s", err)
}
st.MountInfos = mountInfos
st.Version = kextInfo.Version
st.KextStarted = kextInfo.Started
installStatus, installAction, status := ResolveInstallStatus(st.Version, st.BundleVersion, "", log)
st.InstallStatus = installStatus
st.InstallAction = installAction
st.Status = status
return st
} | go | func KeybaseFuseStatus(bundleVersion string, log Log) keybase1.FuseStatus {
st := keybase1.FuseStatus{
BundleVersion: bundleVersion,
InstallStatus: keybase1.InstallStatus_UNKNOWN,
InstallAction: keybase1.InstallAction_UNKNOWN,
}
var kextInfo *kext.Info
if _, err := os.Stat(installPath); err == nil {
st.Path = installPath
kextID := "com.github.kbfuse.filesystems.kbfuse"
var loadErr error
kextInfo, loadErr = kext.LoadInfo(kextID)
if loadErr != nil {
st.InstallStatus = keybase1.InstallStatus_ERROR
st.InstallAction = keybase1.InstallAction_REINSTALL
st.Status = keybase1.Status{Code: libkb.SCGeneric, Name: "INSTALL_ERROR", Desc: fmt.Sprintf("Error loading kext info: %s", loadErr)}
return st
}
if kextInfo == nil {
log.Debug("No kext info available (kext not loaded)")
// This means the kext isn't loaded, which is ok, kbfs will call
// load_kbfuse when it starts up.
// We have to get the version from the installed plist.
installedVersion, fivErr := fuseInstallVersion(log)
if fivErr != nil {
st.InstallStatus = keybase1.InstallStatus_ERROR
st.InstallAction = keybase1.InstallAction_REINSTALL
st.Status = keybase1.Status{Code: libkb.SCGeneric, Name: "INSTALL_ERROR", Desc: fmt.Sprintf("Error loading (plist) info: %s", fivErr)}
return st
}
if installedVersion != "" {
kextInfo = &kext.Info{
Version: installedVersion,
Started: false,
}
}
}
// Installed
st.KextID = kextID
}
// If neither is found, we have no install
if st.KextID == "" || kextInfo == nil {
st.InstallStatus = keybase1.InstallStatus_NOT_INSTALLED
st.InstallAction = keybase1.InstallAction_INSTALL
return st
}
// Try to get mount info, it's non-critical if we fail though.
mountInfos, err := mountInfo("kbfuse")
if err != nil {
log.Errorf("Error trying to read mount info: %s", err)
}
st.MountInfos = mountInfos
st.Version = kextInfo.Version
st.KextStarted = kextInfo.Started
installStatus, installAction, status := ResolveInstallStatus(st.Version, st.BundleVersion, "", log)
st.InstallStatus = installStatus
st.InstallAction = installAction
st.Status = status
return st
} | [
"func",
"KeybaseFuseStatus",
"(",
"bundleVersion",
"string",
",",
"log",
"Log",
")",
"keybase1",
".",
"FuseStatus",
"{",
"st",
":=",
"keybase1",
".",
"FuseStatus",
"{",
"BundleVersion",
":",
"bundleVersion",
",",
"InstallStatus",
":",
"keybase1",
".",
"InstallStatus_UNKNOWN",
",",
"InstallAction",
":",
"keybase1",
".",
"InstallAction_UNKNOWN",
",",
"}",
"\n\n",
"var",
"kextInfo",
"*",
"kext",
".",
"Info",
"\n\n",
"if",
"_",
",",
"err",
":=",
"os",
".",
"Stat",
"(",
"installPath",
")",
";",
"err",
"==",
"nil",
"{",
"st",
".",
"Path",
"=",
"installPath",
"\n",
"kextID",
":=",
"\"",
"\"",
"\n",
"var",
"loadErr",
"error",
"\n",
"kextInfo",
",",
"loadErr",
"=",
"kext",
".",
"LoadInfo",
"(",
"kextID",
")",
"\n",
"if",
"loadErr",
"!=",
"nil",
"{",
"st",
".",
"InstallStatus",
"=",
"keybase1",
".",
"InstallStatus_ERROR",
"\n",
"st",
".",
"InstallAction",
"=",
"keybase1",
".",
"InstallAction_REINSTALL",
"\n",
"st",
".",
"Status",
"=",
"keybase1",
".",
"Status",
"{",
"Code",
":",
"libkb",
".",
"SCGeneric",
",",
"Name",
":",
"\"",
"\"",
",",
"Desc",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"loadErr",
")",
"}",
"\n",
"return",
"st",
"\n",
"}",
"\n",
"if",
"kextInfo",
"==",
"nil",
"{",
"log",
".",
"Debug",
"(",
"\"",
"\"",
")",
"\n",
"// This means the kext isn't loaded, which is ok, kbfs will call",
"// load_kbfuse when it starts up.",
"// We have to get the version from the installed plist.",
"installedVersion",
",",
"fivErr",
":=",
"fuseInstallVersion",
"(",
"log",
")",
"\n",
"if",
"fivErr",
"!=",
"nil",
"{",
"st",
".",
"InstallStatus",
"=",
"keybase1",
".",
"InstallStatus_ERROR",
"\n",
"st",
".",
"InstallAction",
"=",
"keybase1",
".",
"InstallAction_REINSTALL",
"\n",
"st",
".",
"Status",
"=",
"keybase1",
".",
"Status",
"{",
"Code",
":",
"libkb",
".",
"SCGeneric",
",",
"Name",
":",
"\"",
"\"",
",",
"Desc",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"fivErr",
")",
"}",
"\n",
"return",
"st",
"\n",
"}",
"\n",
"if",
"installedVersion",
"!=",
"\"",
"\"",
"{",
"kextInfo",
"=",
"&",
"kext",
".",
"Info",
"{",
"Version",
":",
"installedVersion",
",",
"Started",
":",
"false",
",",
"}",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Installed",
"st",
".",
"KextID",
"=",
"kextID",
"\n",
"}",
"\n\n",
"// If neither is found, we have no install",
"if",
"st",
".",
"KextID",
"==",
"\"",
"\"",
"||",
"kextInfo",
"==",
"nil",
"{",
"st",
".",
"InstallStatus",
"=",
"keybase1",
".",
"InstallStatus_NOT_INSTALLED",
"\n",
"st",
".",
"InstallAction",
"=",
"keybase1",
".",
"InstallAction_INSTALL",
"\n",
"return",
"st",
"\n",
"}",
"\n\n",
"// Try to get mount info, it's non-critical if we fail though.",
"mountInfos",
",",
"err",
":=",
"mountInfo",
"(",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"st",
".",
"MountInfos",
"=",
"mountInfos",
"\n\n",
"st",
".",
"Version",
"=",
"kextInfo",
".",
"Version",
"\n",
"st",
".",
"KextStarted",
"=",
"kextInfo",
".",
"Started",
"\n\n",
"installStatus",
",",
"installAction",
",",
"status",
":=",
"ResolveInstallStatus",
"(",
"st",
".",
"Version",
",",
"st",
".",
"BundleVersion",
",",
"\"",
"\"",
",",
"log",
")",
"\n",
"st",
".",
"InstallStatus",
"=",
"installStatus",
"\n",
"st",
".",
"InstallAction",
"=",
"installAction",
"\n",
"st",
".",
"Status",
"=",
"status",
"\n\n",
"return",
"st",
"\n",
"}"
] | // KeybaseFuseStatus returns Fuse status | [
"KeybaseFuseStatus",
"returns",
"Fuse",
"status"
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/install/fuse_status_darwin.go#L25-L92 |
160,577 | keybase/client | go/kbfs/libfs/file_info.go | Mode | func (fi *FileInfo) Mode() os.FileMode {
mode, err := WritePermMode(
fi.fs.ctx, fi.node, os.FileMode(0), fi.fs.config.KBPKI(),
fi.fs.config, fi.fs.h)
if err != nil {
fi.fs.log.CWarningf(
fi.fs.ctx, "Couldn't get mode for file %s: %+v", fi.Name(), err)
mode = os.FileMode(0)
}
mode |= 0400
switch fi.ei.Type {
case data.Dir:
mode |= os.ModeDir | 0100
case data.Sym:
mode |= os.ModeSymlink
case data.Exec:
mode |= 0100
}
return mode
} | go | func (fi *FileInfo) Mode() os.FileMode {
mode, err := WritePermMode(
fi.fs.ctx, fi.node, os.FileMode(0), fi.fs.config.KBPKI(),
fi.fs.config, fi.fs.h)
if err != nil {
fi.fs.log.CWarningf(
fi.fs.ctx, "Couldn't get mode for file %s: %+v", fi.Name(), err)
mode = os.FileMode(0)
}
mode |= 0400
switch fi.ei.Type {
case data.Dir:
mode |= os.ModeDir | 0100
case data.Sym:
mode |= os.ModeSymlink
case data.Exec:
mode |= 0100
}
return mode
} | [
"func",
"(",
"fi",
"*",
"FileInfo",
")",
"Mode",
"(",
")",
"os",
".",
"FileMode",
"{",
"mode",
",",
"err",
":=",
"WritePermMode",
"(",
"fi",
".",
"fs",
".",
"ctx",
",",
"fi",
".",
"node",
",",
"os",
".",
"FileMode",
"(",
"0",
")",
",",
"fi",
".",
"fs",
".",
"config",
".",
"KBPKI",
"(",
")",
",",
"fi",
".",
"fs",
".",
"config",
",",
"fi",
".",
"fs",
".",
"h",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"fi",
".",
"fs",
".",
"log",
".",
"CWarningf",
"(",
"fi",
".",
"fs",
".",
"ctx",
",",
"\"",
"\"",
",",
"fi",
".",
"Name",
"(",
")",
",",
"err",
")",
"\n",
"mode",
"=",
"os",
".",
"FileMode",
"(",
"0",
")",
"\n",
"}",
"\n\n",
"mode",
"|=",
"0400",
"\n",
"switch",
"fi",
".",
"ei",
".",
"Type",
"{",
"case",
"data",
".",
"Dir",
":",
"mode",
"|=",
"os",
".",
"ModeDir",
"|",
"0100",
"\n",
"case",
"data",
".",
"Sym",
":",
"mode",
"|=",
"os",
".",
"ModeSymlink",
"\n",
"case",
"data",
".",
"Exec",
":",
"mode",
"|=",
"0100",
"\n",
"}",
"\n",
"return",
"mode",
"\n",
"}"
] | // Mode implements the os.FileInfo interface for FileInfo. | [
"Mode",
"implements",
"the",
"os",
".",
"FileInfo",
"interface",
"for",
"FileInfo",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/file_info.go#L46-L66 |
160,578 | keybase/client | go/kbfs/libfs/file_info.go | ModTime | func (fi *FileInfo) ModTime() time.Time {
return time.Unix(0, fi.ei.Mtime)
} | go | func (fi *FileInfo) ModTime() time.Time {
return time.Unix(0, fi.ei.Mtime)
} | [
"func",
"(",
"fi",
"*",
"FileInfo",
")",
"ModTime",
"(",
")",
"time",
".",
"Time",
"{",
"return",
"time",
".",
"Unix",
"(",
"0",
",",
"fi",
".",
"ei",
".",
"Mtime",
")",
"\n",
"}"
] | // ModTime implements the os.FileInfo interface for FileInfo. | [
"ModTime",
"implements",
"the",
"os",
".",
"FileInfo",
"interface",
"for",
"FileInfo",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/file_info.go#L69-L71 |
160,579 | keybase/client | go/kbfs/libfs/file_info.go | Mode | func (fif *FileInfoFast) Mode() os.FileMode {
mode := os.FileMode(0400)
switch fif.ei.Type {
case data.Dir:
mode |= os.ModeDir | 0100
case data.Sym:
mode |= os.ModeSymlink
case data.Exec:
mode |= 0100
}
return mode
} | go | func (fif *FileInfoFast) Mode() os.FileMode {
mode := os.FileMode(0400)
switch fif.ei.Type {
case data.Dir:
mode |= os.ModeDir | 0100
case data.Sym:
mode |= os.ModeSymlink
case data.Exec:
mode |= 0100
}
return mode
} | [
"func",
"(",
"fif",
"*",
"FileInfoFast",
")",
"Mode",
"(",
")",
"os",
".",
"FileMode",
"{",
"mode",
":=",
"os",
".",
"FileMode",
"(",
"0400",
")",
"\n",
"switch",
"fif",
".",
"ei",
".",
"Type",
"{",
"case",
"data",
".",
"Dir",
":",
"mode",
"|=",
"os",
".",
"ModeDir",
"|",
"0100",
"\n",
"case",
"data",
".",
"Sym",
":",
"mode",
"|=",
"os",
".",
"ModeSymlink",
"\n",
"case",
"data",
".",
"Exec",
":",
"mode",
"|=",
"0100",
"\n",
"}",
"\n",
"return",
"mode",
"\n",
"}"
] | // Mode implements the os.FileInfo interface. | [
"Mode",
"implements",
"the",
"os",
".",
"FileInfo",
"interface",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/file_info.go#L185-L196 |
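The two Mode records above map a directory-entry type onto read-only `os.FileMode` bits. A compact, runnable sketch of that mapping with an illustrative `entryType` enum standing in for the KBFS data types:

```go
package main

import (
	"fmt"
	"os"
)

// entryType is an illustrative stand-in for the KBFS entry-type enum.
type entryType int

const (
	typeFile entryType = iota
	typeDir
	typeSym
	typeExec
)

// modeFor rebuilds the read-only permission mapping shown above: every
// entry is owner-readable, and directories/executables also get the
// owner-execute bit, while symlinks get the symlink type bit.
func modeFor(t entryType) os.FileMode {
	mode := os.FileMode(0400)
	switch t {
	case typeDir:
		mode |= os.ModeDir | 0100
	case typeSym:
		mode |= os.ModeSymlink
	case typeExec:
		mode |= 0100
	}
	return mode
}

func main() {
	fmt.Println(modeFor(typeDir))  // dr-x------
	fmt.Println(modeFor(typeFile)) // -r--------
}
```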
160,580 | keybase/client | go/kbfs/libfs/file_info.go | ModTime | func (fif *FileInfoFast) ModTime() time.Time {
return time.Unix(0, fif.ei.Mtime)
} | go | func (fif *FileInfoFast) ModTime() time.Time {
return time.Unix(0, fif.ei.Mtime)
} | [
"func",
"(",
"fif",
"*",
"FileInfoFast",
")",
"ModTime",
"(",
")",
"time",
".",
"Time",
"{",
"return",
"time",
".",
"Unix",
"(",
"0",
",",
"fif",
".",
"ei",
".",
"Mtime",
")",
"\n",
"}"
] | // ModTime implements the os.FileInfo interface. | [
"ModTime",
"implements",
"the",
"os",
".",
"FileInfo",
"interface",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/file_info.go#L199-L201 |
160,581 | keybase/client | go/kbfs/libfs/file_info.go | EnableFastMode | func EnableFastMode(ctx context.Context) context.Context {
return context.WithValue(ctx, ctxFastModeKey{}, true)
} | go | func EnableFastMode(ctx context.Context) context.Context {
return context.WithValue(ctx, ctxFastModeKey{}, true)
} | [
"func",
"EnableFastMode",
"(",
"ctx",
"context",
".",
"Context",
")",
"context",
".",
"Context",
"{",
"return",
"context",
".",
"WithValue",
"(",
"ctx",
",",
"ctxFastModeKey",
"{",
"}",
",",
"true",
")",
"\n",
"}"
] | // EnableFastMode returns a context.Context based on ctx that will test to true
// with IsFastModeEnabled. | [
"EnableFastMode",
"returns",
"a",
"context",
".",
"Context",
"based",
"on",
"ctx",
"that",
"will",
"test",
"to",
"true",
"with",
"IsFastModeEnabled",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libfs/file_info.go#L217-L219 |
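EnableFastMode above uses the standard Go pattern of an unexported struct key with `context.WithValue`, so no other package can collide with the flag. A self-contained sketch of that pattern (the lowercase names here are illustrative, not the KBFS exports):

```go
package main

import (
	"context"
	"fmt"
)

// fastModeKey is an unexported key type, so only this package can set or
// read the value; this mirrors the ctxFastModeKey{} idiom above.
type fastModeKey struct{}

func enableFastMode(ctx context.Context) context.Context {
	return context.WithValue(ctx, fastModeKey{}, true)
}

func isFastModeEnabled(ctx context.Context) bool {
	v, ok := ctx.Value(fastModeKey{}).(bool)
	return ok && v
}

func main() {
	ctx := context.Background()
	fmt.Println(isFastModeEnabled(ctx))                 // false
	fmt.Println(isFastModeEnabled(enableFastMode(ctx))) // true
}
```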
160,582 | keybase/client | go/kbfs/kbfsgit/runner.go | getElapsedStr | func (r *runner) getElapsedStr(
ctx context.Context, startTime time.Time, profName string,
cpuProfFullPath string) string {
if r.verbosity < 2 {
return ""
}
elapsed := r.config.Clock().Now().Sub(startTime)
elapsedStr := fmt.Sprintf(" [%s]", elapsed)
if r.verbosity >= 3 {
profName = filepath.Join(os.TempDir(), profName)
f, err := os.Create(profName)
if err != nil {
r.log.CDebugf(ctx, err.Error())
} else {
runtime.GC()
pprof.WriteHeapProfile(f)
f.Close()
}
elapsedStr += " [memprof " + profName + "]"
}
if cpuProfFullPath != "" {
pprof.StopCPUProfile()
elapsedStr += " [cpuprof " + cpuProfFullPath + "]"
}
return elapsedStr
} | go | func (r *runner) getElapsedStr(
ctx context.Context, startTime time.Time, profName string,
cpuProfFullPath string) string {
if r.verbosity < 2 {
return ""
}
elapsed := r.config.Clock().Now().Sub(startTime)
elapsedStr := fmt.Sprintf(" [%s]", elapsed)
if r.verbosity >= 3 {
profName = filepath.Join(os.TempDir(), profName)
f, err := os.Create(profName)
if err != nil {
r.log.CDebugf(ctx, err.Error())
} else {
runtime.GC()
pprof.WriteHeapProfile(f)
f.Close()
}
elapsedStr += " [memprof " + profName + "]"
}
if cpuProfFullPath != "" {
pprof.StopCPUProfile()
elapsedStr += " [cpuprof " + cpuProfFullPath + "]"
}
return elapsedStr
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"getElapsedStr",
"(",
"ctx",
"context",
".",
"Context",
",",
"startTime",
"time",
".",
"Time",
",",
"profName",
"string",
",",
"cpuProfFullPath",
"string",
")",
"string",
"{",
"if",
"r",
".",
"verbosity",
"<",
"2",
"{",
"return",
"\"",
"\"",
"\n",
"}",
"\n",
"elapsed",
":=",
"r",
".",
"config",
".",
"Clock",
"(",
")",
".",
"Now",
"(",
")",
".",
"Sub",
"(",
"startTime",
")",
"\n",
"elapsedStr",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"elapsed",
")",
"\n\n",
"if",
"r",
".",
"verbosity",
">=",
"3",
"{",
"profName",
"=",
"filepath",
".",
"Join",
"(",
"os",
".",
"TempDir",
"(",
")",
",",
"profName",
")",
"\n",
"f",
",",
"err",
":=",
"os",
".",
"Create",
"(",
"profName",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"else",
"{",
"runtime",
".",
"GC",
"(",
")",
"\n",
"pprof",
".",
"WriteHeapProfile",
"(",
"f",
")",
"\n",
"f",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"elapsedStr",
"+=",
"\"",
"\"",
"+",
"profName",
"+",
"\"",
"\"",
"\n",
"}",
"\n\n",
"if",
"cpuProfFullPath",
"!=",
"\"",
"\"",
"{",
"pprof",
".",
"StopCPUProfile",
"(",
")",
"\n",
"elapsedStr",
"+=",
"\"",
"\"",
"+",
"cpuProfFullPath",
"+",
"\"",
"\"",
"\n",
"}",
"\n\n",
"return",
"elapsedStr",
"\n",
"}"
] | // getElapsedStr gets an additional string to append to the errput
// message at the end of a phase. It includes the measured time of
// the phase, and if verbosity is high enough, it includes the
// location of a memory profile taken at the end of the phase. | [
"getElapsedStr",
"gets",
"an",
"additional",
"string",
"to",
"append",
"to",
"the",
"errput",
"message",
"at",
"the",
"end",
"of",
"a",
"phase",
".",
"It",
"includes",
"the",
"measured",
"time",
"of",
"the",
"phase",
"and",
"if",
"verbosity",
"is",
"high",
"enough",
"it",
"includes",
"the",
"location",
"of",
"a",
"memory",
"profile",
"taken",
"at",
"the",
"end",
"of",
"the",
"phase",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsgit/runner.go#L220-L248 |
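The getElapsedStr record above times a phase and, at high verbosity, drops a heap profile into the temp directory. A minimal sketch of that profiling tail using only `runtime/pprof`; the `dumpHeapProfile` helper and profile name are assumptions for illustration:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"runtime/pprof"
	"time"
)

// dumpHeapProfile forces a GC so the profile reflects live objects, then
// writes the heap profile to a file under the temp directory and returns
// its path, mirroring the verbosity>=3 branch above.
func dumpHeapProfile(name string) (string, error) {
	path := filepath.Join(os.TempDir(), name)
	f, err := os.Create(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	runtime.GC()
	if err := pprof.WriteHeapProfile(f); err != nil {
		return "", err
	}
	return path, nil
}

func main() {
	start := time.Now()
	// ... phase work would happen here ...
	path, err := dumpHeapProfile("mem.phase.prof")
	if err != nil {
		panic(err)
	}
	fmt.Printf("done [%s] [memprof %s]\n", time.Since(start), path)
}
```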
160,583 | keybase/client | go/kbfs/kbfsgit/runner.go | printStageEndIfNeeded | func (r *runner) printStageEndIfNeeded(ctx context.Context) {
r.printStageLock.Lock()
defer r.printStageLock.Unlock()
// go-git grabs the lock right after plumbing.StatusIndexOffset, but before
// sending the Done status update. As a result, it would look like we are
// flushing the journal before plumbing.StatusIndexOffset is done. So
// instead, print "done." only if it's not printed yet.
if r.needPrintDone {
elapsedStr := r.getElapsedStr(ctx,
r.stageStartTime, r.stageMemProfName, r.stageCPUProfPath)
r.errput.Write([]byte("done." + elapsedStr + "\n"))
r.needPrintDone = false
}
} | go | func (r *runner) printStageEndIfNeeded(ctx context.Context) {
r.printStageLock.Lock()
defer r.printStageLock.Unlock()
// go-git grabs the lock right after plumbing.StatusIndexOffset, but before
// sending the Done status update. As a result, it would look like we are
// flushing the journal before plumbing.StatusIndexOffset is done. So
// instead, print "done." only if it's not printed yet.
if r.needPrintDone {
elapsedStr := r.getElapsedStr(ctx,
r.stageStartTime, r.stageMemProfName, r.stageCPUProfPath)
r.errput.Write([]byte("done." + elapsedStr + "\n"))
r.needPrintDone = false
}
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"printStageEndIfNeeded",
"(",
"ctx",
"context",
".",
"Context",
")",
"{",
"r",
".",
"printStageLock",
".",
"Lock",
"(",
")",
"\n",
"defer",
"r",
".",
"printStageLock",
".",
"Unlock",
"(",
")",
"\n",
"// go-git grabs the lock right after plumbing.StatusIndexOffset, but before",
"// sending the Done status update. As a result, it would look like we are",
"// flushing the journal before plumbing.StatusIndexOffset is done. So",
"// instead, print \"done.\" only if it's not printed yet.",
"if",
"r",
".",
"needPrintDone",
"{",
"elapsedStr",
":=",
"r",
".",
"getElapsedStr",
"(",
"ctx",
",",
"r",
".",
"stageStartTime",
",",
"r",
".",
"stageMemProfName",
",",
"r",
".",
"stageCPUProfPath",
")",
"\n",
"r",
".",
"errput",
".",
"Write",
"(",
"[",
"]",
"byte",
"(",
"\"",
"\"",
"+",
"elapsedStr",
"+",
"\"",
"\\n",
"\"",
")",
")",
"\n",
"r",
".",
"needPrintDone",
"=",
"false",
"\n",
"}",
"\n",
"}"
] | // printStageEndIfNeeded should only be used to end stages started with
// printStageStart. | [
"printStageEndIfNeeded",
"should",
"only",
"be",
"used",
"to",
"end",
"stages",
"started",
"with",
"printStageStart",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsgit/runner.go#L405-L418 |
160,584 | keybase/client | go/kbfs/kbfsgit/runner.go | printJournalStatus | func (r *runner) printJournalStatus(
ctx context.Context, jManager *libkbfs.JournalManager, tlfID tlf.ID,
doneCh <-chan struct{}) {
r.printStageEndIfNeeded(ctx)
// Note: the "first" status here gets us the number of unflushed
// bytes left at the time we started printing. However, we don't
// have the total number of bytes being flushed to the server
// throughout the whole operation, which would be more
// informative. It would be better to have that as the
// denominator, but there's no easy way to get it right now.
firstStatus, err := jManager.JournalStatus(tlfID)
if err != nil {
r.log.CDebugf(ctx, "Error getting status: %+v", err)
return
}
if firstStatus.UnflushedBytes == 0 {
return
}
adj := "encrypted"
if r.h.Type() == tlf.Public {
adj = "signed"
}
if r.verbosity >= 1 {
r.printStageStart(ctx,
[]byte(fmt.Sprintf("Syncing %s data to Keybase: ", adj)),
"mem.flush.prof", "")
}
r.log.CDebugf(ctx, "Waiting for %d journal bytes to flush",
firstStatus.UnflushedBytes)
bytesFmt := "(%.2f%%) %s... "
str := fmt.Sprintf(
bytesFmt, float64(0), humanizeBytes(0, firstStatus.UnflushedBytes))
lastByteCount := len(str)
if r.progress {
r.errput.Write([]byte(str))
}
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
status, err := jManager.JournalStatus(tlfID)
if err != nil {
r.log.CDebugf(ctx, "Error getting status: %+v", err)
return
}
if r.verbosity >= 1 && r.progress {
eraseStr := strings.Repeat("\b", lastByteCount)
flushed := firstStatus.UnflushedBytes - status.UnflushedBytes
str := fmt.Sprintf(
bytesFmt, percent(flushed, firstStatus.UnflushedBytes),
humanizeBytes(flushed, firstStatus.UnflushedBytes))
lastByteCount = len(str)
r.errput.Write([]byte(eraseStr + str))
}
case <-doneCh:
if r.verbosity >= 1 && r.progress {
eraseStr := strings.Repeat("\b", lastByteCount)
// doneCh is closed. So assume journal flushing is done and
// take the shortcut.
flushed := firstStatus.UnflushedBytes
str := fmt.Sprintf(
bytesFmt, percent(flushed, firstStatus.UnflushedBytes),
humanizeBytes(flushed, firstStatus.UnflushedBytes))
lastByteCount = len(str)
r.errput.Write([]byte(eraseStr + str))
}
if r.verbosity >= 1 {
r.printStageEndIfNeeded(ctx)
}
return
}
}
} | go | func (r *runner) printJournalStatus(
ctx context.Context, jManager *libkbfs.JournalManager, tlfID tlf.ID,
doneCh <-chan struct{}) {
r.printStageEndIfNeeded(ctx)
// Note: the "first" status here gets us the number of unflushed
// bytes left at the time we started printing. However, we don't
// have the total number of bytes being flushed to the server
// throughout the whole operation, which would be more
// informative. It would be better to have that as the
// denominator, but there's no easy way to get it right now.
firstStatus, err := jManager.JournalStatus(tlfID)
if err != nil {
r.log.CDebugf(ctx, "Error getting status: %+v", err)
return
}
if firstStatus.UnflushedBytes == 0 {
return
}
adj := "encrypted"
if r.h.Type() == tlf.Public {
adj = "signed"
}
if r.verbosity >= 1 {
r.printStageStart(ctx,
[]byte(fmt.Sprintf("Syncing %s data to Keybase: ", adj)),
"mem.flush.prof", "")
}
r.log.CDebugf(ctx, "Waiting for %d journal bytes to flush",
firstStatus.UnflushedBytes)
bytesFmt := "(%.2f%%) %s... "
str := fmt.Sprintf(
bytesFmt, float64(0), humanizeBytes(0, firstStatus.UnflushedBytes))
lastByteCount := len(str)
if r.progress {
r.errput.Write([]byte(str))
}
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
status, err := jManager.JournalStatus(tlfID)
if err != nil {
r.log.CDebugf(ctx, "Error getting status: %+v", err)
return
}
if r.verbosity >= 1 && r.progress {
eraseStr := strings.Repeat("\b", lastByteCount)
flushed := firstStatus.UnflushedBytes - status.UnflushedBytes
str := fmt.Sprintf(
bytesFmt, percent(flushed, firstStatus.UnflushedBytes),
humanizeBytes(flushed, firstStatus.UnflushedBytes))
lastByteCount = len(str)
r.errput.Write([]byte(eraseStr + str))
}
case <-doneCh:
if r.verbosity >= 1 && r.progress {
eraseStr := strings.Repeat("\b", lastByteCount)
// doneCh is closed. So assume journal flushing is done and
// take the shortcut.
flushed := firstStatus.UnflushedBytes
str := fmt.Sprintf(
bytesFmt, percent(flushed, firstStatus.UnflushedBytes),
humanizeBytes(flushed, firstStatus.UnflushedBytes))
lastByteCount = len(str)
r.errput.Write([]byte(eraseStr + str))
}
if r.verbosity >= 1 {
r.printStageEndIfNeeded(ctx)
}
return
}
}
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"printJournalStatus",
"(",
"ctx",
"context",
".",
"Context",
",",
"jManager",
"*",
"libkbfs",
".",
"JournalManager",
",",
"tlfID",
"tlf",
".",
"ID",
",",
"doneCh",
"<-",
"chan",
"struct",
"{",
"}",
")",
"{",
"r",
".",
"printStageEndIfNeeded",
"(",
"ctx",
")",
"\n",
"// Note: the \"first\" status here gets us the number of unflushed",
"// bytes left at the time we started printing. However, we don't",
"// have the total number of bytes being flushed to the server",
"// throughout the whole operation, which would be more",
"// informative. It would be better to have that as the",
"// denominator, but there's no easy way to get it right now.",
"firstStatus",
",",
"err",
":=",
"jManager",
".",
"JournalStatus",
"(",
"tlfID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"firstStatus",
".",
"UnflushedBytes",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n",
"adj",
":=",
"\"",
"\"",
"\n",
"if",
"r",
".",
"h",
".",
"Type",
"(",
")",
"==",
"tlf",
".",
"Public",
"{",
"adj",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"if",
"r",
".",
"verbosity",
">=",
"1",
"{",
"r",
".",
"printStageStart",
"(",
"ctx",
",",
"[",
"]",
"byte",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"adj",
")",
")",
",",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"firstStatus",
".",
"UnflushedBytes",
")",
"\n\n",
"bytesFmt",
":=",
"\"",
"\"",
"\n",
"str",
":=",
"fmt",
".",
"Sprintf",
"(",
"bytesFmt",
",",
"float64",
"(",
"0",
")",
",",
"humanizeBytes",
"(",
"0",
",",
"firstStatus",
".",
"UnflushedBytes",
")",
")",
"\n",
"lastByteCount",
":=",
"len",
"(",
"str",
")",
"\n",
"if",
"r",
".",
"progress",
"{",
"r",
".",
"errput",
".",
"Write",
"(",
"[",
"]",
"byte",
"(",
"str",
")",
")",
"\n",
"}",
"\n\n",
"ticker",
":=",
"time",
".",
"NewTicker",
"(",
"1",
"*",
"time",
".",
"Second",
")",
"\n",
"defer",
"ticker",
".",
"Stop",
"(",
")",
"\n",
"for",
"{",
"select",
"{",
"case",
"<-",
"ticker",
".",
"C",
":",
"status",
",",
"err",
":=",
"jManager",
".",
"JournalStatus",
"(",
"tlfID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"r",
".",
"verbosity",
">=",
"1",
"&&",
"r",
".",
"progress",
"{",
"eraseStr",
":=",
"strings",
".",
"Repeat",
"(",
"\"",
"\\b",
"\"",
",",
"lastByteCount",
")",
"\n",
"flushed",
":=",
"firstStatus",
".",
"UnflushedBytes",
"-",
"status",
".",
"UnflushedBytes",
"\n",
"str",
":=",
"fmt",
".",
"Sprintf",
"(",
"bytesFmt",
",",
"percent",
"(",
"flushed",
",",
"firstStatus",
".",
"UnflushedBytes",
")",
",",
"humanizeBytes",
"(",
"flushed",
",",
"firstStatus",
".",
"UnflushedBytes",
")",
")",
"\n",
"lastByteCount",
"=",
"len",
"(",
"str",
")",
"\n",
"r",
".",
"errput",
".",
"Write",
"(",
"[",
"]",
"byte",
"(",
"eraseStr",
"+",
"str",
")",
")",
"\n",
"}",
"\n",
"case",
"<-",
"doneCh",
":",
"if",
"r",
".",
"verbosity",
">=",
"1",
"&&",
"r",
".",
"progress",
"{",
"eraseStr",
":=",
"strings",
".",
"Repeat",
"(",
"\"",
"\\b",
"\"",
",",
"lastByteCount",
")",
"\n",
"// doneCh is closed. So assume journal flushing is done and",
"// take the shortcut.",
"flushed",
":=",
"firstStatus",
".",
"UnflushedBytes",
"\n",
"str",
":=",
"fmt",
".",
"Sprintf",
"(",
"bytesFmt",
",",
"percent",
"(",
"flushed",
",",
"firstStatus",
".",
"UnflushedBytes",
")",
",",
"humanizeBytes",
"(",
"flushed",
",",
"firstStatus",
".",
"UnflushedBytes",
")",
")",
"\n",
"lastByteCount",
"=",
"len",
"(",
"str",
")",
"\n",
"r",
".",
"errput",
".",
"Write",
"(",
"[",
"]",
"byte",
"(",
"eraseStr",
"+",
"str",
")",
")",
"\n",
"}",
"\n\n",
"if",
"r",
".",
"verbosity",
">=",
"1",
"{",
"r",
".",
"printStageEndIfNeeded",
"(",
"ctx",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // caller should make sure doneCh is closed when journal is all flushed. | [
"caller",
"should",
"make",
"sure",
"doneCh",
"is",
"closed",
"when",
"journal",
"is",
"all",
"flushed",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsgit/runner.go#L452-L529 |
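printJournalStatus above redraws its flush-progress line in place by remembering how many characters it printed last time and emitting that many backspaces before the next status. A tiny stand-alone sketch of that terminal trick (the byte counts here are made up for the demo):

```go
package main

import (
	"fmt"
	"os"
	"strings"
	"time"
)

// main overwrites the previous status string with '\b' characters and then
// prints the new one over it, the same erase-and-redraw idiom used above.
func main() {
	total := int64(5)
	lastLen := 0
	for flushed := int64(0); flushed <= total; flushed++ {
		erase := strings.Repeat("\b", lastLen)
		status := fmt.Sprintf("(%.2f%%) %d/%d bytes... ",
			100*float64(flushed)/float64(total), flushed, total)
		lastLen = len(status)
		os.Stderr.WriteString(erase + status)
		time.Sleep(200 * time.Millisecond)
	}
	os.Stderr.WriteString("done.\n")
}
```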
160,585 | keybase/client | go/kbfs/kbfsgit/runner.go | recursiveByteCount | func (r *runner) recursiveByteCount(
ctx context.Context, fs billy.Filesystem, totalSoFar int64, toErase int) (
bytes int64, toEraseRet int, err error) {
fileInfos, err := fs.ReadDir("/")
if err != nil {
return 0, 0, err
}
for _, fi := range fileInfos {
if fi.IsDir() {
if fi.Name() == "." {
continue
}
chrootFS, err := fs.Chroot(fi.Name())
if err != nil {
return 0, 0, err
}
var chrootBytes int64
chrootBytes, toErase, err = r.recursiveByteCount(
ctx, chrootFS, totalSoFar+bytes, toErase)
if err != nil {
return 0, 0, err
}
bytes += chrootBytes
} else {
bytes += fi.Size()
if r.progress {
// This function only runs if r.verbosity >= 1.
eraseStr := strings.Repeat("\b", toErase)
newStr := fmt.Sprintf(
"%s... ", humanizeBytes(totalSoFar+bytes, 1))
toErase = len(newStr)
r.errput.Write([]byte(eraseStr + newStr))
}
}
}
return bytes, toErase, nil
} | go | func (r *runner) recursiveByteCount(
ctx context.Context, fs billy.Filesystem, totalSoFar int64, toErase int) (
bytes int64, toEraseRet int, err error) {
fileInfos, err := fs.ReadDir("/")
if err != nil {
return 0, 0, err
}
for _, fi := range fileInfos {
if fi.IsDir() {
if fi.Name() == "." {
continue
}
chrootFS, err := fs.Chroot(fi.Name())
if err != nil {
return 0, 0, err
}
var chrootBytes int64
chrootBytes, toErase, err = r.recursiveByteCount(
ctx, chrootFS, totalSoFar+bytes, toErase)
if err != nil {
return 0, 0, err
}
bytes += chrootBytes
} else {
bytes += fi.Size()
if r.progress {
// This function only runs if r.verbosity >= 1.
eraseStr := strings.Repeat("\b", toErase)
newStr := fmt.Sprintf(
"%s... ", humanizeBytes(totalSoFar+bytes, 1))
toErase = len(newStr)
r.errput.Write([]byte(eraseStr + newStr))
}
}
}
return bytes, toErase, nil
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"recursiveByteCount",
"(",
"ctx",
"context",
".",
"Context",
",",
"fs",
"billy",
".",
"Filesystem",
",",
"totalSoFar",
"int64",
",",
"toErase",
"int",
")",
"(",
"bytes",
"int64",
",",
"toEraseRet",
"int",
",",
"err",
"error",
")",
"{",
"fileInfos",
",",
"err",
":=",
"fs",
".",
"ReadDir",
"(",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"fi",
":=",
"range",
"fileInfos",
"{",
"if",
"fi",
".",
"IsDir",
"(",
")",
"{",
"if",
"fi",
".",
"Name",
"(",
")",
"==",
"\"",
"\"",
"{",
"continue",
"\n",
"}",
"\n",
"chrootFS",
",",
"err",
":=",
"fs",
".",
"Chroot",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"var",
"chrootBytes",
"int64",
"\n",
"chrootBytes",
",",
"toErase",
",",
"err",
"=",
"r",
".",
"recursiveByteCount",
"(",
"ctx",
",",
"chrootFS",
",",
"totalSoFar",
"+",
"bytes",
",",
"toErase",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"bytes",
"+=",
"chrootBytes",
"\n",
"}",
"else",
"{",
"bytes",
"+=",
"fi",
".",
"Size",
"(",
")",
"\n",
"if",
"r",
".",
"progress",
"{",
"// This function only runs if r.verbosity >= 1.",
"eraseStr",
":=",
"strings",
".",
"Repeat",
"(",
"\"",
"\\b",
"\"",
",",
"toErase",
")",
"\n",
"newStr",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"humanizeBytes",
"(",
"totalSoFar",
"+",
"bytes",
",",
"1",
")",
")",
"\n",
"toErase",
"=",
"len",
"(",
"newStr",
")",
"\n",
"r",
".",
"errput",
".",
"Write",
"(",
"[",
"]",
"byte",
"(",
"eraseStr",
"+",
"newStr",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"bytes",
",",
"toErase",
",",
"nil",
"\n",
"}"
] | // recursiveByteCount returns a sum of the size of all files under the
// directory represented by `fs`. It also returns the length of the
// last string it printed to `r.errput` as `toErase`, to aid in
// overwriting the text on the next update. | [
"recursiveByteCount",
"returns",
"a",
"sum",
"of",
"the",
"size",
"of",
"all",
"files",
"under",
"the",
"directory",
"represented",
"by",
"fs",
".",
"It",
"also",
"returns",
"the",
"length",
"of",
"the",
"last",
"string",
"it",
"printed",
"to",
"r",
".",
"errput",
"as",
"toErase",
"to",
"aid",
"in",
"overwriting",
"the",
"text",
"on",
"the",
"next",
"update",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsgit/runner.go#L834-L871 |
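recursiveByteCount above sums file sizes by recursing through a billy.Filesystem. The same descent over the local OS filesystem, using only the standard library, looks roughly like this sketch (progress printing omitted; `dirBytes` is an illustrative name):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// dirBytes recurses into subdirectories and adds up the sizes of regular
// files, mirroring the traversal in the record above.
func dirBytes(dir string) (int64, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return 0, err
	}
	var total int64
	for _, e := range entries {
		full := filepath.Join(dir, e.Name())
		if e.IsDir() {
			sub, err := dirBytes(full)
			if err != nil {
				return 0, err
			}
			total += sub
			continue
		}
		info, err := e.Info()
		if err != nil {
			return 0, err
		}
		total += info.Size()
	}
	return total, nil
}

func main() {
	n, err := dirBytes(".")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes\n", n)
}
```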
160,586 | keybase/client | go/kbfs/kbfsgit/runner.go | recursiveCopy | func (r *runner) recursiveCopy(
ctx context.Context, from billy.Filesystem, to billy.Filesystem,
sw *statusWriter) (err error) {
fileInfos, err := from.ReadDir("")
if err != nil {
return err
}
for _, fi := range fileInfos {
if fi.IsDir() {
if fi.Name() == "." {
continue
}
err := to.MkdirAll(fi.Name(), 0775)
if err != nil {
return err
}
chrootFrom, err := from.Chroot(fi.Name())
if err != nil {
return err
}
chrootTo, err := to.Chroot(fi.Name())
if err != nil {
return err
}
err = r.recursiveCopy(ctx, chrootFrom, chrootTo, sw)
if err != nil {
return err
}
} else {
err := r.copyFile(ctx, from, to, fi.Name(), sw)
if err != nil {
return err
}
}
}
return nil
} | go | func (r *runner) recursiveCopy(
ctx context.Context, from billy.Filesystem, to billy.Filesystem,
sw *statusWriter) (err error) {
fileInfos, err := from.ReadDir("")
if err != nil {
return err
}
for _, fi := range fileInfos {
if fi.IsDir() {
if fi.Name() == "." {
continue
}
err := to.MkdirAll(fi.Name(), 0775)
if err != nil {
return err
}
chrootFrom, err := from.Chroot(fi.Name())
if err != nil {
return err
}
chrootTo, err := to.Chroot(fi.Name())
if err != nil {
return err
}
err = r.recursiveCopy(ctx, chrootFrom, chrootTo, sw)
if err != nil {
return err
}
} else {
err := r.copyFile(ctx, from, to, fi.Name(), sw)
if err != nil {
return err
}
}
}
return nil
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"recursiveCopy",
"(",
"ctx",
"context",
".",
"Context",
",",
"from",
"billy",
".",
"Filesystem",
",",
"to",
"billy",
".",
"Filesystem",
",",
"sw",
"*",
"statusWriter",
")",
"(",
"err",
"error",
")",
"{",
"fileInfos",
",",
"err",
":=",
"from",
".",
"ReadDir",
"(",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"for",
"_",
",",
"fi",
":=",
"range",
"fileInfos",
"{",
"if",
"fi",
".",
"IsDir",
"(",
")",
"{",
"if",
"fi",
".",
"Name",
"(",
")",
"==",
"\"",
"\"",
"{",
"continue",
"\n",
"}",
"\n",
"err",
":=",
"to",
".",
"MkdirAll",
"(",
"fi",
".",
"Name",
"(",
")",
",",
"0775",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"chrootFrom",
",",
"err",
":=",
"from",
".",
"Chroot",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"chrootTo",
",",
"err",
":=",
"to",
".",
"Chroot",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"err",
"=",
"r",
".",
"recursiveCopy",
"(",
"ctx",
",",
"chrootFrom",
",",
"chrootTo",
",",
"sw",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"else",
"{",
"err",
":=",
"r",
".",
"copyFile",
"(",
"ctx",
",",
"from",
",",
"to",
",",
"fi",
".",
"Name",
"(",
")",
",",
"sw",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // recursiveCopy copies the entire subdirectory rooted at `fs` to
// `localFS`. | [
"recursiveCopy",
"copies",
"the",
"entire",
"subdirectory",
"rooted",
"at",
"fs",
"to",
"localFS",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsgit/runner.go#L969-L1006 |
160,587 | keybase/client | go/kbfs/kbfsgit/runner.go | parentCommitsForRef | func (r *runner) parentCommitsForRef(ctx context.Context,
localStorer gogitstor.Storer, remoteStorer gogitstor.Storer,
refs map[gogitcfg.RefSpec]bool) (libgit.RefDataByName, error) {
commitsByRef := make(libgit.RefDataByName, len(refs))
haves := make(map[plumbing.Hash]bool)
for refspec := range refs {
if refspec.IsDelete() {
commitsByRef[refspec.Dst("")] = &libgit.RefData{
IsDelete: true,
}
continue
}
refName := plumbing.ReferenceName(refspec.Src())
resolved, err := gogitstor.ResolveReference(localStorer, refName)
if err != nil {
r.log.CDebugf(ctx, "Error resolving ref %s", refName)
}
if resolved != nil {
refName = resolved.Name()
}
ref, err := localStorer.Reference(refName)
if err != nil {
r.log.CDebugf(ctx, "Error getting reference %s: %+v",
refName, err)
continue
}
hash := ref.Hash()
// Get the HEAD commit for the ref from the local repository.
commit, err := gogitobj.GetCommit(localStorer, hash)
if err != nil {
r.log.CDebugf(ctx, "Error getting commit for hash %s (%s): %+v",
string(hash[:]), refName, err)
continue
}
// Iterate through the commits backward, until we experience any of the
// following:
// 1. Find a commit that the remote knows about,
// 2. Reach our maximum number of commits to check,
// 3. Run out of commits.
walker := gogitobj.NewCommitPreorderIter(commit, haves, nil)
toVisit := maxCommitsToVisitPerRef
dstRefName := refspec.Dst("")
commitsByRef[dstRefName] = &libgit.RefData{
IsDelete: refspec.IsDelete(),
Commits: make([]*gogitobj.Commit, 0, maxCommitsToVisitPerRef),
}
err = walker.ForEach(func(c *gogitobj.Commit) error {
haves[c.Hash] = true
toVisit--
// If toVisit starts out at 0 (indicating there is no
// max), then it will be negative here and we won't stop
// early.
if toVisit == 0 {
// Append a sentinel value to communicate that there would be
// more commits.
commitsByRef[dstRefName].Commits =
append(commitsByRef[dstRefName].Commits,
libgit.CommitSentinelValue)
return gogitstor.ErrStop
}
hasEncodedObjectErr := remoteStorer.HasEncodedObject(c.Hash)
if hasEncodedObjectErr == nil {
return gogitstor.ErrStop
}
commitsByRef[dstRefName].Commits =
append(commitsByRef[dstRefName].Commits, c)
return nil
})
if err != nil {
return nil, err
}
}
return commitsByRef, nil
} | go | func (r *runner) parentCommitsForRef(ctx context.Context,
localStorer gogitstor.Storer, remoteStorer gogitstor.Storer,
refs map[gogitcfg.RefSpec]bool) (libgit.RefDataByName, error) {
commitsByRef := make(libgit.RefDataByName, len(refs))
haves := make(map[plumbing.Hash]bool)
for refspec := range refs {
if refspec.IsDelete() {
commitsByRef[refspec.Dst("")] = &libgit.RefData{
IsDelete: true,
}
continue
}
refName := plumbing.ReferenceName(refspec.Src())
resolved, err := gogitstor.ResolveReference(localStorer, refName)
if err != nil {
r.log.CDebugf(ctx, "Error resolving ref %s", refName)
}
if resolved != nil {
refName = resolved.Name()
}
ref, err := localStorer.Reference(refName)
if err != nil {
r.log.CDebugf(ctx, "Error getting reference %s: %+v",
refName, err)
continue
}
hash := ref.Hash()
// Get the HEAD commit for the ref from the local repository.
commit, err := gogitobj.GetCommit(localStorer, hash)
if err != nil {
r.log.CDebugf(ctx, "Error getting commit for hash %s (%s): %+v",
string(hash[:]), refName, err)
continue
}
// Iterate through the commits backward, until we experience any of the
// following:
// 1. Find a commit that the remote knows about,
// 2. Reach our maximum number of commits to check,
// 3. Run out of commits.
walker := gogitobj.NewCommitPreorderIter(commit, haves, nil)
toVisit := maxCommitsToVisitPerRef
dstRefName := refspec.Dst("")
commitsByRef[dstRefName] = &libgit.RefData{
IsDelete: refspec.IsDelete(),
Commits: make([]*gogitobj.Commit, 0, maxCommitsToVisitPerRef),
}
err = walker.ForEach(func(c *gogitobj.Commit) error {
haves[c.Hash] = true
toVisit--
// If toVisit starts out at 0 (indicating there is no
// max), then it will be negative here and we won't stop
// early.
if toVisit == 0 {
// Append a sentinel value to communicate that there would be
// more commits.
commitsByRef[dstRefName].Commits =
append(commitsByRef[dstRefName].Commits,
libgit.CommitSentinelValue)
return gogitstor.ErrStop
}
hasEncodedObjectErr := remoteStorer.HasEncodedObject(c.Hash)
if hasEncodedObjectErr == nil {
return gogitstor.ErrStop
}
commitsByRef[dstRefName].Commits =
append(commitsByRef[dstRefName].Commits, c)
return nil
})
if err != nil {
return nil, err
}
}
return commitsByRef, nil
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"parentCommitsForRef",
"(",
"ctx",
"context",
".",
"Context",
",",
"localStorer",
"gogitstor",
".",
"Storer",
",",
"remoteStorer",
"gogitstor",
".",
"Storer",
",",
"refs",
"map",
"[",
"gogitcfg",
".",
"RefSpec",
"]",
"bool",
")",
"(",
"libgit",
".",
"RefDataByName",
",",
"error",
")",
"{",
"commitsByRef",
":=",
"make",
"(",
"libgit",
".",
"RefDataByName",
",",
"len",
"(",
"refs",
")",
")",
"\n",
"haves",
":=",
"make",
"(",
"map",
"[",
"plumbing",
".",
"Hash",
"]",
"bool",
")",
"\n\n",
"for",
"refspec",
":=",
"range",
"refs",
"{",
"if",
"refspec",
".",
"IsDelete",
"(",
")",
"{",
"commitsByRef",
"[",
"refspec",
".",
"Dst",
"(",
"\"",
"\"",
")",
"]",
"=",
"&",
"libgit",
".",
"RefData",
"{",
"IsDelete",
":",
"true",
",",
"}",
"\n",
"continue",
"\n",
"}",
"\n",
"refName",
":=",
"plumbing",
".",
"ReferenceName",
"(",
"refspec",
".",
"Src",
"(",
")",
")",
"\n",
"resolved",
",",
"err",
":=",
"gogitstor",
".",
"ResolveReference",
"(",
"localStorer",
",",
"refName",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"refName",
")",
"\n",
"}",
"\n",
"if",
"resolved",
"!=",
"nil",
"{",
"refName",
"=",
"resolved",
".",
"Name",
"(",
")",
"\n",
"}",
"\n\n",
"ref",
",",
"err",
":=",
"localStorer",
".",
"Reference",
"(",
"refName",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"refName",
",",
"err",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"hash",
":=",
"ref",
".",
"Hash",
"(",
")",
"\n\n",
"// Get the HEAD commit for the ref from the local repository.",
"commit",
",",
"err",
":=",
"gogitobj",
".",
"GetCommit",
"(",
"localStorer",
",",
"hash",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"log",
".",
"CDebugf",
"(",
"ctx",
",",
"\"",
"\"",
",",
"string",
"(",
"hash",
"[",
":",
"]",
")",
",",
"refName",
",",
"err",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// Iterate through the commits backward, until we experience any of the",
"// following:",
"// 1. Find a commit that the remote knows about,",
"// 2. Reach our maximum number of commits to check,",
"// 3. Run out of commits.",
"walker",
":=",
"gogitobj",
".",
"NewCommitPreorderIter",
"(",
"commit",
",",
"haves",
",",
"nil",
")",
"\n",
"toVisit",
":=",
"maxCommitsToVisitPerRef",
"\n",
"dstRefName",
":=",
"refspec",
".",
"Dst",
"(",
"\"",
"\"",
")",
"\n",
"commitsByRef",
"[",
"dstRefName",
"]",
"=",
"&",
"libgit",
".",
"RefData",
"{",
"IsDelete",
":",
"refspec",
".",
"IsDelete",
"(",
")",
",",
"Commits",
":",
"make",
"(",
"[",
"]",
"*",
"gogitobj",
".",
"Commit",
",",
"0",
",",
"maxCommitsToVisitPerRef",
")",
",",
"}",
"\n",
"err",
"=",
"walker",
".",
"ForEach",
"(",
"func",
"(",
"c",
"*",
"gogitobj",
".",
"Commit",
")",
"error",
"{",
"haves",
"[",
"c",
".",
"Hash",
"]",
"=",
"true",
"\n",
"toVisit",
"--",
"\n",
"// If toVisit starts out at 0 (indicating there is no",
"// max), then it will be negative here and we won't stop",
"// early.",
"if",
"toVisit",
"==",
"0",
"{",
"// Append a sentinel value to communicate that there would be",
"// more commits.",
"commitsByRef",
"[",
"dstRefName",
"]",
".",
"Commits",
"=",
"append",
"(",
"commitsByRef",
"[",
"dstRefName",
"]",
".",
"Commits",
",",
"libgit",
".",
"CommitSentinelValue",
")",
"\n",
"return",
"gogitstor",
".",
"ErrStop",
"\n",
"}",
"\n",
"hasEncodedObjectErr",
":=",
"remoteStorer",
".",
"HasEncodedObject",
"(",
"c",
".",
"Hash",
")",
"\n",
"if",
"hasEncodedObjectErr",
"==",
"nil",
"{",
"return",
"gogitstor",
".",
"ErrStop",
"\n",
"}",
"\n",
"commitsByRef",
"[",
"dstRefName",
"]",
".",
"Commits",
"=",
"append",
"(",
"commitsByRef",
"[",
"dstRefName",
"]",
".",
"Commits",
",",
"c",
")",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"commitsByRef",
",",
"nil",
"\n",
"}"
] | // parentCommitsForRef returns a map of refs with a list of commits for each
// ref, newest first. It only includes commits that exist in `localStorer` but
// not in `remoteStorer`. | [
"parentCommitsForRef",
"returns",
"a",
"map",
"of",
"refs",
"with",
"a",
"list",
"of",
"commits",
"for",
"each",
"ref",
"newest",
"first",
".",
"It",
"only",
"includes",
"commits",
"that",
"exist",
"in",
"localStorer",
"but",
"not",
"in",
"remoteStorer",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/kbfsgit/runner.go#L1449-L1527 |
160,588 | keybase/client | go/libkb/env.go | GetNegBool | func (e *Env) GetNegBool(def bool, flist []NegBoolFunc) bool {
for _, f := range flist {
if val, isSet := f.f(); isSet {
return (val != f.neg)
}
}
return def
} | go | func (e *Env) GetNegBool(def bool, flist []NegBoolFunc) bool {
for _, f := range flist {
if val, isSet := f.f(); isSet {
return (val != f.neg)
}
}
return def
} | [
"func",
"(",
"e",
"*",
"Env",
")",
"GetNegBool",
"(",
"def",
"bool",
",",
"flist",
"[",
"]",
"NegBoolFunc",
")",
"bool",
"{",
"for",
"_",
",",
"f",
":=",
"range",
"flist",
"{",
"if",
"val",
",",
"isSet",
":=",
"f",
".",
"f",
"(",
")",
";",
"isSet",
"{",
"return",
"(",
"val",
"!=",
"f",
".",
"neg",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"def",
"\n",
"}"
] | // GetNegBool gets a negatable bool. You can give it a list of functions,
// and also possible negations for those functions. | [
"GetNegBool",
"gets",
"a",
"negatable",
"bool",
".",
"You",
"can",
"give",
"it",
"a",
"list",
"of",
"functions",
"and",
"also",
"possible",
"negations",
"for",
"those",
"functions",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/env.go#L500-L507 |
160,589 | keybase/client | go/libkb/env.go | GetChatMemberType | func (e *Env) GetChatMemberType() string {
return e.GetString(
func() string { return os.Getenv("KEYBASE_CHAT_MEMBER_TYPE") },
func() string { return "impteam" },
)
} | go | func (e *Env) GetChatMemberType() string {
return e.GetString(
func() string { return os.Getenv("KEYBASE_CHAT_MEMBER_TYPE") },
func() string { return "impteam" },
)
} | [
"func",
"(",
"e",
"*",
"Env",
")",
"GetChatMemberType",
"(",
")",
"string",
"{",
"return",
"e",
".",
"GetString",
"(",
"func",
"(",
")",
"string",
"{",
"return",
"os",
".",
"Getenv",
"(",
"\"",
"\"",
")",
"}",
",",
"func",
"(",
")",
"string",
"{",
"return",
"\"",
"\"",
"}",
",",
")",
"\n",
"}"
] | // GetChatMemberType returns the default member type for new conversations. | [
"GetChatMemberType",
"returns",
"the",
"default",
"member",
"type",
"for",
"new",
"conversations",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/env.go#L1396-L1401 |
160,590 | keybase/client | go/libkb/base58.go | Encode58 | func Encode58(inp []byte) string {
num := new(big.Int).SetBytes(inp)
buf := make([]byte, 0, len(inp))
base := big.NewInt(int64(58))
rem := new(big.Int)
quo := new(big.Int)
for num.Sign() != 0 {
num, rem = quo.QuoRem(num, base, rem)
c := alphabet[rem.Uint64()]
buf = append(buf, c)
}
// Pad leading zeros...
for _, c := range inp {
if c == 0x0 {
buf = append(buf, alphabet[0])
} else {
// Stop adding padding after the first nonzero byte.
break
}
}
reverseBuf(buf)
return string(buf)
} | go | func Encode58(inp []byte) string {
num := new(big.Int).SetBytes(inp)
buf := make([]byte, 0, len(inp))
base := big.NewInt(int64(58))
rem := new(big.Int)
quo := new(big.Int)
for num.Sign() != 0 {
num, rem = quo.QuoRem(num, base, rem)
c := alphabet[rem.Uint64()]
buf = append(buf, c)
}
// Pad leading zeros...
for _, c := range inp {
if c == 0x0 {
buf = append(buf, alphabet[0])
} else {
// Stop adding padding after the first nonzero byte.
break
}
}
reverseBuf(buf)
return string(buf)
} | [
"func",
"Encode58",
"(",
"inp",
"[",
"]",
"byte",
")",
"string",
"{",
"num",
":=",
"new",
"(",
"big",
".",
"Int",
")",
".",
"SetBytes",
"(",
"inp",
")",
"\n",
"buf",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"0",
",",
"len",
"(",
"inp",
")",
")",
"\n",
"base",
":=",
"big",
".",
"NewInt",
"(",
"int64",
"(",
"58",
")",
")",
"\n",
"rem",
":=",
"new",
"(",
"big",
".",
"Int",
")",
"\n",
"quo",
":=",
"new",
"(",
"big",
".",
"Int",
")",
"\n\n",
"for",
"num",
".",
"Sign",
"(",
")",
"!=",
"0",
"{",
"num",
",",
"rem",
"=",
"quo",
".",
"QuoRem",
"(",
"num",
",",
"base",
",",
"rem",
")",
"\n",
"c",
":=",
"alphabet",
"[",
"rem",
".",
"Uint64",
"(",
")",
"]",
"\n",
"buf",
"=",
"append",
"(",
"buf",
",",
"c",
")",
"\n",
"}",
"\n\n",
"// Pad leading zeros...",
"for",
"_",
",",
"c",
":=",
"range",
"inp",
"{",
"if",
"c",
"==",
"0x0",
"{",
"buf",
"=",
"append",
"(",
"buf",
",",
"alphabet",
"[",
"0",
"]",
")",
"\n",
"}",
"else",
"{",
"// Stop adding padding after the first nonzero byte.",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"reverseBuf",
"(",
"buf",
")",
"\n\n",
"return",
"string",
"(",
"buf",
")",
"\n",
"}"
] | // Encode58 base58 encodes the input. | [
"Encode58",
"base58",
"encodes",
"the",
"input",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/base58.go#L37-L62 |
160,591 | keybase/client | go/libkb/base58.go | Decode58 | func Decode58(inp string) (outp []byte, err error) {
place := big.NewInt(1)
base := big.NewInt(58)
buf := []byte(inp)
padlen := 0
// Advance to first non-pad byte
for ; padlen < len(buf); padlen++ {
if buf[padlen] != alphabet[0] {
break
}
}
buf = buf[padlen:]
reverseBuf(buf)
tmp := new(big.Int)
res := big.NewInt(0)
for i, c := range buf {
charIndex := alphabetMap[c]
if charIndex == base58InvalidIndex {
err = fmt.Errorf("Bad character '%c' found at pos %d", c, i)
return
}
tmp.Mul(place, big.NewInt(int64(charIndex)))
res.Add(res, tmp)
if i != len(buf)-1 {
place.Mul(place, base)
}
}
buf = res.Bytes()
pad := bytes.Repeat([]byte{0}, padlen)
outp = append(pad, buf...)
return
} | go | func Decode58(inp string) (outp []byte, err error) {
place := big.NewInt(1)
base := big.NewInt(58)
buf := []byte(inp)
padlen := 0
// Advance to first non-pad byte
for ; padlen < len(buf); padlen++ {
if buf[padlen] != alphabet[0] {
break
}
}
buf = buf[padlen:]
reverseBuf(buf)
tmp := new(big.Int)
res := big.NewInt(0)
for i, c := range buf {
charIndex := alphabetMap[c]
if charIndex == base58InvalidIndex {
err = fmt.Errorf("Bad character '%c' found at pos %d", c, i)
return
}
tmp.Mul(place, big.NewInt(int64(charIndex)))
res.Add(res, tmp)
if i != len(buf)-1 {
place.Mul(place, base)
}
}
buf = res.Bytes()
pad := bytes.Repeat([]byte{0}, padlen)
outp = append(pad, buf...)
return
} | [
"func",
"Decode58",
"(",
"inp",
"string",
")",
"(",
"outp",
"[",
"]",
"byte",
",",
"err",
"error",
")",
"{",
"place",
":=",
"big",
".",
"NewInt",
"(",
"1",
")",
"\n",
"base",
":=",
"big",
".",
"NewInt",
"(",
"58",
")",
"\n",
"buf",
":=",
"[",
"]",
"byte",
"(",
"inp",
")",
"\n",
"padlen",
":=",
"0",
"\n\n",
"// Advance to first non-pad byte",
"for",
";",
"padlen",
"<",
"len",
"(",
"buf",
")",
";",
"padlen",
"++",
"{",
"if",
"buf",
"[",
"padlen",
"]",
"!=",
"alphabet",
"[",
"0",
"]",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"buf",
"=",
"buf",
"[",
"padlen",
":",
"]",
"\n",
"reverseBuf",
"(",
"buf",
")",
"\n\n",
"tmp",
":=",
"new",
"(",
"big",
".",
"Int",
")",
"\n",
"res",
":=",
"big",
".",
"NewInt",
"(",
"0",
")",
"\n\n",
"for",
"i",
",",
"c",
":=",
"range",
"buf",
"{",
"charIndex",
":=",
"alphabetMap",
"[",
"c",
"]",
"\n",
"if",
"charIndex",
"==",
"base58InvalidIndex",
"{",
"err",
"=",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"c",
",",
"i",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"tmp",
".",
"Mul",
"(",
"place",
",",
"big",
".",
"NewInt",
"(",
"int64",
"(",
"charIndex",
")",
")",
")",
"\n",
"res",
".",
"Add",
"(",
"res",
",",
"tmp",
")",
"\n\n",
"if",
"i",
"!=",
"len",
"(",
"buf",
")",
"-",
"1",
"{",
"place",
".",
"Mul",
"(",
"place",
",",
"base",
")",
"\n",
"}",
"\n",
"}",
"\n",
"buf",
"=",
"res",
".",
"Bytes",
"(",
")",
"\n",
"pad",
":=",
"bytes",
".",
"Repeat",
"(",
"[",
"]",
"byte",
"{",
"0",
"}",
",",
"padlen",
")",
"\n",
"outp",
"=",
"append",
"(",
"pad",
",",
"buf",
"...",
")",
"\n\n",
"return",
"\n",
"}"
] | // Decode58 base58 decodes the input or returns an error. | [
"Decode58",
"base58",
"decodes",
"the",
"input",
"or",
"returns",
"an",
"error",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/libkb/base58.go#L65-L102 |
160,592 | keybase/client | go/engine/saltpack_encrypt.go | NewSaltpackEncrypt | func NewSaltpackEncrypt(arg *SaltpackEncryptArg, newKeyfinderHook func(arg libkb.SaltpackRecipientKeyfinderArg) libkb.SaltpackRecipientKeyfinderEngineInterface) *SaltpackEncrypt {
return &SaltpackEncrypt{
arg: arg,
newKeyfinderHook: newKeyfinderHook,
}
} | go | func NewSaltpackEncrypt(arg *SaltpackEncryptArg, newKeyfinderHook func(arg libkb.SaltpackRecipientKeyfinderArg) libkb.SaltpackRecipientKeyfinderEngineInterface) *SaltpackEncrypt {
return &SaltpackEncrypt{
arg: arg,
newKeyfinderHook: newKeyfinderHook,
}
} | [
"func",
"NewSaltpackEncrypt",
"(",
"arg",
"*",
"SaltpackEncryptArg",
",",
"newKeyfinderHook",
"func",
"(",
"arg",
"libkb",
".",
"SaltpackRecipientKeyfinderArg",
")",
"libkb",
".",
"SaltpackRecipientKeyfinderEngineInterface",
")",
"*",
"SaltpackEncrypt",
"{",
"return",
"&",
"SaltpackEncrypt",
"{",
"arg",
":",
"arg",
",",
"newKeyfinderHook",
":",
"newKeyfinderHook",
",",
"}",
"\n",
"}"
] | // NewSaltpackEncrypt creates a SaltpackEncrypt engine. | [
"NewSaltpackEncrypt",
"creates",
"a",
"SaltpackEncrypt",
"engine",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/saltpack_encrypt.go#L36-L41 |
160,593 | keybase/client | go/kbfs/libkbfs/rekey_queue.go | NewRekeyQueueStandard | func NewRekeyQueueStandard(config Config) (rkq *RekeyQueueStandard) {
ctx, cancel := context.WithCancel(context.Background())
rkq = &RekeyQueueStandard{
config: config,
log: config.MakeLogger("RQ"),
queue: make(chan tlf.ID, config.Mode().RekeyQueueSize()),
limiter: rate.NewLimiter(rekeysPerSecond, numConcurrentRekeys),
pendings: make(map[tlf.ID]bool),
cancel: cancel,
}
if config.Mode().RekeyWorkers() > 0 {
rkq.start(ctx)
}
return rkq
} | go | func NewRekeyQueueStandard(config Config) (rkq *RekeyQueueStandard) {
ctx, cancel := context.WithCancel(context.Background())
rkq = &RekeyQueueStandard{
config: config,
log: config.MakeLogger("RQ"),
queue: make(chan tlf.ID, config.Mode().RekeyQueueSize()),
limiter: rate.NewLimiter(rekeysPerSecond, numConcurrentRekeys),
pendings: make(map[tlf.ID]bool),
cancel: cancel,
}
if config.Mode().RekeyWorkers() > 0 {
rkq.start(ctx)
}
return rkq
} | [
"func",
"NewRekeyQueueStandard",
"(",
"config",
"Config",
")",
"(",
"rkq",
"*",
"RekeyQueueStandard",
")",
"{",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"context",
".",
"Background",
"(",
")",
")",
"\n",
"rkq",
"=",
"&",
"RekeyQueueStandard",
"{",
"config",
":",
"config",
",",
"log",
":",
"config",
".",
"MakeLogger",
"(",
"\"",
"\"",
")",
",",
"queue",
":",
"make",
"(",
"chan",
"tlf",
".",
"ID",
",",
"config",
".",
"Mode",
"(",
")",
".",
"RekeyQueueSize",
"(",
")",
")",
",",
"limiter",
":",
"rate",
".",
"NewLimiter",
"(",
"rekeysPerSecond",
",",
"numConcurrentRekeys",
")",
",",
"pendings",
":",
"make",
"(",
"map",
"[",
"tlf",
".",
"ID",
"]",
"bool",
")",
",",
"cancel",
":",
"cancel",
",",
"}",
"\n",
"if",
"config",
".",
"Mode",
"(",
")",
".",
"RekeyWorkers",
"(",
")",
">",
"0",
"{",
"rkq",
".",
"start",
"(",
"ctx",
")",
"\n",
"}",
"\n",
"return",
"rkq",
"\n",
"}"
] | // NewRekeyQueueStandard creates a new rekey queue. | [
"NewRekeyQueueStandard",
"creates",
"a",
"new",
"rekey",
"queue",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_queue.go#L58-L72 |
160,594 | keybase/client | go/kbfs/libkbfs/rekey_queue.go | start | func (rkq *RekeyQueueStandard) start(ctx context.Context) {
go func() {
for {
select {
case id := <-rkq.queue:
if err := rkq.limiter.Wait(ctx); err != nil {
rkq.log.Debug("Waiting on rate limiter for tlf=%v error: %v", id, err)
return
}
rkq.config.KBFSOps().RequestRekey(context.Background(), id)
func(id tlf.ID) {
rkq.mu.Lock()
defer rkq.mu.Unlock()
delete(rkq.pendings, id)
}(id)
case err := <-ctx.Done():
rkq.log.Debug("Rekey queue background routine context done: %v", err)
return
}
}
}()
} | go | func (rkq *RekeyQueueStandard) start(ctx context.Context) {
go func() {
for {
select {
case id := <-rkq.queue:
if err := rkq.limiter.Wait(ctx); err != nil {
rkq.log.Debug("Waiting on rate limiter for tlf=%v error: %v", id, err)
return
}
rkq.config.KBFSOps().RequestRekey(context.Background(), id)
func(id tlf.ID) {
rkq.mu.Lock()
defer rkq.mu.Unlock()
delete(rkq.pendings, id)
}(id)
case err := <-ctx.Done():
rkq.log.Debug("Rekey queue background routine context done: %v", err)
return
}
}
}()
} | [
"func",
"(",
"rkq",
"*",
"RekeyQueueStandard",
")",
"start",
"(",
"ctx",
"context",
".",
"Context",
")",
"{",
"go",
"func",
"(",
")",
"{",
"for",
"{",
"select",
"{",
"case",
"id",
":=",
"<-",
"rkq",
".",
"queue",
":",
"if",
"err",
":=",
"rkq",
".",
"limiter",
".",
"Wait",
"(",
"ctx",
")",
";",
"err",
"!=",
"nil",
"{",
"rkq",
".",
"log",
".",
"Debug",
"(",
"\"",
"\"",
",",
"id",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"rkq",
".",
"config",
".",
"KBFSOps",
"(",
")",
".",
"RequestRekey",
"(",
"context",
".",
"Background",
"(",
")",
",",
"id",
")",
"\n",
"func",
"(",
"id",
"tlf",
".",
"ID",
")",
"{",
"rkq",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"rkq",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"delete",
"(",
"rkq",
".",
"pendings",
",",
"id",
")",
"\n",
"}",
"(",
"id",
")",
"\n",
"case",
"err",
":=",
"<-",
"ctx",
".",
"Done",
"(",
")",
":",
"rkq",
".",
"log",
".",
"Debug",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"}"
] | // start spawns a goroutine that dispatches rekey requests to correct folder
// branch ops while conforming to the rater limiter. | [
"start",
"spawns",
"a",
"goroutine",
"that",
"dispatches",
"rekey",
"requests",
"to",
"correct",
"folder",
"branch",
"ops",
"while",
"conforming",
"to",
"the",
"rater",
"limiter",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_queue.go#L76-L97 |
160,595 | keybase/client | go/kbfs/libkbfs/rekey_queue.go | Enqueue | func (rkq *RekeyQueueStandard) Enqueue(id tlf.ID) {
rkq.mu.Lock()
defer rkq.mu.Unlock()
rkq.pendings[id] = true
select {
case rkq.queue <- id:
default:
// The queue is full; drop this one for now until the next
// request to the server for more rekeys.
rkq.log.Debug("Rekey queue is full; dropping %s", id)
}
} | go | func (rkq *RekeyQueueStandard) Enqueue(id tlf.ID) {
rkq.mu.Lock()
defer rkq.mu.Unlock()
rkq.pendings[id] = true
select {
case rkq.queue <- id:
default:
// The queue is full; drop this one for now until the next
// request to the server for more rekeys.
rkq.log.Debug("Rekey queue is full; dropping %s", id)
}
} | [
"func",
"(",
"rkq",
"*",
"RekeyQueueStandard",
")",
"Enqueue",
"(",
"id",
"tlf",
".",
"ID",
")",
"{",
"rkq",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"rkq",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"rkq",
".",
"pendings",
"[",
"id",
"]",
"=",
"true",
"\n\n",
"select",
"{",
"case",
"rkq",
".",
"queue",
"<-",
"id",
":",
"default",
":",
"// The queue is full; drop this one for now until the next",
"// request to the server for more rekeys.",
"rkq",
".",
"log",
".",
"Debug",
"(",
"\"",
"\"",
",",
"id",
")",
"\n",
"}",
"\n",
"}"
] | // Enqueue implements the RekeyQueue interface for RekeyQueueStandard. | [
"Enqueue",
"implements",
"the",
"RekeyQueue",
"interface",
"for",
"RekeyQueueStandard",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_queue.go#L100-L112 |
160,596 | keybase/client | go/kbfs/libkbfs/rekey_queue.go | IsRekeyPending | func (rkq *RekeyQueueStandard) IsRekeyPending(id tlf.ID) bool {
rkq.mu.RLock()
defer rkq.mu.RUnlock()
return rkq.pendings[id]
} | go | func (rkq *RekeyQueueStandard) IsRekeyPending(id tlf.ID) bool {
rkq.mu.RLock()
defer rkq.mu.RUnlock()
return rkq.pendings[id]
} | [
"func",
"(",
"rkq",
"*",
"RekeyQueueStandard",
")",
"IsRekeyPending",
"(",
"id",
"tlf",
".",
"ID",
")",
"bool",
"{",
"rkq",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"rkq",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"rkq",
".",
"pendings",
"[",
"id",
"]",
"\n",
"}"
] | // IsRekeyPending implements the RekeyQueue interface for RekeyQueueStandard. | [
"IsRekeyPending",
"implements",
"the",
"RekeyQueue",
"interface",
"for",
"RekeyQueueStandard",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_queue.go#L115-L119 |
160,597 | keybase/client | go/kbfs/libkbfs/rekey_queue.go | Shutdown | func (rkq *RekeyQueueStandard) Shutdown() {
rkq.mu.Lock()
defer rkq.mu.Unlock()
if rkq.cancel != nil {
rkq.cancel()
rkq.cancel = nil
}
} | go | func (rkq *RekeyQueueStandard) Shutdown() {
rkq.mu.Lock()
defer rkq.mu.Unlock()
if rkq.cancel != nil {
rkq.cancel()
rkq.cancel = nil
}
} | [
"func",
"(",
"rkq",
"*",
"RekeyQueueStandard",
")",
"Shutdown",
"(",
")",
"{",
"rkq",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"rkq",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"if",
"rkq",
".",
"cancel",
"!=",
"nil",
"{",
"rkq",
".",
"cancel",
"(",
")",
"\n",
"rkq",
".",
"cancel",
"=",
"nil",
"\n",
"}",
"\n",
"}"
] | // Shutdown implements the RekeyQueue interface for RekeyQueueStandard. | [
"Shutdown",
"implements",
"the",
"RekeyQueue",
"interface",
"for",
"RekeyQueueStandard",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/kbfs/libkbfs/rekey_queue.go#L122-L129 |
160,598 | keybase/client | go/engine/paperkey_submit.go | NewPaperKeySubmit | func NewPaperKeySubmit(g *libkb.GlobalContext, paperPhrase string) *PaperKeySubmit {
return &PaperKeySubmit{
Contextified: libkb.NewContextified(g),
paperPhrase: paperPhrase,
}
} | go | func NewPaperKeySubmit(g *libkb.GlobalContext, paperPhrase string) *PaperKeySubmit {
return &PaperKeySubmit{
Contextified: libkb.NewContextified(g),
paperPhrase: paperPhrase,
}
} | [
"func",
"NewPaperKeySubmit",
"(",
"g",
"*",
"libkb",
".",
"GlobalContext",
",",
"paperPhrase",
"string",
")",
"*",
"PaperKeySubmit",
"{",
"return",
"&",
"PaperKeySubmit",
"{",
"Contextified",
":",
"libkb",
".",
"NewContextified",
"(",
"g",
")",
",",
"paperPhrase",
":",
"paperPhrase",
",",
"}",
"\n",
"}"
] | // NewPaperKeySubmit creates a PaperKeySubmit engine. | [
"NewPaperKeySubmit",
"creates",
"a",
"PaperKeySubmit",
"engine",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/engine/paperkey_submit.go#L18-L23 |
160,599 | keybase/client | go/protocol/keybase1/crypto.go | SignED25519ForKBFS | func (c CryptoClient) SignED25519ForKBFS(ctx context.Context, __arg SignED25519ForKBFSArg) (res ED25519SignatureInfo, err error) {
err = c.Cli.Call(ctx, "keybase.1.crypto.signED25519ForKBFS", []interface{}{__arg}, &res)
return
} | go | func (c CryptoClient) SignED25519ForKBFS(ctx context.Context, __arg SignED25519ForKBFSArg) (res ED25519SignatureInfo, err error) {
err = c.Cli.Call(ctx, "keybase.1.crypto.signED25519ForKBFS", []interface{}{__arg}, &res)
return
} | [
"func",
"(",
"c",
"CryptoClient",
")",
"SignED25519ForKBFS",
"(",
"ctx",
"context",
".",
"Context",
",",
"__arg",
"SignED25519ForKBFSArg",
")",
"(",
"res",
"ED25519SignatureInfo",
",",
"err",
"error",
")",
"{",
"err",
"=",
"c",
".",
"Cli",
".",
"Call",
"(",
"ctx",
",",
"\"",
"\"",
",",
"[",
"]",
"interface",
"{",
"}",
"{",
"__arg",
"}",
",",
"&",
"res",
")",
"\n",
"return",
"\n",
"}"
] | // Same as the above except a KBFS-specific prefix is added to the payload to be signed. | [
"Same",
"as",
"the",
"above",
"except",
"a",
"KBFS",
"-",
"specific",
"prefix",
"is",
"added",
"to",
"the",
"payload",
"to",
"be",
"signed",
"."
] | b352622cd8cc94798cfacbcb56ada203c18e519e | https://github.com/keybase/client/blob/b352622cd8cc94798cfacbcb56ada203c18e519e/go/protocol/keybase1/crypto.go#L243-L246 |