Dataset columns:
repo: string (length 5–54)
path: string (length 4–155)
func_name: string (length 1–118)
original_string: string (length 52–85.5k)
language: string (1 distinct value)
code: string (length 52–85.5k)
code_tokens: sequence
docstring: string (length 6–2.61k)
docstring_tokens: sequence
sha: string (length 40)
url: string (length 85–252)
partition: string (1 distinct value)
perkeep/perkeep
pkg/serverinit/genconfig.go
sortedDBMS
func (b *lowBuilder) sortedDBMS(named dbname) (map[string]interface{}, error) { if b.high.MySQL != "" { return b.dbIndexStorage("mysql", b.high.MySQL, named) } if b.high.PostgreSQL != "" { return b.dbIndexStorage("postgres", b.high.PostgreSQL, named) } if b.high.Mongo != "" { return b.mongoIndexStorage(b.high.Mongo, named) } return nil, nil }
go
func (b *lowBuilder) sortedDBMS(named dbname) (map[string]interface{}, error) { if b.high.MySQL != "" { return b.dbIndexStorage("mysql", b.high.MySQL, named) } if b.high.PostgreSQL != "" { return b.dbIndexStorage("postgres", b.high.PostgreSQL, named) } if b.high.Mongo != "" { return b.mongoIndexStorage(b.high.Mongo, named) } return nil, nil }
[ "func", "(", "b", "*", "lowBuilder", ")", "sortedDBMS", "(", "named", "dbname", ")", "(", "map", "[", "string", "]", "interface", "{", "}", ",", "error", ")", "{", "if", "b", ".", "high", ".", "MySQL", "!=", "\"", "\"", "{", "return", "b", ".", "dbIndexStorage", "(", "\"", "\"", ",", "b", ".", "high", ".", "MySQL", ",", "named", ")", "\n", "}", "\n", "if", "b", ".", "high", ".", "PostgreSQL", "!=", "\"", "\"", "{", "return", "b", ".", "dbIndexStorage", "(", "\"", "\"", ",", "b", ".", "high", ".", "PostgreSQL", ",", "named", ")", "\n", "}", "\n", "if", "b", ".", "high", ".", "Mongo", "!=", "\"", "\"", "{", "return", "b", ".", "mongoIndexStorage", "(", "b", ".", "high", ".", "Mongo", ",", "named", ")", "\n", "}", "\n", "return", "nil", ",", "nil", "\n", "}" ]
// sortedDBMS returns the configuration for a named database on one of the // DBMS, if any was found in the configuration. It returns nil otherwise.
[ "sortedDBMS", "returns", "the", "configuration", "for", "a", "name", "database", "on", "one", "of", "the", "DBMS", "if", "any", "was", "found", "in", "the", "configuration", ".", "It", "returns", "nil", "otherwise", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/serverinit/genconfig.go#L467-L478
train
perkeep/perkeep
pkg/serverinit/genconfig.go
sortedStorageAt
func (b *lowBuilder) sortedStorageAt(sortedType dbname, filePrefix string) (map[string]interface{}, error) { dbms, err := b.sortedDBMS(sortedType) if err != nil { return nil, err } if dbms != nil { return dbms, nil } if b.high.MemoryIndex { return map[string]interface{}{ "type": "memory", }, nil } if sortedType != "index" && filePrefix == "" { return nil, fmt.Errorf("internal error: use of sortedStorageAt with a non-index type (%v) and no file location for non-database sorted implementation", sortedType) } // dbFile returns path directly if sortedType == "index", else it returns filePrefix+"."+ext. dbFile := func(path, ext string) string { if sortedType == "index" { return path } return filePrefix + "." + ext } if b.high.SQLite != "" { return map[string]interface{}{ "type": "sqlite", "file": dbFile(b.high.SQLite, "sqlite"), }, nil } if b.high.KVFile != "" { return map[string]interface{}{ "type": "kv", "file": dbFile(b.high.KVFile, "kv"), }, nil } if b.high.LevelDB != "" { return map[string]interface{}{ "type": "leveldb", "file": dbFile(b.high.LevelDB, "leveldb"), }, nil } panic("internal error: sortedStorageAt didn't find a sorted implementation") }
go
func (b *lowBuilder) sortedStorageAt(sortedType dbname, filePrefix string) (map[string]interface{}, error) { dbms, err := b.sortedDBMS(sortedType) if err != nil { return nil, err } if dbms != nil { return dbms, nil } if b.high.MemoryIndex { return map[string]interface{}{ "type": "memory", }, nil } if sortedType != "index" && filePrefix == "" { return nil, fmt.Errorf("internal error: use of sortedStorageAt with a non-index type (%v) and no file location for non-database sorted implementation", sortedType) } // dbFile returns path directly if sortedType == "index", else it returns filePrefix+"."+ext. dbFile := func(path, ext string) string { if sortedType == "index" { return path } return filePrefix + "." + ext } if b.high.SQLite != "" { return map[string]interface{}{ "type": "sqlite", "file": dbFile(b.high.SQLite, "sqlite"), }, nil } if b.high.KVFile != "" { return map[string]interface{}{ "type": "kv", "file": dbFile(b.high.KVFile, "kv"), }, nil } if b.high.LevelDB != "" { return map[string]interface{}{ "type": "leveldb", "file": dbFile(b.high.LevelDB, "leveldb"), }, nil } panic("internal error: sortedStorageAt didn't find a sorted implementation") }
[ "func", "(", "b", "*", "lowBuilder", ")", "sortedStorageAt", "(", "sortedType", "dbname", ",", "filePrefix", "string", ")", "(", "map", "[", "string", "]", "interface", "{", "}", ",", "error", ")", "{", "dbms", ",", "err", ":=", "b", ".", "sortedDBMS", "(", "sortedType", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "if", "dbms", "!=", "nil", "{", "return", "dbms", ",", "nil", "\n", "}", "\n", "if", "b", ".", "high", ".", "MemoryIndex", "{", "return", "map", "[", "string", "]", "interface", "{", "}", "{", "\"", "\"", ":", "\"", "\"", ",", "}", ",", "nil", "\n", "}", "\n", "if", "sortedType", "!=", "\"", "\"", "&&", "filePrefix", "==", "\"", "\"", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "sortedType", ")", "\n", "}", "\n", "// dbFile returns path directly if sortedType == \"index\", else it returns filePrefix+\".\"+ext.", "dbFile", ":=", "func", "(", "path", ",", "ext", "string", ")", "string", "{", "if", "sortedType", "==", "\"", "\"", "{", "return", "path", "\n", "}", "\n", "return", "filePrefix", "+", "\"", "\"", "+", "ext", "\n", "}", "\n", "if", "b", ".", "high", ".", "SQLite", "!=", "\"", "\"", "{", "return", "map", "[", "string", "]", "interface", "{", "}", "{", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "dbFile", "(", "b", ".", "high", ".", "SQLite", ",", "\"", "\"", ")", ",", "}", ",", "nil", "\n", "}", "\n", "if", "b", ".", "high", ".", "KVFile", "!=", "\"", "\"", "{", "return", "map", "[", "string", "]", "interface", "{", "}", "{", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "dbFile", "(", "b", ".", "high", ".", "KVFile", ",", "\"", "\"", ")", ",", "}", ",", "nil", "\n", "}", "\n", "if", "b", ".", "high", ".", "LevelDB", "!=", "\"", "\"", "{", "return", "map", "[", "string", "]", "interface", "{", "}", "{", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "dbFile", "(", "b", ".", "high", ".", "LevelDB", ",", "\"", "\"", ")", ",", "}", ",", "nil", "\n", "}", "\n", "panic", "(", "\"", "\"", ")", "\n", "}" ]
// filePrefix gives a file path of where to put the database. It can be omitted by // some sorted implementations, but is required by others. // The filePrefix should be to a file, not a directory, and should not end in a ".ext" extension. // An extension like ".kv" or ".sqlite" will be added.
[ "filePrefix", "gives", "a", "file", "path", "of", "where", "to", "put", "the", "database", ".", "It", "can", "be", "omitted", "by", "some", "sorted", "implementations", "but", "is", "required", "by", "others", ".", "The", "filePrefix", "should", "be", "to", "a", "file", "not", "a", "directory", "and", "should", "not", "end", "in", "a", ".", "ext", "extension", ".", "An", "extension", "like", ".", "kv", "or", ".", "sqlite", "will", "be", "added", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/serverinit/genconfig.go#L484-L526
train
perkeep/perkeep
pkg/serverinit/genconfig.go
indexFileDir
func (b *lowBuilder) indexFileDir() string { switch { case b.high.SQLite != "": return filepath.Dir(b.high.SQLite) case b.high.KVFile != "": return filepath.Dir(b.high.KVFile) case b.high.LevelDB != "": return filepath.Dir(b.high.LevelDB) } return "" }
go
func (b *lowBuilder) indexFileDir() string { switch { case b.high.SQLite != "": return filepath.Dir(b.high.SQLite) case b.high.KVFile != "": return filepath.Dir(b.high.KVFile) case b.high.LevelDB != "": return filepath.Dir(b.high.LevelDB) } return "" }
[ "func", "(", "b", "*", "lowBuilder", ")", "indexFileDir", "(", ")", "string", "{", "switch", "{", "case", "b", ".", "high", ".", "SQLite", "!=", "\"", "\"", ":", "return", "filepath", ".", "Dir", "(", "b", ".", "high", ".", "SQLite", ")", "\n", "case", "b", ".", "high", ".", "KVFile", "!=", "\"", "\"", ":", "return", "filepath", ".", "Dir", "(", "b", ".", "high", ".", "KVFile", ")", "\n", "case", "b", ".", "high", ".", "LevelDB", "!=", "\"", "\"", ":", "return", "filepath", ".", "Dir", "(", "b", ".", "high", ".", "LevelDB", ")", "\n", "}", "\n", "return", "\"", "\"", "\n", "}" ]
// indexFileDir returns the directory of the sqlite or kv file, or the // empty string.
[ "indexFileDir", "returns", "the", "directory", "of", "the", "sqlite", "or", "kv", "file", "or", "the", "empty", "string", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/serverinit/genconfig.go#L827-L837
train
perkeep/perkeep
pkg/serverinit/genconfig.go
WriteDefaultConfigFile
func WriteDefaultConfigFile(filePath string, useSQLite bool) error { conf := defaultBaseConfig blobDir, err := osutil.CamliBlobRoot() if err != nil { return err } varDir, err := osutil.CamliVarDir() if err != nil { return err } if err := wkfs.MkdirAll(blobDir, 0700); err != nil { return fmt.Errorf("Could not create default blobs directory: %v", err) } conf.BlobPath = blobDir conf.PackRelated = true if useSQLite { conf.SQLite = filepath.Join(varDir, "index.sqlite") } else { conf.LevelDB = filepath.Join(varDir, "index.leveldb") } keyID, secretRing, err := getOrMakeKeyring() if err != nil { return err } conf.Identity = keyID conf.IdentitySecretRing = secretRing confData, err := json.MarshalIndent(conf, "", " ") if err != nil { return fmt.Errorf("Could not json encode config file : %v", err) } if err := wkfs.WriteFile(filePath, confData, 0600); err != nil { return fmt.Errorf("Could not create or write default server config: %v", err) } return nil }
go
func WriteDefaultConfigFile(filePath string, useSQLite bool) error { conf := defaultBaseConfig blobDir, err := osutil.CamliBlobRoot() if err != nil { return err } varDir, err := osutil.CamliVarDir() if err != nil { return err } if err := wkfs.MkdirAll(blobDir, 0700); err != nil { return fmt.Errorf("Could not create default blobs directory: %v", err) } conf.BlobPath = blobDir conf.PackRelated = true if useSQLite { conf.SQLite = filepath.Join(varDir, "index.sqlite") } else { conf.LevelDB = filepath.Join(varDir, "index.leveldb") } keyID, secretRing, err := getOrMakeKeyring() if err != nil { return err } conf.Identity = keyID conf.IdentitySecretRing = secretRing confData, err := json.MarshalIndent(conf, "", " ") if err != nil { return fmt.Errorf("Could not json encode config file : %v", err) } if err := wkfs.WriteFile(filePath, confData, 0600); err != nil { return fmt.Errorf("Could not create or write default server config: %v", err) } return nil }
[ "func", "WriteDefaultConfigFile", "(", "filePath", "string", ",", "useSQLite", "bool", ")", "error", "{", "conf", ":=", "defaultBaseConfig", "\n", "blobDir", ",", "err", ":=", "osutil", ".", "CamliBlobRoot", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "varDir", ",", "err", ":=", "osutil", ".", "CamliVarDir", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "wkfs", ".", "MkdirAll", "(", "blobDir", ",", "0700", ")", ";", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "conf", ".", "BlobPath", "=", "blobDir", "\n", "conf", ".", "PackRelated", "=", "true", "\n\n", "if", "useSQLite", "{", "conf", ".", "SQLite", "=", "filepath", ".", "Join", "(", "varDir", ",", "\"", "\"", ")", "\n", "}", "else", "{", "conf", ".", "LevelDB", "=", "filepath", ".", "Join", "(", "varDir", ",", "\"", "\"", ")", "\n", "}", "\n\n", "keyID", ",", "secretRing", ",", "err", ":=", "getOrMakeKeyring", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "conf", ".", "Identity", "=", "keyID", "\n", "conf", ".", "IdentitySecretRing", "=", "secretRing", "\n\n", "confData", ",", "err", ":=", "json", ".", "MarshalIndent", "(", "conf", ",", "\"", "\"", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "if", "err", ":=", "wkfs", ".", "WriteFile", "(", "filePath", ",", "confData", ",", "0600", ")", ";", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// WriteDefaultConfigFile generates a new default high-level server configuration // file at filePath. If useSQLite, the default indexer will use SQLite, otherwise // leveldb. If filePath already exists, it is overwritten.
[ "WriteDefaultConfigFile", "generates", "a", "new", "default", "high", "-", "level", "server", "configuration", "file", "at", "filePath", ".", "If", "useSQLite", "the", "default", "indexer", "will", "use", "SQLite", "otherwise", "leveldb", ".", "If", "filePath", "already", "exists", "it", "is", "overwritten", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/serverinit/genconfig.go#L1246-L1285
train
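
For orientation, here is a minimal sketch of calling this exported helper from another program. The perkeep.org/pkg/serverinit import path and the destination path are assumptions for illustration; the function signature itself is taken from the row above.

```go
package main

import (
	"log"

	"perkeep.org/pkg/serverinit" // assumed vanity import path for perkeep/perkeep
)

func main() {
	// Write a default high-level server config, using SQLite for the index.
	// The destination path is purely illustrative.
	const dst = "/home/user/.config/perkeep/server-config.json"
	if err := serverinit.WriteDefaultConfigFile(dst, true); err != nil {
		log.Fatalf("writing default config: %v", err)
	}
}
```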
perkeep/perkeep
server/perkeepd/autodns.go
listenForCamliNet
func listenForCamliNet(ws *webserver.Server, config *serverinit.Config) (baseURL string, err error) { camliNetIP := config.CamliNetIP() if camliNetIP == "" { return "", errors.New("no camliNetIP") } if ip := net.ParseIP(camliNetIP); ip == nil { return "", fmt.Errorf("camliNetIP value %q is not a valid IP address", camliNetIP) } else if ip.To4() == nil { // TODO: support IPv6 when GCE supports IPv6: https://code.google.com/p/google-compute-engine/issues/detail?id=8 return "", errors.New("CamliNetIP should be an IPv4, as IPv6 is not yet supported on GCE") } challengeHostname := camliNetIP + gpgchallenge.SNISuffix selfCert, selfKey, err := httputil.GenSelfTLS(challengeHostname) if err != nil { return "", fmt.Errorf("could not generate self-signed certificate: %v", err) } gpgchallengeCert, err := tls.X509KeyPair(selfCert, selfKey) if err != nil { return "", fmt.Errorf("could not load TLS certificate: %v", err) } _, keyId, err := config.KeyRingAndId() if err != nil { return "", fmt.Errorf("could not get keyId for camliNet hostname: %v", err) } // catch future length changes if len(keyId) != 16 { panic("length of GPG keyId is not 16 anymore") } shortKeyId := keyId[8:] camliNetHostName = strings.ToLower(shortKeyId + "." + camliNetDomain) m := autocert.Manager{ Prompt: autocert.AcceptTOS, HostPolicy: autocert.HostWhitelist(camliNetHostName), Cache: autocert.DirCache(osutil.DefaultLetsEncryptCache()), } go func() { err := http.ListenAndServe(":http", m.HTTPHandler(nil)) log.Fatalf("Could not start server for http-01 challenge: %v", err) }() getCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { if hello.ServerName == challengeHostname { return &gpgchallengeCert, nil } return m.GetCertificate(hello) } log.Printf("TLS enabled, with Let's Encrypt for %v", camliNetHostName) ws.SetTLS(webserver.TLSSetup{CertManager: getCertificate}) err = ws.Listen(fmt.Sprintf(":%d", gpgchallenge.ClientChallengedPort)) if err != nil { return "", fmt.Errorf("Listen: %v", err) } return fmt.Sprintf("https://%s", camliNetHostName), nil }
go
func listenForCamliNet(ws *webserver.Server, config *serverinit.Config) (baseURL string, err error) { camliNetIP := config.CamliNetIP() if camliNetIP == "" { return "", errors.New("no camliNetIP") } if ip := net.ParseIP(camliNetIP); ip == nil { return "", fmt.Errorf("camliNetIP value %q is not a valid IP address", camliNetIP) } else if ip.To4() == nil { // TODO: support IPv6 when GCE supports IPv6: https://code.google.com/p/google-compute-engine/issues/detail?id=8 return "", errors.New("CamliNetIP should be an IPv4, as IPv6 is not yet supported on GCE") } challengeHostname := camliNetIP + gpgchallenge.SNISuffix selfCert, selfKey, err := httputil.GenSelfTLS(challengeHostname) if err != nil { return "", fmt.Errorf("could not generate self-signed certificate: %v", err) } gpgchallengeCert, err := tls.X509KeyPair(selfCert, selfKey) if err != nil { return "", fmt.Errorf("could not load TLS certificate: %v", err) } _, keyId, err := config.KeyRingAndId() if err != nil { return "", fmt.Errorf("could not get keyId for camliNet hostname: %v", err) } // catch future length changes if len(keyId) != 16 { panic("length of GPG keyId is not 16 anymore") } shortKeyId := keyId[8:] camliNetHostName = strings.ToLower(shortKeyId + "." + camliNetDomain) m := autocert.Manager{ Prompt: autocert.AcceptTOS, HostPolicy: autocert.HostWhitelist(camliNetHostName), Cache: autocert.DirCache(osutil.DefaultLetsEncryptCache()), } go func() { err := http.ListenAndServe(":http", m.HTTPHandler(nil)) log.Fatalf("Could not start server for http-01 challenge: %v", err) }() getCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { if hello.ServerName == challengeHostname { return &gpgchallengeCert, nil } return m.GetCertificate(hello) } log.Printf("TLS enabled, with Let's Encrypt for %v", camliNetHostName) ws.SetTLS(webserver.TLSSetup{CertManager: getCertificate}) err = ws.Listen(fmt.Sprintf(":%d", gpgchallenge.ClientChallengedPort)) if err != nil { return "", fmt.Errorf("Listen: %v", err) } return fmt.Sprintf("https://%s", camliNetHostName), nil }
[ "func", "listenForCamliNet", "(", "ws", "*", "webserver", ".", "Server", ",", "config", "*", "serverinit", ".", "Config", ")", "(", "baseURL", "string", ",", "err", "error", ")", "{", "camliNetIP", ":=", "config", ".", "CamliNetIP", "(", ")", "\n", "if", "camliNetIP", "==", "\"", "\"", "{", "return", "\"", "\"", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "ip", ":=", "net", ".", "ParseIP", "(", "camliNetIP", ")", ";", "ip", "==", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "camliNetIP", ")", "\n", "}", "else", "if", "ip", ".", "To4", "(", ")", "==", "nil", "{", "// TODO: support IPv6 when GCE supports IPv6: https://code.google.com/p/google-compute-engine/issues/detail?id=8", "return", "\"", "\"", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "challengeHostname", ":=", "camliNetIP", "+", "gpgchallenge", ".", "SNISuffix", "\n", "selfCert", ",", "selfKey", ",", "err", ":=", "httputil", ".", "GenSelfTLS", "(", "challengeHostname", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "gpgchallengeCert", ",", "err", ":=", "tls", ".", "X509KeyPair", "(", "selfCert", ",", "selfKey", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "_", ",", "keyId", ",", "err", ":=", "config", ".", "KeyRingAndId", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "// catch future length changes", "if", "len", "(", "keyId", ")", "!=", "16", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "shortKeyId", ":=", "keyId", "[", "8", ":", "]", "\n", "camliNetHostName", "=", "strings", ".", "ToLower", "(", "shortKeyId", "+", "\"", "\"", "+", "camliNetDomain", ")", "\n", "m", ":=", "autocert", ".", "Manager", "{", "Prompt", ":", "autocert", ".", "AcceptTOS", ",", "HostPolicy", ":", "autocert", ".", "HostWhitelist", "(", "camliNetHostName", ")", ",", "Cache", ":", "autocert", ".", "DirCache", "(", "osutil", ".", "DefaultLetsEncryptCache", "(", ")", ")", ",", "}", "\n", "go", "func", "(", ")", "{", "err", ":=", "http", ".", "ListenAndServe", "(", "\"", "\"", ",", "m", ".", "HTTPHandler", "(", "nil", ")", ")", "\n", "log", ".", "Fatalf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "(", ")", "\n", "getCertificate", ":=", "func", "(", "hello", "*", "tls", ".", "ClientHelloInfo", ")", "(", "*", "tls", ".", "Certificate", ",", "error", ")", "{", "if", "hello", ".", "ServerName", "==", "challengeHostname", "{", "return", "&", "gpgchallengeCert", ",", "nil", "\n", "}", "\n", "return", "m", ".", "GetCertificate", "(", "hello", ")", "\n", "}", "\n", "log", ".", "Printf", "(", "\"", "\"", ",", "camliNetHostName", ")", "\n", "ws", ".", "SetTLS", "(", "webserver", ".", "TLSSetup", "{", "CertManager", ":", "getCertificate", "}", ")", "\n\n", "err", "=", "ws", ".", "Listen", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "gpgchallenge", ".", "ClientChallengedPort", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "camliNetHostName", ")", ",", "nil", "\n", "}" ]
// listenForCamliNet prepares the TLS listener for both the GPG challenge, and // for Let's Encrypt. It then starts listening and returns the baseURL derived from // the hostname we should obtain from the GPG challenge.
[ "listenForCamliNet", "prepares", "the", "TLS", "listener", "for", "both", "the", "GPG", "challenge", "and", "for", "Let", "s", "Encrypt", ".", "It", "then", "starts", "listening", "and", "returns", "the", "baseURL", "derived", "from", "the", "hostname", "we", "should", "obtain", "from", "the", "GPG", "challenge", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/server/perkeepd/autodns.go#L52-L105
train
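
The interesting part of listenForCamliNet is how it answers TLS handshakes: a self-signed certificate for the GPG-challenge SNI name, and autocert (Let's Encrypt) for the real hostname. The sketch below isolates that pattern using the standard library and golang.org/x/crypto/acme/autocert; selfCertPEM/selfKeyPEM stand in for the output of a self-signed generator such as the httputil.GenSelfTLS call above, and the cache directory is an arbitrary placeholder.

```go
package tlsselect

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

// tlsConfig returns a TLS config that serves a self-signed certificate for
// challengeHost and lets autocert obtain/serve certificates for realHost.
func tlsConfig(challengeHost, realHost string, selfCertPEM, selfKeyPEM []byte) (*tls.Config, error) {
	selfSigned, err := tls.X509KeyPair(selfCertPEM, selfKeyPEM)
	if err != nil {
		return nil, err
	}
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist(realHost),
		Cache:      autocert.DirCache("letsencrypt.cache"),
	}
	// Answer http-01 challenges on port 80, as the function above does.
	go func() { log.Fatal(http.ListenAndServe(":http", m.HTTPHandler(nil))) }()
	return &tls.Config{
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if hello.ServerName == challengeHost {
				return &selfSigned, nil
			}
			return m.GetCertificate(hello)
		},
	}, nil
}
```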
perkeep/perkeep
pkg/gc/gc.go
markItem
func (c *Collector) markItem(ctx context.Context, it Item, isRoot bool) error { if !isRoot { marked, err := c.Marker.IsMarked(it) if err != nil { return err } if marked { return nil } } if err := c.Marker.Mark(it); err != nil { return err } // FIXME(tgulacsi): is it a problem that we cannot cancel the parent? ctx, cancel := context.WithCancel(ctx) ch := make(chan Item, buffered) var grp syncutil.Group grp.Go(func() error { return c.ItemEnumerator.EnumerateItem(ctx, it, ch) }) grp.Go(func() error { for it := range ch { if err := c.markItem(ctx, it, false); err != nil { return err } } return nil }) if err := grp.Err(); err != nil { cancel() return err } return nil }
go
func (c *Collector) markItem(ctx context.Context, it Item, isRoot bool) error { if !isRoot { marked, err := c.Marker.IsMarked(it) if err != nil { return err } if marked { return nil } } if err := c.Marker.Mark(it); err != nil { return err } // FIXME(tgulacsi): is it a problem that we cannot cancel the parent? ctx, cancel := context.WithCancel(ctx) ch := make(chan Item, buffered) var grp syncutil.Group grp.Go(func() error { return c.ItemEnumerator.EnumerateItem(ctx, it, ch) }) grp.Go(func() error { for it := range ch { if err := c.markItem(ctx, it, false); err != nil { return err } } return nil }) if err := grp.Err(); err != nil { cancel() return err } return nil }
[ "func", "(", "c", "*", "Collector", ")", "markItem", "(", "ctx", "context", ".", "Context", ",", "it", "Item", ",", "isRoot", "bool", ")", "error", "{", "if", "!", "isRoot", "{", "marked", ",", "err", ":=", "c", ".", "Marker", ".", "IsMarked", "(", "it", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "marked", "{", "return", "nil", "\n", "}", "\n", "}", "\n", "if", "err", ":=", "c", ".", "Marker", ".", "Mark", "(", "it", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// FIXME(tgulacsi): is it a problem that we cannot cancel the parent?", "ctx", ",", "cancel", ":=", "context", ".", "WithCancel", "(", "ctx", ")", "\n", "ch", ":=", "make", "(", "chan", "Item", ",", "buffered", ")", "\n", "var", "grp", "syncutil", ".", "Group", "\n", "grp", ".", "Go", "(", "func", "(", ")", "error", "{", "return", "c", ".", "ItemEnumerator", ".", "EnumerateItem", "(", "ctx", ",", "it", ",", "ch", ")", "\n", "}", ")", "\n", "grp", ".", "Go", "(", "func", "(", ")", "error", "{", "for", "it", ":=", "range", "ch", "{", "if", "err", ":=", "c", ".", "markItem", "(", "ctx", ",", "it", ",", "false", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}", ")", "\n", "if", "err", ":=", "grp", ".", "Err", "(", ")", ";", "err", "!=", "nil", "{", "cancel", "(", ")", "\n", "return", "err", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// ctx will be canceled on failure
[ "ctx", "will", "be", "canceled", "on", "failure" ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/gc/gc.go#L88-L122
train
perkeep/perkeep
pkg/gc/gc.go
Collect
func (c *Collector) Collect(ctx context.Context) (err error) { if c.World == nil { return errors.New("no World") } if c.Marker == nil { return errors.New("no Marker") } if c.Roots == nil { return errors.New("no Roots") } if c.Sweeper == nil { return errors.New("no Sweeper") } if c.ItemEnumerator == nil { return errors.New("no ItemEnumerator") } if c.Deleter == nil { return errors.New("no Deleter") } if err := c.World.Stop(); err != nil { return err } defer func() { startErr := c.World.Start() if err == nil { err = startErr } }() // Mark. roots := make(chan Item, buffered) markCtx, cancelMark := context.WithCancel(ctx) var marker syncutil.Group marker.Go(func() error { defer cancelMark() for it := range roots { if err := c.markItem(markCtx, it, true); err != nil { return err } } return nil }) marker.Go(func() error { return c.Roots.Enumerate(markCtx, roots) }) if err := marker.Err(); err != nil { return fmt.Errorf("Mark failure: %v", err) } // Sweep. all := make(chan Item, buffered) sweepCtx, _ := context.WithCancel(ctx) var sweeper syncutil.Group sweeper.Go(func() error { return c.Sweeper.Enumerate(sweepCtx, all) }) sweeper.Go(func() error { defer sweepCtx.Done() for it := range all { ok, err := c.Marker.IsMarked(it) if err != nil { return err } if !ok { if err := c.Deleter.Delete(it); err != nil { return err } } } return nil }) if err := sweeper.Err(); err != nil { return fmt.Errorf("Sweep failure: %v", err) } return nil }
go
func (c *Collector) Collect(ctx context.Context) (err error) { if c.World == nil { return errors.New("no World") } if c.Marker == nil { return errors.New("no Marker") } if c.Roots == nil { return errors.New("no Roots") } if c.Sweeper == nil { return errors.New("no Sweeper") } if c.ItemEnumerator == nil { return errors.New("no ItemEnumerator") } if c.Deleter == nil { return errors.New("no Deleter") } if err := c.World.Stop(); err != nil { return err } defer func() { startErr := c.World.Start() if err == nil { err = startErr } }() // Mark. roots := make(chan Item, buffered) markCtx, cancelMark := context.WithCancel(ctx) var marker syncutil.Group marker.Go(func() error { defer cancelMark() for it := range roots { if err := c.markItem(markCtx, it, true); err != nil { return err } } return nil }) marker.Go(func() error { return c.Roots.Enumerate(markCtx, roots) }) if err := marker.Err(); err != nil { return fmt.Errorf("Mark failure: %v", err) } // Sweep. all := make(chan Item, buffered) sweepCtx, _ := context.WithCancel(ctx) var sweeper syncutil.Group sweeper.Go(func() error { return c.Sweeper.Enumerate(sweepCtx, all) }) sweeper.Go(func() error { defer sweepCtx.Done() for it := range all { ok, err := c.Marker.IsMarked(it) if err != nil { return err } if !ok { if err := c.Deleter.Delete(it); err != nil { return err } } } return nil }) if err := sweeper.Err(); err != nil { return fmt.Errorf("Sweep failure: %v", err) } return nil }
[ "func", "(", "c", "*", "Collector", ")", "Collect", "(", "ctx", "context", ".", "Context", ")", "(", "err", "error", ")", "{", "if", "c", ".", "World", "==", "nil", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "c", ".", "Marker", "==", "nil", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "c", ".", "Roots", "==", "nil", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "c", ".", "Sweeper", "==", "nil", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "c", ".", "ItemEnumerator", "==", "nil", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "c", ".", "Deleter", "==", "nil", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "err", ":=", "c", ".", "World", ".", "Stop", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "defer", "func", "(", ")", "{", "startErr", ":=", "c", ".", "World", ".", "Start", "(", ")", "\n", "if", "err", "==", "nil", "{", "err", "=", "startErr", "\n", "}", "\n", "}", "(", ")", "\n\n", "// Mark.", "roots", ":=", "make", "(", "chan", "Item", ",", "buffered", ")", "\n", "markCtx", ",", "cancelMark", ":=", "context", ".", "WithCancel", "(", "ctx", ")", "\n", "var", "marker", "syncutil", ".", "Group", "\n", "marker", ".", "Go", "(", "func", "(", ")", "error", "{", "defer", "cancelMark", "(", ")", "\n", "for", "it", ":=", "range", "roots", "{", "if", "err", ":=", "c", ".", "markItem", "(", "markCtx", ",", "it", ",", "true", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}", ")", "\n", "marker", ".", "Go", "(", "func", "(", ")", "error", "{", "return", "c", ".", "Roots", ".", "Enumerate", "(", "markCtx", ",", "roots", ")", "\n", "}", ")", "\n", "if", "err", ":=", "marker", ".", "Err", "(", ")", ";", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "// Sweep.", "all", ":=", "make", "(", "chan", "Item", ",", "buffered", ")", "\n", "sweepCtx", ",", "_", ":=", "context", ".", "WithCancel", "(", "ctx", ")", "\n", "var", "sweeper", "syncutil", ".", "Group", "\n", "sweeper", ".", "Go", "(", "func", "(", ")", "error", "{", "return", "c", ".", "Sweeper", ".", "Enumerate", "(", "sweepCtx", ",", "all", ")", "\n", "}", ")", "\n", "sweeper", ".", "Go", "(", "func", "(", ")", "error", "{", "defer", "sweepCtx", ".", "Done", "(", ")", "\n", "for", "it", ":=", "range", "all", "{", "ok", ",", "err", ":=", "c", ".", "Marker", ".", "IsMarked", "(", "it", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "!", "ok", "{", "if", "err", ":=", "c", ".", "Deleter", ".", "Delete", "(", "it", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}", ")", "\n", "if", "err", ":=", "sweeper", ".", "Err", "(", ")", ";", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// Collect performs a garbage collection.
[ "Collect", "performs", "a", "garbage", "collection", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/gc/gc.go#L125-L200
train
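
Collect wires perkeep-specific interfaces (Roots, Marker, Sweeper, Deleter) into a classic mark-and-sweep pass. As a clarifying aside, the toy sketch below shows the same two phases over an in-memory graph using only the standard library; every type in it is illustrative and not part of pkg/gc.

```go
package gcsketch

// item and graph are stand-ins for whatever the Roots/ItemEnumerator/Sweeper
// interfaces would enumerate in the real Collector.
type item string

type graph struct {
	children map[item][]item // outgoing edges (what EnumerateItem would yield)
	all      []item          // everything the Sweeper would enumerate
}

// collect marks everything reachable from roots, then deletes the rest.
func collect(g graph, roots []item, deleteFn func(item)) {
	marked := make(map[item]bool)

	// Mark phase: depth-first walk from each root.
	var mark func(item)
	mark = func(it item) {
		if marked[it] {
			return
		}
		marked[it] = true
		for _, child := range g.children[it] {
			mark(child)
		}
	}
	for _, r := range roots {
		mark(r)
	}

	// Sweep phase: delete everything that was never marked.
	for _, it := range g.all {
		if !marked[it] {
			deleteFn(it)
		}
	}
}
```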
perkeep/perkeep
pkg/sorted/buffer/buffer.go
New
func New(buffer, backing sorted.KeyValue, maxBufferBytes int64) *KeyValue { return &KeyValue{ buf: buffer, back: backing, maxBuffer: maxBufferBytes, } }
go
func New(buffer, backing sorted.KeyValue, maxBufferBytes int64) *KeyValue { return &KeyValue{ buf: buffer, back: backing, maxBuffer: maxBufferBytes, } }
[ "func", "New", "(", "buffer", ",", "backing", "sorted", ".", "KeyValue", ",", "maxBufferBytes", "int64", ")", "*", "KeyValue", "{", "return", "&", "KeyValue", "{", "buf", ":", "buffer", ",", "back", ":", "backing", ",", "maxBuffer", ":", "maxBufferBytes", ",", "}", "\n", "}" ]
// New returns a sorted.KeyValue implementation that adds a Flush // method to flush the buffer to the backing storage. A flush will // also be performed when maxBufferBytes are reached. If // maxBufferBytes <= 0, no automatic flushing is performed.
[ "New", "returnes", "a", "sorted", ".", "KeyValue", "implementation", "that", "adds", "a", "Flush", "method", "to", "flush", "the", "buffer", "to", "the", "backing", "storage", ".", "A", "flush", "will", "also", "be", "performed", "when", "maxBufferBytes", "are", "reached", ".", "If", "maxBufferBytes", "<", "=", "0", "no", "automatic", "flushing", "is", "performed", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/sorted/buffer/buffer.go#L35-L41
train
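
A hedged usage sketch: it assumes sorted.NewMemoryKeyValue (an in-memory sorted.KeyValue elsewhere in the repository) for both the buffer and the backing store, and the perkeep.org import paths; in practice the backing store would be a persistent implementation. Set/Get come from the sorted.KeyValue interface and Flush from the docstring above.

```go
package main

import (
	"log"

	"perkeep.org/pkg/sorted"        // assumed import path
	"perkeep.org/pkg/sorted/buffer" // assumed import path
)

func main() {
	// Buffer writes in memory, flushing automatically past 1 MiB.
	kv := buffer.New(sorted.NewMemoryKeyValue(), sorted.NewMemoryKeyValue(), 1<<20)
	if err := kv.Set("greeting", "hello"); err != nil {
		log.Fatal(err)
	}
	// Push buffered writes to the backing store explicitly.
	if err := kv.Flush(); err != nil {
		log.Fatal(err)
	}
	v, err := kv.Get("greeting")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("greeting = %q", v)
}
```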
perkeep/perkeep
cmd/pk-put/kvcache.go
maybeRunCompaction
func maybeRunCompaction(dbname string, db *leveldb.DB) error { val, err := db.GetProperty("leveldb.num-files-at-level0") if err != nil { return fmt.Errorf("could not get number of level-0 files of %v's LevelDB: %v", dbname, err) } nbFiles, err := strconv.Atoi(val) if err != nil { return fmt.Errorf("could not convert number of level-0 files to int: %v", err) } // Only force compaction if we're at the default trigger (4), see // github.com/syndtr/goleveldb/leveldb/opt.DefaultCompactionL0Trigger if nbFiles < 4 { return nil } if err := db.CompactRange(util.Range{nil, nil}); err != nil { return fmt.Errorf("could not run compaction on %v's LevelDB: %v", dbname, err) } return nil }
go
func maybeRunCompaction(dbname string, db *leveldb.DB) error { val, err := db.GetProperty("leveldb.num-files-at-level0") if err != nil { return fmt.Errorf("could not get number of level-0 files of %v's LevelDB: %v", dbname, err) } nbFiles, err := strconv.Atoi(val) if err != nil { return fmt.Errorf("could not convert number of level-0 files to int: %v", err) } // Only force compaction if we're at the default trigger (4), see // github.com/syndtr/goleveldb/leveldb/opt.DefaultCompactionL0Trigger if nbFiles < 4 { return nil } if err := db.CompactRange(util.Range{nil, nil}); err != nil { return fmt.Errorf("could not run compaction on %v's LevelDB: %v", dbname, err) } return nil }
[ "func", "maybeRunCompaction", "(", "dbname", "string", ",", "db", "*", "leveldb", ".", "DB", ")", "error", "{", "val", ",", "err", ":=", "db", ".", "GetProperty", "(", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "dbname", ",", "err", ")", "\n", "}", "\n", "nbFiles", ",", "err", ":=", "strconv", ".", "Atoi", "(", "val", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "// Only force compaction if we're at the default trigger (4), see", "// github.com/syndtr/goleveldb/leveldb/opt.DefaultCompactionL0Trigger", "if", "nbFiles", "<", "4", "{", "return", "nil", "\n", "}", "\n", "if", "err", ":=", "db", ".", "CompactRange", "(", "util", ".", "Range", "{", "nil", ",", "nil", "}", ")", ";", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "dbname", ",", "err", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// maybeRunCompaction forces compaction of db, if the number of // tables in level 0 is >= 4. dbname should be provided for error messages.
[ "maybeRunCompaction", "forces", "compaction", "of", "db", "if", "the", "number", "of", "tables", "in", "level", "0", "is", ">", "=", "4", ".", "dbname", "should", "be", "provided", "for", "error", "messages", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/cmd/pk-put/kvcache.go#L76-L94
train
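
The same force-compaction pattern can be written against goleveldb directly: read the level-0 table count via GetProperty and compact the whole key range once it reaches the library's default trigger of 4. The database path below is illustrative.

```go
package main

import (
	"fmt"
	"log"
	"strconv"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func compactIfNeeded(db *leveldb.DB) error {
	val, err := db.GetProperty("leveldb.num-files-at-level0")
	if err != nil {
		return fmt.Errorf("reading level-0 file count: %v", err)
	}
	n, err := strconv.Atoi(val)
	if err != nil {
		return fmt.Errorf("parsing level-0 file count %q: %v", val, err)
	}
	if n < 4 { // below the default L0 compaction trigger, nothing to do
		return nil
	}
	// A zero util.Range (nil start/limit) means the whole key range.
	return db.CompactRange(util.Range{})
}

func main() {
	db, err := leveldb.OpenFile("example.leveldb", nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := compactIfNeeded(db); err != nil {
		log.Fatal(err)
	}
}
```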
perkeep/perkeep
cmd/pk-put/kvcache.go
marshalBinary
func (sk *statCacheKey) marshalBinary() ([]byte, error) { if sk == nil { return nil, errors.New("Can not marshal from a nil stat cache key") } data := make([]byte, 0, len(sk.Filepath)+3) data = append(data, 1) // version number data = append(data, sk.Filepath...) data = append(data, '|') if sk.Permanode { data = append(data, 1) } return data, nil }
go
func (sk *statCacheKey) marshalBinary() ([]byte, error) { if sk == nil { return nil, errors.New("Can not marshal from a nil stat cache key") } data := make([]byte, 0, len(sk.Filepath)+3) data = append(data, 1) // version number data = append(data, sk.Filepath...) data = append(data, '|') if sk.Permanode { data = append(data, 1) } return data, nil }
[ "func", "(", "sk", "*", "statCacheKey", ")", "marshalBinary", "(", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "if", "sk", "==", "nil", "{", "return", "nil", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "data", ":=", "make", "(", "[", "]", "byte", ",", "0", ",", "len", "(", "sk", ".", "Filepath", ")", "+", "3", ")", "\n", "data", "=", "append", "(", "data", ",", "1", ")", "// version number", "\n", "data", "=", "append", "(", "data", ",", "sk", ".", "Filepath", "...", ")", "\n", "data", "=", "append", "(", "data", ",", "'|'", ")", "\n", "if", "sk", ".", "Permanode", "{", "data", "=", "append", "(", "data", ",", "1", ")", "\n", "}", "\n", "return", "data", ",", "nil", "\n", "}" ]
// marshalBinary returns a more compact binary // representation of the contents of sk.
[ "marshalBinary", "returns", "a", "more", "compact", "binary", "representation", "of", "the", "contents", "of", "sk", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/cmd/pk-put/kvcache.go#L234-L246
train
perkeep/perkeep
cmd/pk-put/kvcache.go
marshalBinary
func (scv *statCacheValue) marshalBinary() ([]byte, error) { if scv == nil { return nil, errors.New("Can not marshal from a nil stat cache value") } binBr, _ := scv.Result.BlobRef.MarshalBinary() // Blob size fits on 4 bytes when binary encoded data := make([]byte, 0, len(scv.Fingerprint)+1+4+1+len(binBr)) buf := bytes.NewBuffer(data) _, err := buf.WriteString(string(scv.Fingerprint)) if err != nil { return nil, fmt.Errorf("Could not write fingerprint %v: %v", scv.Fingerprint, err) } err = buf.WriteByte('|') if err != nil { return nil, fmt.Errorf("Could not write '|': %v", err) } err = binary.Write(buf, binary.BigEndian, int32(scv.Result.Size)) if err != nil { return nil, fmt.Errorf("Could not write blob size %d: %v", scv.Result.Size, err) } err = buf.WriteByte('|') if err != nil { return nil, fmt.Errorf("Could not write '|': %v", err) } _, err = buf.Write(binBr) if err != nil { return nil, fmt.Errorf("Could not write binary blobref %q: %v", binBr, err) } return buf.Bytes(), nil }
go
func (scv *statCacheValue) marshalBinary() ([]byte, error) { if scv == nil { return nil, errors.New("Can not marshal from a nil stat cache value") } binBr, _ := scv.Result.BlobRef.MarshalBinary() // Blob size fits on 4 bytes when binary encoded data := make([]byte, 0, len(scv.Fingerprint)+1+4+1+len(binBr)) buf := bytes.NewBuffer(data) _, err := buf.WriteString(string(scv.Fingerprint)) if err != nil { return nil, fmt.Errorf("Could not write fingerprint %v: %v", scv.Fingerprint, err) } err = buf.WriteByte('|') if err != nil { return nil, fmt.Errorf("Could not write '|': %v", err) } err = binary.Write(buf, binary.BigEndian, int32(scv.Result.Size)) if err != nil { return nil, fmt.Errorf("Could not write blob size %d: %v", scv.Result.Size, err) } err = buf.WriteByte('|') if err != nil { return nil, fmt.Errorf("Could not write '|': %v", err) } _, err = buf.Write(binBr) if err != nil { return nil, fmt.Errorf("Could not write binary blobref %q: %v", binBr, err) } return buf.Bytes(), nil }
[ "func", "(", "scv", "*", "statCacheValue", ")", "marshalBinary", "(", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "if", "scv", "==", "nil", "{", "return", "nil", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "binBr", ",", "_", ":=", "scv", ".", "Result", ".", "BlobRef", ".", "MarshalBinary", "(", ")", "\n", "// Blob size fits on 4 bytes when binary encoded", "data", ":=", "make", "(", "[", "]", "byte", ",", "0", ",", "len", "(", "scv", ".", "Fingerprint", ")", "+", "1", "+", "4", "+", "1", "+", "len", "(", "binBr", ")", ")", "\n", "buf", ":=", "bytes", ".", "NewBuffer", "(", "data", ")", "\n", "_", ",", "err", ":=", "buf", ".", "WriteString", "(", "string", "(", "scv", ".", "Fingerprint", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "scv", ".", "Fingerprint", ",", "err", ")", "\n", "}", "\n", "err", "=", "buf", ".", "WriteByte", "(", "'|'", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "err", "=", "binary", ".", "Write", "(", "buf", ",", "binary", ".", "BigEndian", ",", "int32", "(", "scv", ".", "Result", ".", "Size", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "scv", ".", "Result", ".", "Size", ",", "err", ")", "\n", "}", "\n", "err", "=", "buf", ".", "WriteByte", "(", "'|'", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "_", ",", "err", "=", "buf", ".", "Write", "(", "binBr", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "binBr", ",", "err", ")", "\n", "}", "\n", "return", "buf", ".", "Bytes", "(", ")", ",", "nil", "\n", "}" ]
// marshalBinary returns a more compact binary // representation of the contents of scv.
[ "marshalBinary", "returns", "a", "more", "compact", "binary", "representation", "of", "the", "contents", "of", "scv", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/cmd/pk-put/kvcache.go#L257-L286
train
perkeep/perkeep
internal/osutil/restart_unix.go
RestartProcess
func RestartProcess(arg ...string) error { path, err := SelfPath() if err != nil { return fmt.Errorf("RestartProcess failed: %v", err) } var args []string if len(arg) > 0 { args = append(args, os.Args[0]) for _, v := range arg { args = append(args, v) } } else { args = os.Args } return syscall.Exec(path, args, os.Environ()) }
go
func RestartProcess(arg ...string) error { path, err := SelfPath() if err != nil { return fmt.Errorf("RestartProcess failed: %v", err) } var args []string if len(arg) > 0 { args = append(args, os.Args[0]) for _, v := range arg { args = append(args, v) } } else { args = os.Args } return syscall.Exec(path, args, os.Environ()) }
[ "func", "RestartProcess", "(", "arg", "...", "string", ")", "error", "{", "path", ",", "err", ":=", "SelfPath", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "var", "args", "[", "]", "string", "\n", "if", "len", "(", "arg", ")", ">", "0", "{", "args", "=", "append", "(", "args", ",", "os", ".", "Args", "[", "0", "]", ")", "\n", "for", "_", ",", "v", ":=", "range", "arg", "{", "args", "=", "append", "(", "args", ",", "v", ")", "\n", "}", "\n", "}", "else", "{", "args", "=", "os", ".", "Args", "\n", "}", "\n\n", "return", "syscall", ".", "Exec", "(", "path", ",", "args", ",", "os", ".", "Environ", "(", ")", ")", "\n", "}" ]
// RestartProcess restarts the process with the given arguments, if any, // replacing the original process's arguments. It defaults to os.Args otherwise. It // returns an error if things couldn't be restarted. On success, this function // never returns because the process becomes the new process.
[ "RestartProcess", "restarts", "the", "process", "with", "the", "given", "arguments", "if", "any", "replacing", "the", "original", "process", "s", "arguments", ".", "It", "defaults", "to", "os", ".", "Args", "otherwise", ".", "It", "returns", "an", "error", "if", "things", "couldn", "t", "be", "restarted", ".", "On", "success", "this", "function", "never", "returns", "because", "the", "process", "becomes", "the", "new", "process", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/osutil/restart_unix.go#L63-L80
train
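
The core of this restart-in-place technique can be shown with only the standard library: resolve the current binary and replace the running process image with syscall.Exec (Unix-only). This sketch uses os.Executable instead of the package's SelfPath helper, and the RESTARTED guard is purely illustrative.

```go
package main

import (
	"log"
	"os"
	"syscall"
)

func restartSelf(args ...string) error {
	path, err := os.Executable()
	if err != nil {
		return err
	}
	argv := os.Args
	if len(args) > 0 {
		// Keep argv[0], replace the rest, as RestartProcess does.
		argv = append([]string{os.Args[0]}, args...)
	}
	// On success this never returns: the current process becomes the new one.
	return syscall.Exec(path, argv, os.Environ())
}

func main() {
	if os.Getenv("RESTARTED") == "" {
		os.Setenv("RESTARTED", "1")
		if err := restartSelf(); err != nil {
			log.Fatalf("restart failed: %v", err)
		}
	}
	log.Println("running after re-exec")
}
```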
perkeep/perkeep
misc/docker/dock.go
buildDockerImage
func buildDockerImage(imageDir, imageName string) { if dockDir == "" { panic("dockDir should be set before calling buildDockerImage") } cmd := exec.Command("docker", "build", "-t", imageName, ".") cmd.Dir = filepath.Join(dockDir, imageDir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { log.Fatalf("Error building docker image %v: %v", imageName, err) } }
go
func buildDockerImage(imageDir, imageName string) { if dockDir == "" { panic("dockDir should be set before calling buildDockerImage") } cmd := exec.Command("docker", "build", "-t", imageName, ".") cmd.Dir = filepath.Join(dockDir, imageDir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { log.Fatalf("Error building docker image %v: %v", imageName, err) } }
[ "func", "buildDockerImage", "(", "imageDir", ",", "imageName", "string", ")", "{", "if", "dockDir", "==", "\"", "\"", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "cmd", ":=", "exec", ".", "Command", "(", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "imageName", ",", "\"", "\"", ")", "\n", "cmd", ".", "Dir", "=", "filepath", ".", "Join", "(", "dockDir", ",", "imageDir", ")", "\n", "cmd", ".", "Stdout", "=", "os", ".", "Stdout", "\n", "cmd", ".", "Stderr", "=", "os", ".", "Stderr", "\n", "if", "err", ":=", "cmd", ".", "Run", "(", ")", ";", "err", "!=", "nil", "{", "log", ".", "Fatalf", "(", "\"", "\"", ",", "imageName", ",", "err", ")", "\n", "}", "\n", "}" ]
// buildDockerImage builds a docker image from the Dockerfile located in // imageDir, which is a path relative to dockDir. The image will be named after // imageName. dockDir should have been set beforehand.
[ "buildDockerImage", "builds", "a", "docker", "image", "from", "the", "Dockerfile", "located", "in", "imageDir", "which", "is", "a", "path", "relative", "to", "dockDir", ".", "The", "image", "will", "be", "named", "after", "imageName", ".", "dockDir", "should", "have", "been", "set", "behorehand", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/misc/docker/dock.go#L54-L65
train
perkeep/perkeep
pkg/blobserver/mergedenum.go
MergedEnumerate
func MergedEnumerate(ctx context.Context, dest chan<- blob.SizedRef, sources []BlobEnumerator, after string, limit int) error { return mergedEnumerate(ctx, dest, len(sources), func(i int) BlobEnumerator { return sources[i] }, after, limit) }
go
func MergedEnumerate(ctx context.Context, dest chan<- blob.SizedRef, sources []BlobEnumerator, after string, limit int) error { return mergedEnumerate(ctx, dest, len(sources), func(i int) BlobEnumerator { return sources[i] }, after, limit) }
[ "func", "MergedEnumerate", "(", "ctx", "context", ".", "Context", ",", "dest", "chan", "<-", "blob", ".", "SizedRef", ",", "sources", "[", "]", "BlobEnumerator", ",", "after", "string", ",", "limit", "int", ")", "error", "{", "return", "mergedEnumerate", "(", "ctx", ",", "dest", ",", "len", "(", "sources", ")", ",", "func", "(", "i", "int", ")", "BlobEnumerator", "{", "return", "sources", "[", "i", "]", "}", ",", "after", ",", "limit", ")", "\n", "}" ]
// MergedEnumerate implements the BlobEnumerator interface by // merge-joining 0 or more sources.
[ "MergedEnumerate", "implements", "the", "BlobEnumerator", "interface", "by", "merge", "-", "joining", "0", "or", "more", "sources", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/mergedenum.go#L29-L31
train
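
A hedged usage sketch follows. It assumes the enumeration side closes dest once it is done (the usual contract for blob enumeration in this codebase), so the caller can simply range over the channel; the sources slice, the import paths, and the limit value are placeholders.

```go
package enumexample

import (
	"context"
	"fmt"

	"perkeep.org/pkg/blob"       // assumed import path
	"perkeep.org/pkg/blobserver" // assumed import path
)

// listMerged prints up to limit blobs from the given enumerators, merged in
// sorted order. It assumes the enumerating side closes dest when finished.
func listMerged(ctx context.Context, sources []blobserver.BlobEnumerator, limit int) error {
	dest := make(chan blob.SizedRef, 16)
	errc := make(chan error, 1)
	go func() {
		errc <- blobserver.MergedEnumerate(ctx, dest, sources, "", limit)
	}()
	for sr := range dest {
		fmt.Println(sr.Ref, sr.Size)
	}
	return <-errc
}
```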
perkeep/perkeep
pkg/deploy/gce/deploy.go
NewOAuthConfig
func NewOAuthConfig(clientID, clientSecret string) *oauth2.Config { return &oauth2.Config{ Scopes: []string{ logging.WriteScope, compute.DevstorageFullControlScope, compute.ComputeScope, cloudresourcemanager.CloudPlatformScope, servicemanagement.CloudPlatformScope, "https://www.googleapis.com/auth/sqlservice", "https://www.googleapis.com/auth/sqlservice.admin", }, Endpoint: google.Endpoint, ClientID: clientID, ClientSecret: clientSecret, } }
go
func NewOAuthConfig(clientID, clientSecret string) *oauth2.Config { return &oauth2.Config{ Scopes: []string{ logging.WriteScope, compute.DevstorageFullControlScope, compute.ComputeScope, cloudresourcemanager.CloudPlatformScope, servicemanagement.CloudPlatformScope, "https://www.googleapis.com/auth/sqlservice", "https://www.googleapis.com/auth/sqlservice.admin", }, Endpoint: google.Endpoint, ClientID: clientID, ClientSecret: clientSecret, } }
[ "func", "NewOAuthConfig", "(", "clientID", ",", "clientSecret", "string", ")", "*", "oauth2", ".", "Config", "{", "return", "&", "oauth2", ".", "Config", "{", "Scopes", ":", "[", "]", "string", "{", "logging", ".", "WriteScope", ",", "compute", ".", "DevstorageFullControlScope", ",", "compute", ".", "ComputeScope", ",", "cloudresourcemanager", ".", "CloudPlatformScope", ",", "servicemanagement", ".", "CloudPlatformScope", ",", "\"", "\"", ",", "\"", "\"", ",", "}", ",", "Endpoint", ":", "google", ".", "Endpoint", ",", "ClientID", ":", "clientID", ",", "ClientSecret", ":", "clientSecret", ",", "}", "\n", "}" ]
// NewOAuthConfig returns an OAuth configuration template.
[ "NewOAuthConfig", "returns", "an", "OAuth", "configuration", "template", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/deploy/gce/deploy.go#L83-L98
train
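
A usage sketch: build the config template and start a standard three-legged OAuth flow with it. The client ID/secret values, the redirect URL, and the perkeep.org/pkg/deploy/gce import path are placeholders or assumptions.

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"

	"perkeep.org/pkg/deploy/gce" // assumed import path
)

func main() {
	conf := gce.NewOAuthConfig("my-client-id", "my-client-secret")
	conf.RedirectURL = "http://localhost:8080/callback" // illustrative
	// Send the user here to grant the scopes listed in NewOAuthConfig.
	fmt.Println(conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))
}
```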
perkeep/perkeep
pkg/deploy/gce/deploy.go
Get
func (d *Deployer) Get() (*compute.Instance, error) { computeService, err := compute.New(d.Client) if err != nil { return nil, err } return computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do() }
go
func (d *Deployer) Get() (*compute.Instance, error) { computeService, err := compute.New(d.Client) if err != nil { return nil, err } return computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do() }
[ "func", "(", "d", "*", "Deployer", ")", "Get", "(", ")", "(", "*", "compute", ".", "Instance", ",", "error", ")", "{", "computeService", ",", "err", ":=", "compute", ".", "New", "(", "d", ".", "Client", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "return", "computeService", ".", "Instances", ".", "Get", "(", "d", ".", "Conf", ".", "Project", ",", "d", ".", "Conf", ".", "Zone", ",", "d", ".", "Conf", ".", "Name", ")", ".", "Do", "(", ")", "\n", "}" ]
// Get returns the Instance corresponding to the Project, Zone, and Name defined in the // Deployer's Conf.
[ "Get", "returns", "the", "Instance", "corresponding", "to", "the", "Project", "Zone", "and", "Name", "defined", "in", "the", "Deployer", "s", "Conf", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/deploy/gce/deploy.go#L138-L144
train
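
A hedged wiring sketch: the Client and Conf field names and the Project/Zone/Name fields are read off the method bodies in these rows, while the InstanceConf type name, the import path, and the literal values are assumptions for illustration.

```go
package deployexample

import (
	"fmt"
	"net/http"

	"perkeep.org/pkg/deploy/gce" // assumed import path
)

// describeInstance looks up the configured instance and prints its status.
// httpClient is expected to be an OAuth2-authorized *http.Client, e.g. one
// built from the config returned by NewOAuthConfig above.
func describeInstance(httpClient *http.Client) error {
	d := &gce.Deployer{
		Client: httpClient,
		Conf: &gce.InstanceConf{ // type name is an assumption
			Project: "my-project",
			Zone:    "us-central1-a",
			Name:    "perkeep",
		},
	}
	inst, err := d.Get()
	if err != nil {
		return err
	}
	fmt.Println(inst.Name, inst.Status)
	return nil
}
```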
perkeep/perkeep
pkg/deploy/gce/deploy.go
projectHasInstance
func (d *Deployer) projectHasInstance() (zone string, err error) { s, err := compute.New(d.Client) if err != nil { return "", err } // TODO(mpl): make use of the handler's cached zones. zl, err := compute.NewZonesService(s).List(d.Conf.Project).Do() if err != nil { return "", fmt.Errorf("could not get a list of zones: %v", err) } computeService, _ := compute.New(d.Client) var zoneOnce sync.Once var grp syncutil.Group errc := make(chan error, 1) zonec := make(chan string, 1) timeout := time.NewTimer(30 * time.Second) defer timeout.Stop() for _, z := range zl.Items { z := z grp.Go(func() error { list, err := computeService.Instances.List(d.Conf.Project, z.Name).Do() if err != nil { return fmt.Errorf("could not list existing instances: %v", err) } if len(list.Items) > 0 { zoneOnce.Do(func() { zonec <- z.Name }) } return nil }) } go func() { errc <- grp.Err() }() // We block until either an instance was found in a zone, or all the instance // listing is done. Or we timed-out. select { case err = <-errc: return "", err case zone = <-zonec: // We voluntarily ignore any listing error if we found at least one instance // because that's what we primarily want to report about. return zone, nil case <-timeout.C: return "", errors.New("timed out") } }
go
func (d *Deployer) projectHasInstance() (zone string, err error) { s, err := compute.New(d.Client) if err != nil { return "", err } // TODO(mpl): make use of the handler's cached zones. zl, err := compute.NewZonesService(s).List(d.Conf.Project).Do() if err != nil { return "", fmt.Errorf("could not get a list of zones: %v", err) } computeService, _ := compute.New(d.Client) var zoneOnce sync.Once var grp syncutil.Group errc := make(chan error, 1) zonec := make(chan string, 1) timeout := time.NewTimer(30 * time.Second) defer timeout.Stop() for _, z := range zl.Items { z := z grp.Go(func() error { list, err := computeService.Instances.List(d.Conf.Project, z.Name).Do() if err != nil { return fmt.Errorf("could not list existing instances: %v", err) } if len(list.Items) > 0 { zoneOnce.Do(func() { zonec <- z.Name }) } return nil }) } go func() { errc <- grp.Err() }() // We block until either an instance was found in a zone, or all the instance // listing is done. Or we timed-out. select { case err = <-errc: return "", err case zone = <-zonec: // We voluntarily ignore any listing error if we found at least one instance // because that's what we primarily want to report about. return zone, nil case <-timeout.C: return "", errors.New("timed out") } }
[ "func", "(", "d", "*", "Deployer", ")", "projectHasInstance", "(", ")", "(", "zone", "string", ",", "err", "error", ")", "{", "s", ",", "err", ":=", "compute", ".", "New", "(", "d", ".", "Client", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n", "// TODO(mpl): make use of the handler's cached zones.", "zl", ",", "err", ":=", "compute", ".", "NewZonesService", "(", "s", ")", ".", "List", "(", "d", ".", "Conf", ".", "Project", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "computeService", ",", "_", ":=", "compute", ".", "New", "(", "d", ".", "Client", ")", "\n", "var", "zoneOnce", "sync", ".", "Once", "\n", "var", "grp", "syncutil", ".", "Group", "\n", "errc", ":=", "make", "(", "chan", "error", ",", "1", ")", "\n", "zonec", ":=", "make", "(", "chan", "string", ",", "1", ")", "\n", "timeout", ":=", "time", ".", "NewTimer", "(", "30", "*", "time", ".", "Second", ")", "\n", "defer", "timeout", ".", "Stop", "(", ")", "\n", "for", "_", ",", "z", ":=", "range", "zl", ".", "Items", "{", "z", ":=", "z", "\n", "grp", ".", "Go", "(", "func", "(", ")", "error", "{", "list", ",", "err", ":=", "computeService", ".", "Instances", ".", "List", "(", "d", ".", "Conf", ".", "Project", ",", "z", ".", "Name", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "if", "len", "(", "list", ".", "Items", ")", ">", "0", "{", "zoneOnce", ".", "Do", "(", "func", "(", ")", "{", "zonec", "<-", "z", ".", "Name", "\n", "}", ")", "\n", "}", "\n", "return", "nil", "\n", "}", ")", "\n", "}", "\n", "go", "func", "(", ")", "{", "errc", "<-", "grp", ".", "Err", "(", ")", "\n", "}", "(", ")", "\n", "// We block until either an instance was found in a zone, or all the instance", "// listing is done. Or we timed-out.", "select", "{", "case", "err", "=", "<-", "errc", ":", "return", "\"", "\"", ",", "err", "\n", "case", "zone", "=", "<-", "zonec", ":", "// We voluntarily ignore any listing error if we found at least one instance", "// because that's what we primarily want to report about.", "return", "zone", ",", "nil", "\n", "case", "<-", "timeout", ".", "C", ":", "return", "\"", "\"", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "}" ]
// projectHasInstance checks for all the possible zones if there's already an instance for the project. // It returns the name of the zone at the first instance it finds, if any.
[ "projectHasInstance", "checks", "for", "all", "the", "possible", "zones", "if", "there", "s", "already", "an", "instance", "for", "the", "project", ".", "It", "returns", "the", "name", "of", "the", "zone", "at", "the", "first", "instance", "it", "finds", "if", "any", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/deploy/gce/deploy.go#L169-L216
train
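
The concurrency shape here (fan out one goroutine per zone, report the first hit exactly once via sync.Once, and race that against the aggregate result and a timeout) can be reproduced with just the standard library. The checkZone callback below is hypothetical; it plays the role of the per-zone Instances.List call.

```go
package firstmatch

import (
	"errors"
	"sync"
	"time"
)

// firstZoneWith fans out checkZone over zones and returns the first zone for
// which it reports true. It returns an empty zone and a nil error if nothing
// matched, the first error encountered otherwise, or a timeout error.
func firstZoneWith(zones []string, checkZone func(string) (bool, error), timeout time.Duration) (string, error) {
	var once sync.Once
	zonec := make(chan string, 1)
	errc := make(chan error, 1)

	var wg sync.WaitGroup
	var mu sync.Mutex
	var firstErr error
	for _, z := range zones {
		z := z
		wg.Add(1)
		go func() {
			defer wg.Done()
			ok, err := checkZone(z)
			if err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
				return
			}
			if ok {
				once.Do(func() { zonec <- z }) // first hit wins; never blocks (cap 1)
			}
		}()
	}
	go func() {
		wg.Wait()
		errc <- firstErr
	}()

	select {
	case z := <-zonec:
		return z, nil // a zone reported a hit
	case err := <-errc:
		return "", err // all zones checked; err may be nil (no match)
	case <-time.After(timeout):
		return "", errors.New("timed out")
	}
}
```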
perkeep/perkeep
pkg/deploy/gce/deploy.go
getInstanceAttribute
func (d *Deployer) getInstanceAttribute(attr string) (string, error) { s, err := compute.New(d.Client) if err != nil { return "", fmt.Errorf("error getting compute service: %v", err) } inst, err := compute.NewInstancesService(s).Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do() if err != nil { return "", fmt.Errorf("error getting instance: %v", err) } for _, v := range inst.Metadata.Items { if v.Key == attr { return *(v.Value), nil } } return "", errAttrNotFound }
go
func (d *Deployer) getInstanceAttribute(attr string) (string, error) { s, err := compute.New(d.Client) if err != nil { return "", fmt.Errorf("error getting compute service: %v", err) } inst, err := compute.NewInstancesService(s).Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do() if err != nil { return "", fmt.Errorf("error getting instance: %v", err) } for _, v := range inst.Metadata.Items { if v.Key == attr { return *(v.Value), nil } } return "", errAttrNotFound }
[ "func", "(", "d", "*", "Deployer", ")", "getInstanceAttribute", "(", "attr", "string", ")", "(", "string", ",", "error", ")", "{", "s", ",", "err", ":=", "compute", ".", "New", "(", "d", ".", "Client", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "inst", ",", "err", ":=", "compute", ".", "NewInstancesService", "(", "s", ")", ".", "Get", "(", "d", ".", "Conf", ".", "Project", ",", "d", ".", "Conf", ".", "Zone", ",", "d", ".", "Conf", ".", "Name", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "for", "_", ",", "v", ":=", "range", "inst", ".", "Metadata", ".", "Items", "{", "if", "v", ".", "Key", "==", "attr", "{", "return", "*", "(", "v", ".", "Value", ")", ",", "nil", "\n", "}", "\n", "}", "\n", "return", "\"", "\"", ",", "errAttrNotFound", "\n", "}" ]
// getInstanceAttribute returns the value for attr in the custom metadata of the // instance. It returns errAttrNotFound if such a metadata attribute does not // exist.
[ "getInstanceAttribute", "returns", "the", "value", "for", "attr", "in", "the", "custom", "metadata", "of", "the", "instance", ".", "It", "returns", "errAttrNotFound", "if", "such", "a", "metadata", "attribute", "does", "not", "exist", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/deploy/gce/deploy.go#L455-L470
train
perkeep/perkeep
pkg/deploy/gce/deploy.go
Create
func (d *Deployer) Create(ctx context.Context) (*compute.Instance, error) { if err := d.enableAPIs(); err != nil { return nil, projectIDError{ id: d.Conf.Project, cause: err, } } if err := d.checkProjectID(); err != nil { return nil, err } computeService, _ := compute.New(d.Client) storageService, _ := storage.New(d.Client) fwc := make(chan error, 1) go func() { fwc <- d.setFirewall(ctx, computeService) }() config := cloudConfig(d.Conf) const maxCloudConfig = 32 << 10 // per compute API docs if len(config) > maxCloudConfig { return nil, fmt.Errorf("cloud config length of %d bytes is over %d byte limit", len(config), maxCloudConfig) } if zone, err := d.projectHasInstance(); zone != "" { return nil, instanceExistsError{ project: d.Conf.Project, zone: zone, } } else if err != nil { return nil, fmt.Errorf("could not scan project for existing instances: %v", err) } if err := d.setBuckets(ctx, storageService); err != nil { return nil, fmt.Errorf("could not create buckets: %v", err) } if err := d.createInstance(ctx, computeService); err != nil { return nil, fmt.Errorf("could not create compute instance: %v", err) } inst, err := computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do() if err != nil { return nil, fmt.Errorf("error getting instance after creation: %v", err) } if Verbose { ij, _ := json.MarshalIndent(inst, "", " ") d.Printf("Instance: %s", ij) } if err = <-fwc; err != nil { return nil, fmt.Errorf("could not create firewall rules: %v", err) } return inst, nil }
go
func (d *Deployer) Create(ctx context.Context) (*compute.Instance, error) { if err := d.enableAPIs(); err != nil { return nil, projectIDError{ id: d.Conf.Project, cause: err, } } if err := d.checkProjectID(); err != nil { return nil, err } computeService, _ := compute.New(d.Client) storageService, _ := storage.New(d.Client) fwc := make(chan error, 1) go func() { fwc <- d.setFirewall(ctx, computeService) }() config := cloudConfig(d.Conf) const maxCloudConfig = 32 << 10 // per compute API docs if len(config) > maxCloudConfig { return nil, fmt.Errorf("cloud config length of %d bytes is over %d byte limit", len(config), maxCloudConfig) } if zone, err := d.projectHasInstance(); zone != "" { return nil, instanceExistsError{ project: d.Conf.Project, zone: zone, } } else if err != nil { return nil, fmt.Errorf("could not scan project for existing instances: %v", err) } if err := d.setBuckets(ctx, storageService); err != nil { return nil, fmt.Errorf("could not create buckets: %v", err) } if err := d.createInstance(ctx, computeService); err != nil { return nil, fmt.Errorf("could not create compute instance: %v", err) } inst, err := computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do() if err != nil { return nil, fmt.Errorf("error getting instance after creation: %v", err) } if Verbose { ij, _ := json.MarshalIndent(inst, "", " ") d.Printf("Instance: %s", ij) } if err = <-fwc; err != nil { return nil, fmt.Errorf("could not create firewall rules: %v", err) } return inst, nil }
[ "func", "(", "d", "*", "Deployer", ")", "Create", "(", "ctx", "context", ".", "Context", ")", "(", "*", "compute", ".", "Instance", ",", "error", ")", "{", "if", "err", ":=", "d", ".", "enableAPIs", "(", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "projectIDError", "{", "id", ":", "d", ".", "Conf", ".", "Project", ",", "cause", ":", "err", ",", "}", "\n", "}", "\n", "if", "err", ":=", "d", ".", "checkProjectID", "(", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "computeService", ",", "_", ":=", "compute", ".", "New", "(", "d", ".", "Client", ")", "\n", "storageService", ",", "_", ":=", "storage", ".", "New", "(", "d", ".", "Client", ")", "\n\n", "fwc", ":=", "make", "(", "chan", "error", ",", "1", ")", "\n", "go", "func", "(", ")", "{", "fwc", "<-", "d", ".", "setFirewall", "(", "ctx", ",", "computeService", ")", "\n", "}", "(", ")", "\n\n", "config", ":=", "cloudConfig", "(", "d", ".", "Conf", ")", "\n", "const", "maxCloudConfig", "=", "32", "<<", "10", "// per compute API docs", "\n", "if", "len", "(", "config", ")", ">", "maxCloudConfig", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "len", "(", "config", ")", ",", "maxCloudConfig", ")", "\n", "}", "\n\n", "if", "zone", ",", "err", ":=", "d", ".", "projectHasInstance", "(", ")", ";", "zone", "!=", "\"", "\"", "{", "return", "nil", ",", "instanceExistsError", "{", "project", ":", "d", ".", "Conf", ".", "Project", ",", "zone", ":", "zone", ",", "}", "\n", "}", "else", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "if", "err", ":=", "d", ".", "setBuckets", "(", "ctx", ",", "storageService", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "if", "err", ":=", "d", ".", "createInstance", "(", "ctx", ",", "computeService", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "inst", ",", "err", ":=", "computeService", ".", "Instances", ".", "Get", "(", "d", ".", "Conf", ".", "Project", ",", "d", ".", "Conf", ".", "Zone", ",", "d", ".", "Conf", ".", "Name", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "if", "Verbose", "{", "ij", ",", "_", ":=", "json", ".", "MarshalIndent", "(", "inst", ",", "\"", "\"", ",", "\"", "\"", ")", "\n", "d", ".", "Printf", "(", "\"", "\"", ",", "ij", ")", "\n", "}", "\n\n", "if", "err", "=", "<-", "fwc", ";", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "return", "inst", ",", "nil", "\n", "}" ]
// Create sets up and starts a Google Compute Engine instance as defined in d.Conf. It // creates the necessary Google Storage buckets beforehand.
[ "Create", "sets", "up", "and", "starts", "a", "Google", "Compute", "Engine", "instance", "as", "defined", "in", "d", ".", "Conf", ".", "It", "creates", "the", "necessary", "Google", "Storage", "buckets", "beforehand", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/deploy/gce/deploy.go#L474-L529
train
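A minimal usage sketch for Deployer.Create, assuming an already-populated *gce.Deployer (authenticated HTTP client plus instance configuration) and the perkeep.org/pkg/deploy/gce import path; the package and wrapper function names here are illustrative only.

package deployexample

import (
	"context"
	"fmt"
	"log"

	"perkeep.org/pkg/deploy/gce"
)

// createInstance deploys the instance described by d.Conf and logs the result.
func createInstance(ctx context.Context, d *gce.Deployer) error {
	inst, err := d.Create(ctx) // creates buckets, firewall rules, then the instance
	if err != nil {
		return fmt.Errorf("deploy failed: %v", err)
	}
	log.Printf("instance %s is up in zone %s", inst.Name, inst.Zone)
	return nil
}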
perkeep/perkeep
pkg/deploy/gce/deploy.go
setBuckets
func (d *Deployer) setBuckets(ctx context.Context, storageService *storage.Service) error { projBucket := d.Conf.Project + "-camlistore" needBucket := map[string]bool{ projBucket: true, } buckets, err := storageService.Buckets.List(d.Conf.Project).Do() if err != nil { return fmt.Errorf("error listing buckets: %v", err) } for _, it := range buckets.Items { delete(needBucket, it.Name) } if len(needBucket) > 0 { if Verbose { d.Printf("Need to create buckets: %v", needBucket) } var waitBucket sync.WaitGroup var bucketErr error for name := range needBucket { select { case <-ctx.Done(): return ctx.Err() default: } name := name waitBucket.Add(1) go func() { defer waitBucket.Done() if Verbose { d.Printf("Creating bucket %s", name) } b, err := storageService.Buckets.Insert(d.Conf.Project, &storage.Bucket{ Id: name, Name: name, }).Do() if err != nil && bucketErr == nil { bucketErr = fmt.Errorf("error creating bucket %s: %v", name, err) return } if Verbose { d.Printf("Created bucket %s: %+v", name, b) } }() } waitBucket.Wait() if bucketErr != nil { return bucketErr } } d.Conf.configDir = path.Join(projBucket, configDir) d.Conf.blobDir = path.Join(projBucket, "blobs") return nil }
go
func (d *Deployer) setBuckets(ctx context.Context, storageService *storage.Service) error { projBucket := d.Conf.Project + "-camlistore" needBucket := map[string]bool{ projBucket: true, } buckets, err := storageService.Buckets.List(d.Conf.Project).Do() if err != nil { return fmt.Errorf("error listing buckets: %v", err) } for _, it := range buckets.Items { delete(needBucket, it.Name) } if len(needBucket) > 0 { if Verbose { d.Printf("Need to create buckets: %v", needBucket) } var waitBucket sync.WaitGroup var bucketErr error for name := range needBucket { select { case <-ctx.Done(): return ctx.Err() default: } name := name waitBucket.Add(1) go func() { defer waitBucket.Done() if Verbose { d.Printf("Creating bucket %s", name) } b, err := storageService.Buckets.Insert(d.Conf.Project, &storage.Bucket{ Id: name, Name: name, }).Do() if err != nil && bucketErr == nil { bucketErr = fmt.Errorf("error creating bucket %s: %v", name, err) return } if Verbose { d.Printf("Created bucket %s: %+v", name, b) } }() } waitBucket.Wait() if bucketErr != nil { return bucketErr } } d.Conf.configDir = path.Join(projBucket, configDir) d.Conf.blobDir = path.Join(projBucket, "blobs") return nil }
[ "func", "(", "d", "*", "Deployer", ")", "setBuckets", "(", "ctx", "context", ".", "Context", ",", "storageService", "*", "storage", ".", "Service", ")", "error", "{", "projBucket", ":=", "d", ".", "Conf", ".", "Project", "+", "\"", "\"", "\n\n", "needBucket", ":=", "map", "[", "string", "]", "bool", "{", "projBucket", ":", "true", ",", "}", "\n\n", "buckets", ",", "err", ":=", "storageService", ".", "Buckets", ".", "List", "(", "d", ".", "Conf", ".", "Project", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "for", "_", ",", "it", ":=", "range", "buckets", ".", "Items", "{", "delete", "(", "needBucket", ",", "it", ".", "Name", ")", "\n", "}", "\n", "if", "len", "(", "needBucket", ")", ">", "0", "{", "if", "Verbose", "{", "d", ".", "Printf", "(", "\"", "\"", ",", "needBucket", ")", "\n", "}", "\n", "var", "waitBucket", "sync", ".", "WaitGroup", "\n", "var", "bucketErr", "error", "\n", "for", "name", ":=", "range", "needBucket", "{", "select", "{", "case", "<-", "ctx", ".", "Done", "(", ")", ":", "return", "ctx", ".", "Err", "(", ")", "\n", "default", ":", "}", "\n", "name", ":=", "name", "\n", "waitBucket", ".", "Add", "(", "1", ")", "\n", "go", "func", "(", ")", "{", "defer", "waitBucket", ".", "Done", "(", ")", "\n", "if", "Verbose", "{", "d", ".", "Printf", "(", "\"", "\"", ",", "name", ")", "\n", "}", "\n", "b", ",", "err", ":=", "storageService", ".", "Buckets", ".", "Insert", "(", "d", ".", "Conf", ".", "Project", ",", "&", "storage", ".", "Bucket", "{", "Id", ":", "name", ",", "Name", ":", "name", ",", "}", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "&&", "bucketErr", "==", "nil", "{", "bucketErr", "=", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "name", ",", "err", ")", "\n", "return", "\n", "}", "\n", "if", "Verbose", "{", "d", ".", "Printf", "(", "\"", "\"", ",", "name", ",", "b", ")", "\n", "}", "\n", "}", "(", ")", "\n", "}", "\n", "waitBucket", ".", "Wait", "(", ")", "\n", "if", "bucketErr", "!=", "nil", "{", "return", "bucketErr", "\n", "}", "\n", "}", "\n\n", "d", ".", "Conf", ".", "configDir", "=", "path", ".", "Join", "(", "projBucket", ",", "configDir", ")", "\n", "d", ".", "Conf", ".", "blobDir", "=", "path", ".", "Join", "(", "projBucket", ",", "\"", "\"", ")", "\n", "return", "nil", "\n", "}" ]
// setBuckets defines the buckets needed by the instance and creates them.
[ "setBuckets", "defines", "the", "buckets", "needed", "by", "the", "instance", "and", "creates", "them", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/deploy/gce/deploy.go#L708-L763
train
perkeep/perkeep
pkg/deploy/gce/deploy.go
setFirewall
func (d *Deployer) setFirewall(ctx context.Context, computeService *compute.Service) error { defaultNet, err := computeService.Networks.Get(d.Conf.Project, "default").Do() if err != nil { return fmt.Errorf("error getting default network: %v", err) } needRules := map[string]compute.Firewall{ "default-allow-http": { Name: "default-allow-http", SourceRanges: []string{"0.0.0.0/0"}, SourceTags: []string{"http-server"}, Allowed: []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"80"}}}, Network: defaultNet.SelfLink, }, "default-allow-https": { Name: "default-allow-https", SourceRanges: []string{"0.0.0.0/0"}, SourceTags: []string{"https-server"}, Allowed: []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"443"}}}, Network: defaultNet.SelfLink, }, } rules, err := computeService.Firewalls.List(d.Conf.Project).Do() if err != nil { return fmt.Errorf("error listing rules: %v", err) } for _, it := range rules.Items { delete(needRules, it.Name) } if len(needRules) == 0 { return nil } if Verbose { d.Printf("Need to create rules: %v", needRules) } var wg syncutil.Group for name, rule := range needRules { select { case <-ctx.Done(): return ctx.Err() default: } name, rule := name, rule wg.Go(func() error { if Verbose { d.Printf("Creating rule %s", name) } r, err := computeService.Firewalls.Insert(d.Conf.Project, &rule).Do() if err != nil { return fmt.Errorf("error creating rule %s: %v", name, err) } if Verbose { d.Printf("Created rule %s: %+v", name, r) } return nil }) } return wg.Err() }
go
func (d *Deployer) setFirewall(ctx context.Context, computeService *compute.Service) error { defaultNet, err := computeService.Networks.Get(d.Conf.Project, "default").Do() if err != nil { return fmt.Errorf("error getting default network: %v", err) } needRules := map[string]compute.Firewall{ "default-allow-http": { Name: "default-allow-http", SourceRanges: []string{"0.0.0.0/0"}, SourceTags: []string{"http-server"}, Allowed: []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"80"}}}, Network: defaultNet.SelfLink, }, "default-allow-https": { Name: "default-allow-https", SourceRanges: []string{"0.0.0.0/0"}, SourceTags: []string{"https-server"}, Allowed: []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"443"}}}, Network: defaultNet.SelfLink, }, } rules, err := computeService.Firewalls.List(d.Conf.Project).Do() if err != nil { return fmt.Errorf("error listing rules: %v", err) } for _, it := range rules.Items { delete(needRules, it.Name) } if len(needRules) == 0 { return nil } if Verbose { d.Printf("Need to create rules: %v", needRules) } var wg syncutil.Group for name, rule := range needRules { select { case <-ctx.Done(): return ctx.Err() default: } name, rule := name, rule wg.Go(func() error { if Verbose { d.Printf("Creating rule %s", name) } r, err := computeService.Firewalls.Insert(d.Conf.Project, &rule).Do() if err != nil { return fmt.Errorf("error creating rule %s: %v", name, err) } if Verbose { d.Printf("Created rule %s: %+v", name, r) } return nil }) } return wg.Err() }
[ "func", "(", "d", "*", "Deployer", ")", "setFirewall", "(", "ctx", "context", ".", "Context", ",", "computeService", "*", "compute", ".", "Service", ")", "error", "{", "defaultNet", ",", "err", ":=", "computeService", ".", "Networks", ".", "Get", "(", "d", ".", "Conf", ".", "Project", ",", "\"", "\"", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "needRules", ":=", "map", "[", "string", "]", "compute", ".", "Firewall", "{", "\"", "\"", ":", "{", "Name", ":", "\"", "\"", ",", "SourceRanges", ":", "[", "]", "string", "{", "\"", "\"", "}", ",", "SourceTags", ":", "[", "]", "string", "{", "\"", "\"", "}", ",", "Allowed", ":", "[", "]", "*", "compute", ".", "FirewallAllowed", "{", "{", "IPProtocol", ":", "\"", "\"", ",", "Ports", ":", "[", "]", "string", "{", "\"", "\"", "}", "}", "}", ",", "Network", ":", "defaultNet", ".", "SelfLink", ",", "}", ",", "\"", "\"", ":", "{", "Name", ":", "\"", "\"", ",", "SourceRanges", ":", "[", "]", "string", "{", "\"", "\"", "}", ",", "SourceTags", ":", "[", "]", "string", "{", "\"", "\"", "}", ",", "Allowed", ":", "[", "]", "*", "compute", ".", "FirewallAllowed", "{", "{", "IPProtocol", ":", "\"", "\"", ",", "Ports", ":", "[", "]", "string", "{", "\"", "\"", "}", "}", "}", ",", "Network", ":", "defaultNet", ".", "SelfLink", ",", "}", ",", "}", "\n\n", "rules", ",", "err", ":=", "computeService", ".", "Firewalls", ".", "List", "(", "d", ".", "Conf", ".", "Project", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "for", "_", ",", "it", ":=", "range", "rules", ".", "Items", "{", "delete", "(", "needRules", ",", "it", ".", "Name", ")", "\n", "}", "\n", "if", "len", "(", "needRules", ")", "==", "0", "{", "return", "nil", "\n", "}", "\n\n", "if", "Verbose", "{", "d", ".", "Printf", "(", "\"", "\"", ",", "needRules", ")", "\n", "}", "\n", "var", "wg", "syncutil", ".", "Group", "\n", "for", "name", ",", "rule", ":=", "range", "needRules", "{", "select", "{", "case", "<-", "ctx", ".", "Done", "(", ")", ":", "return", "ctx", ".", "Err", "(", ")", "\n", "default", ":", "}", "\n", "name", ",", "rule", ":=", "name", ",", "rule", "\n", "wg", ".", "Go", "(", "func", "(", ")", "error", "{", "if", "Verbose", "{", "d", ".", "Printf", "(", "\"", "\"", ",", "name", ")", "\n", "}", "\n", "r", ",", "err", ":=", "computeService", ".", "Firewalls", ".", "Insert", "(", "d", ".", "Conf", ".", "Project", ",", "&", "rule", ")", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "name", ",", "err", ")", "\n", "}", "\n", "if", "Verbose", "{", "d", ".", "Printf", "(", "\"", "\"", ",", "name", ",", "r", ")", "\n", "}", "\n", "return", "nil", "\n", "}", ")", "\n", "}", "\n", "return", "wg", ".", "Err", "(", ")", "\n", "}" ]
// setFirewall adds the firewall rules needed for ports 80 & 443 to the default network.
[ "setFirewall", "adds", "the", "firewall", "rules", "needed", "for", "ports", "80", "&", "443", "to", "the", "default", "network", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/deploy/gce/deploy.go#L766-L826
train
perkeep/perkeep
pkg/webserver/webserver.go
loadX509KeyPair
func loadX509KeyPair(certFile, keyFile string) (cert tls.Certificate, err error) { certPEMBlock, err := wkfs.ReadFile(certFile) if err != nil { return } keyPEMBlock, err := wkfs.ReadFile(keyFile) if err != nil { return } return tls.X509KeyPair(certPEMBlock, keyPEMBlock) }
go
func loadX509KeyPair(certFile, keyFile string) (cert tls.Certificate, err error) { certPEMBlock, err := wkfs.ReadFile(certFile) if err != nil { return } keyPEMBlock, err := wkfs.ReadFile(keyFile) if err != nil { return } return tls.X509KeyPair(certPEMBlock, keyPEMBlock) }
[ "func", "loadX509KeyPair", "(", "certFile", ",", "keyFile", "string", ")", "(", "cert", "tls", ".", "Certificate", ",", "err", "error", ")", "{", "certPEMBlock", ",", "err", ":=", "wkfs", ".", "ReadFile", "(", "certFile", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\n", "}", "\n", "keyPEMBlock", ",", "err", ":=", "wkfs", ".", "ReadFile", "(", "keyFile", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\n", "}", "\n", "return", "tls", ".", "X509KeyPair", "(", "certPEMBlock", ",", "keyPEMBlock", ")", "\n", "}" ]
// loadX509KeyPair is a copy of tls.LoadX509KeyPair but using wkfs.
[ "loadX509KeyPair", "is", "a", "copy", "of", "tls", ".", "LoadX509KeyPair", "but", "using", "wkfs", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/webserver/webserver.go#L277-L287
train
perkeep/perkeep
internal/httputil/httputil.go
RequestTargetPort
func RequestTargetPort(req *http.Request) int { _, portStr, err := net.SplitHostPort(req.Host) if err == nil && portStr != "" { port, err := strconv.ParseInt(portStr, 0, 64) if err == nil { return int(port) } } if req.TLS != nil { return 443 } return 80 }
go
func RequestTargetPort(req *http.Request) int { _, portStr, err := net.SplitHostPort(req.Host) if err == nil && portStr != "" { port, err := strconv.ParseInt(portStr, 0, 64) if err == nil { return int(port) } } if req.TLS != nil { return 443 } return 80 }
[ "func", "RequestTargetPort", "(", "req", "*", "http", ".", "Request", ")", "int", "{", "_", ",", "portStr", ",", "err", ":=", "net", ".", "SplitHostPort", "(", "req", ".", "Host", ")", "\n", "if", "err", "==", "nil", "&&", "portStr", "!=", "\"", "\"", "{", "port", ",", "err", ":=", "strconv", ".", "ParseInt", "(", "portStr", ",", "0", ",", "64", ")", "\n", "if", "err", "==", "nil", "{", "return", "int", "(", "port", ")", "\n", "}", "\n", "}", "\n", "if", "req", ".", "TLS", "!=", "nil", "{", "return", "443", "\n", "}", "\n", "return", "80", "\n", "}" ]
// RequestTargetPort returns the port targeted by the client // in req. If not present, it returns 80, or 443 if TLS is used.
[ "RequestTargetPort", "returns", "the", "port", "targeted", "by", "the", "client", "in", "req", ".", "If", "not", "present", "it", "returns", "80", "or", "443", "if", "TLS", "is", "used", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/httputil/httputil.go#L153-L165
train
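A short usage sketch for RequestTargetPort; it assumes in-tree use (the httputil package lives under internal/, so it is only importable from within perkeep) and a hypothetical handler name.

package handlerexample

import (
	"fmt"
	"net/http"

	"perkeep.org/internal/httputil"
)

// portInfo reports which port the client targeted; 80 or 443 is the fallback
// when the Host header carries no explicit port.
func portInfo(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "request targeted port %d\n", httputil.RequestTargetPort(r))
}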
perkeep/perkeep
internal/httputil/httputil.go
RecoverJSON
func RecoverJSON(rw http.ResponseWriter, req *http.Request) { e := recover() if e == nil { return } ServeJSONError(rw, e) }
go
func RecoverJSON(rw http.ResponseWriter, req *http.Request) { e := recover() if e == nil { return } ServeJSONError(rw, e) }
[ "func", "RecoverJSON", "(", "rw", "http", ".", "ResponseWriter", ",", "req", "*", "http", ".", "Request", ")", "{", "e", ":=", "recover", "(", ")", "\n", "if", "e", "==", "nil", "{", "return", "\n", "}", "\n", "ServeJSONError", "(", "rw", ",", "e", ")", "\n", "}" ]
// RecoverJSON is like Recover but returns with a JSON response.
[ "RecoverJSON", "is", "like", "Recover", "but", "returns", "with", "a", "JSON", "response", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/httputil/httputil.go#L182-L188
train
perkeep/perkeep
internal/httputil/httputil.go
MustGetBlobRef
func MustGetBlobRef(req *http.Request, param string) blob.Ref { br, ok := blob.Parse(MustGet(req, param)) if !ok { panic(InvalidParameterError(param)) } return br }
go
func MustGetBlobRef(req *http.Request, param string) blob.Ref { br, ok := blob.Parse(MustGet(req, param)) if !ok { panic(InvalidParameterError(param)) } return br }
[ "func", "MustGetBlobRef", "(", "req", "*", "http", ".", "Request", ",", "param", "string", ")", "blob", ".", "Ref", "{", "br", ",", "ok", ":=", "blob", ".", "Parse", "(", "MustGet", "(", "req", ",", "param", ")", ")", "\n", "if", "!", "ok", "{", "panic", "(", "InvalidParameterError", "(", "param", ")", ")", "\n", "}", "\n", "return", "br", "\n", "}" ]
// MustGetBlobRef returns a non-nil BlobRef from req, as given by param. // If it doesn't, it panics with a value understood by Recover or RecoverJSON.
[ "MustGetBlobRef", "returns", "a", "non", "-", "nil", "BlobRef", "from", "req", "as", "given", "by", "param", ".", "If", "it", "doesn", "t", "it", "panics", "with", "a", "value", "understood", "by", "Recover", "or", "RecoverJSON", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/httputil/httputil.go#L236-L242
train
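A sketch of how RecoverJSON is meant to pair with the MustGet* helpers, again assuming in-tree use of the internal httputil package; the handler and the "blobref" parameter name are hypothetical.

package handlerexample

import (
	"fmt"
	"net/http"

	"perkeep.org/internal/httputil"
)

// describeBlob turns panics from MustGetBlobRef into a JSON error response.
func describeBlob(w http.ResponseWriter, r *http.Request) {
	defer httputil.RecoverJSON(w, r)
	br := httputil.MustGetBlobRef(r, "blobref") // panics if missing or invalid
	fmt.Fprintf(w, "got blob %s\n", br)
}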
perkeep/perkeep
internal/httputil/httputil.go
OptionalInt
func OptionalInt(req *http.Request, param string) int { v := req.FormValue(param) if v == "" { return 0 } i, err := strconv.Atoi(v) if err != nil { panic(InvalidParameterError(param)) } return i }
go
func OptionalInt(req *http.Request, param string) int { v := req.FormValue(param) if v == "" { return 0 } i, err := strconv.Atoi(v) if err != nil { panic(InvalidParameterError(param)) } return i }
[ "func", "OptionalInt", "(", "req", "*", "http", ".", "Request", ",", "param", "string", ")", "int", "{", "v", ":=", "req", ".", "FormValue", "(", "param", ")", "\n", "if", "v", "==", "\"", "\"", "{", "return", "0", "\n", "}", "\n", "i", ",", "err", ":=", "strconv", ".", "Atoi", "(", "v", ")", "\n", "if", "err", "!=", "nil", "{", "panic", "(", "InvalidParameterError", "(", "param", ")", ")", "\n", "}", "\n", "return", "i", "\n", "}" ]
// OptionalInt returns the integer in req given by param, or 0 if not present. // If the form value is not an integer, it panics with a value understood by Recover or RecoverJSON.
[ "OptionalInt", "returns", "the", "integer", "in", "req", "given", "by", "param", "or", "0", "if", "not", "present", ".", "If", "the", "form", "value", "is", "not", "an", "integer", "it", "panics", "with", "a", "value", "understood", "by", "Recover", "or", "RecoverJSON", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/httputil/httputil.go#L246-L256
train
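And a one-liner for OptionalInt, extending the same illustrative handler package as the previous sketch; the "limit" parameter name is made up. A malformed value panics, so the RecoverJSON deferral matters here too.

// listThings reads an optional ?limit= parameter, defaulting to 0 when absent.
func listThings(w http.ResponseWriter, r *http.Request) {
	defer httputil.RecoverJSON(w, r) // OptionalInt panics on a non-integer value
	limit := httputil.OptionalInt(r, "limit")
	fmt.Fprintf(w, "limit=%d\n", limit)
}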
perkeep/perkeep
pkg/server/download.go
fileInfoPacked
func fileInfoPacked(ctx context.Context, sh *search.Handler, src blob.Fetcher, r *http.Request, file blob.Ref) (packFileInfo fileInfo, ok bool) { if sh == nil { return fileInfo{whyNot: "no search"}, false } wf, ok := src.(blobserver.WholeRefFetcher) if !ok { return fileInfo{whyNot: "fetcher type"}, false } if r != nil && r.Header.Get("Range") != "" { // TODO: not handled yet. Maybe not even important, // considering rarity. return fileInfo{whyNot: "range header"}, false } des, err := sh.Describe(ctx, &search.DescribeRequest{BlobRef: file}) if err != nil { log.Printf("ui: fileInfoPacked: skipping fast path due to error from search: %v", err) return fileInfo{whyNot: "search error"}, false } db, ok := des.Meta[file.String()] if !ok || db.File == nil { return fileInfo{whyNot: "search index doesn't know file"}, false } fi := db.File if !fi.WholeRef.Valid() { return fileInfo{whyNot: "no wholeref from search index"}, false } offset := int64(0) rc, wholeSize, err := wf.OpenWholeRef(fi.WholeRef, offset) if err == os.ErrNotExist { return fileInfo{whyNot: "WholeRefFetcher returned ErrNotexist"}, false } if wholeSize != fi.Size { log.Printf("ui: fileInfoPacked: OpenWholeRef size %d != index size %d; ignoring fast path", wholeSize, fi.Size) return fileInfo{whyNot: "WholeRefFetcher and index don't agree"}, false } if err != nil { log.Printf("ui: fileInfoPacked: skipping fast path due to error from WholeRefFetcher (%T): %v", src, err) return fileInfo{whyNot: "WholeRefFetcher error"}, false } modtime := fi.ModTime if modtime.IsAnyZero() { modtime = fi.Time } // TODO(mpl): it'd be nicer to get the FileMode from the describe response, // instead of having to fetch the file schema again, but we don't index the // FileMode for now, so it's not just a matter of adding the FileMode to // camtypes.FileInfo fr, err := schema.NewFileReader(ctx, src, file) fr.Close() if err != nil { return fileInfo{whyNot: fmt.Sprintf("cannot open a file reader: %v", err)}, false } return fileInfo{ mime: fi.MIMEType, name: fi.FileName, size: fi.Size, modtime: modtime.Time(), mode: fr.FileMode(), rs: readerutil.NewFakeSeeker(rc, fi.Size-offset), close: rc.Close, }, true }
go
func fileInfoPacked(ctx context.Context, sh *search.Handler, src blob.Fetcher, r *http.Request, file blob.Ref) (packFileInfo fileInfo, ok bool) { if sh == nil { return fileInfo{whyNot: "no search"}, false } wf, ok := src.(blobserver.WholeRefFetcher) if !ok { return fileInfo{whyNot: "fetcher type"}, false } if r != nil && r.Header.Get("Range") != "" { // TODO: not handled yet. Maybe not even important, // considering rarity. return fileInfo{whyNot: "range header"}, false } des, err := sh.Describe(ctx, &search.DescribeRequest{BlobRef: file}) if err != nil { log.Printf("ui: fileInfoPacked: skipping fast path due to error from search: %v", err) return fileInfo{whyNot: "search error"}, false } db, ok := des.Meta[file.String()] if !ok || db.File == nil { return fileInfo{whyNot: "search index doesn't know file"}, false } fi := db.File if !fi.WholeRef.Valid() { return fileInfo{whyNot: "no wholeref from search index"}, false } offset := int64(0) rc, wholeSize, err := wf.OpenWholeRef(fi.WholeRef, offset) if err == os.ErrNotExist { return fileInfo{whyNot: "WholeRefFetcher returned ErrNotexist"}, false } if wholeSize != fi.Size { log.Printf("ui: fileInfoPacked: OpenWholeRef size %d != index size %d; ignoring fast path", wholeSize, fi.Size) return fileInfo{whyNot: "WholeRefFetcher and index don't agree"}, false } if err != nil { log.Printf("ui: fileInfoPacked: skipping fast path due to error from WholeRefFetcher (%T): %v", src, err) return fileInfo{whyNot: "WholeRefFetcher error"}, false } modtime := fi.ModTime if modtime.IsAnyZero() { modtime = fi.Time } // TODO(mpl): it'd be nicer to get the FileMode from the describe response, // instead of having to fetch the file schema again, but we don't index the // FileMode for now, so it's not just a matter of adding the FileMode to // camtypes.FileInfo fr, err := schema.NewFileReader(ctx, src, file) fr.Close() if err != nil { return fileInfo{whyNot: fmt.Sprintf("cannot open a file reader: %v", err)}, false } return fileInfo{ mime: fi.MIMEType, name: fi.FileName, size: fi.Size, modtime: modtime.Time(), mode: fr.FileMode(), rs: readerutil.NewFakeSeeker(rc, fi.Size-offset), close: rc.Close, }, true }
[ "func", "fileInfoPacked", "(", "ctx", "context", ".", "Context", ",", "sh", "*", "search", ".", "Handler", ",", "src", "blob", ".", "Fetcher", ",", "r", "*", "http", ".", "Request", ",", "file", "blob", ".", "Ref", ")", "(", "packFileInfo", "fileInfo", ",", "ok", "bool", ")", "{", "if", "sh", "==", "nil", "{", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "wf", ",", "ok", ":=", "src", ".", "(", "blobserver", ".", "WholeRefFetcher", ")", "\n", "if", "!", "ok", "{", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "if", "r", "!=", "nil", "&&", "r", ".", "Header", ".", "Get", "(", "\"", "\"", ")", "!=", "\"", "\"", "{", "// TODO: not handled yet. Maybe not even important,", "// considering rarity.", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "des", ",", "err", ":=", "sh", ".", "Describe", "(", "ctx", ",", "&", "search", ".", "DescribeRequest", "{", "BlobRef", ":", "file", "}", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "db", ",", "ok", ":=", "des", ".", "Meta", "[", "file", ".", "String", "(", ")", "]", "\n", "if", "!", "ok", "||", "db", ".", "File", "==", "nil", "{", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "fi", ":=", "db", ".", "File", "\n", "if", "!", "fi", ".", "WholeRef", ".", "Valid", "(", ")", "{", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n\n", "offset", ":=", "int64", "(", "0", ")", "\n", "rc", ",", "wholeSize", ",", "err", ":=", "wf", ".", "OpenWholeRef", "(", "fi", ".", "WholeRef", ",", "offset", ")", "\n", "if", "err", "==", "os", ".", "ErrNotExist", "{", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "if", "wholeSize", "!=", "fi", ".", "Size", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "wholeSize", ",", "fi", ".", "Size", ")", "\n", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "src", ",", "err", ")", "\n", "return", "fileInfo", "{", "whyNot", ":", "\"", "\"", "}", ",", "false", "\n", "}", "\n", "modtime", ":=", "fi", ".", "ModTime", "\n", "if", "modtime", ".", "IsAnyZero", "(", ")", "{", "modtime", "=", "fi", ".", "Time", "\n", "}", "\n", "// TODO(mpl): it'd be nicer to get the FileMode from the describe response,", "// instead of having to fetch the file schema again, but we don't index the", "// FileMode for now, so it's not just a matter of adding the FileMode to", "// camtypes.FileInfo", "fr", ",", "err", ":=", "schema", ".", "NewFileReader", "(", "ctx", ",", "src", ",", "file", ")", "\n", "fr", ".", "Close", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fileInfo", "{", "whyNot", ":", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "err", ")", "}", ",", "false", "\n", "}", "\n", "return", "fileInfo", "{", "mime", ":", "fi", ".", "MIMEType", ",", "name", ":", "fi", ".", "FileName", ",", "size", ":", "fi", ".", "Size", ",", "modtime", ":", "modtime", ".", "Time", "(", ")", ",", "mode", ":", "fr", ".", "FileMode", "(", ")", ",", "rs", ":", "readerutil", ".", "NewFakeSeeker", "(", "rc", ",", "fi", ".", "Size", "-", "offset", ")", ",", "close", ":", "rc", ".", "Close", ",", "}", ",", "true", "\n", "}" ]
// Fast path for blobpacked.
[ "Fast", "path", "for", "blobpacked", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/server/download.go#L198-L260
train
perkeep/perkeep
pkg/server/download.go
isText
func isText(rs io.ReadSeeker) (ok bool, err error) { defer func() { if _, seekErr := rs.Seek(0, io.SeekStart); seekErr != nil { if err == nil { err = seekErr } } }() var buf bytes.Buffer if _, err := io.CopyN(&buf, rs, 1e6); err != nil { if err != io.EOF { return false, err } } return utf8.Valid(buf.Bytes()), nil }
go
func isText(rs io.ReadSeeker) (ok bool, err error) { defer func() { if _, seekErr := rs.Seek(0, io.SeekStart); seekErr != nil { if err == nil { err = seekErr } } }() var buf bytes.Buffer if _, err := io.CopyN(&buf, rs, 1e6); err != nil { if err != io.EOF { return false, err } } return utf8.Valid(buf.Bytes()), nil }
[ "func", "isText", "(", "rs", "io", ".", "ReadSeeker", ")", "(", "ok", "bool", ",", "err", "error", ")", "{", "defer", "func", "(", ")", "{", "if", "_", ",", "seekErr", ":=", "rs", ".", "Seek", "(", "0", ",", "io", ".", "SeekStart", ")", ";", "seekErr", "!=", "nil", "{", "if", "err", "==", "nil", "{", "err", "=", "seekErr", "\n", "}", "\n", "}", "\n", "}", "(", ")", "\n", "var", "buf", "bytes", ".", "Buffer", "\n", "if", "_", ",", "err", ":=", "io", ".", "CopyN", "(", "&", "buf", ",", "rs", ",", "1e6", ")", ";", "err", "!=", "nil", "{", "if", "err", "!=", "io", ".", "EOF", "{", "return", "false", ",", "err", "\n", "}", "\n", "}", "\n", "return", "utf8", ".", "Valid", "(", "buf", ".", "Bytes", "(", ")", ")", ",", "nil", "\n", "}" ]
// isText reports whether the first MB read from rs is valid UTF-8 text.
[ "isText", "reports", "whether", "the", "first", "MB", "read", "from", "rs", "is", "valid", "UTF", "-", "8", "text", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/server/download.go#L375-L390
train
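Since isText is unexported, any caller lives in the same pkg/server package; this sketch assumes that, relies on the package's existing os and log imports, and uses a hypothetical helper name.

// logIfText reports whether the file at path starts with valid UTF-8 text.
func logIfText(path string) error {
	f, err := os.Open(path) // *os.File satisfies io.ReadSeeker
	if err != nil {
		return err
	}
	defer f.Close()
	ok, err := isText(f) // reads up to 1 MB, then seeks back to the start
	if err != nil {
		return err
	}
	if ok {
		log.Printf("%s looks like UTF-8 text", path)
	}
	return nil
}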
perkeep/perkeep
pkg/server/download.go
statFiles
func (dh *DownloadHandler) statFiles(refs []blob.Ref) error { statter, ok := dh.Fetcher.(blobserver.BlobStatter) if !ok { return fmt.Errorf("DownloadHandler.Fetcher %T is not a BlobStatter", dh.Fetcher) } statted := make(map[blob.Ref]bool) err := statter.StatBlobs(context.TODO(), refs, func(sb blob.SizedRef) error { statted[sb.Ref] = true return nil }) if err != nil { log.Printf("Error statting blob files for download archive: %v", err) return fmt.Errorf("error looking for files") } for _, v := range refs { if _, ok := statted[v]; !ok { return fmt.Errorf("%q was not found", v) } } return nil }
go
func (dh *DownloadHandler) statFiles(refs []blob.Ref) error { statter, ok := dh.Fetcher.(blobserver.BlobStatter) if !ok { return fmt.Errorf("DownloadHandler.Fetcher %T is not a BlobStatter", dh.Fetcher) } statted := make(map[blob.Ref]bool) err := statter.StatBlobs(context.TODO(), refs, func(sb blob.SizedRef) error { statted[sb.Ref] = true return nil }) if err != nil { log.Printf("Error statting blob files for download archive: %v", err) return fmt.Errorf("error looking for files") } for _, v := range refs { if _, ok := statted[v]; !ok { return fmt.Errorf("%q was not found", v) } } return nil }
[ "func", "(", "dh", "*", "DownloadHandler", ")", "statFiles", "(", "refs", "[", "]", "blob", ".", "Ref", ")", "error", "{", "statter", ",", "ok", ":=", "dh", ".", "Fetcher", ".", "(", "blobserver", ".", "BlobStatter", ")", "\n", "if", "!", "ok", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "dh", ".", "Fetcher", ")", "\n", "}", "\n", "statted", ":=", "make", "(", "map", "[", "blob", ".", "Ref", "]", "bool", ")", "\n\n", "err", ":=", "statter", ".", "StatBlobs", "(", "context", ".", "TODO", "(", ")", ",", "refs", ",", "func", "(", "sb", "blob", ".", "SizedRef", ")", "error", "{", "statted", "[", "sb", ".", "Ref", "]", "=", "true", "\n", "return", "nil", "\n", "}", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ")", "\n", "}", "\n", "for", "_", ",", "v", ":=", "range", "refs", "{", "if", "_", ",", "ok", ":=", "statted", "[", "v", "]", ";", "!", "ok", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "v", ")", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// statFiles stats the given refs and returns an error if any one of them is not // found. // It is the responsibility of the caller to check that dh.Fetcher is a // blobserver.BlobStatter.
[ "statFiles", "stats", "the", "given", "refs", "and", "returns", "an", "error", "if", "any", "one", "of", "them", "is", "not", "found", ".", "It", "is", "the", "responsibility", "of", "the", "caller", "to", "check", "that", "dh", ".", "Fetcher", "is", "a", "blobserver", ".", "BlobStatter", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/server/download.go#L396-L417
train
perkeep/perkeep
pkg/server/download.go
serveZip
func (dh *DownloadHandler) serveZip(w http.ResponseWriter, r *http.Request) { ctx := r.Context() if r.Method != "POST" { http.Error(w, "Invalid download method", http.StatusBadRequest) return } filesValue := r.FormValue("files") if filesValue == "" { http.Error(w, "No file blobRefs specified", http.StatusBadRequest) return } files := strings.Split(filesValue, ",") var refs []blob.Ref for _, file := range files { br, ok := blob.Parse(file) if !ok { http.Error(w, fmt.Sprintf("%q is not a valid blobRef", file), http.StatusBadRequest) return } refs = append(refs, br) } // We check as many things as we can before writing the zip, because // once we start sending a response we can't http.Error anymore. var allRefs map[blob.Ref]string _, ok := (dh.Fetcher).(*cacher.CachingFetcher) if ok { // If we have a caching fetcher, allRefs and dh.pathByRef are populated with all // the input refs plus their children, so we don't have to redo later the recursing // work that we're alreading doing in checkFiles. dh.pathByRef = make(map[blob.Ref]string, len(refs)) err := dh.checkFiles(ctx, "", refs) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } allRefs = dh.pathByRef } else { _, ok := dh.Fetcher.(blobserver.BlobStatter) if ok { if err := dh.statFiles(refs); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } // If we don't have a cacher we don't know yet of all the possible // children refs, so allRefs is just the input refs, and the // children will be discovered on the fly, while building the zip archive. // This is the case even if we have a statter, because statFiles does not // recurse into directories. allRefs = make(map[blob.Ref]string, len(refs)) for _, v := range refs { allRefs[v] = "" } } h := w.Header() h.Set("Content-Type", "application/zip") zipName := "camli-download-" + time.Now().Format(downloadTimeLayout) + ".zip" h.Set("Content-Disposition", "attachment; filename="+zipName) zw := zip.NewWriter(w) dh.r = r for br := range allRefs { if err := dh.zipFile(ctx, "", br, zw); err != nil { log.Printf("error zipping %v: %v", br, err) // http.Error is of no use since we've already started sending a response panic(http.ErrAbortHandler) } } if err := zw.Close(); err != nil { log.Printf("error closing zip stream: %v", err) panic(http.ErrAbortHandler) } }
go
func (dh *DownloadHandler) serveZip(w http.ResponseWriter, r *http.Request) { ctx := r.Context() if r.Method != "POST" { http.Error(w, "Invalid download method", http.StatusBadRequest) return } filesValue := r.FormValue("files") if filesValue == "" { http.Error(w, "No file blobRefs specified", http.StatusBadRequest) return } files := strings.Split(filesValue, ",") var refs []blob.Ref for _, file := range files { br, ok := blob.Parse(file) if !ok { http.Error(w, fmt.Sprintf("%q is not a valid blobRef", file), http.StatusBadRequest) return } refs = append(refs, br) } // We check as many things as we can before writing the zip, because // once we start sending a response we can't http.Error anymore. var allRefs map[blob.Ref]string _, ok := (dh.Fetcher).(*cacher.CachingFetcher) if ok { // If we have a caching fetcher, allRefs and dh.pathByRef are populated with all // the input refs plus their children, so we don't have to redo later the recursing // work that we're alreading doing in checkFiles. dh.pathByRef = make(map[blob.Ref]string, len(refs)) err := dh.checkFiles(ctx, "", refs) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } allRefs = dh.pathByRef } else { _, ok := dh.Fetcher.(blobserver.BlobStatter) if ok { if err := dh.statFiles(refs); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } // If we don't have a cacher we don't know yet of all the possible // children refs, so allRefs is just the input refs, and the // children will be discovered on the fly, while building the zip archive. // This is the case even if we have a statter, because statFiles does not // recurse into directories. allRefs = make(map[blob.Ref]string, len(refs)) for _, v := range refs { allRefs[v] = "" } } h := w.Header() h.Set("Content-Type", "application/zip") zipName := "camli-download-" + time.Now().Format(downloadTimeLayout) + ".zip" h.Set("Content-Disposition", "attachment; filename="+zipName) zw := zip.NewWriter(w) dh.r = r for br := range allRefs { if err := dh.zipFile(ctx, "", br, zw); err != nil { log.Printf("error zipping %v: %v", br, err) // http.Error is of no use since we've already started sending a response panic(http.ErrAbortHandler) } } if err := zw.Close(); err != nil { log.Printf("error closing zip stream: %v", err) panic(http.ErrAbortHandler) } }
[ "func", "(", "dh", "*", "DownloadHandler", ")", "serveZip", "(", "w", "http", ".", "ResponseWriter", ",", "r", "*", "http", ".", "Request", ")", "{", "ctx", ":=", "r", ".", "Context", "(", ")", "\n", "if", "r", ".", "Method", "!=", "\"", "\"", "{", "http", ".", "Error", "(", "w", ",", "\"", "\"", ",", "http", ".", "StatusBadRequest", ")", "\n", "return", "\n", "}", "\n\n", "filesValue", ":=", "r", ".", "FormValue", "(", "\"", "\"", ")", "\n", "if", "filesValue", "==", "\"", "\"", "{", "http", ".", "Error", "(", "w", ",", "\"", "\"", ",", "http", ".", "StatusBadRequest", ")", "\n", "return", "\n", "}", "\n", "files", ":=", "strings", ".", "Split", "(", "filesValue", ",", "\"", "\"", ")", "\n\n", "var", "refs", "[", "]", "blob", ".", "Ref", "\n", "for", "_", ",", "file", ":=", "range", "files", "{", "br", ",", "ok", ":=", "blob", ".", "Parse", "(", "file", ")", "\n", "if", "!", "ok", "{", "http", ".", "Error", "(", "w", ",", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "file", ")", ",", "http", ".", "StatusBadRequest", ")", "\n", "return", "\n", "}", "\n", "refs", "=", "append", "(", "refs", ",", "br", ")", "\n", "}", "\n\n", "// We check as many things as we can before writing the zip, because", "// once we start sending a response we can't http.Error anymore.", "var", "allRefs", "map", "[", "blob", ".", "Ref", "]", "string", "\n", "_", ",", "ok", ":=", "(", "dh", ".", "Fetcher", ")", ".", "(", "*", "cacher", ".", "CachingFetcher", ")", "\n", "if", "ok", "{", "// If we have a caching fetcher, allRefs and dh.pathByRef are populated with all", "// the input refs plus their children, so we don't have to redo later the recursing", "// work that we're alreading doing in checkFiles.", "dh", ".", "pathByRef", "=", "make", "(", "map", "[", "blob", ".", "Ref", "]", "string", ",", "len", "(", "refs", ")", ")", "\n", "err", ":=", "dh", ".", "checkFiles", "(", "ctx", ",", "\"", "\"", ",", "refs", ")", "\n", "if", "err", "!=", "nil", "{", "http", ".", "Error", "(", "w", ",", "err", ".", "Error", "(", ")", ",", "http", ".", "StatusInternalServerError", ")", "\n", "return", "\n", "}", "\n", "allRefs", "=", "dh", ".", "pathByRef", "\n", "}", "else", "{", "_", ",", "ok", ":=", "dh", ".", "Fetcher", ".", "(", "blobserver", ".", "BlobStatter", ")", "\n", "if", "ok", "{", "if", "err", ":=", "dh", ".", "statFiles", "(", "refs", ")", ";", "err", "!=", "nil", "{", "http", ".", "Error", "(", "w", ",", "err", ".", "Error", "(", ")", ",", "http", ".", "StatusInternalServerError", ")", "\n", "return", "\n", "}", "\n", "}", "\n", "// If we don't have a cacher we don't know yet of all the possible", "// children refs, so allRefs is just the input refs, and the", "// children will be discovered on the fly, while building the zip archive.", "// This is the case even if we have a statter, because statFiles does not", "// recurse into directories.", "allRefs", "=", "make", "(", "map", "[", "blob", ".", "Ref", "]", "string", ",", "len", "(", "refs", ")", ")", "\n", "for", "_", ",", "v", ":=", "range", "refs", "{", "allRefs", "[", "v", "]", "=", "\"", "\"", "\n", "}", "\n", "}", "\n\n", "h", ":=", "w", ".", "Header", "(", ")", "\n", "h", ".", "Set", "(", "\"", "\"", ",", "\"", "\"", ")", "\n", "zipName", ":=", "\"", "\"", "+", "time", ".", "Now", "(", ")", ".", "Format", "(", "downloadTimeLayout", ")", "+", "\"", "\"", "\n", "h", ".", "Set", "(", "\"", "\"", ",", "\"", "\"", "+", "zipName", ")", "\n", "zw", ":=", "zip", ".", "NewWriter", "(", "w", ")", "\n", "dh", ".", "r", "=", "r", "\n", "for", "br", ":=", "range", 
"allRefs", "{", "if", "err", ":=", "dh", ".", "zipFile", "(", "ctx", ",", "\"", "\"", ",", "br", ",", "zw", ")", ";", "err", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "br", ",", "err", ")", "\n", "// http.Error is of no use since we've already started sending a response", "panic", "(", "http", ".", "ErrAbortHandler", ")", "\n", "}", "\n", "}", "\n", "if", "err", ":=", "zw", ".", "Close", "(", ")", ";", "err", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "panic", "(", "http", ".", "ErrAbortHandler", ")", "\n", "}", "\n", "}" ]
// serveZip creates a zip archive from the files provided as // ?files=sha1-foo,sha1-bar,... and serves it as the response.
[ "serveZip", "creates", "a", "zip", "archive", "from", "the", "files", "provided", "as", "?files", "=", "sha1", "-", "foo", "sha1", "-", "bar", "...", "and", "serves", "it", "as", "the", "response", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/server/download.go#L477-L552
train
perkeep/perkeep
pkg/client/enumerate.go
SimpleEnumerateBlobs
func (c *Client) SimpleEnumerateBlobs(ctx context.Context, ch chan<- blob.SizedRef) error { return c.EnumerateBlobsOpts(ctx, ch, EnumerateOpts{}) }
go
func (c *Client) SimpleEnumerateBlobs(ctx context.Context, ch chan<- blob.SizedRef) error { return c.EnumerateBlobsOpts(ctx, ch, EnumerateOpts{}) }
[ "func", "(", "c", "*", "Client", ")", "SimpleEnumerateBlobs", "(", "ctx", "context", ".", "Context", ",", "ch", "chan", "<-", "blob", ".", "SizedRef", ")", "error", "{", "return", "c", ".", "EnumerateBlobsOpts", "(", "ctx", ",", "ch", ",", "EnumerateOpts", "{", "}", ")", "\n", "}" ]
// SimpleEnumerateBlobs sends all blobs to the provided channel. // The channel will be closed, regardless of whether an error is returned.
[ "SimpleEnumerateBlobs", "sends", "all", "blobs", "to", "the", "provided", "channel", ".", "The", "channel", "will", "be", "closed", "regardless", "of", "whether", "an", "error", "is", "returned", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/client/enumerate.go#L39-L41
train
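A minimal enumeration sketch, assuming an already-constructed *client.Client and the perkeep.org import paths; since SimpleEnumerateBlobs closes the channel itself, ranging over it terminates cleanly.

package enumerateexample

import (
	"context"
	"log"

	"perkeep.org/pkg/blob"
	"perkeep.org/pkg/client"
)

// listAll prints every blob the server reports, then returns the enumeration error, if any.
func listAll(ctx context.Context, c *client.Client) error {
	ch := make(chan blob.SizedRef)
	errc := make(chan error, 1)
	go func() {
		errc <- c.SimpleEnumerateBlobs(ctx, ch) // closes ch when done
	}()
	for sb := range ch {
		log.Printf("%v (%d bytes)", sb.Ref, sb.Size)
	}
	return <-errc
}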
perkeep/perkeep
pkg/client/enumerate.go
EnumerateBlobsOpts
func (c *Client) EnumerateBlobsOpts(ctx context.Context, ch chan<- blob.SizedRef, opts EnumerateOpts) error { defer close(ch) if opts.After != "" && opts.MaxWait != 0 { return errors.New("client error: it's invalid to use enumerate After and MaxWaitSec together") } pfx, err := c.prefix() if err != nil { return err } error := func(msg string, e error) error { err := fmt.Errorf("client enumerate error: %s: %v", msg, e) c.printf("%v", err) return err } nSent := 0 keepGoing := true after := opts.After for keepGoing { waitSec := 0 if after == "" { if opts.MaxWait > 0 { waitSec = int(opts.MaxWait.Seconds()) if waitSec == 0 { waitSec = 1 } } } url_ := fmt.Sprintf("%s/camli/enumerate-blobs?after=%s&limit=%d&maxwaitsec=%d", pfx, url.QueryEscape(after), enumerateBatchSize, waitSec) req := c.newRequest(ctx, "GET", url_) resp, err := c.httpClient.Do(req) if err != nil { return error("http request", err) } json, err := c.responseJSONMap("enumerate-blobs", resp) if err != nil { return error("stat json parse error", err) } blobs, ok := getJSONMapArray(json, "blobs") if !ok { return error("response JSON didn't contain 'blobs' array", nil) } for _, v := range blobs { itemJSON, ok := v.(map[string]interface{}) if !ok { return error("item in 'blobs' was malformed", nil) } blobrefStr, ok := getJSONMapString(itemJSON, "blobRef") if !ok { return error("item in 'blobs' was missing string 'blobRef'", nil) } size, ok := getJSONMapUint32(itemJSON, "size") if !ok { return error("item in 'blobs' was missing numeric 'size'", nil) } br, ok := blob.Parse(blobrefStr) if !ok { return error("item in 'blobs' had invalid blobref.", nil) } select { case ch <- blob.SizedRef{Ref: br, Size: uint32(size)}: case <-ctx.Done(): return ctx.Err() } nSent++ if opts.Limit == nSent { // nSent can't be zero at this point, so opts.Limit being 0 // is okay. return nil } } after, keepGoing = getJSONMapString(json, "continueAfter") } return nil }
go
func (c *Client) EnumerateBlobsOpts(ctx context.Context, ch chan<- blob.SizedRef, opts EnumerateOpts) error { defer close(ch) if opts.After != "" && opts.MaxWait != 0 { return errors.New("client error: it's invalid to use enumerate After and MaxWaitSec together") } pfx, err := c.prefix() if err != nil { return err } error := func(msg string, e error) error { err := fmt.Errorf("client enumerate error: %s: %v", msg, e) c.printf("%v", err) return err } nSent := 0 keepGoing := true after := opts.After for keepGoing { waitSec := 0 if after == "" { if opts.MaxWait > 0 { waitSec = int(opts.MaxWait.Seconds()) if waitSec == 0 { waitSec = 1 } } } url_ := fmt.Sprintf("%s/camli/enumerate-blobs?after=%s&limit=%d&maxwaitsec=%d", pfx, url.QueryEscape(after), enumerateBatchSize, waitSec) req := c.newRequest(ctx, "GET", url_) resp, err := c.httpClient.Do(req) if err != nil { return error("http request", err) } json, err := c.responseJSONMap("enumerate-blobs", resp) if err != nil { return error("stat json parse error", err) } blobs, ok := getJSONMapArray(json, "blobs") if !ok { return error("response JSON didn't contain 'blobs' array", nil) } for _, v := range blobs { itemJSON, ok := v.(map[string]interface{}) if !ok { return error("item in 'blobs' was malformed", nil) } blobrefStr, ok := getJSONMapString(itemJSON, "blobRef") if !ok { return error("item in 'blobs' was missing string 'blobRef'", nil) } size, ok := getJSONMapUint32(itemJSON, "size") if !ok { return error("item in 'blobs' was missing numeric 'size'", nil) } br, ok := blob.Parse(blobrefStr) if !ok { return error("item in 'blobs' had invalid blobref.", nil) } select { case ch <- blob.SizedRef{Ref: br, Size: uint32(size)}: case <-ctx.Done(): return ctx.Err() } nSent++ if opts.Limit == nSent { // nSent can't be zero at this point, so opts.Limit being 0 // is okay. return nil } } after, keepGoing = getJSONMapString(json, "continueAfter") } return nil }
[ "func", "(", "c", "*", "Client", ")", "EnumerateBlobsOpts", "(", "ctx", "context", ".", "Context", ",", "ch", "chan", "<-", "blob", ".", "SizedRef", ",", "opts", "EnumerateOpts", ")", "error", "{", "defer", "close", "(", "ch", ")", "\n", "if", "opts", ".", "After", "!=", "\"", "\"", "&&", "opts", ".", "MaxWait", "!=", "0", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "pfx", ",", "err", ":=", "c", ".", "prefix", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "error", ":=", "func", "(", "msg", "string", ",", "e", "error", ")", "error", "{", "err", ":=", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "msg", ",", "e", ")", "\n", "c", ".", "printf", "(", "\"", "\"", ",", "err", ")", "\n", "return", "err", "\n", "}", "\n\n", "nSent", ":=", "0", "\n", "keepGoing", ":=", "true", "\n", "after", ":=", "opts", ".", "After", "\n", "for", "keepGoing", "{", "waitSec", ":=", "0", "\n", "if", "after", "==", "\"", "\"", "{", "if", "opts", ".", "MaxWait", ">", "0", "{", "waitSec", "=", "int", "(", "opts", ".", "MaxWait", ".", "Seconds", "(", ")", ")", "\n", "if", "waitSec", "==", "0", "{", "waitSec", "=", "1", "\n", "}", "\n", "}", "\n", "}", "\n", "url_", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "pfx", ",", "url", ".", "QueryEscape", "(", "after", ")", ",", "enumerateBatchSize", ",", "waitSec", ")", "\n", "req", ":=", "c", ".", "newRequest", "(", "ctx", ",", "\"", "\"", ",", "url_", ")", "\n", "resp", ",", "err", ":=", "c", ".", "httpClient", ".", "Do", "(", "req", ")", "\n", "if", "err", "!=", "nil", "{", "return", "error", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "json", ",", "err", ":=", "c", ".", "responseJSONMap", "(", "\"", "\"", ",", "resp", ")", "\n", "if", "err", "!=", "nil", "{", "return", "error", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n\n", "blobs", ",", "ok", ":=", "getJSONMapArray", "(", "json", ",", "\"", "\"", ")", "\n", "if", "!", "ok", "{", "return", "error", "(", "\"", "\"", ",", "nil", ")", "\n", "}", "\n", "for", "_", ",", "v", ":=", "range", "blobs", "{", "itemJSON", ",", "ok", ":=", "v", ".", "(", "map", "[", "string", "]", "interface", "{", "}", ")", "\n", "if", "!", "ok", "{", "return", "error", "(", "\"", "\"", ",", "nil", ")", "\n", "}", "\n", "blobrefStr", ",", "ok", ":=", "getJSONMapString", "(", "itemJSON", ",", "\"", "\"", ")", "\n", "if", "!", "ok", "{", "return", "error", "(", "\"", "\"", ",", "nil", ")", "\n", "}", "\n", "size", ",", "ok", ":=", "getJSONMapUint32", "(", "itemJSON", ",", "\"", "\"", ")", "\n", "if", "!", "ok", "{", "return", "error", "(", "\"", "\"", ",", "nil", ")", "\n", "}", "\n", "br", ",", "ok", ":=", "blob", ".", "Parse", "(", "blobrefStr", ")", "\n", "if", "!", "ok", "{", "return", "error", "(", "\"", "\"", ",", "nil", ")", "\n", "}", "\n", "select", "{", "case", "ch", "<-", "blob", ".", "SizedRef", "{", "Ref", ":", "br", ",", "Size", ":", "uint32", "(", "size", ")", "}", ":", "case", "<-", "ctx", ".", "Done", "(", ")", ":", "return", "ctx", ".", "Err", "(", ")", "\n", "}", "\n", "nSent", "++", "\n", "if", "opts", ".", "Limit", "==", "nSent", "{", "// nSent can't be zero at this point, so opts.Limit being 0", "// is okay.", "return", "nil", "\n", "}", "\n", "}", "\n\n", "after", ",", "keepGoing", "=", "getJSONMapString", "(", "json", ",", "\"", "\"", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// EnumerateBlobsOpts sends blobs to the provided channel, as directed by opts. // The channel will be closed, regardless of whether an error is returned.
[ "EnumerateBlobsOpts", "sends", "blobs", "to", "the", "provided", "channel", "as", "directed", "by", "opts", ".", "The", "channel", "will", "be", "closed", "regardless", "of", "whether", "an", "error", "is", "returned", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/client/enumerate.go#L62-L141
train
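Usage sketch (illustrative, not from the repo): the enumeration API above is channel-based and closes the channel itself, so the natural consumer is a goroutine that runs EnumerateBlobsOpts plus a range loop that drains it. The perkeep.org/pkg/client and perkeep.org/pkg/blob import paths are assumptions; the EnumerateOpts Limit field and the blob.SizedRef fields are taken from the record above.

package example // hypothetical package for the sketch

import (
	"context"
	"fmt"

	"perkeep.org/pkg/blob"   // assumed import path
	"perkeep.org/pkg/client" // assumed import path
)

// listSomeBlobs prints up to limit enumerated blobs from the server behind c.
func listSomeBlobs(ctx context.Context, c *client.Client, limit int) error {
	ch := make(chan blob.SizedRef, 16)
	errc := make(chan error, 1)
	go func() {
		// EnumerateBlobsOpts closes ch on return, so the range below terminates.
		errc <- c.EnumerateBlobsOpts(ctx, ch, client.EnumerateOpts{Limit: limit})
	}()
	for sb := range ch {
		fmt.Printf("%v %d bytes\n", sb.Ref, sb.Size)
	}
	return <-errc
}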
perkeep/perkeep
pkg/cmdmain/cmdmain.go
RegisterCommand
func RegisterCommand(mode string, makeCmd func(Flags *flag.FlagSet) CommandRunner) { RegisterMode(mode, makeCmd) asNewCommand[mode] = true }
go
func RegisterCommand(mode string, makeCmd func(Flags *flag.FlagSet) CommandRunner) { RegisterMode(mode, makeCmd) asNewCommand[mode] = true }
[ "func", "RegisterCommand", "(", "mode", "string", ",", "makeCmd", "func", "(", "Flags", "*", "flag", ".", "FlagSet", ")", "CommandRunner", ")", "{", "RegisterMode", "(", "mode", ",", "makeCmd", ")", "\n", "asNewCommand", "[", "mode", "]", "=", "true", "\n", "}" ]
// RegisterCommand adds a mode to the list of modes for the main command, and // also specifies that this mode is just another executable that runs as a new // cmdmain command. The executable to run is determined by the LookPath implementation // for this mode.
[ "RegisterCommand", "adds", "a", "mode", "to", "the", "list", "of", "modes", "for", "the", "main", "command", "and", "also", "specifies", "that", "this", "mode", "is", "just", "another", "executable", "that", "runs", "as", "a", "new", "cmdmain", "command", ".", "The", "executable", "to", "run", "is", "determined", "by", "the", "LookPath", "implementation", "for", "this", "mode", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/cmdmain/cmdmain.go#L149-L152
train
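Illustrative sketch of registering an exec-style mode, under two assumptions not shown in this record: that cmdmain.CommandRunner is satisfied by a RunCommand(args []string) error method, and that the ExecRunner interface referenced by runAsNewCommand (next record) only requires LookPath() (string, error). The pk-foo binary name is made up.

package example // hypothetical

import (
	"flag"
	"os/exec"

	"perkeep.org/pkg/cmdmain" // assumed import path
)

type fooCmd struct{}

// RunCommand satisfies the assumed CommandRunner interface; for an exec-style
// mode, cmdmain re-execs the binary instead of calling this.
func (fooCmd) RunCommand(args []string) error { return nil }

// LookPath makes fooCmd an ExecRunner, pointing cmdmain at the external binary.
func (fooCmd) LookPath() (string, error) { return exec.LookPath("pk-foo") }

func init() {
	cmdmain.RegisterCommand("foo", func(flags *flag.FlagSet) cmdmain.CommandRunner {
		return fooCmd{}
	})
}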
perkeep/perkeep
pkg/cmdmain/cmdmain.go
runAsNewCommand
func runAsNewCommand(cmd CommandRunner, mode string) { execCmd, ok := cmd.(ExecRunner) if !ok { panic(fmt.Sprintf("%v does not implement ExecRunner", mode)) } cmdPath, err := execCmd.LookPath() if err != nil { Errorf("Error: %v\n", err) Exit(2) } allArgs := shiftFlags(mode) if err := runExec(cmdPath, allArgs, newCopyEnv()); err != nil { panic(fmt.Sprintf("running %v should have ended with an os.Exit, and not leave us with that error: %v", cmdPath, err)) } }
go
func runAsNewCommand(cmd CommandRunner, mode string) { execCmd, ok := cmd.(ExecRunner) if !ok { panic(fmt.Sprintf("%v does not implement ExecRunner", mode)) } cmdPath, err := execCmd.LookPath() if err != nil { Errorf("Error: %v\n", err) Exit(2) } allArgs := shiftFlags(mode) if err := runExec(cmdPath, allArgs, newCopyEnv()); err != nil { panic(fmt.Sprintf("running %v should have ended with an os.Exit, and not leave us with that error: %v", cmdPath, err)) } }
[ "func", "runAsNewCommand", "(", "cmd", "CommandRunner", ",", "mode", "string", ")", "{", "execCmd", ",", "ok", ":=", "cmd", ".", "(", "ExecRunner", ")", "\n", "if", "!", "ok", "{", "panic", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "mode", ")", ")", "\n", "}", "\n", "cmdPath", ",", "err", ":=", "execCmd", ".", "LookPath", "(", ")", "\n", "if", "err", "!=", "nil", "{", "Errorf", "(", "\"", "\\n", "\"", ",", "err", ")", "\n", "Exit", "(", "2", ")", "\n", "}", "\n", "allArgs", ":=", "shiftFlags", "(", "mode", ")", "\n", "if", "err", ":=", "runExec", "(", "cmdPath", ",", "allArgs", ",", "newCopyEnv", "(", ")", ")", ";", "err", "!=", "nil", "{", "panic", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "cmdPath", ",", "err", ")", ")", "\n", "}", "\n", "}" ]
// runAsNewCommand runs the executable specified by cmd's LookPath, which means // cmd must implement the ExecRunner interface. The executable must be a binary of // a program that runs Main.
[ "runAsNewCommand", "runs", "the", "executable", "specified", "by", "cmd", "s", "LookPath", "which", "means", "cmd", "must", "implement", "the", "ExecRunner", "interface", ".", "The", "executable", "must", "be", "a", "binary", "of", "a", "program", "that", "runs", "Main", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/cmdmain/cmdmain.go#L341-L355
train
perkeep/perkeep
pkg/cmdmain/cmdmain.go
Printf
func Printf(format string, args ...interface{}) { if *FlagVerbose { fmt.Fprintf(Stderr, format, args...) } }
go
func Printf(format string, args ...interface{}) { if *FlagVerbose { fmt.Fprintf(Stderr, format, args...) } }
[ "func", "Printf", "(", "format", "string", ",", "args", "...", "interface", "{", "}", ")", "{", "if", "*", "FlagVerbose", "{", "fmt", ".", "Fprintf", "(", "Stderr", ",", "format", ",", "args", "...", ")", "\n", "}", "\n", "}" ]
// Printf prints to Stderr if FlagVerbose, and is silent otherwise.
[ "Printf", "prints", "to", "Stderr", "if", "FlagVerbose", "and", "is", "silent", "otherwise", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/cmdmain/cmdmain.go#L377-L381
train
perkeep/perkeep
server/perkeepd/logging.go
maybeSetupGoogleCloudLogging
func maybeSetupGoogleCloudLogging() io.Closer { if flagGCEProjectID == "" && flagGCELogName == "" && flagGCEJWTFile == "" { return types.NopCloser } if flagGCEProjectID == "" || flagGCELogName == "" || flagGCEJWTFile == "" { exitf("All of --gce_project_id, --gce_log_name, and --gce_jwt_file must be specified for logging on Google Cloud Logging.") } ctx := context.Background() logc, err := logging.NewClient(ctx, flagGCEProjectID, option.WithServiceAccountFile(flagGCEJWTFile)) if err != nil { exitf("Error creating GCL client: %v", err) } if err := logc.Ping(ctx); err != nil { exitf("Google logging client not ready (ping failed): %v", err) } logw := gclWriter{ severity: logging.Debug, logger: logc.Logger(flagGCELogName), } log.SetOutput(io.MultiWriter(os.Stderr, logw)) return logc }
go
func maybeSetupGoogleCloudLogging() io.Closer { if flagGCEProjectID == "" && flagGCELogName == "" && flagGCEJWTFile == "" { return types.NopCloser } if flagGCEProjectID == "" || flagGCELogName == "" || flagGCEJWTFile == "" { exitf("All of --gce_project_id, --gce_log_name, and --gce_jwt_file must be specified for logging on Google Cloud Logging.") } ctx := context.Background() logc, err := logging.NewClient(ctx, flagGCEProjectID, option.WithServiceAccountFile(flagGCEJWTFile)) if err != nil { exitf("Error creating GCL client: %v", err) } if err := logc.Ping(ctx); err != nil { exitf("Google logging client not ready (ping failed): %v", err) } logw := gclWriter{ severity: logging.Debug, logger: logc.Logger(flagGCELogName), } log.SetOutput(io.MultiWriter(os.Stderr, logw)) return logc }
[ "func", "maybeSetupGoogleCloudLogging", "(", ")", "io", ".", "Closer", "{", "if", "flagGCEProjectID", "==", "\"", "\"", "&&", "flagGCELogName", "==", "\"", "\"", "&&", "flagGCEJWTFile", "==", "\"", "\"", "{", "return", "types", ".", "NopCloser", "\n", "}", "\n", "if", "flagGCEProjectID", "==", "\"", "\"", "||", "flagGCELogName", "==", "\"", "\"", "||", "flagGCEJWTFile", "==", "\"", "\"", "{", "exitf", "(", "\"", "\"", ")", "\n", "}", "\n", "ctx", ":=", "context", ".", "Background", "(", ")", "\n", "logc", ",", "err", ":=", "logging", ".", "NewClient", "(", "ctx", ",", "flagGCEProjectID", ",", "option", ".", "WithServiceAccountFile", "(", "flagGCEJWTFile", ")", ")", "\n", "if", "err", "!=", "nil", "{", "exitf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "if", "err", ":=", "logc", ".", "Ping", "(", "ctx", ")", ";", "err", "!=", "nil", "{", "exitf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "logw", ":=", "gclWriter", "{", "severity", ":", "logging", ".", "Debug", ",", "logger", ":", "logc", ".", "Logger", "(", "flagGCELogName", ")", ",", "}", "\n", "log", ".", "SetOutput", "(", "io", ".", "MultiWriter", "(", "os", ".", "Stderr", ",", "logw", ")", ")", "\n", "return", "logc", "\n", "}" ]
// if a non-nil logging Client is returned, it should be closed before the // program terminates to flush any buffered log entries.
[ "if", "a", "non", "-", "nil", "logging", "Client", "is", "returned", "it", "should", "be", "closed", "before", "the", "program", "terminates", "to", "flush", "any", "buffered", "log", "entries", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/server/perkeepd/logging.go#L70-L92
train
perkeep/perkeep
server/perkeepd/logging.go
setupLogging
func setupLogging() io.Closer { if *flagSyslog && runtime.GOOS == "windows" { exitf("-syslog not available on windows") } if fn := setupLoggingSyslog; fn != nil { if flusher := fn(); flusher != nil { return flusher } } if env.OnGCE() { lw, err := gce.LogWriter() if err != nil { log.Fatalf("Error setting up logging: %v", err) } log.SetOutput(lw) return lw } return maybeSetupGoogleCloudLogging() }
go
func setupLogging() io.Closer { if *flagSyslog && runtime.GOOS == "windows" { exitf("-syslog not available on windows") } if fn := setupLoggingSyslog; fn != nil { if flusher := fn(); flusher != nil { return flusher } } if env.OnGCE() { lw, err := gce.LogWriter() if err != nil { log.Fatalf("Error setting up logging: %v", err) } log.SetOutput(lw) return lw } return maybeSetupGoogleCloudLogging() }
[ "func", "setupLogging", "(", ")", "io", ".", "Closer", "{", "if", "*", "flagSyslog", "&&", "runtime", ".", "GOOS", "==", "\"", "\"", "{", "exitf", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "fn", ":=", "setupLoggingSyslog", ";", "fn", "!=", "nil", "{", "if", "flusher", ":=", "fn", "(", ")", ";", "flusher", "!=", "nil", "{", "return", "flusher", "\n", "}", "\n", "}", "\n", "if", "env", ".", "OnGCE", "(", ")", "{", "lw", ",", "err", ":=", "gce", ".", "LogWriter", "(", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Fatalf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "log", ".", "SetOutput", "(", "lw", ")", "\n", "return", "lw", "\n", "}", "\n", "return", "maybeSetupGoogleCloudLogging", "(", ")", "\n", "}" ]
// setupLogging sets up logging and returns an io.Closer that flushes logs.
[ "setupLogging", "sets", "up", "logging", "and", "returns", "an", "io", ".", "Closer", "that", "flushes", "logs", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/server/perkeepd/logging.go#L99-L117
train
perkeep/perkeep
pkg/search/websocket.go
writePump
func (c *wsConn) writePump() { ticker := time.NewTicker(pingPeriod) defer func() { ticker.Stop() c.ws.Close() }() for { select { case message, ok := <-c.send: if !ok { c.write(websocket.CloseMessage, []byte{}) return } if err := c.write(websocket.TextMessage, message); err != nil { return } case <-ticker.C: if err := c.write(websocket.PingMessage, []byte{}); err != nil { return } } } }
go
func (c *wsConn) writePump() { ticker := time.NewTicker(pingPeriod) defer func() { ticker.Stop() c.ws.Close() }() for { select { case message, ok := <-c.send: if !ok { c.write(websocket.CloseMessage, []byte{}) return } if err := c.write(websocket.TextMessage, message); err != nil { return } case <-ticker.C: if err := c.write(websocket.PingMessage, []byte{}); err != nil { return } } } }
[ "func", "(", "c", "*", "wsConn", ")", "writePump", "(", ")", "{", "ticker", ":=", "time", ".", "NewTicker", "(", "pingPeriod", ")", "\n", "defer", "func", "(", ")", "{", "ticker", ".", "Stop", "(", ")", "\n", "c", ".", "ws", ".", "Close", "(", ")", "\n", "}", "(", ")", "\n", "for", "{", "select", "{", "case", "message", ",", "ok", ":=", "<-", "c", ".", "send", ":", "if", "!", "ok", "{", "c", ".", "write", "(", "websocket", ".", "CloseMessage", ",", "[", "]", "byte", "{", "}", ")", "\n", "return", "\n", "}", "\n", "if", "err", ":=", "c", ".", "write", "(", "websocket", ".", "TextMessage", ",", "message", ")", ";", "err", "!=", "nil", "{", "return", "\n", "}", "\n", "case", "<-", "ticker", ".", "C", ":", "if", "err", ":=", "c", ".", "write", "(", "websocket", ".", "PingMessage", ",", "[", "]", "byte", "{", "}", ")", ";", "err", "!=", "nil", "{", "return", "\n", "}", "\n", "}", "\n", "}", "\n", "}" ]
// writePump pumps messages from the hub to the websocket connection.
[ "writePump", "pumps", "messages", "from", "the", "hub", "to", "the", "websocket", "connection", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/search/websocket.go#L279-L301
train
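The wsConn type here is unexported, so as a transferable illustration, here is the same single-writer pump written directly against gorilla/websocket: one goroutine owns every write on the connection, draining a send channel and emitting pings on a ticker. The pingPeriod value is an arbitrary stand-in for the package constant.

package example // hypothetical

import (
	"time"

	"github.com/gorilla/websocket"
)

const pingPeriod = 50 * time.Second // stand-in; the real value is tuned to the read deadline

// writePump drains send until it is closed, pinging the peer periodically.
func writePump(ws *websocket.Conn, send <-chan []byte) {
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		ticker.Stop()
		ws.Close()
	}()
	for {
		select {
		case msg, ok := <-send:
			if !ok {
				// Channel closed by the hub: tell the peer we're done.
				ws.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}
			if err := ws.WriteMessage(websocket.TextMessage, msg); err != nil {
				return
			}
		case <-ticker.C:
			if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
				return
			}
		}
	}
}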
perkeep/perkeep
pkg/blobserver/google/drive/service/service.go
Get
func (s *DriveService) Get(ctx context.Context, title string) (*client.File, error) { // TODO: use field selectors query := fmt.Sprintf("'%s' in parents and title = '%s'", s.parentID, title) req := s.apiservice.Files.List().Context(ctx).Q(query) files, err := req.Do() if err != nil { return nil, err } if len(files.Items) < 1 { return nil, os.ErrNotExist } return files.Items[0], nil }
go
func (s *DriveService) Get(ctx context.Context, title string) (*client.File, error) { // TODO: use field selectors query := fmt.Sprintf("'%s' in parents and title = '%s'", s.parentID, title) req := s.apiservice.Files.List().Context(ctx).Q(query) files, err := req.Do() if err != nil { return nil, err } if len(files.Items) < 1 { return nil, os.ErrNotExist } return files.Items[0], nil }
[ "func", "(", "s", "*", "DriveService", ")", "Get", "(", "ctx", "context", ".", "Context", ",", "title", "string", ")", "(", "*", "client", ".", "File", ",", "error", ")", "{", "// TODO: use field selectors", "query", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "s", ".", "parentID", ",", "title", ")", "\n", "req", ":=", "s", ".", "apiservice", ".", "Files", ".", "List", "(", ")", ".", "Context", "(", "ctx", ")", ".", "Q", "(", "query", ")", "\n", "files", ",", "err", ":=", "req", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "if", "len", "(", "files", ".", "Items", ")", "<", "1", "{", "return", "nil", ",", "os", ".", "ErrNotExist", "\n", "}", "\n", "return", "files", ".", "Items", "[", "0", "]", ",", "nil", "\n", "}" ]
// Get retrieves the file whose title equals the provided title and which is a child of // the parentID given to New. If not found, os.ErrNotExist is returned.
[ "Get", "retrieves", "the", "file", "whose", "title", "equals", "the", "provided", "title", "and", "which", "is", "a", "child", "of", "the", "parentID", "given", "to", "New", ".", "If", "not", "found", "os", ".", "ErrNotExist", "is", "returned", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/google/drive/service/service.go#L65-L77
train
perkeep/perkeep
pkg/blobserver/google/drive/service/service.go
List
func (s *DriveService) List(pageToken string, limit int) (files []*client.File, next string, err error) { req := s.apiservice.Files.List() req.Q(fmt.Sprintf("'%s' in parents and mimeType != '%s'", s.parentID, MimeTypeDriveFolder)) if pageToken != "" { req.PageToken(pageToken) } if limit > 0 { req.MaxResults(int64(limit)) } result, err := req.Do() if err != nil { return } return result.Items, result.NextPageToken, err }
go
func (s *DriveService) List(pageToken string, limit int) (files []*client.File, next string, err error) { req := s.apiservice.Files.List() req.Q(fmt.Sprintf("'%s' in parents and mimeType != '%s'", s.parentID, MimeTypeDriveFolder)) if pageToken != "" { req.PageToken(pageToken) } if limit > 0 { req.MaxResults(int64(limit)) } result, err := req.Do() if err != nil { return } return result.Items, result.NextPageToken, err }
[ "func", "(", "s", "*", "DriveService", ")", "List", "(", "pageToken", "string", ",", "limit", "int", ")", "(", "files", "[", "]", "*", "client", ".", "File", ",", "next", "string", ",", "err", "error", ")", "{", "req", ":=", "s", ".", "apiservice", ".", "Files", ".", "List", "(", ")", "\n", "req", ".", "Q", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "s", ".", "parentID", ",", "MimeTypeDriveFolder", ")", ")", "\n\n", "if", "pageToken", "!=", "\"", "\"", "{", "req", ".", "PageToken", "(", "pageToken", ")", "\n", "}", "\n\n", "if", "limit", ">", "0", "{", "req", ".", "MaxResults", "(", "int64", "(", "limit", ")", ")", "\n", "}", "\n\n", "result", ",", "err", ":=", "req", ".", "Do", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\n", "}", "\n", "return", "result", ".", "Items", ",", "result", ".", "NextPageToken", ",", "err", "\n", "}" ]
// List returns a list of files. When limit is greater than zero a paginated list is returned // using the next response as a pageToken in subsequent calls.
[ "List", "returns", "a", "list", "of", "files", ".", "When", "limit", "is", "greater", "than", "zero", "a", "paginated", "list", "is", "returned", "using", "the", "next", "response", "as", "a", "pageToken", "in", "subsequent", "calls", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/google/drive/service/service.go#L81-L98
train
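A small paging loop over List (illustrative): pass the returned next token back in until it comes back empty. The service import path is an assumption.

package example // hypothetical

import (
	drivesvc "perkeep.org/pkg/blobserver/google/drive/service" // assumed import path
)

// countAll walks every page of results, 100 files at a time.
func countAll(s *drivesvc.DriveService) (int, error) {
	n := 0
	pageToken := ""
	for {
		files, next, err := s.List(pageToken, 100)
		if err != nil {
			return 0, err
		}
		n += len(files)
		if next == "" { // no more pages
			return n, nil
		}
		pageToken = next
	}
}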
perkeep/perkeep
pkg/blobserver/google/drive/service/service.go
Upsert
func (s *DriveService) Upsert(ctx context.Context, title string, data io.Reader) (file *client.File, err error) { if file, err = s.Get(ctx, title); err != nil { if !os.IsNotExist(err) { return } } if file == nil { file = &client.File{Title: title} file.Parents = []*client.ParentReference{ {Id: s.parentID}, } file.MimeType = MimeTypeCamliBlob return s.apiservice.Files.Insert(file).Media(data).Context(ctx).Do() } // TODO: handle large blobs return s.apiservice.Files.Update(file.Id, file).Media(data).Context(ctx).Do() }
go
func (s *DriveService) Upsert(ctx context.Context, title string, data io.Reader) (file *client.File, err error) { if file, err = s.Get(ctx, title); err != nil { if !os.IsNotExist(err) { return } } if file == nil { file = &client.File{Title: title} file.Parents = []*client.ParentReference{ {Id: s.parentID}, } file.MimeType = MimeTypeCamliBlob return s.apiservice.Files.Insert(file).Media(data).Context(ctx).Do() } // TODO: handle large blobs return s.apiservice.Files.Update(file.Id, file).Media(data).Context(ctx).Do() }
[ "func", "(", "s", "*", "DriveService", ")", "Upsert", "(", "ctx", "context", ".", "Context", ",", "title", "string", ",", "data", "io", ".", "Reader", ")", "(", "file", "*", "client", ".", "File", ",", "err", "error", ")", "{", "if", "file", ",", "err", "=", "s", ".", "Get", "(", "ctx", ",", "title", ")", ";", "err", "!=", "nil", "{", "if", "!", "os", ".", "IsNotExist", "(", "err", ")", "{", "return", "\n", "}", "\n", "}", "\n", "if", "file", "==", "nil", "{", "file", "=", "&", "client", ".", "File", "{", "Title", ":", "title", "}", "\n", "file", ".", "Parents", "=", "[", "]", "*", "client", ".", "ParentReference", "{", "{", "Id", ":", "s", ".", "parentID", "}", ",", "}", "\n", "file", ".", "MimeType", "=", "MimeTypeCamliBlob", "\n", "return", "s", ".", "apiservice", ".", "Files", ".", "Insert", "(", "file", ")", ".", "Media", "(", "data", ")", ".", "Context", "(", "ctx", ")", ".", "Do", "(", ")", "\n", "}", "\n\n", "// TODO: handle large blobs", "return", "s", ".", "apiservice", ".", "Files", ".", "Update", "(", "file", ".", "Id", ",", "file", ")", ".", "Media", "(", "data", ")", ".", "Context", "(", "ctx", ")", ".", "Do", "(", ")", "\n", "}" ]
// Upsert inserts a file, or updates if such a file exists.
[ "Upsert", "inserts", "a", "file", "or", "updates", "if", "such", "a", "file", "exists", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/google/drive/service/service.go#L101-L118
train
perkeep/perkeep
pkg/blobserver/google/drive/service/service.go
Fetch
func (s *DriveService) Fetch(ctx context.Context, title string) (body io.ReadCloser, size uint32, err error) { file, err := s.Get(ctx, title) if err != nil { return } // TODO: maybe in the case of no download link, remove the file. // The file should have malformed or converted to a Docs file // unwantedly. // TODO(mpl): I do not think the above comment is accurate. It // looks like at least one case we do not get a DownloadUrl is when // the UI would make you pick a conversion format first (spreadsheet, // doc, etc). -> we should see if the API offers the possibility to do // that conversion. and we could pass the type(s) we want (pdf, xls, doc...) // as arguments (in an options struct) to Fetch. if file.DownloadUrl == "" { err = errNoDownload return } req, _ := http.NewRequest("GET", file.DownloadUrl, nil) req.WithContext(ctx) var resp *http.Response if resp, err = s.client.Transport.RoundTrip(req); err != nil { return } if file.FileSize > math.MaxUint32 || file.FileSize < 0 { err = errors.New("file too big") } return resp.Body, uint32(file.FileSize), err }
go
func (s *DriveService) Fetch(ctx context.Context, title string) (body io.ReadCloser, size uint32, err error) { file, err := s.Get(ctx, title) if err != nil { return } // TODO: maybe in the case of no download link, remove the file. // The file should have malformed or converted to a Docs file // unwantedly. // TODO(mpl): I do not think the above comment is accurate. It // looks like at least one case we do not get a DownloadUrl is when // the UI would make you pick a conversion format first (spreadsheet, // doc, etc). -> we should see if the API offers the possibility to do // that conversion. and we could pass the type(s) we want (pdf, xls, doc...) // as arguments (in an options struct) to Fetch. if file.DownloadUrl == "" { err = errNoDownload return } req, _ := http.NewRequest("GET", file.DownloadUrl, nil) req.WithContext(ctx) var resp *http.Response if resp, err = s.client.Transport.RoundTrip(req); err != nil { return } if file.FileSize > math.MaxUint32 || file.FileSize < 0 { err = errors.New("file too big") } return resp.Body, uint32(file.FileSize), err }
[ "func", "(", "s", "*", "DriveService", ")", "Fetch", "(", "ctx", "context", ".", "Context", ",", "title", "string", ")", "(", "body", "io", ".", "ReadCloser", ",", "size", "uint32", ",", "err", "error", ")", "{", "file", ",", "err", ":=", "s", ".", "Get", "(", "ctx", ",", "title", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\n", "}", "\n", "// TODO: maybe in the case of no download link, remove the file.", "// The file should have malformed or converted to a Docs file", "// unwantedly.", "// TODO(mpl): I do not think the above comment is accurate. It", "// looks like at least one case we do not get a DownloadUrl is when", "// the UI would make you pick a conversion format first (spreadsheet,", "// doc, etc). -> we should see if the API offers the possibility to do", "// that conversion. and we could pass the type(s) we want (pdf, xls, doc...)", "// as arguments (in an options struct) to Fetch.", "if", "file", ".", "DownloadUrl", "==", "\"", "\"", "{", "err", "=", "errNoDownload", "\n", "return", "\n", "}", "\n\n", "req", ",", "_", ":=", "http", ".", "NewRequest", "(", "\"", "\"", ",", "file", ".", "DownloadUrl", ",", "nil", ")", "\n", "req", ".", "WithContext", "(", "ctx", ")", "\n", "var", "resp", "*", "http", ".", "Response", "\n", "if", "resp", ",", "err", "=", "s", ".", "client", ".", "Transport", ".", "RoundTrip", "(", "req", ")", ";", "err", "!=", "nil", "{", "return", "\n", "}", "\n", "if", "file", ".", "FileSize", ">", "math", ".", "MaxUint32", "||", "file", ".", "FileSize", "<", "0", "{", "err", "=", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "return", "resp", ".", "Body", ",", "uint32", "(", "file", ".", "FileSize", ")", ",", "err", "\n", "}" ]
// Fetch retrieves the metadata and contents of a file.
[ "Fetch", "retrieves", "the", "metadata", "and", "contents", "of", "a", "file", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/google/drive/service/service.go#L123-L152
train
perkeep/perkeep
pkg/blobserver/google/drive/service/service.go
Stat
func (s *DriveService) Stat(ctx context.Context, title string) (int64, error) { file, err := s.Get(ctx, title) if err != nil || file == nil { return 0, err } return file.FileSize, err }
go
func (s *DriveService) Stat(ctx context.Context, title string) (int64, error) { file, err := s.Get(ctx, title) if err != nil || file == nil { return 0, err } return file.FileSize, err }
[ "func", "(", "s", "*", "DriveService", ")", "Stat", "(", "ctx", "context", ".", "Context", ",", "title", "string", ")", "(", "int64", ",", "error", ")", "{", "file", ",", "err", ":=", "s", ".", "Get", "(", "ctx", ",", "title", ")", "\n", "if", "err", "!=", "nil", "||", "file", "==", "nil", "{", "return", "0", ",", "err", "\n", "}", "\n", "return", "file", ".", "FileSize", ",", "err", "\n", "}" ]
// Stat retrieves file metadata and returns // file size. Returns error if file is not found.
[ "Stat", "retrieves", "file", "metadata", "and", "returns", "file", "size", ".", "Returns", "error", "if", "file", "is", "not", "found", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/google/drive/service/service.go#L156-L162
train
perkeep/perkeep
pkg/blobserver/google/drive/service/service.go
Trash
func (s *DriveService) Trash(ctx context.Context, title string) error { file, err := s.Get(ctx, title) if err != nil { if os.IsNotExist(err) { return nil } return err } _, err = s.apiservice.Files.Trash(file.Id).Context(ctx).Do() return err }
go
func (s *DriveService) Trash(ctx context.Context, title string) error { file, err := s.Get(ctx, title) if err != nil { if os.IsNotExist(err) { return nil } return err } _, err = s.apiservice.Files.Trash(file.Id).Context(ctx).Do() return err }
[ "func", "(", "s", "*", "DriveService", ")", "Trash", "(", "ctx", "context", ".", "Context", ",", "title", "string", ")", "error", "{", "file", ",", "err", ":=", "s", ".", "Get", "(", "ctx", ",", "title", ")", "\n", "if", "err", "!=", "nil", "{", "if", "os", ".", "IsNotExist", "(", "err", ")", "{", "return", "nil", "\n", "}", "\n", "return", "err", "\n", "}", "\n", "_", ",", "err", "=", "s", ".", "apiservice", ".", "Files", ".", "Trash", "(", "file", ".", "Id", ")", ".", "Context", "(", "ctx", ")", ".", "Do", "(", ")", "\n", "return", "err", "\n", "}" ]
// Trash trashes the file with the given title.
[ "Trash", "trashes", "the", "file", "with", "the", "given", "title", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/google/drive/service/service.go#L165-L175
train
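Putting the Upsert/Fetch/Trash wrappers from the last few records together, a round trip looks like the sketch below (illustrative; the import path is an assumption, and the error wrapping is arbitrary).

package example // hypothetical

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"

	drivesvc "perkeep.org/pkg/blobserver/google/drive/service" // assumed import path
)

// roundTrip writes payload under title, reads it back, then trashes it.
func roundTrip(ctx context.Context, s *drivesvc.DriveService, title string, payload []byte) error {
	if _, err := s.Upsert(ctx, title, bytes.NewReader(payload)); err != nil {
		return fmt.Errorf("upsert: %v", err)
	}
	body, size, err := s.Fetch(ctx, title)
	if err != nil {
		return fmt.Errorf("fetch: %v", err)
	}
	defer body.Close()
	got, err := io.ReadAll(body)
	if err != nil {
		return err
	}
	log.Printf("read %d/%d bytes back", len(got), size)
	// Trash returns nil if the file is already gone, per the record above.
	return s.Trash(ctx, title)
}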
perkeep/perkeep
server/perkeepd/ui/goui/mapquery/mapquery.go
Send
func (q *Query) Send() { if q.pending { q.Cleanup() return } q.pending = true go func() { resp, err := q.send() if err != nil { dom.GetWindow().Alert(fmt.Sprintf("%v", err)) } q.Callback(string(resp)) }() }
go
func (q *Query) Send() { if q.pending { q.Cleanup() return } q.pending = true go func() { resp, err := q.send() if err != nil { dom.GetWindow().Alert(fmt.Sprintf("%v", err)) } q.Callback(string(resp)) }() }
[ "func", "(", "q", "*", "Query", ")", "Send", "(", ")", "{", "if", "q", ".", "pending", "{", "q", ".", "Cleanup", "(", ")", "\n", "return", "\n", "}", "\n", "q", ".", "pending", "=", "true", "\n", "go", "func", "(", ")", "{", "resp", ",", "err", ":=", "q", ".", "send", "(", ")", "\n", "if", "err", "!=", "nil", "{", "dom", ".", "GetWindow", "(", ")", ".", "Alert", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "err", ")", ")", "\n", "}", "\n", "q", ".", "Callback", "(", "string", "(", "resp", ")", ")", "\n", "}", "(", ")", "\n", "}" ]
// Send sends the search query, and runs the Query's callback on success. It // returns immediately if there's already a query in flight.
[ "Send", "sends", "the", "search", "query", "and", "runs", "the", "Query", "s", "callback", "on", "success", ".", "It", "returns", "immediately", "if", "there", "s", "already", "a", "query", "in", "flight", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/server/perkeepd/ui/goui/mapquery/mapquery.go#L90-L103
train
perkeep/perkeep
pkg/blobserver/diskpacked/punch_linux.go
punchHoleLinux
func punchHoleLinux(file *os.File, offset int64, size int64) error { err := syscall.Fallocate(int(file.Fd()), falloc_fl_punch_hole|falloc_fl_keep_size, offset, size) if err == syscall.ENOSYS || err == syscall.EOPNOTSUPP { return errNoPunch } return err }
go
func punchHoleLinux(file *os.File, offset int64, size int64) error { err := syscall.Fallocate(int(file.Fd()), falloc_fl_punch_hole|falloc_fl_keep_size, offset, size) if err == syscall.ENOSYS || err == syscall.EOPNOTSUPP { return errNoPunch } return err }
[ "func", "punchHoleLinux", "(", "file", "*", "os", ".", "File", ",", "offset", "int64", ",", "size", "int64", ")", "error", "{", "err", ":=", "syscall", ".", "Fallocate", "(", "int", "(", "file", ".", "Fd", "(", ")", ")", ",", "falloc_fl_punch_hole", "|", "falloc_fl_keep_size", ",", "offset", ",", "size", ")", "\n", "if", "err", "==", "syscall", ".", "ENOSYS", "||", "err", "==", "syscall", ".", "EOPNOTSUPP", "{", "return", "errNoPunch", "\n", "}", "\n", "return", "err", "\n", "}" ]
// punchHoleLinux punches a hole into the given file starting at offset, // measuring "size" bytes
[ "punchHoleLinux", "punches", "a", "hole", "into", "the", "given", "file", "starting", "at", "offset", "measuring", "size", "bytes" ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/punch_linux.go#L38-L46
train
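For reference, the same operation written against golang.org/x/sys/unix, which exports the FALLOC_* constants that this record defines locally (sketch; the error message wording is mine).

package example // hypothetical

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// punchHole deallocates size bytes at offset while keeping the file length,
// mirroring what punchHoleLinux does with syscall.Fallocate.
func punchHole(f *os.File, offset, size int64) error {
	err := unix.Fallocate(int(f.Fd()),
		unix.FALLOC_FL_PUNCH_HOLE|unix.FALLOC_FL_KEEP_SIZE, offset, size)
	if err == unix.ENOSYS || err == unix.EOPNOTSUPP {
		return fmt.Errorf("filesystem does not support hole punching: %w", err)
	}
	return err
}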
perkeep/perkeep
pkg/schema/filereader.go
NewFileReader
func NewFileReader(ctx context.Context, fetcher blob.Fetcher, fileBlobRef blob.Ref) (*FileReader, error) { // TODO(bradfitz): rename this into bytes reader? but for now it's still // named FileReader, but can also read a "bytes" schema. if !fileBlobRef.Valid() { return nil, errors.New("schema/filereader: NewFileReader blobref invalid") } rc, _, err := fetcher.Fetch(ctx, fileBlobRef) if err != nil { return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err) } defer rc.Close() ss, err := parseSuperset(rc) if err != nil { return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err) } ss.BlobRef = fileBlobRef if ss.Type != "file" && ss.Type != "bytes" { return nil, fmt.Errorf("schema/filereader: expected \"file\" or \"bytes\" schema blob, got %q", ss.Type) } fr, err := ss.NewFileReader(fetcher) if err != nil { return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err) } return fr, nil }
go
func NewFileReader(ctx context.Context, fetcher blob.Fetcher, fileBlobRef blob.Ref) (*FileReader, error) { // TODO(bradfitz): rename this into bytes reader? but for now it's still // named FileReader, but can also read a "bytes" schema. if !fileBlobRef.Valid() { return nil, errors.New("schema/filereader: NewFileReader blobref invalid") } rc, _, err := fetcher.Fetch(ctx, fileBlobRef) if err != nil { return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err) } defer rc.Close() ss, err := parseSuperset(rc) if err != nil { return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err) } ss.BlobRef = fileBlobRef if ss.Type != "file" && ss.Type != "bytes" { return nil, fmt.Errorf("schema/filereader: expected \"file\" or \"bytes\" schema blob, got %q", ss.Type) } fr, err := ss.NewFileReader(fetcher) if err != nil { return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err) } return fr, nil }
[ "func", "NewFileReader", "(", "ctx", "context", ".", "Context", ",", "fetcher", "blob", ".", "Fetcher", ",", "fileBlobRef", "blob", ".", "Ref", ")", "(", "*", "FileReader", ",", "error", ")", "{", "// TODO(bradfitz): rename this into bytes reader? but for now it's still", "// named FileReader, but can also read a \"bytes\" schema.", "if", "!", "fileBlobRef", ".", "Valid", "(", ")", "{", "return", "nil", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "rc", ",", "_", ",", "err", ":=", "fetcher", ".", "Fetch", "(", "ctx", ",", "fileBlobRef", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "defer", "rc", ".", "Close", "(", ")", "\n", "ss", ",", "err", ":=", "parseSuperset", "(", "rc", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "ss", ".", "BlobRef", "=", "fileBlobRef", "\n", "if", "ss", ".", "Type", "!=", "\"", "\"", "&&", "ss", ".", "Type", "!=", "\"", "\"", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\\\"", "\\\"", "\\\"", "\\\"", "\"", ",", "ss", ".", "Type", ")", "\n", "}", "\n", "fr", ",", "err", ":=", "ss", ".", "NewFileReader", "(", "fetcher", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "fileBlobRef", ",", "err", ")", "\n", "}", "\n", "return", "fr", ",", "nil", "\n", "}" ]
// NewFileReader returns a new FileReader reading the contents of fileBlobRef, // fetching blobs from fetcher. The fileBlobRef must be of a "bytes" or "file" // schema blob. // // The caller should call Close on the FileReader when done reading.
[ "NewFileReader", "returns", "a", "new", "FileReader", "reading", "the", "contents", "of", "fileBlobRef", "fetching", "blobs", "from", "fetcher", ".", "The", "fileBlobRef", "must", "be", "of", "a", "bytes", "or", "file", "schema", "blob", ".", "The", "caller", "should", "call", "Close", "on", "the", "FileReader", "when", "done", "reading", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/schema/filereader.go#L71-L95
train
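Typical use of the package-level constructor (illustrative): any blob.Fetcher will do for the source, and the FileReader reads like an ordinary io.Reader because it embeds an *io.SectionReader. Import paths are assumptions.

package example // hypothetical

import (
	"context"
	"io"
	"os"

	"perkeep.org/pkg/blob"   // assumed import path
	"perkeep.org/pkg/schema" // assumed import path
)

// dumpFile streams the contents of the "file" schema blob fileRef to stdout.
func dumpFile(ctx context.Context, src blob.Fetcher, fileRef blob.Ref) error {
	fr, err := schema.NewFileReader(ctx, src, fileRef)
	if err != nil {
		return err
	}
	defer fr.Close() // per the doc comment above
	_, err = io.Copy(os.Stdout, fr)
	return err
}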
perkeep/perkeep
pkg/schema/filereader.go
NewFileReader
func (ss *superset) NewFileReader(fetcher blob.Fetcher) (*FileReader, error) { if ss.Type != "file" && ss.Type != "bytes" { return nil, fmt.Errorf("schema/filereader: Superset not of type \"file\" or \"bytes\"") } size := int64(ss.SumPartsSize()) fr := &FileReader{ fetcher: fetcher, ss: ss, size: size, ssm: make(map[blob.Ref]*superset), } fr.SectionReader = io.NewSectionReader(fr, 0, size) return fr, nil }
go
func (ss *superset) NewFileReader(fetcher blob.Fetcher) (*FileReader, error) { if ss.Type != "file" && ss.Type != "bytes" { return nil, fmt.Errorf("schema/filereader: Superset not of type \"file\" or \"bytes\"") } size := int64(ss.SumPartsSize()) fr := &FileReader{ fetcher: fetcher, ss: ss, size: size, ssm: make(map[blob.Ref]*superset), } fr.SectionReader = io.NewSectionReader(fr, 0, size) return fr, nil }
[ "func", "(", "ss", "*", "superset", ")", "NewFileReader", "(", "fetcher", "blob", ".", "Fetcher", ")", "(", "*", "FileReader", ",", "error", ")", "{", "if", "ss", ".", "Type", "!=", "\"", "\"", "&&", "ss", ".", "Type", "!=", "\"", "\"", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\\\"", "\\\"", "\\\"", "\\\"", "\"", ")", "\n", "}", "\n", "size", ":=", "int64", "(", "ss", ".", "SumPartsSize", "(", ")", ")", "\n", "fr", ":=", "&", "FileReader", "{", "fetcher", ":", "fetcher", ",", "ss", ":", "ss", ",", "size", ":", "size", ",", "ssm", ":", "make", "(", "map", "[", "blob", ".", "Ref", "]", "*", "superset", ")", ",", "}", "\n", "fr", ".", "SectionReader", "=", "io", ".", "NewSectionReader", "(", "fr", ",", "0", ",", "size", ")", "\n", "return", "fr", ",", "nil", "\n", "}" ]
// NewFileReader returns a new FileReader, reading bytes and blobs // from the provided fetcher. // // NewFileReader does no fetch operation on the fetcher itself. The // fetcher is only used in subsequent read operations. // // An error is only returned if the type of the superset is not either // "file" or "bytes".
[ "NewFileReader", "returns", "a", "new", "FileReader", "reading", "bytes", "and", "blobs", "from", "the", "provided", "fetcher", ".", "NewFileReader", "does", "no", "fetch", "operation", "on", "the", "fetcher", "itself", ".", "The", "fetcher", "is", "only", "used", "in", "subsequent", "read", "operations", ".", "An", "error", "is", "only", "returned", "if", "the", "type", "of", "the", "superset", "is", "not", "either", "file", "or", "bytes", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/schema/filereader.go#L109-L122
train
perkeep/perkeep
pkg/schema/filereader.go
UnixMtime
func (fr *FileReader) UnixMtime() time.Time { t, err := time.Parse(time.RFC3339, fr.ss.UnixMtime) if err != nil { return time.Time{} } return t }
go
func (fr *FileReader) UnixMtime() time.Time { t, err := time.Parse(time.RFC3339, fr.ss.UnixMtime) if err != nil { return time.Time{} } return t }
[ "func", "(", "fr", "*", "FileReader", ")", "UnixMtime", "(", ")", "time", ".", "Time", "{", "t", ",", "err", ":=", "time", ".", "Parse", "(", "time", ".", "RFC3339", ",", "fr", ".", "ss", ".", "UnixMtime", ")", "\n", "if", "err", "!=", "nil", "{", "return", "time", ".", "Time", "{", "}", "\n", "}", "\n", "return", "t", "\n", "}" ]
// UnixMtime returns the file schema's UnixMtime field, or the zero value.
[ "UnixMtime", "returns", "the", "file", "schema", "s", "UnixMtime", "field", "or", "the", "zero", "value", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/schema/filereader.go#L155-L161
train
perkeep/perkeep
pkg/schema/filereader.go
ForeachChunk
func (fr *FileReader) ForeachChunk(ctx context.Context, fn func(schemaPath []blob.Ref, p BytesPart) error) error { return fr.foreachChunk(ctx, fn, nil) }
go
func (fr *FileReader) ForeachChunk(ctx context.Context, fn func(schemaPath []blob.Ref, p BytesPart) error) error { return fr.foreachChunk(ctx, fn, nil) }
[ "func", "(", "fr", "*", "FileReader", ")", "ForeachChunk", "(", "ctx", "context", ".", "Context", ",", "fn", "func", "(", "schemaPath", "[", "]", "blob", ".", "Ref", ",", "p", "BytesPart", ")", "error", ")", "error", "{", "return", "fr", ".", "foreachChunk", "(", "ctx", ",", "fn", ",", "nil", ")", "\n", "}" ]
// ForeachChunk calls fn for each chunk of fr, in order. // // The schemaPath argument will be the path from the "file" or "bytes" // schema blob down to possibly other "bytes" schema blobs, the final // one of which references the given BytesPart. The BytesPart will be // the actual chunk. The fn function will not be called with // BytesParts referencing a "BytesRef"; those are followed recursively // instead. The fn function must not retain or mutate schemaPath. // // If fn returns an error, iteration stops and that error is returned // from ForeachChunk. Other errors may be returned from ForeachChunk // if schema blob fetches fail.
[ "ForeachChunk", "calls", "fn", "for", "each", "chunk", "of", "fr", "in", "order", ".", "The", "schemaPath", "argument", "will", "be", "the", "path", "from", "the", "file", "or", "bytes", "schema", "blob", "down", "to", "possibly", "other", "bytes", "schema", "blobs", "the", "final", "one", "of", "which", "references", "the", "given", "BytesPart", ".", "The", "BytesPart", "will", "be", "the", "actual", "chunk", ".", "The", "fn", "function", "will", "not", "be", "called", "with", "BytesParts", "referencing", "a", "BytesRef", ";", "those", "are", "followed", "recursively", "instead", ".", "The", "fn", "function", "must", "not", "retain", "or", "mutate", "schemaPath", ".", "If", "fn", "returns", "an", "error", "iteration", "stops", "and", "that", "error", "is", "returned", "from", "ForeachChunk", ".", "Other", "errors", "may", "be", "returned", "from", "ForeachChunk", "if", "schema", "blob", "fetches", "fail", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/schema/filereader.go#L219-L221
train
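A walk over a file's chunks using ForeachChunk (illustrative). The Size field on schema.BytesPart is an assumption about that struct's shape; everything else follows the signature above.

package example // hypothetical

import (
	"context"
	"fmt"

	"perkeep.org/pkg/blob"   // assumed import path
	"perkeep.org/pkg/schema" // assumed import path
)

// chunkStats counts the leaf chunks of fr and totals their sizes.
func chunkStats(ctx context.Context, fr *schema.FileReader) error {
	var n int
	var total uint64
	err := fr.ForeachChunk(ctx, func(schemaPath []blob.Ref, p schema.BytesPart) error {
		n++
		total += p.Size // assumed field name on BytesPart
		return nil      // returning an error here would stop the walk
	})
	if err != nil {
		return err
	}
	fmt.Printf("%d chunks, %d bytes\n", n, total)
	return nil
}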
perkeep/perkeep
internal/media/audio.go
HasID3v1Tag
func HasID3v1Tag(r readerutil.SizeReaderAt) (bool, error) { if r.Size() < ID3v1TagLength { return false, nil } buf := make([]byte, len(id3v1Magic)) if _, err := r.ReadAt(buf, r.Size()-ID3v1TagLength); err != nil { return false, fmt.Errorf("Failed to read ID3v1 data: %v", err) } if bytes.Equal(buf, id3v1Magic) { return true, nil } return false, nil }
go
func HasID3v1Tag(r readerutil.SizeReaderAt) (bool, error) { if r.Size() < ID3v1TagLength { return false, nil } buf := make([]byte, len(id3v1Magic)) if _, err := r.ReadAt(buf, r.Size()-ID3v1TagLength); err != nil { return false, fmt.Errorf("Failed to read ID3v1 data: %v", err) } if bytes.Equal(buf, id3v1Magic) { return true, nil } return false, nil }
[ "func", "HasID3v1Tag", "(", "r", "readerutil", ".", "SizeReaderAt", ")", "(", "bool", ",", "error", ")", "{", "if", "r", ".", "Size", "(", ")", "<", "ID3v1TagLength", "{", "return", "false", ",", "nil", "\n", "}", "\n\n", "buf", ":=", "make", "(", "[", "]", "byte", ",", "len", "(", "id3v1Magic", ")", ")", "\n", "if", "_", ",", "err", ":=", "r", ".", "ReadAt", "(", "buf", ",", "r", ".", "Size", "(", ")", "-", "ID3v1TagLength", ")", ";", "err", "!=", "nil", "{", "return", "false", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "if", "bytes", ".", "Equal", "(", "buf", ",", "id3v1Magic", ")", "{", "return", "true", ",", "nil", "\n", "}", "\n", "return", "false", ",", "nil", "\n", "}" ]
// HasID3v1Tag returns true if an ID3v1 tag is present at the end of r.
[ "HasID3v1Tag", "returns", "true", "if", "an", "ID3v1", "tag", "is", "present", "at", "the", "end", "of", "r", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/media/audio.go#L38-L51
train
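Checking a local file (illustrative): an *io.SectionReader provides both ReadAt and Size, which is what readerutil.SizeReaderAt is assumed to require. Since the media package lives under internal/, a caller like this would have to sit inside the perkeep module.

package example // hypothetical

import (
	"fmt"
	"io"
	"os"

	"perkeep.org/internal/media" // assumed (internal) import path
)

// hasTag reports whether the file at path ends with an ID3v1 tag.
func hasTag(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return false, err
	}
	sr := io.NewSectionReader(f, 0, fi.Size())
	tagged, err := media.HasID3v1Tag(sr)
	if err != nil {
		return false, err
	}
	fmt.Printf("%s: ID3v1 tag present: %v\n", path, tagged)
	return tagged, nil
}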
perkeep/perkeep
app/publisher/js/file.go
renderFile
func renderFile() { var err error theFic, err = newFileItemContainer(fileThumbnailHeight) if err != nil { fmt.Printf("error creating file container: %v\n", err) return } if err := theFic.populate(); err != nil { fmt.Printf("Error initializing file container: %v", err) return } if err := theFic.render(); err != nil { fmt.Printf("Error rendering file container: %v", err) return } jQuery(js.Global).Call(jquery.KEYUP, func(e jquery.Event) { if e.Which == 37 { theFic.doPrev() go func() { if err := theFic.render(); err != nil { fmt.Printf("Error rendering file container: %v", err) } }() return } if e.Which == 39 { theFic.doNext() go func() { if err := theFic.render(); err != nil { fmt.Printf("Error rendering file container: %v", err) } }() return } }) }
go
func renderFile() { var err error theFic, err = newFileItemContainer(fileThumbnailHeight) if err != nil { fmt.Printf("error creating file container: %v\n", err) return } if err := theFic.populate(); err != nil { fmt.Printf("Error initializing file container: %v", err) return } if err := theFic.render(); err != nil { fmt.Printf("Error rendering file container: %v", err) return } jQuery(js.Global).Call(jquery.KEYUP, func(e jquery.Event) { if e.Which == 37 { theFic.doPrev() go func() { if err := theFic.render(); err != nil { fmt.Printf("Error rendering file container: %v", err) } }() return } if e.Which == 39 { theFic.doNext() go func() { if err := theFic.render(); err != nil { fmt.Printf("Error rendering file container: %v", err) } }() return } }) }
[ "func", "renderFile", "(", ")", "{", "var", "err", "error", "\n", "theFic", ",", "err", "=", "newFileItemContainer", "(", "fileThumbnailHeight", ")", "\n", "if", "err", "!=", "nil", "{", "fmt", ".", "Printf", "(", "\"", "\\n", "\"", ",", "err", ")", "\n", "return", "\n", "}", "\n", "if", "err", ":=", "theFic", ".", "populate", "(", ")", ";", "err", "!=", "nil", "{", "fmt", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "return", "\n", "}", "\n", "if", "err", ":=", "theFic", ".", "render", "(", ")", ";", "err", "!=", "nil", "{", "fmt", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "return", "\n", "}", "\n\n", "jQuery", "(", "js", ".", "Global", ")", ".", "Call", "(", "jquery", ".", "KEYUP", ",", "func", "(", "e", "jquery", ".", "Event", ")", "{", "if", "e", ".", "Which", "==", "37", "{", "theFic", ".", "doPrev", "(", ")", "\n", "go", "func", "(", ")", "{", "if", "err", ":=", "theFic", ".", "render", "(", ")", ";", "err", "!=", "nil", "{", "fmt", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "}", "(", ")", "\n", "return", "\n", "}", "\n", "if", "e", ".", "Which", "==", "39", "{", "theFic", ".", "doNext", "(", ")", "\n", "go", "func", "(", ")", "{", "if", "err", ":=", "theFic", ".", "render", "(", ")", ";", "err", "!=", "nil", "{", "fmt", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "}", "(", ")", "\n", "return", "\n", "}", "\n", "}", ")", "\n", "}" ]
// renderFile creates a fileItemContainer, populates it, renders it, and // binds the left and right arrow keys to it for navigation.
[ "renderFile", "creates", "a", "fileItemContainer", "populates", "it", "renders", "it", "and", "binds", "the", "left", "and", "right", "arrow", "keys", "to", "it", "for", "navigation", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/app/publisher/js/file.go#L58-L94
train
perkeep/perkeep
internal/closure/gendeps.go
GenDepsWithPath
func GenDepsWithPath(pathPrefix string, root http.FileSystem) ([]byte, error) { d, err := root.Open("/") if err != nil { return nil, fmt.Errorf("Failed to open root of %v: %v", root, err) } fi, err := d.Stat() if err != nil { return nil, err } if !fi.IsDir() { return nil, fmt.Errorf("root of %v is not a dir", root) } ent, err := d.Readdir(-1) if err != nil { return nil, fmt.Errorf("Could not read dir entries of root: %v", err) } var buf bytes.Buffer for _, info := range ent { name := info.Name() if !strings.HasSuffix(name, ".js") { continue } if strings.HasPrefix(name, ".#") { // Emacs noise. continue } if strings.HasPrefix(name, "goui.js") { // because it is too large for bufio.Scanner continue } f, err := root.Open(name) if err != nil { return nil, fmt.Errorf("Could not open %v: %v", name, err) } prov, req, err := parseProvidesRequires(info, name, f) f.Close() if err != nil { return nil, fmt.Errorf("Could not parse deps for %v: %v", name, err) } if len(prov) > 0 { fmt.Fprintf(&buf, "goog.addDependency(%q, %v, %v);\n", pathPrefix+name, jsList(prov), jsList(req)) } } return buf.Bytes(), nil }
go
func GenDepsWithPath(pathPrefix string, root http.FileSystem) ([]byte, error) { d, err := root.Open("/") if err != nil { return nil, fmt.Errorf("Failed to open root of %v: %v", root, err) } fi, err := d.Stat() if err != nil { return nil, err } if !fi.IsDir() { return nil, fmt.Errorf("root of %v is not a dir", root) } ent, err := d.Readdir(-1) if err != nil { return nil, fmt.Errorf("Could not read dir entries of root: %v", err) } var buf bytes.Buffer for _, info := range ent { name := info.Name() if !strings.HasSuffix(name, ".js") { continue } if strings.HasPrefix(name, ".#") { // Emacs noise. continue } if strings.HasPrefix(name, "goui.js") { // because it is too large for bufio.Scanner continue } f, err := root.Open(name) if err != nil { return nil, fmt.Errorf("Could not open %v: %v", name, err) } prov, req, err := parseProvidesRequires(info, name, f) f.Close() if err != nil { return nil, fmt.Errorf("Could not parse deps for %v: %v", name, err) } if len(prov) > 0 { fmt.Fprintf(&buf, "goog.addDependency(%q, %v, %v);\n", pathPrefix+name, jsList(prov), jsList(req)) } } return buf.Bytes(), nil }
[ "func", "GenDepsWithPath", "(", "pathPrefix", "string", ",", "root", "http", ".", "FileSystem", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "d", ",", "err", ":=", "root", ".", "Open", "(", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "root", ",", "err", ")", "\n", "}", "\n", "fi", ",", "err", ":=", "d", ".", "Stat", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "if", "!", "fi", ".", "IsDir", "(", ")", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "root", ")", "\n", "}", "\n", "ent", ",", "err", ":=", "d", ".", "Readdir", "(", "-", "1", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "var", "buf", "bytes", ".", "Buffer", "\n", "for", "_", ",", "info", ":=", "range", "ent", "{", "name", ":=", "info", ".", "Name", "(", ")", "\n", "if", "!", "strings", ".", "HasSuffix", "(", "name", ",", "\"", "\"", ")", "{", "continue", "\n", "}", "\n", "if", "strings", ".", "HasPrefix", "(", "name", ",", "\"", "\"", ")", "{", "// Emacs noise.", "continue", "\n", "}", "\n", "if", "strings", ".", "HasPrefix", "(", "name", ",", "\"", "\"", ")", "{", "// because it is too large for bufio.Scanner", "continue", "\n", "}", "\n", "f", ",", "err", ":=", "root", ".", "Open", "(", "name", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "name", ",", "err", ")", "\n", "}", "\n", "prov", ",", "req", ",", "err", ":=", "parseProvidesRequires", "(", "info", ",", "name", ",", "f", ")", "\n", "f", ".", "Close", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "name", ",", "err", ")", "\n", "}", "\n", "if", "len", "(", "prov", ")", ">", "0", "{", "fmt", ".", "Fprintf", "(", "&", "buf", ",", "\"", "\\n", "\"", ",", "pathPrefix", "+", "name", ",", "jsList", "(", "prov", ")", ",", "jsList", "(", "req", ")", ")", "\n", "}", "\n", "}", "\n", "return", "buf", ".", "Bytes", "(", ")", ",", "nil", "\n", "}" ]
// GenDepsWithPath is like GenDeps, but you can specify a path where the files are to be found at runtime relative to Closure's base.js.
[ "GenDepsWithPath", "is", "like", "GenDeps", "but", "you", "can", "specify", "a", "path", "where", "the", "files", "are", "to", "be", "found", "at", "runtime", "relative", "to", "Closure", "s", "base", ".", "js", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/closure/gendeps.go#L47-L91
train
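A minimal caller (illustrative): point GenDepsWithPath at a directory of .js files via http.Dir and write the result out as a deps file. The path prefix and output name are arbitrary; the import path is an assumption (and internal/, so the caller must live in-tree).

package example // hypothetical

import (
	"net/http"
	"os"

	"perkeep.org/internal/closure" // assumed (internal) import path
)

// writeDeps generates goog.addDependency lines for jsDir and saves them.
func writeDeps(jsDir, outFile string) error {
	deps, err := closure.GenDepsWithPath("../../ui/", http.Dir(jsDir))
	if err != nil {
		return err
	}
	return os.WriteFile(outFile, deps, 0644)
}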
perkeep/perkeep
internal/closure/gendeps.go
ParseDeps
func ParseDeps(r io.Reader) (providedBy map[string]string, requires map[string][]string, err error) { providedBy = make(map[string]string) requires = make(map[string][]string) scanner := bufio.NewScanner(r) for scanner.Scan() { l := scanner.Text() if strings.HasPrefix(l, "//") { continue } if l == "" { continue } m := depsRx.FindStringSubmatch(l) if m == nil { return nil, nil, fmt.Errorf("Invalid line in deps: %q", l) } jsfile := m[1] provides := strings.Split(m[2], ", ") var required []string if m[5] != "" { required = strings.Split( strings.Replace(strings.Replace(m[5], "'", "", -1), `"`, "", -1), ", ") } for _, v := range provides { namespace := strings.Trim(v, `'"`) if otherjs, ok := providedBy[namespace]; ok { return nil, nil, fmt.Errorf("Name %v is provided by both %v and %v", namespace, jsfile, otherjs) } providedBy[namespace] = jsfile if _, ok := requires[namespace]; ok { return nil, nil, fmt.Errorf("Name %v has two sets of dependencies", namespace) } if required != nil { requires[namespace] = required } } } if err := scanner.Err(); err != nil { return nil, nil, err } return providedBy, requires, nil }
go
func ParseDeps(r io.Reader) (providedBy map[string]string, requires map[string][]string, err error) { providedBy = make(map[string]string) requires = make(map[string][]string) scanner := bufio.NewScanner(r) for scanner.Scan() { l := scanner.Text() if strings.HasPrefix(l, "//") { continue } if l == "" { continue } m := depsRx.FindStringSubmatch(l) if m == nil { return nil, nil, fmt.Errorf("Invalid line in deps: %q", l) } jsfile := m[1] provides := strings.Split(m[2], ", ") var required []string if m[5] != "" { required = strings.Split( strings.Replace(strings.Replace(m[5], "'", "", -1), `"`, "", -1), ", ") } for _, v := range provides { namespace := strings.Trim(v, `'"`) if otherjs, ok := providedBy[namespace]; ok { return nil, nil, fmt.Errorf("Name %v is provided by both %v and %v", namespace, jsfile, otherjs) } providedBy[namespace] = jsfile if _, ok := requires[namespace]; ok { return nil, nil, fmt.Errorf("Name %v has two sets of dependencies", namespace) } if required != nil { requires[namespace] = required } } } if err := scanner.Err(); err != nil { return nil, nil, err } return providedBy, requires, nil }
[ "func", "ParseDeps", "(", "r", "io", ".", "Reader", ")", "(", "providedBy", "map", "[", "string", "]", "string", ",", "requires", "map", "[", "string", "]", "[", "]", "string", ",", "err", "error", ")", "{", "providedBy", "=", "make", "(", "map", "[", "string", "]", "string", ")", "\n", "requires", "=", "make", "(", "map", "[", "string", "]", "[", "]", "string", ")", "\n", "scanner", ":=", "bufio", ".", "NewScanner", "(", "r", ")", "\n", "for", "scanner", ".", "Scan", "(", ")", "{", "l", ":=", "scanner", ".", "Text", "(", ")", "\n", "if", "strings", ".", "HasPrefix", "(", "l", ",", "\"", "\"", ")", "{", "continue", "\n", "}", "\n", "if", "l", "==", "\"", "\"", "{", "continue", "\n", "}", "\n", "m", ":=", "depsRx", ".", "FindStringSubmatch", "(", "l", ")", "\n", "if", "m", "==", "nil", "{", "return", "nil", ",", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "l", ")", "\n", "}", "\n", "jsfile", ":=", "m", "[", "1", "]", "\n", "provides", ":=", "strings", ".", "Split", "(", "m", "[", "2", "]", ",", "\"", "\"", ")", "\n", "var", "required", "[", "]", "string", "\n", "if", "m", "[", "5", "]", "!=", "\"", "\"", "{", "required", "=", "strings", ".", "Split", "(", "strings", ".", "Replace", "(", "strings", ".", "Replace", "(", "m", "[", "5", "]", ",", "\"", "\"", ",", "\"", "\"", ",", "-", "1", ")", ",", "`\"`", ",", "\"", "\"", ",", "-", "1", ")", ",", "\"", "\"", ")", "\n", "}", "\n", "for", "_", ",", "v", ":=", "range", "provides", "{", "namespace", ":=", "strings", ".", "Trim", "(", "v", ",", "`'\"`", ")", "\n", "if", "otherjs", ",", "ok", ":=", "providedBy", "[", "namespace", "]", ";", "ok", "{", "return", "nil", ",", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "namespace", ",", "jsfile", ",", "otherjs", ")", "\n", "}", "\n", "providedBy", "[", "namespace", "]", "=", "jsfile", "\n", "if", "_", ",", "ok", ":=", "requires", "[", "namespace", "]", ";", "ok", "{", "return", "nil", ",", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "namespace", ")", "\n", "}", "\n", "if", "required", "!=", "nil", "{", "requires", "[", "namespace", "]", "=", "required", "\n", "}", "\n", "}", "\n", "}", "\n", "if", "err", ":=", "scanner", ".", "Err", "(", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "nil", ",", "err", "\n", "}", "\n", "return", "providedBy", ",", "requires", ",", "nil", "\n", "}" ]
// ParseDeps reads closure namespace dependency lines and // returns a map giving the js file provider for each namespace, // and a map giving the namespace dependencies for each namespace.
[ "ParseDeps", "reads", "closure", "namespace", "dependency", "lines", "and", "returns", "a", "map", "giving", "the", "js", "file", "provider", "for", "each", "namespace", "and", "a", "map", "giving", "the", "namespace", "dependencies", "for", "each", "namespace", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/closure/gendeps.go#L162-L203
train
perkeep/perkeep
internal/closure/gendeps.go
DeepParseDeps
func DeepParseDeps(r io.Reader) (map[string][]string, error) { providedBy, requires, err := ParseDeps(r) if err != nil { return nil, err } filesDeps := make(map[string][]string) var deeperDeps func(namespace string) []string deeperDeps = func(namespace string) []string { if jsdeps, ok := filesDeps[namespace]; ok { return jsdeps } jsfiles := []string{providedBy[namespace]} for _, dep := range requires[namespace] { jsfiles = append(jsfiles, deeperDeps(dep)...) } return jsfiles } for namespace := range providedBy { filesDeps[namespace] = deeperDeps(namespace) } return filesDeps, nil }
go
func DeepParseDeps(r io.Reader) (map[string][]string, error) { providedBy, requires, err := ParseDeps(r) if err != nil { return nil, err } filesDeps := make(map[string][]string) var deeperDeps func(namespace string) []string deeperDeps = func(namespace string) []string { if jsdeps, ok := filesDeps[namespace]; ok { return jsdeps } jsfiles := []string{providedBy[namespace]} for _, dep := range requires[namespace] { jsfiles = append(jsfiles, deeperDeps(dep)...) } return jsfiles } for namespace := range providedBy { filesDeps[namespace] = deeperDeps(namespace) } return filesDeps, nil }
[ "func", "DeepParseDeps", "(", "r", "io", ".", "Reader", ")", "(", "map", "[", "string", "]", "[", "]", "string", ",", "error", ")", "{", "providedBy", ",", "requires", ",", "err", ":=", "ParseDeps", "(", "r", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "filesDeps", ":=", "make", "(", "map", "[", "string", "]", "[", "]", "string", ")", "\n", "var", "deeperDeps", "func", "(", "namespace", "string", ")", "[", "]", "string", "\n", "deeperDeps", "=", "func", "(", "namespace", "string", ")", "[", "]", "string", "{", "if", "jsdeps", ",", "ok", ":=", "filesDeps", "[", "namespace", "]", ";", "ok", "{", "return", "jsdeps", "\n", "}", "\n", "jsfiles", ":=", "[", "]", "string", "{", "providedBy", "[", "namespace", "]", "}", "\n", "for", "_", ",", "dep", ":=", "range", "requires", "[", "namespace", "]", "{", "jsfiles", "=", "append", "(", "jsfiles", ",", "deeperDeps", "(", "dep", ")", "...", ")", "\n", "}", "\n", "return", "jsfiles", "\n", "}", "\n", "for", "namespace", ":=", "range", "providedBy", "{", "filesDeps", "[", "namespace", "]", "=", "deeperDeps", "(", "namespace", ")", "\n", "}", "\n", "return", "filesDeps", ",", "nil", "\n", "}" ]
// DeepParseDeps reads closure namespace dependency lines and // returns a map giving all the required js files for each namespace.
[ "DeepParseDeps", "reads", "closure", "namespace", "dependency", "lines", "and", "returns", "a", "map", "giving", "all", "the", "required", "js", "files", "for", "each", "namespace", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/closure/gendeps.go#L207-L228
train
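The transitive expansion that DeepParseDeps performs can be illustrated on its own. The sketch below is not the package API: the namespaces, file names, and the memoized expand helper are invented for the example.

```go
package main

import "fmt"

func main() {
	// providedBy and requires mirror the two maps returned by ParseDeps
	// (hypothetical sample data).
	providedBy := map[string]string{
		"cam.A": "a.js",
		"cam.B": "b.js",
		"cam.C": "c.js",
	}
	requires := map[string][]string{
		"cam.A": {"cam.B"},
		"cam.B": {"cam.C"},
	}

	// expand memoizes the full file list needed for a namespace,
	// much like deeperDeps does inside DeepParseDeps.
	memo := map[string][]string{}
	var expand func(ns string) []string
	expand = func(ns string) []string {
		if files, ok := memo[ns]; ok {
			return files
		}
		files := []string{providedBy[ns]}
		for _, dep := range requires[ns] {
			files = append(files, expand(dep)...)
		}
		memo[ns] = files
		return files
	}

	for ns := range providedBy {
		fmt.Println(ns, "->", expand(ns))
	}
}
```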
perkeep/perkeep
pkg/types/clientconfig/config.go
Alias
func (conf *Config) Alias(server string) string { longestMatch := "" serverAlias := "" for alias, serverConf := range conf.Servers { if strings.HasPrefix(server, serverConf.Server) { if len(serverConf.Server) > len(longestMatch) { longestMatch = serverConf.Server serverAlias = alias } } } return serverAlias }
go
func (conf *Config) Alias(server string) string { longestMatch := "" serverAlias := "" for alias, serverConf := range conf.Servers { if strings.HasPrefix(server, serverConf.Server) { if len(serverConf.Server) > len(longestMatch) { longestMatch = serverConf.Server serverAlias = alias } } } return serverAlias }
[ "func", "(", "conf", "*", "Config", ")", "Alias", "(", "server", "string", ")", "string", "{", "longestMatch", ":=", "\"", "\"", "\n", "serverAlias", ":=", "\"", "\"", "\n", "for", "alias", ",", "serverConf", ":=", "range", "conf", ".", "Servers", "{", "if", "strings", ".", "HasPrefix", "(", "server", ",", "serverConf", ".", "Server", ")", "{", "if", "len", "(", "serverConf", ".", "Server", ")", ">", "len", "(", "longestMatch", ")", "{", "longestMatch", "=", "serverConf", ".", "Server", "\n", "serverAlias", "=", "alias", "\n", "}", "\n", "}", "\n", "}", "\n", "return", "serverAlias", "\n", "}" ]
// Alias returns the alias of the server from conf that matches server, or the // empty string if no match. A match means the server from the config is a // prefix of the input server. The longest match prevails.
[ "Alias", "returns", "the", "alias", "of", "the", "server", "from", "conf", "that", "matches", "server", "or", "the", "empty", "string", "if", "no", "match", ".", "A", "match", "means", "the", "server", "from", "the", "config", "is", "a", "prefix", "of", "the", "input", "server", ".", "The", "longest", "match", "prevails", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/types/clientconfig/config.go#L52-L64
train
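The longest-prefix selection in Alias reduces to a small loop; here is a standalone sketch with a plain map standing in for conf.Servers (the aliases and URLs are made up).

```go
package main

import (
	"fmt"
	"strings"
)

// alias mirrors Config.Alias: pick the alias whose configured server URL
// is the longest prefix of the URL being looked up.
func alias(servers map[string]string, server string) string {
	longest, chosen := "", ""
	for a, configured := range servers {
		if strings.HasPrefix(server, configured) && len(configured) > len(longest) {
			longest, chosen = configured, a
		}
	}
	return chosen
}

func main() {
	servers := map[string]string{
		"home": "https://pk.example.com",
		"blog": "https://pk.example.com/blog",
	}
	fmt.Println(alias(servers, "https://pk.example.com/blog/post/1")) // prints "blog"
}
```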
perkeep/perkeep
pkg/blobserver/replica/replica.go
StatBlobs
func (sto *replicaStorage) StatBlobs(ctx context.Context, blobs []blob.Ref, fn func(blob.SizedRef) error) error { var ( mu sync.Mutex // serializes calls to fn, guards need need = make(map[blob.Ref]bool) failed bool ) for _, br := range blobs { need[br] = true } group, ctx := errgroup.WithContext(ctx) for _, replica := range sto.readReplicas { replica := replica group.Go(func() error { return replica.StatBlobs(ctx, blobs, func(sb blob.SizedRef) error { mu.Lock() defer mu.Unlock() if failed { return nil } if !need[sb.Ref] { // dup, lost race from other replica return nil } delete(need, sb.Ref) if err := fn(sb); err != nil { failed = true return err } return nil }) }) } return group.Wait() }
go
func (sto *replicaStorage) StatBlobs(ctx context.Context, blobs []blob.Ref, fn func(blob.SizedRef) error) error { var ( mu sync.Mutex // serializes calls to fn, guards need need = make(map[blob.Ref]bool) failed bool ) for _, br := range blobs { need[br] = true } group, ctx := errgroup.WithContext(ctx) for _, replica := range sto.readReplicas { replica := replica group.Go(func() error { return replica.StatBlobs(ctx, blobs, func(sb blob.SizedRef) error { mu.Lock() defer mu.Unlock() if failed { return nil } if !need[sb.Ref] { // dup, lost race from other replica return nil } delete(need, sb.Ref) if err := fn(sb); err != nil { failed = true return err } return nil }) }) } return group.Wait() }
[ "func", "(", "sto", "*", "replicaStorage", ")", "StatBlobs", "(", "ctx", "context", ".", "Context", ",", "blobs", "[", "]", "blob", ".", "Ref", ",", "fn", "func", "(", "blob", ".", "SizedRef", ")", "error", ")", "error", "{", "var", "(", "mu", "sync", ".", "Mutex", "// serializes calls to fn, guards need", "\n", "need", "=", "make", "(", "map", "[", "blob", ".", "Ref", "]", "bool", ")", "\n", "failed", "bool", "\n", ")", "\n", "for", "_", ",", "br", ":=", "range", "blobs", "{", "need", "[", "br", "]", "=", "true", "\n", "}", "\n\n", "group", ",", "ctx", ":=", "errgroup", ".", "WithContext", "(", "ctx", ")", "\n\n", "for", "_", ",", "replica", ":=", "range", "sto", ".", "readReplicas", "{", "replica", ":=", "replica", "\n", "group", ".", "Go", "(", "func", "(", ")", "error", "{", "return", "replica", ".", "StatBlobs", "(", "ctx", ",", "blobs", ",", "func", "(", "sb", "blob", ".", "SizedRef", ")", "error", "{", "mu", ".", "Lock", "(", ")", "\n", "defer", "mu", ".", "Unlock", "(", ")", "\n", "if", "failed", "{", "return", "nil", "\n", "}", "\n", "if", "!", "need", "[", "sb", ".", "Ref", "]", "{", "// dup, lost race from other replica", "return", "nil", "\n", "}", "\n", "delete", "(", "need", ",", "sb", ".", "Ref", ")", "\n", "if", "err", ":=", "fn", "(", "sb", ")", ";", "err", "!=", "nil", "{", "failed", "=", "true", "\n", "return", "err", "\n", "}", "\n", "return", "nil", "\n", "}", ")", "\n", "}", ")", "\n", "}", "\n\n", "return", "group", ".", "Wait", "(", ")", "\n", "}" ]
// StatBlobs stats all read replicas.
[ "StatBlobs", "stats", "all", "read", "replicas", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/replica/replica.go#L149-L185
train
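The fan-out used by the replica StatBlobs — one goroutine per read replica under an errgroup, with a mutex-guarded need map so each ref is reported only once — can be sketched with ordinary functions standing in for replicas. Everything below is illustrative; it is not the blobserver API.

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	// Each fake "replica" reports which of the requested items it has.
	replicas := []func(context.Context) []string{
		func(context.Context) []string { return []string{"a", "b"} },
		func(context.Context) []string { return []string{"b", "c"} },
	}

	need := map[string]bool{"a": true, "b": true, "c": true}
	var mu sync.Mutex // guards need (the real code also serializes its callback under it)

	group, ctx := errgroup.WithContext(context.Background())
	for _, r := range replicas {
		r := r
		group.Go(func() error {
			for _, item := range r(ctx) {
				mu.Lock()
				first := need[item]
				if first {
					delete(need, item) // first replica to answer wins; later answers are dups
				}
				mu.Unlock()
				if first {
					fmt.Println("found", item)
				}
			}
			return nil
		})
	}
	if err := group.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```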
perkeep/perkeep
pkg/importer/gphotos/gphotos.go
CallbackURLParameters
func (im imp) CallbackURLParameters(acctRef blob.Ref) url.Values { return url.Values{} }
go
func (im imp) CallbackURLParameters(acctRef blob.Ref) url.Values { return url.Values{} }
[ "func", "(", "im", "imp", ")", "CallbackURLParameters", "(", "acctRef", "blob", ".", "Ref", ")", "url", ".", "Values", "{", "return", "url", ".", "Values", "{", "}", "\n", "}" ]
// CallbackURLParameters returns the needed callback parameters - empty for Google gphotos.
[ "CallbackURLParameters", "returns", "the", "needed", "callback", "parameters", "-", "empty", "for", "Google", "gphotos", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/importer/gphotos/gphotos.go#L149-L151
train
perkeep/perkeep
pkg/importer/gphotos/gphotos.go
decodeToken
func decodeToken(encoded string) *oauth2.Token { t := new(oauth2.Token) f := strings.Fields(encoded) if len(f) > 0 { t.AccessToken = f[0] } if len(f) > 1 { t.RefreshToken = f[1] } if len(f) > 2 && f[2] != "0" { sec, err := strconv.ParseInt(f[2], 10, 64) if err == nil { t.Expiry = time.Unix(sec, 0) } } return t }
go
func decodeToken(encoded string) *oauth2.Token { t := new(oauth2.Token) f := strings.Fields(encoded) if len(f) > 0 { t.AccessToken = f[0] } if len(f) > 1 { t.RefreshToken = f[1] } if len(f) > 2 && f[2] != "0" { sec, err := strconv.ParseInt(f[2], 10, 64) if err == nil { t.Expiry = time.Unix(sec, 0) } } return t }
[ "func", "decodeToken", "(", "encoded", "string", ")", "*", "oauth2", ".", "Token", "{", "t", ":=", "new", "(", "oauth2", ".", "Token", ")", "\n", "f", ":=", "strings", ".", "Fields", "(", "encoded", ")", "\n", "if", "len", "(", "f", ")", ">", "0", "{", "t", ".", "AccessToken", "=", "f", "[", "0", "]", "\n", "}", "\n", "if", "len", "(", "f", ")", ">", "1", "{", "t", ".", "RefreshToken", "=", "f", "[", "1", "]", "\n", "}", "\n", "if", "len", "(", "f", ")", ">", "2", "&&", "f", "[", "2", "]", "!=", "\"", "\"", "{", "sec", ",", "err", ":=", "strconv", ".", "ParseInt", "(", "f", "[", "2", "]", ",", "10", ",", "64", ")", "\n", "if", "err", "==", "nil", "{", "t", ".", "Expiry", "=", "time", ".", "Unix", "(", "sec", ",", "0", ")", "\n", "}", "\n", "}", "\n", "return", "t", "\n", "}" ]
// decodeToken parses an access token, refresh token, and optional // expiry unix timestamp separated by spaces into an oauth2.Token. // It returns as much as it can.
[ "decodeToken", "parses", "an", "access", "token", "refresh", "token", "and", "optional", "expiry", "unix", "timestamp", "separated", "by", "spaces", "into", "an", "oauth2", ".", "Token", ".", "It", "returns", "as", "much", "as", "it", "can", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/importer/gphotos/gphotos.go#L214-L230
train
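decodeToken is unexported, so the following is a sketch of the same space-separated encoding (access token, refresh token, optional Unix expiry) rather than a call into the gphotos package; the token values are fake.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"golang.org/x/oauth2"
)

// decode mirrors the importer's "access refresh expiry" encoding.
func decode(encoded string) *oauth2.Token {
	t := new(oauth2.Token)
	f := strings.Fields(encoded)
	if len(f) > 0 {
		t.AccessToken = f[0]
	}
	if len(f) > 1 {
		t.RefreshToken = f[1]
	}
	if len(f) > 2 && f[2] != "0" {
		if sec, err := strconv.ParseInt(f[2], 10, 64); err == nil {
			t.Expiry = time.Unix(sec, 0)
		}
	}
	return t
}

func main() {
	tok := decode("fake-access fake-refresh 1735689600")
	fmt.Println(tok.AccessToken, tok.RefreshToken, tok.Expiry.UTC())
}
```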
perkeep/perkeep
pkg/blobserver/encrypt/encrypt.go
encryptBlob
func (s *storage) encryptBlob(ciphertext, plaintext []byte) []byte { if s.key == [32]byte{} { // Safety check, we really don't want this to happen. panic("no passphrase set") } var nonce [24]byte s.randNonce(&nonce) ciphertext = append(ciphertext, version) ciphertext = append(ciphertext, nonce[:]...) return secretbox.Seal(ciphertext, plaintext, &nonce, &s.key) }
go
func (s *storage) encryptBlob(ciphertext, plaintext []byte) []byte { if s.key == [32]byte{} { // Safety check, we really don't want this to happen. panic("no passphrase set") } var nonce [24]byte s.randNonce(&nonce) ciphertext = append(ciphertext, version) ciphertext = append(ciphertext, nonce[:]...) return secretbox.Seal(ciphertext, plaintext, &nonce, &s.key) }
[ "func", "(", "s", "*", "storage", ")", "encryptBlob", "(", "ciphertext", ",", "plaintext", "[", "]", "byte", ")", "[", "]", "byte", "{", "if", "s", ".", "key", "==", "[", "32", "]", "byte", "{", "}", "{", "// Safety check, we really don't want this to happen.", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "var", "nonce", "[", "24", "]", "byte", "\n", "s", ".", "randNonce", "(", "&", "nonce", ")", "\n", "ciphertext", "=", "append", "(", "ciphertext", ",", "version", ")", "\n", "ciphertext", "=", "append", "(", "ciphertext", ",", "nonce", "[", ":", "]", "...", ")", "\n", "return", "secretbox", ".", "Seal", "(", "ciphertext", ",", "plaintext", ",", "&", "nonce", ",", "&", "s", ".", "key", ")", "\n", "}" ]
// encryptBlob encrypts plaintext and appends the result to ciphertext, // which must not overlap plaintext.
[ "encryptBlob", "encrypts", "plaintext", "and", "appends", "the", "result", "to", "ciphertext", "which", "must", "not", "overlap", "plaintext", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/encrypt/encrypt.go#L123-L133
train
perkeep/perkeep
pkg/blobserver/encrypt/encrypt.go
decryptBlob
func (s *storage) decryptBlob(plaintext, ciphertext []byte) ([]byte, error) { if len(ciphertext) < overhead { return nil, errors.New("blob too short to be encrypted") } if ciphertext[0] != version { return nil, errors.New("unknown encrypted blob version") } var nonce [24]byte copy(nonce[:], ciphertext[1:]) plaintext, success := secretbox.Open(plaintext, ciphertext[25:], &nonce, &s.key) if !success { return nil, errors.New("encrypted blob failed authentication") } return plaintext, nil }
go
func (s *storage) decryptBlob(plaintext, ciphertext []byte) ([]byte, error) { if len(ciphertext) < overhead { return nil, errors.New("blob too short to be encrypted") } if ciphertext[0] != version { return nil, errors.New("unknown encrypted blob version") } var nonce [24]byte copy(nonce[:], ciphertext[1:]) plaintext, success := secretbox.Open(plaintext, ciphertext[25:], &nonce, &s.key) if !success { return nil, errors.New("encrypted blob failed authentication") } return plaintext, nil }
[ "func", "(", "s", "*", "storage", ")", "decryptBlob", "(", "plaintext", ",", "ciphertext", "[", "]", "byte", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "if", "len", "(", "ciphertext", ")", "<", "overhead", "{", "return", "nil", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "ciphertext", "[", "0", "]", "!=", "version", "{", "return", "nil", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "var", "nonce", "[", "24", "]", "byte", "\n", "copy", "(", "nonce", "[", ":", "]", ",", "ciphertext", "[", "1", ":", "]", ")", "\n", "plaintext", ",", "success", ":=", "secretbox", ".", "Open", "(", "plaintext", ",", "ciphertext", "[", "25", ":", "]", ",", "&", "nonce", ",", "&", "s", ".", "key", ")", "\n", "if", "!", "success", "{", "return", "nil", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "return", "plaintext", ",", "nil", "\n", "}" ]
// decryptBlob decrypts ciphertext and appends the result to plaintext, // which must not overlap ciphertext.
[ "decryptBlob", "decrypts", "ciphertext", "and", "appends", "the", "result", "to", "plaintext", "which", "must", "not", "overlap", "ciphertext", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/encrypt/encrypt.go#L137-L151
train
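The framing used by encryptBlob/decryptBlob — one version byte, a 24-byte nonce, then the NaCl secretbox ciphertext — can be reproduced with golang.org/x/crypto/nacl/secretbox. The version constant and key handling below are stand-ins, not the encrypt package's actual values.

```go
package main

import (
	"crypto/rand"
	"errors"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

const version = 1 // illustrative; the real constant lives in the encrypt package

// seal frames a blob as: version byte | 24-byte nonce | secretbox ciphertext.
func seal(key *[32]byte, plaintext []byte) []byte {
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}
	out := append([]byte{version}, nonce[:]...)
	return secretbox.Seal(out, plaintext, &nonce, key)
}

// open reverses seal, checking length, version, and authentication.
func open(key *[32]byte, blob []byte) ([]byte, error) {
	if len(blob) < 1+24+secretbox.Overhead {
		return nil, errors.New("blob too short to be encrypted")
	}
	if blob[0] != version {
		return nil, errors.New("unknown encrypted blob version")
	}
	var nonce [24]byte
	copy(nonce[:], blob[1:25])
	plain, ok := secretbox.Open(nil, blob[25:], &nonce, key)
	if !ok {
		return nil, errors.New("encrypted blob failed authentication")
	}
	return plain, nil
}

func main() {
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	sealed := seal(&key, []byte("hello"))
	plain, err := open(&key, sealed)
	fmt.Println(string(plain), err)
}
```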
perkeep/perkeep
pkg/sorted/sqlite/dbschema.go
InitDB
func InitDB(path string) error { db, err := sql.Open("sqlite3", path) if err != nil { return err } defer db.Close() for _, tableSQL := range SQLCreateTables() { if _, err := db.Exec(tableSQL); err != nil { return err } } // Use Write Ahead Logging which improves SQLite concurrency. // Requires SQLite >= 3.7.0 if _, err := db.Exec("PRAGMA journal_mode = WAL"); err != nil { return err } // Check if the WAL mode was set correctly var journalMode string if err = db.QueryRow("PRAGMA journal_mode").Scan(&journalMode); err != nil { log.Fatalf("Unable to determine sqlite3 journal_mode: %v", err) } if journalMode != "wal" { log.Fatal("SQLite Write Ahead Logging (introducted in v3.7.0) is required. See http://perkeep.org/issue/114") } _, err = db.Exec(fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, SchemaVersion())) return err }
go
func InitDB(path string) error { db, err := sql.Open("sqlite3", path) if err != nil { return err } defer db.Close() for _, tableSQL := range SQLCreateTables() { if _, err := db.Exec(tableSQL); err != nil { return err } } // Use Write Ahead Logging which improves SQLite concurrency. // Requires SQLite >= 3.7.0 if _, err := db.Exec("PRAGMA journal_mode = WAL"); err != nil { return err } // Check if the WAL mode was set correctly var journalMode string if err = db.QueryRow("PRAGMA journal_mode").Scan(&journalMode); err != nil { log.Fatalf("Unable to determine sqlite3 journal_mode: %v", err) } if journalMode != "wal" { log.Fatal("SQLite Write Ahead Logging (introducted in v3.7.0) is required. See http://perkeep.org/issue/114") } _, err = db.Exec(fmt.Sprintf(`REPLACE INTO meta VALUES ('version', '%d')`, SchemaVersion())) return err }
[ "func", "InitDB", "(", "path", "string", ")", "error", "{", "db", ",", "err", ":=", "sql", ".", "Open", "(", "\"", "\"", ",", "path", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "defer", "db", ".", "Close", "(", ")", "\n", "for", "_", ",", "tableSQL", ":=", "range", "SQLCreateTables", "(", ")", "{", "if", "_", ",", "err", ":=", "db", ".", "Exec", "(", "tableSQL", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "// Use Write Ahead Logging which improves SQLite concurrency.", "// Requires SQLite >= 3.7.0", "if", "_", ",", "err", ":=", "db", ".", "Exec", "(", "\"", "\"", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Check if the WAL mode was set correctly", "var", "journalMode", "string", "\n", "if", "err", "=", "db", ".", "QueryRow", "(", "\"", "\"", ")", ".", "Scan", "(", "&", "journalMode", ")", ";", "err", "!=", "nil", "{", "log", ".", "Fatalf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "if", "journalMode", "!=", "\"", "\"", "{", "log", ".", "Fatal", "(", "\"", "\"", ")", "\n", "}", "\n\n", "_", ",", "err", "=", "db", ".", "Exec", "(", "fmt", ".", "Sprintf", "(", "`REPLACE INTO meta VALUES ('version', '%d')`", ",", "SchemaVersion", "(", ")", ")", ")", "\n", "return", "err", "\n", "}" ]
// InitDB creates a new sqlite database based on the file at path.
[ "InitDB", "creates", "a", "new", "sqlite", "database", "based", "on", "the", "file", "at", "path", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/sorted/sqlite/dbschema.go#L49-L78
train
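A minimal version of what InitDB does for journaling, assuming the mattn/go-sqlite3 driver (which registers itself under the "sqlite3" name used by sql.Open here); the database path is a placeholder.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "/tmp/example.db") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Switch to write-ahead logging and verify it took effect.
	if _, err := db.Exec("PRAGMA journal_mode = WAL"); err != nil {
		log.Fatal(err)
	}
	var mode string
	if err := db.QueryRow("PRAGMA journal_mode").Scan(&mode); err != nil {
		log.Fatal(err)
	}
	fmt.Println("journal_mode:", mode) // expect "wal"
}
```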
perkeep/perkeep
dev/devcam/hook.go
hookGofmt
func (c *hookCmd) hookGofmt() error { if os.Getenv("GIT_GOFMT_HOOK") == "off" { printf("gofmt disabled by $GIT_GOFMT_HOOK=off\n") return nil } files, err := c.runGofmt() if err != nil { printf("gofmt hook reported errors:\n\t%v\n", strings.Replace(strings.TrimSpace(err.Error()), "\n", "\n\t", -1)) return errors.New("gofmt errors") } if len(files) == 0 { return nil } printf("You need to format with gofmt:\n\tgofmt -w %s\n", strings.Join(files, " ")) return errors.New("gofmt required") }
go
func (c *hookCmd) hookGofmt() error { if os.Getenv("GIT_GOFMT_HOOK") == "off" { printf("gofmt disabled by $GIT_GOFMT_HOOK=off\n") return nil } files, err := c.runGofmt() if err != nil { printf("gofmt hook reported errors:\n\t%v\n", strings.Replace(strings.TrimSpace(err.Error()), "\n", "\n\t", -1)) return errors.New("gofmt errors") } if len(files) == 0 { return nil } printf("You need to format with gofmt:\n\tgofmt -w %s\n", strings.Join(files, " ")) return errors.New("gofmt required") }
[ "func", "(", "c", "*", "hookCmd", ")", "hookGofmt", "(", ")", "error", "{", "if", "os", ".", "Getenv", "(", "\"", "\"", ")", "==", "\"", "\"", "{", "printf", "(", "\"", "\\n", "\"", ")", "\n", "return", "nil", "\n", "}", "\n\n", "files", ",", "err", ":=", "c", ".", "runGofmt", "(", ")", "\n", "if", "err", "!=", "nil", "{", "printf", "(", "\"", "\\n", "\\t", "\\n", "\"", ",", "strings", ".", "Replace", "(", "strings", ".", "TrimSpace", "(", "err", ".", "Error", "(", ")", ")", ",", "\"", "\\n", "\"", ",", "\"", "\\n", "\\t", "\"", ",", "-", "1", ")", ")", "\n", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "len", "(", "files", ")", "==", "0", "{", "return", "nil", "\n", "}", "\n", "printf", "(", "\"", "\\n", "\\t", "\\n", "\"", ",", "strings", ".", "Join", "(", "files", ",", "\"", "\"", ")", ")", "\n", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}" ]
// hookGofmt runs a gofmt check on the local files matching the files in the // git staging area. // An error is returned if something went wrong or if some of the files need // gofmting. In the latter case, the instruction is printed.
[ "hookGofmt", "runs", "a", "gofmt", "check", "on", "the", "local", "files", "matching", "the", "files", "in", "the", "git", "staging", "area", ".", "An", "error", "is", "returned", "if", "something", "went", "wrong", "or", "if", "some", "of", "the", "files", "need", "gofmting", ".", "In", "the", "latter", "case", "the", "instruction", "is", "printed", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/dev/devcam/hook.go#L228-L245
train
perkeep/perkeep
dev/devcam/hook.go
runGofmt
func (c *hookCmd) runGofmt() (files []string, err error) { repo, err := repoRoot() if err != nil { return nil, err } if !strings.HasSuffix(repo, string(filepath.Separator)) { repo += string(filepath.Separator) } out, err := cmdOutputDirErr(".", "git", "diff-index", "--name-only", "--diff-filter=ACM", "--cached", "HEAD", "--", ":(glob)**/*.go", ":!/vendor/") if err != nil { return nil, err } indexFiles := addRoot(repo, nonBlankLines(out)) if len(indexFiles) == 0 { return } args := []string{"-l"} // TODO(mpl): it would be nice to TrimPrefix the pwd from each file to get a shorter output. // However, since git sets the pwd to GIT_DIR before running the pre-commit hook, we lost // the actual pwd from when we ran `git commit`, so no dice so far. for _, file := range indexFiles { args = append(args, file) } if c.verbose { fmt.Fprintln(cmdmain.Stderr, commandString("gofmt", args)) } cmd := exec.Command("gofmt", args...) var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr err = cmd.Run() if err != nil { // Error but no stderr: usually can't find gofmt. if stderr.Len() == 0 { return nil, fmt.Errorf("invoking gofmt: %v", err) } return nil, fmt.Errorf("%s: %v", stderr.String(), err) } // Build file list. files = lines(stdout.String()) sort.Strings(files) return files, nil }
go
func (c *hookCmd) runGofmt() (files []string, err error) { repo, err := repoRoot() if err != nil { return nil, err } if !strings.HasSuffix(repo, string(filepath.Separator)) { repo += string(filepath.Separator) } out, err := cmdOutputDirErr(".", "git", "diff-index", "--name-only", "--diff-filter=ACM", "--cached", "HEAD", "--", ":(glob)**/*.go", ":!/vendor/") if err != nil { return nil, err } indexFiles := addRoot(repo, nonBlankLines(out)) if len(indexFiles) == 0 { return } args := []string{"-l"} // TODO(mpl): it would be nice to TrimPrefix the pwd from each file to get a shorter output. // However, since git sets the pwd to GIT_DIR before running the pre-commit hook, we lost // the actual pwd from when we ran `git commit`, so no dice so far. for _, file := range indexFiles { args = append(args, file) } if c.verbose { fmt.Fprintln(cmdmain.Stderr, commandString("gofmt", args)) } cmd := exec.Command("gofmt", args...) var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr err = cmd.Run() if err != nil { // Error but no stderr: usually can't find gofmt. if stderr.Len() == 0 { return nil, fmt.Errorf("invoking gofmt: %v", err) } return nil, fmt.Errorf("%s: %v", stderr.String(), err) } // Build file list. files = lines(stdout.String()) sort.Strings(files) return files, nil }
[ "func", "(", "c", "*", "hookCmd", ")", "runGofmt", "(", ")", "(", "files", "[", "]", "string", ",", "err", "error", ")", "{", "repo", ",", "err", ":=", "repoRoot", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "if", "!", "strings", ".", "HasSuffix", "(", "repo", ",", "string", "(", "filepath", ".", "Separator", ")", ")", "{", "repo", "+=", "string", "(", "filepath", ".", "Separator", ")", "\n", "}", "\n\n", "out", ",", "err", ":=", "cmdOutputDirErr", "(", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "indexFiles", ":=", "addRoot", "(", "repo", ",", "nonBlankLines", "(", "out", ")", ")", "\n", "if", "len", "(", "indexFiles", ")", "==", "0", "{", "return", "\n", "}", "\n\n", "args", ":=", "[", "]", "string", "{", "\"", "\"", "}", "\n", "// TODO(mpl): it would be nice to TrimPrefix the pwd from each file to get a shorter output.", "// However, since git sets the pwd to GIT_DIR before running the pre-commit hook, we lost", "// the actual pwd from when we ran `git commit`, so no dice so far.", "for", "_", ",", "file", ":=", "range", "indexFiles", "{", "args", "=", "append", "(", "args", ",", "file", ")", "\n", "}", "\n\n", "if", "c", ".", "verbose", "{", "fmt", ".", "Fprintln", "(", "cmdmain", ".", "Stderr", ",", "commandString", "(", "\"", "\"", ",", "args", ")", ")", "\n", "}", "\n", "cmd", ":=", "exec", ".", "Command", "(", "\"", "\"", ",", "args", "...", ")", "\n", "var", "stdout", ",", "stderr", "bytes", ".", "Buffer", "\n", "cmd", ".", "Stdout", "=", "&", "stdout", "\n", "cmd", ".", "Stderr", "=", "&", "stderr", "\n", "err", "=", "cmd", ".", "Run", "(", ")", "\n\n", "if", "err", "!=", "nil", "{", "// Error but no stderr: usually can't find gofmt.", "if", "stderr", ".", "Len", "(", ")", "==", "0", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "stderr", ".", "String", "(", ")", ",", "err", ")", "\n", "}", "\n\n", "// Build file list.", "files", "=", "lines", "(", "stdout", ".", "String", "(", ")", ")", "\n", "sort", ".", "Strings", "(", "files", ")", "\n", "return", "files", ",", "nil", "\n", "}" ]
// runGofmt runs the external gofmt command over the local version of staged files. // It returns the files that need gofmting.
[ "runGofmt", "runs", "the", "external", "gofmt", "command", "over", "the", "local", "version", "of", "staged", "files", ".", "It", "returns", "the", "files", "that", "need", "gofmting", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/dev/devcam/hook.go#L260-L307
train
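The heart of runGofmt is shelling out to gofmt -l and treating any listed file as unformatted; stripped of the git plumbing it looks roughly like this (the staged file names are hypothetical).

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	files := []string{"main.go", "pkg/foo/foo.go"} // hypothetical staged files

	// gofmt -l prints the names of files whose formatting differs from gofmt's output.
	out, err := exec.Command("gofmt", append([]string{"-l"}, files...)...).Output()
	if err != nil {
		log.Fatalf("invoking gofmt: %v", err)
	}
	for _, f := range strings.Fields(string(out)) {
		fmt.Println("needs gofmt:", f)
	}
}
```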
perkeep/perkeep
dev/devcam/hook.go
nonBlankLines
func nonBlankLines(text string) []string { var out []string for _, s := range lines(text) { if strings.TrimSpace(s) != "" { out = append(out, s) } } return out }
go
func nonBlankLines(text string) []string { var out []string for _, s := range lines(text) { if strings.TrimSpace(s) != "" { out = append(out, s) } } return out }
[ "func", "nonBlankLines", "(", "text", "string", ")", "[", "]", "string", "{", "var", "out", "[", "]", "string", "\n", "for", "_", ",", "s", ":=", "range", "lines", "(", "text", ")", "{", "if", "strings", ".", "TrimSpace", "(", "s", ")", "!=", "\"", "\"", "{", "out", "=", "append", "(", "out", ",", "s", ")", "\n", "}", "\n", "}", "\n", "return", "out", "\n", "}" ]
// nonBlankLines returns the non-blank lines in text.
[ "nonBlankLines", "returns", "the", "non", "-", "blank", "lines", "in", "text", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/dev/devcam/hook.go#L322-L330
train
perkeep/perkeep
dev/devcam/hook.go
filter
func filter(f func(string) bool, list []string) []string { var out []string for _, x := range list { if f(x) { out = append(out, x) } } return out }
go
func filter(f func(string) bool, list []string) []string { var out []string for _, x := range list { if f(x) { out = append(out, x) } } return out }
[ "func", "filter", "(", "f", "func", "(", "string", ")", "bool", ",", "list", "[", "]", "string", ")", "[", "]", "string", "{", "var", "out", "[", "]", "string", "\n", "for", "_", ",", "x", ":=", "range", "list", "{", "if", "f", "(", "x", ")", "{", "out", "=", "append", "(", "out", ",", "x", ")", "\n", "}", "\n", "}", "\n", "return", "out", "\n", "}" ]
// filter returns the elements in list satisfying f.
[ "filter", "returns", "the", "elements", "in", "list", "satisfying", "f", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/dev/devcam/hook.go#L333-L341
train
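nonBlankLines and filter are small line-list helpers; the sketch below shows how the same filtering composes, with strings.Split standing in for the unexported lines helper that is not shown in these records.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	text := "a.go\n\nb_test.go\nc.go\n"

	// Drop blank lines, as nonBlankLines does.
	var nonBlank []string
	for _, s := range strings.Split(text, "\n") {
		if strings.TrimSpace(s) != "" {
			nonBlank = append(nonBlank, s)
		}
	}

	// Keep only elements satisfying a predicate, as filter does.
	isSource := func(s string) bool { return !strings.HasSuffix(s, "_test.go") }
	var kept []string
	for _, s := range nonBlank {
		if isSource(s) {
			kept = append(kept, s)
		}
	}
	fmt.Println(kept) // [a.go c.go]
}
```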
perkeep/perkeep
internal/osutil/osutil.go
DirExists
func DirExists(dir string) bool { fi, err := os.Stat(dir) return err == nil && fi.IsDir() }
go
func DirExists(dir string) bool { fi, err := os.Stat(dir) return err == nil && fi.IsDir() }
[ "func", "DirExists", "(", "dir", "string", ")", "bool", "{", "fi", ",", "err", ":=", "os", ".", "Stat", "(", "dir", ")", "\n", "return", "err", "==", "nil", "&&", "fi", ".", "IsDir", "(", ")", "\n", "}" ]
// DirExists reports whether dir exists. Errors are ignored and are // reported as false.
[ "DirExists", "reports", "whether", "dir", "exists", ".", "Errors", "are", "ignored", "and", "are", "reported", "as", "false", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/osutil/osutil.go#L33-L36
train
perkeep/perkeep
pkg/types/camtypes/errors.go
Err
func Err(key string) error { v, ok := camErrors[key] if !ok { panic(fmt.Sprintf("unknown/unregistered error key %v", key)) } return v }
go
func Err(key string) error { v, ok := camErrors[key] if !ok { panic(fmt.Sprintf("unknown/unregistered error key %v", key)) } return v }
[ "func", "Err", "(", "key", "string", ")", "error", "{", "v", ",", "ok", ":=", "camErrors", "[", "key", "]", "\n", "if", "!", "ok", "{", "panic", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "key", ")", ")", "\n", "}", "\n", "return", "v", "\n", "}" ]
// Err returns the error registered for key. // It panics for an unregistered key.
[ "Err", "returns", "the", "error", "registered", "for", "key", ".", "It", "panics", "for", "an", "unregistered", "key", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/types/camtypes/errors.go#L68-L74
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
IsDir
func IsDir(dir string) (bool, error) { _, err := os.Stat(filepath.Join(dir, defaultIndexFile)) if os.IsNotExist(err) { return false, nil } return err == nil, err }
go
func IsDir(dir string) (bool, error) { _, err := os.Stat(filepath.Join(dir, defaultIndexFile)) if os.IsNotExist(err) { return false, nil } return err == nil, err }
[ "func", "IsDir", "(", "dir", "string", ")", "(", "bool", ",", "error", ")", "{", "_", ",", "err", ":=", "os", ".", "Stat", "(", "filepath", ".", "Join", "(", "dir", ",", "defaultIndexFile", ")", ")", "\n", "if", "os", ".", "IsNotExist", "(", "err", ")", "{", "return", "false", ",", "nil", "\n", "}", "\n", "return", "err", "==", "nil", ",", "err", "\n", "}" ]
// IsDir reports whether dir is a diskpacked directory.
[ "IsDir", "reports", "whether", "dir", "is", "a", "diskpacked", "directory", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L115-L121
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
New
func New(dir string) (blobserver.Storage, error) { var maxSize int64 if ok, _ := IsDir(dir); ok { // TODO: detect existing max size from size of files, if obvious, // and set maxSize to that? } return newStorage(dir, maxSize, nil) }
go
func New(dir string) (blobserver.Storage, error) { var maxSize int64 if ok, _ := IsDir(dir); ok { // TODO: detect existing max size from size of files, if obvious, // and set maxSize to that? } return newStorage(dir, maxSize, nil) }
[ "func", "New", "(", "dir", "string", ")", "(", "blobserver", ".", "Storage", ",", "error", ")", "{", "var", "maxSize", "int64", "\n", "if", "ok", ",", "_", ":=", "IsDir", "(", "dir", ")", ";", "ok", "{", "// TODO: detect existing max size from size of files, if obvious,", "// and set maxSize to that?", "}", "\n", "return", "newStorage", "(", "dir", ",", "maxSize", ",", "nil", ")", "\n", "}" ]
// New returns a diskpacked storage implementation, adding blobs to // the provided directory. It doesn't delete any existing blob pack // files.
[ "New", "returns", "a", "diskpacked", "storage", "implementation", "adding", "blobs", "to", "the", "provided", "directory", ".", "It", "doesn", "t", "delete", "any", "existing", "blob", "pack", "files", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L126-L133
train
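A hedged usage sketch for diskpacked.New. The import path follows perkeep's perkeep.org module layout and the pack directory is a placeholder, so treat both as assumptions.

```go
package main

import (
	"log"

	"perkeep.org/pkg/blobserver/diskpacked" // assumed import path
)

func main() {
	// New opens (or creates) the pack files and index under the given directory.
	sto, err := diskpacked.New("/var/lib/perkeep/packs") // placeholder directory
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("diskpacked storage ready: %T", sto) // satisfies blobserver.Storage
}
```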
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
newIndex
func newIndex(root string, indexConf jsonconfig.Obj) (sorted.KeyValue, error) { if len(indexConf) > 0 { return sorted.NewKeyValueMaybeWipe(indexConf) } return sorted.NewKeyValueMaybeWipe(jsonconfig.Obj{ "type": defaultIndexType, "file": filepath.Join(root, defaultIndexFile), }) }
go
func newIndex(root string, indexConf jsonconfig.Obj) (sorted.KeyValue, error) { if len(indexConf) > 0 { return sorted.NewKeyValueMaybeWipe(indexConf) } return sorted.NewKeyValueMaybeWipe(jsonconfig.Obj{ "type": defaultIndexType, "file": filepath.Join(root, defaultIndexFile), }) }
[ "func", "newIndex", "(", "root", "string", ",", "indexConf", "jsonconfig", ".", "Obj", ")", "(", "sorted", ".", "KeyValue", ",", "error", ")", "{", "if", "len", "(", "indexConf", ")", ">", "0", "{", "return", "sorted", ".", "NewKeyValueMaybeWipe", "(", "indexConf", ")", "\n", "}", "\n", "return", "sorted", ".", "NewKeyValueMaybeWipe", "(", "jsonconfig", ".", "Obj", "{", "\"", "\"", ":", "defaultIndexType", ",", "\"", "\"", ":", "filepath", ".", "Join", "(", "root", ",", "defaultIndexFile", ")", ",", "}", ")", "\n", "}" ]
// newIndex returns a new sorted.KeyValue, using either the given config, or the default.
[ "newIndex", "returns", "a", "new", "sorted", ".", "KeyValue", "using", "either", "the", "given", "config", "or", "the", "default", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L136-L144
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
openForRead
func (s *storage) openForRead(n int) error { if n > len(s.fds) { panic(fmt.Sprintf("openForRead called out of order got %d, expected %d", n, len(s.fds))) } fn := s.filename(n) f, err := os.Open(fn) if err != nil { return err } openFdsVar.Add(s.root, 1) debug.Printf("diskpacked: opened for read %q", fn) s.fds = append(s.fds, f) return nil }
go
func (s *storage) openForRead(n int) error { if n > len(s.fds) { panic(fmt.Sprintf("openForRead called out of order got %d, expected %d", n, len(s.fds))) } fn := s.filename(n) f, err := os.Open(fn) if err != nil { return err } openFdsVar.Add(s.root, 1) debug.Printf("diskpacked: opened for read %q", fn) s.fds = append(s.fds, f) return nil }
[ "func", "(", "s", "*", "storage", ")", "openForRead", "(", "n", "int", ")", "error", "{", "if", "n", ">", "len", "(", "s", ".", "fds", ")", "{", "panic", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "n", ",", "len", "(", "s", ".", "fds", ")", ")", ")", "\n", "}", "\n\n", "fn", ":=", "s", ".", "filename", "(", "n", ")", "\n", "f", ",", "err", ":=", "os", ".", "Open", "(", "fn", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "openFdsVar", ".", "Add", "(", "s", ".", "root", ",", "1", ")", "\n", "debug", ".", "Printf", "(", "\"", "\"", ",", "fn", ")", "\n", "s", ".", "fds", "=", "append", "(", "s", ".", "fds", ",", "f", ")", "\n", "return", "nil", "\n", "}" ]
// openForRead will open pack file n for read and keep a handle to it in // s.fds. os.IsNotExist returned if n >= the number of pack files in s.root. // This function is not thread safe, s.mu should be locked by the caller.
[ "openForRead", "will", "open", "pack", "file", "n", "for", "read", "and", "keep", "a", "handle", "to", "it", "in", "s", ".", "fds", ".", "os", ".", "IsNotExist", "returned", "if", "n", ">", "=", "the", "number", "of", "pack", "files", "in", "s", ".", "root", ".", "This", "function", "is", "not", "thread", "safe", "s", ".", "mu", "should", "be", "locked", "by", "the", "caller", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L212-L226
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
openForWrite
func (s *storage) openForWrite(n int) error { fn := s.filename(n) l, err := lock.Lock(fn + ".lock") if err != nil { return err } f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0666) if err != nil { l.Close() return err } openFdsVar.Add(s.root, 1) debug.Printf("diskpacked: opened for write %q", fn) s.size, err = f.Seek(0, os.SEEK_END) if err != nil { f.Close() l.Close() return err } s.writer = f s.writeLock = l return nil }
go
func (s *storage) openForWrite(n int) error { fn := s.filename(n) l, err := lock.Lock(fn + ".lock") if err != nil { return err } f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0666) if err != nil { l.Close() return err } openFdsVar.Add(s.root, 1) debug.Printf("diskpacked: opened for write %q", fn) s.size, err = f.Seek(0, os.SEEK_END) if err != nil { f.Close() l.Close() return err } s.writer = f s.writeLock = l return nil }
[ "func", "(", "s", "*", "storage", ")", "openForWrite", "(", "n", "int", ")", "error", "{", "fn", ":=", "s", ".", "filename", "(", "n", ")", "\n", "l", ",", "err", ":=", "lock", ".", "Lock", "(", "fn", "+", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "f", ",", "err", ":=", "os", ".", "OpenFile", "(", "fn", ",", "os", ".", "O_RDWR", "|", "os", ".", "O_CREATE", ",", "0666", ")", "\n", "if", "err", "!=", "nil", "{", "l", ".", "Close", "(", ")", "\n", "return", "err", "\n", "}", "\n", "openFdsVar", ".", "Add", "(", "s", ".", "root", ",", "1", ")", "\n", "debug", ".", "Printf", "(", "\"", "\"", ",", "fn", ")", "\n\n", "s", ".", "size", ",", "err", "=", "f", ".", "Seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "\n", "if", "err", "!=", "nil", "{", "f", ".", "Close", "(", ")", "\n", "l", ".", "Close", "(", ")", "\n", "return", "err", "\n", "}", "\n\n", "s", ".", "writer", "=", "f", "\n", "s", ".", "writeLock", "=", "l", "\n", "return", "nil", "\n", "}" ]
// openForWrite will create or open pack file n for writes, create a lock // visible external to the process and seek to the end of the file ready for // appending new data. // This function is not thread safe, s.mu should be locked by the caller.
[ "openForWrite", "will", "create", "or", "open", "pack", "file", "n", "for", "writes", "create", "a", "lock", "visible", "external", "to", "the", "process", "and", "seek", "to", "the", "end", "of", "the", "file", "ready", "for", "appending", "new", "data", ".", "This", "function", "is", "not", "thread", "safe", "s", ".", "mu", "should", "be", "locked", "by", "the", "caller", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L232-L256
train
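The append-only discipline in openForWrite — take an on-disk lock, open the pack read-write, and seek to the end before writing — looks roughly like this with only the standard library; the sidecar lock file here is a crude stand-in for the lock package diskpacked actually uses.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	const pack = "/tmp/pack-00000.blobs" // illustrative name

	// Crude exclusive lock: create-or-fail on a sidecar file.
	lockPath := pack + ".lock"
	lockFile, err := os.OpenFile(lockPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatalf("pack already locked: %v", err)
	}
	defer os.Remove(lockPath)
	defer lockFile.Close()

	f, err := os.OpenFile(pack, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	size, err := f.Seek(0, io.SeekEnd) // new blobs get appended after existing data
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("appending at offset", size)
}
```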
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
closePack
func (s *storage) closePack() error { var err error if s.writer != nil { err = s.writer.Close() openFdsVar.Add(s.root, -1) s.writer = nil } if s.writeLock != nil { lerr := s.writeLock.Close() if err == nil { err = lerr } s.writeLock = nil } return err }
go
func (s *storage) closePack() error { var err error if s.writer != nil { err = s.writer.Close() openFdsVar.Add(s.root, -1) s.writer = nil } if s.writeLock != nil { lerr := s.writeLock.Close() if err == nil { err = lerr } s.writeLock = nil } return err }
[ "func", "(", "s", "*", "storage", ")", "closePack", "(", ")", "error", "{", "var", "err", "error", "\n", "if", "s", ".", "writer", "!=", "nil", "{", "err", "=", "s", ".", "writer", ".", "Close", "(", ")", "\n", "openFdsVar", ".", "Add", "(", "s", ".", "root", ",", "-", "1", ")", "\n", "s", ".", "writer", "=", "nil", "\n", "}", "\n", "if", "s", ".", "writeLock", "!=", "nil", "{", "lerr", ":=", "s", ".", "writeLock", ".", "Close", "(", ")", "\n", "if", "err", "==", "nil", "{", "err", "=", "lerr", "\n", "}", "\n", "s", ".", "writeLock", "=", "nil", "\n", "}", "\n", "return", "err", "\n", "}" ]
// closePack closes any pack file currently open for writing.
[ "closePack", "closes", "any", "pack", "file", "currently", "open", "for", "writing", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L259-L274
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
nextPack
func (s *storage) nextPack() error { debug.Println("diskpacked: nextPack") s.size = 0 if err := s.closePack(); err != nil { return err } n := len(s.fds) if err := s.openForWrite(n); err != nil { return err } return s.openForRead(n) }
go
func (s *storage) nextPack() error { debug.Println("diskpacked: nextPack") s.size = 0 if err := s.closePack(); err != nil { return err } n := len(s.fds) if err := s.openForWrite(n); err != nil { return err } return s.openForRead(n) }
[ "func", "(", "s", "*", "storage", ")", "nextPack", "(", ")", "error", "{", "debug", ".", "Println", "(", "\"", "\"", ")", "\n", "s", ".", "size", "=", "0", "\n", "if", "err", ":=", "s", ".", "closePack", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "n", ":=", "len", "(", "s", ".", "fds", ")", "\n", "if", "err", ":=", "s", ".", "openForWrite", "(", "n", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "return", "s", ".", "openForRead", "(", "n", ")", "\n", "}" ]
// nextPack will close the current writer and release its lock if open, // open the next pack file in sequence for writing, grab its lock, set it // to the currently active writer, and open another copy for read-only use. // This function is not thread safe, s.mu should be locked by the caller.
[ "nextPack", "will", "close", "the", "current", "writer", "and", "release", "its", "lock", "if", "open", "open", "the", "next", "pack", "file", "in", "sequence", "for", "writing", "grab", "its", "lock", "set", "it", "to", "the", "currently", "active", "writer", "and", "open", "another", "copy", "for", "read", "-", "only", "use", ".", "This", "function", "is", "not", "thread", "safe", "s", ".", "mu", "should", "be", "locked", "by", "the", "caller", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L280-L291
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
openAllPacks
func (s *storage) openAllPacks() error { debug.Println("diskpacked: openAllPacks") n := 0 for { err := s.openForRead(n) if os.IsNotExist(err) { break } if err != nil { return err } n++ } if n == 0 { // If no pack files are found, we create one open for read and write. return s.nextPack() } // If 1 or more pack files are found, open the last one read and write. return s.openForWrite(n - 1) }
go
func (s *storage) openAllPacks() error { debug.Println("diskpacked: openAllPacks") n := 0 for { err := s.openForRead(n) if os.IsNotExist(err) { break } if err != nil { return err } n++ } if n == 0 { // If no pack files are found, we create one open for read and write. return s.nextPack() } // If 1 or more pack files are found, open the last one read and write. return s.openForWrite(n - 1) }
[ "func", "(", "s", "*", "storage", ")", "openAllPacks", "(", ")", "error", "{", "debug", ".", "Println", "(", "\"", "\"", ")", "\n", "n", ":=", "0", "\n", "for", "{", "err", ":=", "s", ".", "openForRead", "(", "n", ")", "\n", "if", "os", ".", "IsNotExist", "(", "err", ")", "{", "break", "\n", "}", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "n", "++", "\n", "}", "\n\n", "if", "n", "==", "0", "{", "// If no pack files are found, we create one open for read and write.", "return", "s", ".", "nextPack", "(", ")", "\n", "}", "\n\n", "// If 1 or more pack files are found, open the last one read and write.", "return", "s", ".", "openForWrite", "(", "n", "-", "1", ")", "\n", "}" ]
// openAllPacks opens read-only each pack file in s.root, populating s.fds. // The latest pack file will also have a writable handle opened. // This function is not thread safe, s.mu should be locked by the caller.
[ "openAllPacks", "opens", "read", "-", "only", "each", "pack", "file", "in", "s", ".", "root", "populating", "s", ".", "fds", ".", "The", "latest", "pack", "file", "will", "also", "have", "a", "writable", "handle", "opened", ".", "This", "function", "is", "not", "thread", "safe", "s", ".", "mu", "should", "be", "locked", "by", "the", "caller", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L296-L317
train
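openAllPacks probes numbered pack files until os.Open reports not-exist; here is a standalone sketch of that probe loop, with an invented naming scheme since s.filename is not shown in this record.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	filename := func(n int) string {
		return fmt.Sprintf("/tmp/packs/pack-%05d.blobs", n) // hypothetical scheme
	}

	var fds []*os.File
	for n := 0; ; n++ {
		f, err := os.Open(filename(n))
		if os.IsNotExist(err) {
			break // no more packs; the last one found would be reopened for writing
		}
		if err != nil {
			fmt.Println("open:", err)
			return
		}
		fds = append(fds, f)
	}
	fmt.Println("found", len(fds), "pack files")
	for _, f := range fds {
		f.Close()
	}
}
```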
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
Close
func (s *storage) Close() error { s.mu.Lock() defer s.mu.Unlock() if s.closed { return nil } var closeErr error s.closed = true if err := s.index.Close(); err != nil { log.Println("diskpacked: closing index:", err) } for _, f := range s.fds { err := f.Close() openFdsVar.Add(s.root, -1) if err != nil { closeErr = err } } if err := s.closePack(); err != nil && closeErr == nil { closeErr = err } return closeErr }
go
func (s *storage) Close() error { s.mu.Lock() defer s.mu.Unlock() if s.closed { return nil } var closeErr error s.closed = true if err := s.index.Close(); err != nil { log.Println("diskpacked: closing index:", err) } for _, f := range s.fds { err := f.Close() openFdsVar.Add(s.root, -1) if err != nil { closeErr = err } } if err := s.closePack(); err != nil && closeErr == nil { closeErr = err } return closeErr }
[ "func", "(", "s", "*", "storage", ")", "Close", "(", ")", "error", "{", "s", ".", "mu", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "mu", ".", "Unlock", "(", ")", "\n", "if", "s", ".", "closed", "{", "return", "nil", "\n", "}", "\n", "var", "closeErr", "error", "\n", "s", ".", "closed", "=", "true", "\n", "if", "err", ":=", "s", ".", "index", ".", "Close", "(", ")", ";", "err", "!=", "nil", "{", "log", ".", "Println", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "for", "_", ",", "f", ":=", "range", "s", ".", "fds", "{", "err", ":=", "f", ".", "Close", "(", ")", "\n", "openFdsVar", ".", "Add", "(", "s", ".", "root", ",", "-", "1", ")", "\n", "if", "err", "!=", "nil", "{", "closeErr", "=", "err", "\n", "}", "\n", "}", "\n", "if", "err", ":=", "s", ".", "closePack", "(", ")", ";", "err", "!=", "nil", "&&", "closeErr", "==", "nil", "{", "closeErr", "=", "err", "\n", "}", "\n", "return", "closeErr", "\n", "}" ]
// Close index and all opened fds, with locking.
[ "Close", "index", "and", "all", "opened", "fds", "with", "locking", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L320-L342
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
fetch
func (s *storage) fetch(ctx context.Context, br blob.Ref, offset, length int64) (rc io.ReadCloser, size uint32, err error) { meta, err := s.meta(br) if err != nil { return nil, 0, err } if meta.file >= len(s.fds) { return nil, 0, fmt.Errorf("diskpacked: attempt to fetch blob from out of range pack file %d > %d", meta.file, len(s.fds)) } rac := s.fds[meta.file] var rs io.ReadSeeker if length == -1 { // normal Fetch mode rs = io.NewSectionReader(rac, meta.offset, int64(meta.size)) } else { if offset > int64(meta.size) { return nil, 0, blob.ErrOutOfRangeOffsetSubFetch } else if offset+length > int64(meta.size) { length = int64(meta.size) - offset } rs = io.NewSectionReader(rac, meta.offset+offset, length) } fn := rac.Name() // Ensure entry is in map. readVar.Add(fn, 0) if v, ok := readVar.Get(fn).(*expvar.Int); ok { rs = readerutil.NewStatsReadSeeker(v, rs) } readTotVar.Add(s.root, 0) if v, ok := readTotVar.Get(s.root).(*expvar.Int); ok { rs = readerutil.NewStatsReadSeeker(v, rs) } rsc := struct { io.ReadSeeker io.Closer }{ rs, types.NopCloser, } return rsc, meta.size, nil }
go
func (s *storage) fetch(ctx context.Context, br blob.Ref, offset, length int64) (rc io.ReadCloser, size uint32, err error) { meta, err := s.meta(br) if err != nil { return nil, 0, err } if meta.file >= len(s.fds) { return nil, 0, fmt.Errorf("diskpacked: attempt to fetch blob from out of range pack file %d > %d", meta.file, len(s.fds)) } rac := s.fds[meta.file] var rs io.ReadSeeker if length == -1 { // normal Fetch mode rs = io.NewSectionReader(rac, meta.offset, int64(meta.size)) } else { if offset > int64(meta.size) { return nil, 0, blob.ErrOutOfRangeOffsetSubFetch } else if offset+length > int64(meta.size) { length = int64(meta.size) - offset } rs = io.NewSectionReader(rac, meta.offset+offset, length) } fn := rac.Name() // Ensure entry is in map. readVar.Add(fn, 0) if v, ok := readVar.Get(fn).(*expvar.Int); ok { rs = readerutil.NewStatsReadSeeker(v, rs) } readTotVar.Add(s.root, 0) if v, ok := readTotVar.Get(s.root).(*expvar.Int); ok { rs = readerutil.NewStatsReadSeeker(v, rs) } rsc := struct { io.ReadSeeker io.Closer }{ rs, types.NopCloser, } return rsc, meta.size, nil }
[ "func", "(", "s", "*", "storage", ")", "fetch", "(", "ctx", "context", ".", "Context", ",", "br", "blob", ".", "Ref", ",", "offset", ",", "length", "int64", ")", "(", "rc", "io", ".", "ReadCloser", ",", "size", "uint32", ",", "err", "error", ")", "{", "meta", ",", "err", ":=", "s", ".", "meta", "(", "br", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "0", ",", "err", "\n", "}", "\n\n", "if", "meta", ".", "file", ">=", "len", "(", "s", ".", "fds", ")", "{", "return", "nil", ",", "0", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "meta", ".", "file", ",", "len", "(", "s", ".", "fds", ")", ")", "\n", "}", "\n", "rac", ":=", "s", ".", "fds", "[", "meta", ".", "file", "]", "\n", "var", "rs", "io", ".", "ReadSeeker", "\n", "if", "length", "==", "-", "1", "{", "// normal Fetch mode", "rs", "=", "io", ".", "NewSectionReader", "(", "rac", ",", "meta", ".", "offset", ",", "int64", "(", "meta", ".", "size", ")", ")", "\n", "}", "else", "{", "if", "offset", ">", "int64", "(", "meta", ".", "size", ")", "{", "return", "nil", ",", "0", ",", "blob", ".", "ErrOutOfRangeOffsetSubFetch", "\n", "}", "else", "if", "offset", "+", "length", ">", "int64", "(", "meta", ".", "size", ")", "{", "length", "=", "int64", "(", "meta", ".", "size", ")", "-", "offset", "\n", "}", "\n", "rs", "=", "io", ".", "NewSectionReader", "(", "rac", ",", "meta", ".", "offset", "+", "offset", ",", "length", ")", "\n", "}", "\n", "fn", ":=", "rac", ".", "Name", "(", ")", "\n", "// Ensure entry is in map.", "readVar", ".", "Add", "(", "fn", ",", "0", ")", "\n", "if", "v", ",", "ok", ":=", "readVar", ".", "Get", "(", "fn", ")", ".", "(", "*", "expvar", ".", "Int", ")", ";", "ok", "{", "rs", "=", "readerutil", ".", "NewStatsReadSeeker", "(", "v", ",", "rs", ")", "\n", "}", "\n", "readTotVar", ".", "Add", "(", "s", ".", "root", ",", "0", ")", "\n", "if", "v", ",", "ok", ":=", "readTotVar", ".", "Get", "(", "s", ".", "root", ")", ".", "(", "*", "expvar", ".", "Int", ")", ";", "ok", "{", "rs", "=", "readerutil", ".", "NewStatsReadSeeker", "(", "v", ",", "rs", ")", "\n", "}", "\n", "rsc", ":=", "struct", "{", "io", ".", "ReadSeeker", "\n", "io", ".", "Closer", "\n", "}", "{", "rs", ",", "types", ".", "NopCloser", ",", "}", "\n", "return", "rsc", ",", "meta", ".", "size", ",", "nil", "\n", "}" ]
// length of -1 means all
[ "length", "of", "-", "1", "means", "all" ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L357-L397
train
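The sub-range handling in fetch comes down to io.NewSectionReader plus clamping the requested window to the blob's recorded size; here is that arithmetic on its own, with a strings.Reader standing in for the open pack file.

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	pack := strings.NewReader("xxxxHELLO WORLDyyyy") // fake pack file contents
	blobOffset, blobSize := int64(4), int64(11)      // where the blob lives in the pack

	// Requested sub-range, possibly extending past the end of the blob.
	offset, length := int64(6), int64(100)
	if offset > blobSize {
		fmt.Println("out of range")
		return
	}
	if offset+length > blobSize {
		length = blobSize - offset // clamp to the blob, like fetch does
	}

	rs := io.NewSectionReader(pack, blobOffset+offset, length)
	b, _ := ioutil.ReadAll(rs)
	fmt.Printf("%q\n", b) // "WORLD"
}
```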
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
RemoveBlobs
func (s *storage) RemoveBlobs(ctx context.Context, blobs []blob.Ref) error { batch := s.index.BeginBatch() var wg syncutil.Group for _, br := range blobs { br := br removeGate.Start() batch.Delete(br.String()) wg.Go(func() error { defer removeGate.Done() if err := s.delete(br); err != nil && err != os.ErrNotExist { return err } return nil }) } err1 := wg.Err() err2 := s.index.CommitBatch(batch) if err1 != nil { return err1 } return err2 }
go
func (s *storage) RemoveBlobs(ctx context.Context, blobs []blob.Ref) error { batch := s.index.BeginBatch() var wg syncutil.Group for _, br := range blobs { br := br removeGate.Start() batch.Delete(br.String()) wg.Go(func() error { defer removeGate.Done() if err := s.delete(br); err != nil && err != os.ErrNotExist { return err } return nil }) } err1 := wg.Err() err2 := s.index.CommitBatch(batch) if err1 != nil { return err1 } return err2 }
[ "func", "(", "s", "*", "storage", ")", "RemoveBlobs", "(", "ctx", "context", ".", "Context", ",", "blobs", "[", "]", "blob", ".", "Ref", ")", "error", "{", "batch", ":=", "s", ".", "index", ".", "BeginBatch", "(", ")", "\n", "var", "wg", "syncutil", ".", "Group", "\n", "for", "_", ",", "br", ":=", "range", "blobs", "{", "br", ":=", "br", "\n", "removeGate", ".", "Start", "(", ")", "\n", "batch", ".", "Delete", "(", "br", ".", "String", "(", ")", ")", "\n", "wg", ".", "Go", "(", "func", "(", ")", "error", "{", "defer", "removeGate", ".", "Done", "(", ")", "\n", "if", "err", ":=", "s", ".", "delete", "(", "br", ")", ";", "err", "!=", "nil", "&&", "err", "!=", "os", ".", "ErrNotExist", "{", "return", "err", "\n", "}", "\n", "return", "nil", "\n", "}", ")", "\n", "}", "\n", "err1", ":=", "wg", ".", "Err", "(", ")", "\n", "err2", ":=", "s", ".", "index", ".", "CommitBatch", "(", "batch", ")", "\n", "if", "err1", "!=", "nil", "{", "return", "err1", "\n", "}", "\n", "return", "err2", "\n", "}" ]
// RemoveBlobs removes the blobs from index and pads data with zero bytes
[ "RemoveBlobs", "removes", "the", "blobs", "from", "index", "and", "pads", "data", "with", "zero", "bytes" ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L406-L427
train
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
StreamBlobs
func (s *storage) StreamBlobs(ctx context.Context, dest chan<- blobserver.BlobAndToken, contToken string) error { defer close(dest) fileNum, offset, err := parseContToken(contToken) if err != nil { return errors.New("diskpacked: invalid continuation token") } debug.Printf("Continuing blob streaming from pack %s, offset %d", s.filename(fileNum), offset) fd, err := os.Open(s.filename(fileNum)) if err != nil { return err } // fd will change over time; Close whichever is current when we exit. defer func() { if fd != nil { // may be nil on os.Open error below fd.Close() } }() // ContToken always refers to the exact next place we will read from. // Note that seeking past the end is legal on Unix and for io.Seeker, // but that will just result in a mostly harmless EOF. // // TODO: probably be stricter here and don't allow seek past // the end, since we know the size of closed files and the // size of the file diskpacked currently still writing. _, err = fd.Seek(offset, os.SEEK_SET) if err != nil { return err } const ioBufSize = 256 * 1024 // We'll use bufio to avoid read system call overhead. r := bufio.NewReaderSize(fd, ioBufSize) for { // Are we at the EOF of this pack? if _, err := r.Peek(1); err != nil { if err != io.EOF { return err } // EOF case; continue to the next pack, if any. fileNum++ offset = 0 fd.Close() // Close the previous pack fd, err = os.Open(s.filename(fileNum)) if os.IsNotExist(err) { // We reached the end. return nil } else if err != nil { return err } r.Reset(fd) continue } thisOffset := offset // of current blob's header consumed, digest, size, err := readHeader(r) if err != nil { return err } offset += int64(consumed) if deletedBlobRef.Match(digest) { // Skip over deletion padding if _, err := io.CopyN(ioutil.Discard, r, int64(size)); err != nil { return err } offset += int64(size) continue } ref, ok := blob.ParseBytes(digest) if !ok { return fmt.Errorf("diskpacked: Invalid blobref %q", digest) } // Finally, read and send the blob. // TODO: remove this allocation per blob. We can make one instead // outside of the loop, guarded by a mutex, and re-use it, only to // lock the mutex and clone it if somebody actually calls ReadFull // on the *blob.Blob. Otherwise callers just scanning all the blobs // to see if they have everything incur lots of garbage if they // don't open any blobs. data := make([]byte, size) if _, err := io.ReadFull(r, data); err != nil { return err } offset += int64(size) blob := blob.NewBlob(ref, size, func(context.Context) ([]byte, error) { return data, nil }) select { case dest <- blobserver.BlobAndToken{ Blob: blob, Token: fmt.Sprintf("%d %d", fileNum, thisOffset), }: // Nothing. case <-ctx.Done(): return ctx.Err() } } }
go
func (s *storage) StreamBlobs(ctx context.Context, dest chan<- blobserver.BlobAndToken, contToken string) error { defer close(dest) fileNum, offset, err := parseContToken(contToken) if err != nil { return errors.New("diskpacked: invalid continuation token") } debug.Printf("Continuing blob streaming from pack %s, offset %d", s.filename(fileNum), offset) fd, err := os.Open(s.filename(fileNum)) if err != nil { return err } // fd will change over time; Close whichever is current when we exit. defer func() { if fd != nil { // may be nil on os.Open error below fd.Close() } }() // ContToken always refers to the exact next place we will read from. // Note that seeking past the end is legal on Unix and for io.Seeker, // but that will just result in a mostly harmless EOF. // // TODO: probably be stricter here and don't allow seek past // the end, since we know the size of closed files and the // size of the file diskpacked currently still writing. _, err = fd.Seek(offset, os.SEEK_SET) if err != nil { return err } const ioBufSize = 256 * 1024 // We'll use bufio to avoid read system call overhead. r := bufio.NewReaderSize(fd, ioBufSize) for { // Are we at the EOF of this pack? if _, err := r.Peek(1); err != nil { if err != io.EOF { return err } // EOF case; continue to the next pack, if any. fileNum++ offset = 0 fd.Close() // Close the previous pack fd, err = os.Open(s.filename(fileNum)) if os.IsNotExist(err) { // We reached the end. return nil } else if err != nil { return err } r.Reset(fd) continue } thisOffset := offset // of current blob's header consumed, digest, size, err := readHeader(r) if err != nil { return err } offset += int64(consumed) if deletedBlobRef.Match(digest) { // Skip over deletion padding if _, err := io.CopyN(ioutil.Discard, r, int64(size)); err != nil { return err } offset += int64(size) continue } ref, ok := blob.ParseBytes(digest) if !ok { return fmt.Errorf("diskpacked: Invalid blobref %q", digest) } // Finally, read and send the blob. // TODO: remove this allocation per blob. We can make one instead // outside of the loop, guarded by a mutex, and re-use it, only to // lock the mutex and clone it if somebody actually calls ReadFull // on the *blob.Blob. Otherwise callers just scanning all the blobs // to see if they have everything incur lots of garbage if they // don't open any blobs. data := make([]byte, size) if _, err := io.ReadFull(r, data); err != nil { return err } offset += int64(size) blob := blob.NewBlob(ref, size, func(context.Context) ([]byte, error) { return data, nil }) select { case dest <- blobserver.BlobAndToken{ Blob: blob, Token: fmt.Sprintf("%d %d", fileNum, thisOffset), }: // Nothing. case <-ctx.Done(): return ctx.Err() } } }
[ "func", "(", "s", "*", "storage", ")", "StreamBlobs", "(", "ctx", "context", ".", "Context", ",", "dest", "chan", "<-", "blobserver", ".", "BlobAndToken", ",", "contToken", "string", ")", "error", "{", "defer", "close", "(", "dest", ")", "\n\n", "fileNum", ",", "offset", ",", "err", ":=", "parseContToken", "(", "contToken", ")", "\n", "if", "err", "!=", "nil", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "debug", ".", "Printf", "(", "\"", "\"", ",", "s", ".", "filename", "(", "fileNum", ")", ",", "offset", ")", "\n\n", "fd", ",", "err", ":=", "os", ".", "Open", "(", "s", ".", "filename", "(", "fileNum", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// fd will change over time; Close whichever is current when we exit.", "defer", "func", "(", ")", "{", "if", "fd", "!=", "nil", "{", "// may be nil on os.Open error below", "fd", ".", "Close", "(", ")", "\n", "}", "\n", "}", "(", ")", "\n\n", "// ContToken always refers to the exact next place we will read from.", "// Note that seeking past the end is legal on Unix and for io.Seeker,", "// but that will just result in a mostly harmless EOF.", "//", "// TODO: probably be stricter here and don't allow seek past", "// the end, since we know the size of closed files and the", "// size of the file diskpacked currently still writing.", "_", ",", "err", "=", "fd", ".", "Seek", "(", "offset", ",", "os", ".", "SEEK_SET", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "const", "ioBufSize", "=", "256", "*", "1024", "\n\n", "// We'll use bufio to avoid read system call overhead.", "r", ":=", "bufio", ".", "NewReaderSize", "(", "fd", ",", "ioBufSize", ")", "\n\n", "for", "{", "// Are we at the EOF of this pack?", "if", "_", ",", "err", ":=", "r", ".", "Peek", "(", "1", ")", ";", "err", "!=", "nil", "{", "if", "err", "!=", "io", ".", "EOF", "{", "return", "err", "\n", "}", "\n", "// EOF case; continue to the next pack, if any.", "fileNum", "++", "\n", "offset", "=", "0", "\n", "fd", ".", "Close", "(", ")", "// Close the previous pack", "\n", "fd", ",", "err", "=", "os", ".", "Open", "(", "s", ".", "filename", "(", "fileNum", ")", ")", "\n", "if", "os", ".", "IsNotExist", "(", "err", ")", "{", "// We reached the end.", "return", "nil", "\n", "}", "else", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "r", ".", "Reset", "(", "fd", ")", "\n", "continue", "\n", "}", "\n\n", "thisOffset", ":=", "offset", "// of current blob's header", "\n", "consumed", ",", "digest", ",", "size", ",", "err", ":=", "readHeader", "(", "r", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "offset", "+=", "int64", "(", "consumed", ")", "\n", "if", "deletedBlobRef", ".", "Match", "(", "digest", ")", "{", "// Skip over deletion padding", "if", "_", ",", "err", ":=", "io", ".", "CopyN", "(", "ioutil", ".", "Discard", ",", "r", ",", "int64", "(", "size", ")", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "offset", "+=", "int64", "(", "size", ")", "\n", "continue", "\n", "}", "\n\n", "ref", ",", "ok", ":=", "blob", ".", "ParseBytes", "(", "digest", ")", "\n", "if", "!", "ok", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "digest", ")", "\n", "}", "\n\n", "// Finally, read and send the blob.", "// TODO: remove this allocation per blob. 
We can make one instead", "// outside of the loop, guarded by a mutex, and re-use it, only to", "// lock the mutex and clone it if somebody actually calls ReadFull", "// on the *blob.Blob. Otherwise callers just scanning all the blobs", "// to see if they have everything incur lots of garbage if they", "// don't open any blobs.", "data", ":=", "make", "(", "[", "]", "byte", ",", "size", ")", "\n", "if", "_", ",", "err", ":=", "io", ".", "ReadFull", "(", "r", ",", "data", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "offset", "+=", "int64", "(", "size", ")", "\n", "blob", ":=", "blob", ".", "NewBlob", "(", "ref", ",", "size", ",", "func", "(", "context", ".", "Context", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "return", "data", ",", "nil", "\n", "}", ")", "\n", "select", "{", "case", "dest", "<-", "blobserver", ".", "BlobAndToken", "{", "Blob", ":", "blob", ",", "Token", ":", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "fileNum", ",", "thisOffset", ")", ",", "}", ":", "// Nothing.", "case", "<-", "ctx", ".", "Done", "(", ")", ":", "return", "ctx", ".", "Err", "(", ")", "\n", "}", "\n", "}", "\n", "}" ]
// StreamBlobs implements the blobserver.StreamBlobs interface.
[ "StreamBlobs", "Implements", "the", "blobserver", ".", "StreamBlobs", "interface", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L529-L635
train
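The continuation token that StreamBlobs parses and emits above is just "<fileNum> <offset>". The real parseContToken lives elsewhere in diskpacked.go; the sketch below is only a hypothetical illustration of that token format, with an invented function name.

package main

import (
	"fmt"
)

// parseContTokenSketch is a hypothetical parser for the "<fileNum> <offset>"
// continuation token format used by StreamBlobs. An empty token means
// "start from the first pack at offset 0".
func parseContTokenSketch(token string) (fileNum int, offset int64, err error) {
	if token == "" {
		return 0, 0, nil
	}
	if _, err = fmt.Sscanf(token, "%d %d", &fileNum, &offset); err != nil {
		return 0, 0, fmt.Errorf("invalid continuation token %q: %v", token, err)
	}
	return fileNum, offset, nil
}

func main() {
	fileNum, offset, err := parseContTokenSketch("2 4096")
	fmt.Println(fileNum, offset, err) // 2 4096 <nil>
}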
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
append
func (s *storage) append(br blob.SizedRef, r io.Reader) error { s.mu.Lock() defer s.mu.Unlock() if s.closed { return errors.New("diskpacked: write to closed storage") } // to be able to undo the append origOffset := s.size fn := s.writer.Name() n, err := fmt.Fprintf(s.writer, "[%v %v]", br.Ref.String(), br.Size) s.size += int64(n) writeVar.Add(fn, int64(n)) writeTotVar.Add(s.root, int64(n)) if err != nil { return err } // TODO(adg): remove this seek and the offset check once confident offset, err := s.writer.Seek(0, os.SEEK_CUR) if err != nil { return err } if offset != s.size { return fmt.Errorf("diskpacked: seek says offset = %d, we think %d", offset, s.size) } offset = s.size // make this a declaration once the above is removed n2, err := io.Copy(s.writer, r) s.size += n2 writeVar.Add(fn, int64(n)) writeTotVar.Add(s.root, int64(n)) if err != nil { return err } if n2 != int64(br.Size) { return fmt.Errorf("diskpacked: written blob size %d didn't match size %d", n, br.Size) } if err = s.writer.Sync(); err != nil { return err } packIdx := len(s.fds) - 1 if s.size > s.maxFileSize { if err := s.nextPack(); err != nil { return err } } err = s.index.Set(br.Ref.String(), blobMeta{packIdx, offset, br.Size}.String()) if err != nil { if _, seekErr := s.writer.Seek(origOffset, os.SEEK_SET); seekErr != nil { log.Printf("ERROR seeking back to the original offset: %v", seekErr) } else if truncErr := s.writer.Truncate(origOffset); truncErr != nil { log.Printf("ERROR truncating file after index error: %v", truncErr) } else { s.size = origOffset } } return err }
go
func (s *storage) append(br blob.SizedRef, r io.Reader) error { s.mu.Lock() defer s.mu.Unlock() if s.closed { return errors.New("diskpacked: write to closed storage") } // to be able to undo the append origOffset := s.size fn := s.writer.Name() n, err := fmt.Fprintf(s.writer, "[%v %v]", br.Ref.String(), br.Size) s.size += int64(n) writeVar.Add(fn, int64(n)) writeTotVar.Add(s.root, int64(n)) if err != nil { return err } // TODO(adg): remove this seek and the offset check once confident offset, err := s.writer.Seek(0, os.SEEK_CUR) if err != nil { return err } if offset != s.size { return fmt.Errorf("diskpacked: seek says offset = %d, we think %d", offset, s.size) } offset = s.size // make this a declaration once the above is removed n2, err := io.Copy(s.writer, r) s.size += n2 writeVar.Add(fn, int64(n)) writeTotVar.Add(s.root, int64(n)) if err != nil { return err } if n2 != int64(br.Size) { return fmt.Errorf("diskpacked: written blob size %d didn't match size %d", n, br.Size) } if err = s.writer.Sync(); err != nil { return err } packIdx := len(s.fds) - 1 if s.size > s.maxFileSize { if err := s.nextPack(); err != nil { return err } } err = s.index.Set(br.Ref.String(), blobMeta{packIdx, offset, br.Size}.String()) if err != nil { if _, seekErr := s.writer.Seek(origOffset, os.SEEK_SET); seekErr != nil { log.Printf("ERROR seeking back to the original offset: %v", seekErr) } else if truncErr := s.writer.Truncate(origOffset); truncErr != nil { log.Printf("ERROR truncating file after index error: %v", truncErr) } else { s.size = origOffset } } return err }
[ "func", "(", "s", "*", "storage", ")", "append", "(", "br", "blob", ".", "SizedRef", ",", "r", "io", ".", "Reader", ")", "error", "{", "s", ".", "mu", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "mu", ".", "Unlock", "(", ")", "\n", "if", "s", ".", "closed", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n\n", "// to be able to undo the append", "origOffset", ":=", "s", ".", "size", "\n\n", "fn", ":=", "s", ".", "writer", ".", "Name", "(", ")", "\n", "n", ",", "err", ":=", "fmt", ".", "Fprintf", "(", "s", ".", "writer", ",", "\"", "\"", ",", "br", ".", "Ref", ".", "String", "(", ")", ",", "br", ".", "Size", ")", "\n", "s", ".", "size", "+=", "int64", "(", "n", ")", "\n", "writeVar", ".", "Add", "(", "fn", ",", "int64", "(", "n", ")", ")", "\n", "writeTotVar", ".", "Add", "(", "s", ".", "root", ",", "int64", "(", "n", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// TODO(adg): remove this seek and the offset check once confident", "offset", ",", "err", ":=", "s", ".", "writer", ".", "Seek", "(", "0", ",", "os", ".", "SEEK_CUR", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "offset", "!=", "s", ".", "size", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "offset", ",", "s", ".", "size", ")", "\n", "}", "\n", "offset", "=", "s", ".", "size", "// make this a declaration once the above is removed", "\n\n", "n2", ",", "err", ":=", "io", ".", "Copy", "(", "s", ".", "writer", ",", "r", ")", "\n", "s", ".", "size", "+=", "n2", "\n", "writeVar", ".", "Add", "(", "fn", ",", "int64", "(", "n", ")", ")", "\n", "writeTotVar", ".", "Add", "(", "s", ".", "root", ",", "int64", "(", "n", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "n2", "!=", "int64", "(", "br", ".", "Size", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "n", ",", "br", ".", "Size", ")", "\n", "}", "\n", "if", "err", "=", "s", ".", "writer", ".", "Sync", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "packIdx", ":=", "len", "(", "s", ".", "fds", ")", "-", "1", "\n", "if", "s", ".", "size", ">", "s", ".", "maxFileSize", "{", "if", "err", ":=", "s", ".", "nextPack", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "err", "=", "s", ".", "index", ".", "Set", "(", "br", ".", "Ref", ".", "String", "(", ")", ",", "blobMeta", "{", "packIdx", ",", "offset", ",", "br", ".", "Size", "}", ".", "String", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "if", "_", ",", "seekErr", ":=", "s", ".", "writer", ".", "Seek", "(", "origOffset", ",", "os", ".", "SEEK_SET", ")", ";", "seekErr", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "seekErr", ")", "\n", "}", "else", "if", "truncErr", ":=", "s", ".", "writer", ".", "Truncate", "(", "origOffset", ")", ";", "truncErr", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "truncErr", ")", "\n", "}", "else", "{", "s", ".", "size", "=", "origOffset", "\n", "}", "\n", "}", "\n", "return", "err", "\n", "}" ]
// append writes the provided blob to the current data file.
[ "append", "writes", "the", "provided", "blob", "to", "the", "current", "data", "file", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L661-L722
train
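On disk, append frames each blob as a "[<blobref> <size>]" header followed by the raw bytes, which is exactly what the fmt.Fprintf call above writes. A minimal standalone sketch of that framing, using a plain string for the ref instead of blob.SizedRef (the buffer and ref value here are invented for illustration):

package main

import (
	"bytes"
	"fmt"
)

// packBlob writes the diskpacked-style frame "[<ref> <size>]<data>" to buf.
// blobRef is just a string here; the real code uses blob.SizedRef.
func packBlob(buf *bytes.Buffer, blobRef string, data []byte) {
	fmt.Fprintf(buf, "[%v %v]", blobRef, len(data))
	buf.Write(data)
}

func main() {
	var pack bytes.Buffer
	packBlob(&pack, "sha224-deadbeef", []byte("hello")) // hypothetical ref
	fmt.Printf("%q\n", pack.String())                   // "[sha224-deadbeef 5]hello"
}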
perkeep/perkeep
pkg/blobserver/diskpacked/diskpacked.go
meta
func (s *storage) meta(br blob.Ref) (m blobMeta, err error) { ms, err := s.index.Get(br.String()) if err != nil { if err == sorted.ErrNotFound { err = os.ErrNotExist } return } m, ok := parseBlobMeta(ms) if !ok { err = fmt.Errorf("diskpacked: bad blob metadata: %q", ms) } return }
go
func (s *storage) meta(br blob.Ref) (m blobMeta, err error) { ms, err := s.index.Get(br.String()) if err != nil { if err == sorted.ErrNotFound { err = os.ErrNotExist } return } m, ok := parseBlobMeta(ms) if !ok { err = fmt.Errorf("diskpacked: bad blob metadata: %q", ms) } return }
[ "func", "(", "s", "*", "storage", ")", "meta", "(", "br", "blob", ".", "Ref", ")", "(", "m", "blobMeta", ",", "err", "error", ")", "{", "ms", ",", "err", ":=", "s", ".", "index", ".", "Get", "(", "br", ".", "String", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "if", "err", "==", "sorted", ".", "ErrNotFound", "{", "err", "=", "os", ".", "ErrNotExist", "\n", "}", "\n", "return", "\n", "}", "\n", "m", ",", "ok", ":=", "parseBlobMeta", "(", "ms", ")", "\n", "if", "!", "ok", "{", "err", "=", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "ms", ")", "\n", "}", "\n", "return", "\n", "}" ]
// meta fetches the metadata for the specified blob from the index.
[ "meta", "fetches", "the", "metadata", "for", "the", "specified", "blob", "from", "the", "index", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/diskpacked/diskpacked.go#L725-L738
train
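meta depends on parseBlobMeta to decode the per-blob index value, whose exact encoding is not shown in this record. Assuming a space-separated "<pack file> <offset> <size>" layout (matching the blobMeta{packIdx, offset, br.Size} fields written by append), a hypothetical decoder could look like this; all names are invented:

package main

import "fmt"

// blobMetaSketch mirrors the fields stored per blob: which pack file it is
// in, where the data starts, and how many bytes it occupies.
type blobMetaSketch struct {
	file   int
	offset int64
	size   uint32
}

// parseBlobMetaSketch is a hypothetical decoder for a space-separated
// "<file> <offset> <size>" index value.
func parseBlobMetaSketch(s string) (blobMetaSketch, bool) {
	var m blobMetaSketch
	if _, err := fmt.Sscanf(s, "%d %d %d", &m.file, &m.offset, &m.size); err != nil {
		return blobMetaSketch{}, false
	}
	return m, true
}

func main() {
	m, ok := parseBlobMetaSketch("3 1024 512")
	fmt.Println(m, ok) // {3 1024 512} true
}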
perkeep/perkeep
cmd/pk-put/files.go
DumpStats
func DumpStats(sr *statspkg.Receiver, destFile string) { sr.Lock() defer sr.Unlock() f, err := os.Create(destFile) if err != nil { log.Fatal(err) } var sum int64 for _, size := range sr.Have { fmt.Fprintf(f, "%d\n", size) } fmt.Printf("In-memory blob stats: %d blobs, %d bytes\n", len(sr.Have), sum) err = f.Close() if err != nil { log.Fatal(err) } }
go
func DumpStats(sr *statspkg.Receiver, destFile string) { sr.Lock() defer sr.Unlock() f, err := os.Create(destFile) if err != nil { log.Fatal(err) } var sum int64 for _, size := range sr.Have { fmt.Fprintf(f, "%d\n", size) } fmt.Printf("In-memory blob stats: %d blobs, %d bytes\n", len(sr.Have), sum) err = f.Close() if err != nil { log.Fatal(err) } }
[ "func", "DumpStats", "(", "sr", "*", "statspkg", ".", "Receiver", ",", "destFile", "string", ")", "{", "sr", ".", "Lock", "(", ")", "\n", "defer", "sr", ".", "Unlock", "(", ")", "\n\n", "f", ",", "err", ":=", "os", ".", "Create", "(", "destFile", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Fatal", "(", "err", ")", "\n", "}", "\n\n", "var", "sum", "int64", "\n", "for", "_", ",", "size", ":=", "range", "sr", ".", "Have", "{", "fmt", ".", "Fprintf", "(", "f", ",", "\"", "\\n", "\"", ",", "size", ")", "\n", "}", "\n", "fmt", ".", "Printf", "(", "\"", "\\n", "\"", ",", "len", "(", "sr", ".", "Have", ")", ",", "sum", ")", "\n\n", "err", "=", "f", ".", "Close", "(", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Fatal", "(", "err", ")", "\n", "}", "\n", "}" ]
// DumpStats creates the destFile and writes a line per received blob, // with its blob size.
[ "DumpStats", "creates", "the", "destFile", "and", "writes", "a", "line", "per", "received", "blob", "with", "its", "blob", "size", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/cmd/pk-put/files.go#L310-L329
train
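A standalone sketch of the same dump pattern DumpStats implements, with a plain map standing in for statspkg.Receiver.Have and the per-blob sizes accumulated into the byte total that gets printed; all names here are invented for illustration:

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
)

// dumpSizes writes one blob size per line to destFile and returns the total
// number of bytes seen, mirroring the stats line DumpStats prints.
func dumpSizes(destFile string, have map[string]int64) (int64, error) {
	f, err := os.Create(destFile)
	if err != nil {
		return 0, err
	}
	w := bufio.NewWriter(f)
	var sum int64
	for _, size := range have {
		fmt.Fprintf(w, "%d\n", size)
		sum += size
	}
	if err := w.Flush(); err != nil {
		f.Close()
		return 0, err
	}
	return sum, f.Close()
}

func main() {
	sum, err := dumpSizes("/tmp/blob-sizes.txt", map[string]int64{"sha224-aa": 5, "sha224-bb": 7})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total bytes:", sum) // total bytes: 12
}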
perkeep/perkeep
cmd/pk-put/files.go
statReceiver
func (up *Uploader) statReceiver(n *node) blobserver.StatReceiver { statReceiver := up.altStatReceiver if statReceiver == nil { // TODO(mpl): simplify the altStatReceiver situation as well, // see TODO in cmd/pk-put/uploader.go statReceiver = up.Client } if android.IsChild() && n != nil && n.fi.Mode()&os.ModeType == 0 { return android.StatusReceiver{Sr: statReceiver, Path: n.fullPath} } return statReceiver }
go
func (up *Uploader) statReceiver(n *node) blobserver.StatReceiver { statReceiver := up.altStatReceiver if statReceiver == nil { // TODO(mpl): simplify the altStatReceiver situation as well, // see TODO in cmd/pk-put/uploader.go statReceiver = up.Client } if android.IsChild() && n != nil && n.fi.Mode()&os.ModeType == 0 { return android.StatusReceiver{Sr: statReceiver, Path: n.fullPath} } return statReceiver }
[ "func", "(", "up", "*", "Uploader", ")", "statReceiver", "(", "n", "*", "node", ")", "blobserver", ".", "StatReceiver", "{", "statReceiver", ":=", "up", ".", "altStatReceiver", "\n", "if", "statReceiver", "==", "nil", "{", "// TODO(mpl): simplify the altStatReceiver situation as well,", "// see TODO in cmd/pk-put/uploader.go", "statReceiver", "=", "up", ".", "Client", "\n", "}", "\n", "if", "android", ".", "IsChild", "(", ")", "&&", "n", "!=", "nil", "&&", "n", ".", "fi", ".", "Mode", "(", ")", "&", "os", ".", "ModeType", "==", "0", "{", "return", "android", ".", "StatusReceiver", "{", "Sr", ":", "statReceiver", ",", "Path", ":", "n", ".", "fullPath", "}", "\n", "}", "\n", "return", "statReceiver", "\n", "}" ]
// statReceiver returns the StatReceiver used for checking for and uploading blobs. // // The optional provided node is only used for conditionally printing out status info to stdout.
[ "statReceiver", "returns", "the", "StatReceiver", "used", "for", "checking", "for", "and", "uploading", "blobs", ".", "The", "optional", "provided", "node", "is", "only", "used", "for", "conditionally", "printing", "out", "status", "info", "to", "stdout", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/cmd/pk-put/files.go#L435-L446
train
perkeep/perkeep
cmd/pk-put/files.go
NewTreeUpload
func (up *Uploader) NewTreeUpload(dir string) *TreeUpload { tu := up.NewRootlessTreeUpload() tu.rootless = false tu.base = dir return tu }
go
func (up *Uploader) NewTreeUpload(dir string) *TreeUpload { tu := up.NewRootlessTreeUpload() tu.rootless = false tu.base = dir return tu }
[ "func", "(", "up", "*", "Uploader", ")", "NewTreeUpload", "(", "dir", "string", ")", "*", "TreeUpload", "{", "tu", ":=", "up", ".", "NewRootlessTreeUpload", "(", ")", "\n", "tu", ".", "rootless", "=", "false", "\n", "tu", ".", "base", "=", "dir", "\n", "return", "tu", "\n", "}" ]
// NewTreeUpload returns a TreeUpload. It doesn't begin uploading any files until a // call to Start.
[ "NewTreeUpload", "returns", "a", "TreeUpload", ".", "It", "doesn", "t", "begin", "uploading", "any", "files", "until", "a", "call", "to", "Start" ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/cmd/pk-put/files.go#L801-L806
train
perkeep/perkeep
pkg/blobserver/overlay/overlay.go
ReceiveBlob
func (sto *overlayStorage) ReceiveBlob(ctx context.Context, br blob.Ref, src io.Reader) (sb blob.SizedRef, err error) { sb, err = sto.upper.ReceiveBlob(ctx, br, src) if err == nil && sto.deleted != nil { err = sto.deleted.Delete(br.String()) } return sb, err }
go
func (sto *overlayStorage) ReceiveBlob(ctx context.Context, br blob.Ref, src io.Reader) (sb blob.SizedRef, err error) { sb, err = sto.upper.ReceiveBlob(ctx, br, src) if err == nil && sto.deleted != nil { err = sto.deleted.Delete(br.String()) } return sb, err }
[ "func", "(", "sto", "*", "overlayStorage", ")", "ReceiveBlob", "(", "ctx", "context", ".", "Context", ",", "br", "blob", ".", "Ref", ",", "src", "io", ".", "Reader", ")", "(", "sb", "blob", ".", "SizedRef", ",", "err", "error", ")", "{", "sb", ",", "err", "=", "sto", ".", "upper", ".", "ReceiveBlob", "(", "ctx", ",", "br", ",", "src", ")", "\n", "if", "err", "==", "nil", "&&", "sto", ".", "deleted", "!=", "nil", "{", "err", "=", "sto", ".", "deleted", ".", "Delete", "(", "br", ".", "String", "(", ")", ")", "\n", "}", "\n", "return", "sb", ",", "err", "\n", "}" ]
// ReceiveBlob stores received blobs on the upper layer.
[ "ReceiveBlob", "stores", "received", "blobs", "on", "the", "upper", "layer", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/overlay/overlay.go#L120-L126
train
perkeep/perkeep
pkg/blobserver/overlay/overlay.go
RemoveBlobs
func (sto *overlayStorage) RemoveBlobs(ctx context.Context, blobs []blob.Ref) error { if sto.deleted == nil { return blobserver.ErrNotImplemented } err := sto.upper.RemoveBlobs(ctx, blobs) if err != nil { return err } m := sto.deleted.BeginBatch() for _, br := range blobs { m.Set(br.String(), "1") } return sto.deleted.CommitBatch(m) }
go
func (sto *overlayStorage) RemoveBlobs(ctx context.Context, blobs []blob.Ref) error { if sto.deleted == nil { return blobserver.ErrNotImplemented } err := sto.upper.RemoveBlobs(ctx, blobs) if err != nil { return err } m := sto.deleted.BeginBatch() for _, br := range blobs { m.Set(br.String(), "1") } return sto.deleted.CommitBatch(m) }
[ "func", "(", "sto", "*", "overlayStorage", ")", "RemoveBlobs", "(", "ctx", "context", ".", "Context", ",", "blobs", "[", "]", "blob", ".", "Ref", ")", "error", "{", "if", "sto", ".", "deleted", "==", "nil", "{", "return", "blobserver", ".", "ErrNotImplemented", "\n", "}", "\n\n", "err", ":=", "sto", ".", "upper", ".", "RemoveBlobs", "(", "ctx", ",", "blobs", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "m", ":=", "sto", ".", "deleted", ".", "BeginBatch", "(", ")", "\n", "for", "_", ",", "br", ":=", "range", "blobs", "{", "m", ".", "Set", "(", "br", ".", "String", "(", ")", ",", "\"", "\"", ")", "\n", "}", "\n", "return", "sto", ".", "deleted", ".", "CommitBatch", "(", "m", ")", "\n", "}" ]
// RemoveBlobs marks the given blobs as deleted, and removes them if they are in the upper layer.
[ "RemoveBlobs", "marks", "the", "given", "blobs", "as", "deleted", "and", "removes", "them", "if", "they", "are", "in", "the", "upper", "layer", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/overlay/overlay.go#L129-L144
train
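The overlay's delete bookkeeping above is just a batch of "<ref> -> 1" writes into a sorted key/value store. A rough in-memory sketch of that pattern, with a map and mutex standing in for sorted.KeyValue and its batch API (names invented):

package main

import (
	"fmt"
	"sync"
)

// deletedSet records blob refs that the overlay should treat as removed,
// the way sto.deleted records them with a batch of Set(ref, "1") calls.
type deletedSet struct {
	mu sync.Mutex
	m  map[string]bool
}

func newDeletedSet() *deletedSet { return &deletedSet{m: make(map[string]bool)} }

// markRemoved marks all refs as deleted in one go, like CommitBatch.
func (d *deletedSet) markRemoved(refs []string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	for _, ref := range refs {
		d.m[ref] = true
	}
}

func (d *deletedSet) isDeleted(ref string) bool {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.m[ref]
}

func main() {
	d := newDeletedSet()
	d.markRemoved([]string{"sha224-aa", "sha224-bb"})
	fmt.Println(d.isDeleted("sha224-aa"), d.isDeleted("sha224-cc")) // true false
}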
perkeep/perkeep
pkg/blobserver/overlay/overlay.go
Fetch
func (sto *overlayStorage) Fetch(ctx context.Context, br blob.Ref) (file io.ReadCloser, size uint32, err error) { if sto.isDeleted(br) { return nil, 0, os.ErrNotExist } file, size, err = sto.upper.Fetch(ctx, br) if err != os.ErrNotExist { return file, size, err } return sto.lower.Fetch(ctx, br) }
go
func (sto *overlayStorage) Fetch(ctx context.Context, br blob.Ref) (file io.ReadCloser, size uint32, err error) { if sto.isDeleted(br) { return nil, 0, os.ErrNotExist } file, size, err = sto.upper.Fetch(ctx, br) if err != os.ErrNotExist { return file, size, err } return sto.lower.Fetch(ctx, br) }
[ "func", "(", "sto", "*", "overlayStorage", ")", "Fetch", "(", "ctx", "context", ".", "Context", ",", "br", "blob", ".", "Ref", ")", "(", "file", "io", ".", "ReadCloser", ",", "size", "uint32", ",", "err", "error", ")", "{", "if", "sto", ".", "isDeleted", "(", "br", ")", "{", "return", "nil", ",", "0", ",", "os", ".", "ErrNotExist", "\n", "}", "\n\n", "file", ",", "size", ",", "err", "=", "sto", ".", "upper", ".", "Fetch", "(", "ctx", ",", "br", ")", "\n", "if", "err", "!=", "os", ".", "ErrNotExist", "{", "return", "file", ",", "size", ",", "err", "\n", "}", "\n\n", "return", "sto", ".", "lower", ".", "Fetch", "(", "ctx", ",", "br", ")", "\n", "}" ]
// Fetch the blob by trying first the upper and then the lower layer. // The lower storage is checked only if the blob was not deleted in sto itself.
[ "Fetch", "the", "blob", "by", "trying", "first", "the", "upper", "and", "then", "lower", ".", "The", "lower", "storage", "is", "checked", "only", "if", "the", "blob", "was", "not", "deleleted", "in", "sto", "itself", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/overlay/overlay.go#L165-L176
train
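The fetch path is a plain upper-then-lower fallthrough guarded by the overlay's deleted set. A generic sketch of that lookup order, using string keys and maps instead of blob refs and blob stores (everything here is invented for illustration):

package main

import (
	"errors"
	"fmt"
)

var errNotExist = errors.New("not exist")

// overlayGet looks up key in upper first and falls back to lower, unless the
// key has been marked deleted in the overlay itself.
func overlayGet(upper, lower map[string]string, deleted map[string]bool, key string) (string, error) {
	if deleted[key] {
		return "", errNotExist
	}
	if v, ok := upper[key]; ok {
		return v, nil
	}
	if v, ok := lower[key]; ok {
		return v, nil
	}
	return "", errNotExist
}

func main() {
	upper := map[string]string{"a": "new"}
	lower := map[string]string{"a": "old", "b": "base"}
	deleted := map[string]bool{"b": true}
	fmt.Println(overlayGet(upper, lower, deleted, "a")) // new <nil>
	fmt.Println(overlayGet(upper, lower, deleted, "b")) // deleted in the overlay: not exist
}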
perkeep/perkeep
pkg/blobserver/overlay/overlay.go
EnumerateBlobs
func (sto *overlayStorage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error { defer close(dest) enums := []blobserver.BlobEnumerator{sto.lower, sto.upper} // Ensure that we send limit blobs if possible. sent := 0 for sent < limit { ch := make(chan blob.SizedRef) errch := make(chan error, 1) go func() { errch <- blobserver.MergedEnumerate(ctx, ch, enums, after, limit-sent) }() var last blob.Ref // Yield all blobs that weren't deleted from ch to destch. seen := 0 for sbr := range ch { seen++ if !sto.isDeleted(sbr.Ref) { log.Println(sent, sbr.Ref) dest <- sbr sent++ } last = sbr.Ref } if err := <-errch; err != nil { return err } // if no blob was received, enumeration is finished if seen == 0 { return nil } // resume enumeration after the last blob seen after = last.String() } return nil }
go
func (sto *overlayStorage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error { defer close(dest) enums := []blobserver.BlobEnumerator{sto.lower, sto.upper} // Ensure that we send limit blobs if possible. sent := 0 for sent < limit { ch := make(chan blob.SizedRef) errch := make(chan error, 1) go func() { errch <- blobserver.MergedEnumerate(ctx, ch, enums, after, limit-sent) }() var last blob.Ref // Yield all blobs that weren't deleted from ch to destch. seen := 0 for sbr := range ch { seen++ if !sto.isDeleted(sbr.Ref) { log.Println(sent, sbr.Ref) dest <- sbr sent++ } last = sbr.Ref } if err := <-errch; err != nil { return err } // if no blob was received, enumeration is finished if seen == 0 { return nil } // resume enumeration after the last blob seen after = last.String() } return nil }
[ "func", "(", "sto", "*", "overlayStorage", ")", "EnumerateBlobs", "(", "ctx", "context", ".", "Context", ",", "dest", "chan", "<-", "blob", ".", "SizedRef", ",", "after", "string", ",", "limit", "int", ")", "error", "{", "defer", "close", "(", "dest", ")", "\n\n", "enums", ":=", "[", "]", "blobserver", ".", "BlobEnumerator", "{", "sto", ".", "lower", ",", "sto", ".", "upper", "}", "\n\n", "// Ensure that we send limit blobs if possible.", "sent", ":=", "0", "\n", "for", "sent", "<", "limit", "{", "ch", ":=", "make", "(", "chan", "blob", ".", "SizedRef", ")", "\n", "errch", ":=", "make", "(", "chan", "error", ",", "1", ")", "\n", "go", "func", "(", ")", "{", "errch", "<-", "blobserver", ".", "MergedEnumerate", "(", "ctx", ",", "ch", ",", "enums", ",", "after", ",", "limit", "-", "sent", ")", "\n", "}", "(", ")", "\n\n", "var", "last", "blob", ".", "Ref", "\n\n", "// Yield all blobs that weren't deleted from ch to destch.", "seen", ":=", "0", "\n", "for", "sbr", ":=", "range", "ch", "{", "seen", "++", "\n", "if", "!", "sto", ".", "isDeleted", "(", "sbr", ".", "Ref", ")", "{", "log", ".", "Println", "(", "sent", ",", "sbr", ".", "Ref", ")", "\n", "dest", "<-", "sbr", "\n", "sent", "++", "\n", "}", "\n", "last", "=", "sbr", ".", "Ref", "\n", "}", "\n\n", "if", "err", ":=", "<-", "errch", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// if no blob was received, enumeration is finished", "if", "seen", "==", "0", "{", "return", "nil", "\n", "}", "\n\n", "// resume enumeration after the last blob seen", "after", "=", "last", ".", "String", "(", ")", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// EnumerateBlobs enumerates blobs of the lower and upper layers.
[ "EnumerateBlobs", "enumerates", "blobs", "of", "the", "lower", "and", "upper", "layers", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/pkg/blobserver/overlay/overlay.go#L209-L251
train
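EnumerateBlobs keeps asking the merged enumerator for more results, drops deleted refs, and resumes after the last ref seen until limit blobs have been sent or the sources run dry. A compact sketch of that cursor-and-filter loop over a sorted slice (names invented):

package main

import (
	"fmt"
	"sort"
)

// enumerateAfter returns up to limit keys greater than after, skipping any
// key in deleted, resuming from the last key seen until limit is met or the
// keys run out; the same shape as the overlay's EnumerateBlobs loop.
func enumerateAfter(keys []string, deleted map[string]bool, after string, limit int) []string {
	sort.Strings(keys)
	var out []string
	for len(out) < limit {
		// Find the next key strictly after the cursor.
		i := sort.SearchStrings(keys, after)
		for i < len(keys) && keys[i] <= after {
			i++
		}
		if i == len(keys) {
			break // sources exhausted
		}
		k := keys[i]
		if !deleted[k] {
			out = append(out, k)
		}
		after = k // resume after the last key seen
	}
	return out
}

func main() {
	keys := []string{"a", "b", "c", "d"}
	fmt.Println(enumerateAfter(keys, map[string]bool{"b": true}, "", 3)) // [a c d]
}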
perkeep/perkeep
internal/geocode/geocode.go
GetAPIKeyPath
func GetAPIKeyPath() (string, error) { dir, err := osutil.PerkeepConfigDir() if err != nil { return "", fmt.Errorf("could not get config dir: %v", err) } return filepath.Join(dir, apiKeyName), nil }
go
func GetAPIKeyPath() (string, error) { dir, err := osutil.PerkeepConfigDir() if err != nil { return "", fmt.Errorf("could not get config dir: %v", err) } return filepath.Join(dir, apiKeyName), nil }
[ "func", "GetAPIKeyPath", "(", ")", "(", "string", ",", "error", ")", "{", "dir", ",", "err", ":=", "osutil", ".", "PerkeepConfigDir", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "return", "filepath", ".", "Join", "(", "dir", ",", "apiKeyName", ")", ",", "nil", "\n", "}" ]
// GetAPIKeyPath returns the file path to the Google geocoding API key.
[ "GetAPIKeyPath", "returns", "the", "file", "path", "to", "the", "Google", "geocoding", "API", "key", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/geocode/geocode.go#L77-L83
train
perkeep/perkeep
internal/geocode/geocode.go
GetAPIKey
func GetAPIKey() (string, error) { mu.RLock() key := apiKey mu.RUnlock() if apiKey != "" { return key, nil } mu.Lock() defer mu.Unlock() dir, err := osutil.PerkeepConfigDir() if err != nil { return "", err } slurp, err := wkfs.ReadFile(filepath.Join(dir, apiKeyName)) if os.IsNotExist(err) { return "", ErrNoGoogleKey } if err != nil { return "", err } key = strings.TrimSpace(string(slurp)) if key == "" { return "", ErrNoGoogleKey } apiKey = key return key, nil }
go
func GetAPIKey() (string, error) { mu.RLock() key := apiKey mu.RUnlock() if apiKey != "" { return key, nil } mu.Lock() defer mu.Unlock() dir, err := osutil.PerkeepConfigDir() if err != nil { return "", err } slurp, err := wkfs.ReadFile(filepath.Join(dir, apiKeyName)) if os.IsNotExist(err) { return "", ErrNoGoogleKey } if err != nil { return "", err } key = strings.TrimSpace(string(slurp)) if key == "" { return "", ErrNoGoogleKey } apiKey = key return key, nil }
[ "func", "GetAPIKey", "(", ")", "(", "string", ",", "error", ")", "{", "mu", ".", "RLock", "(", ")", "\n", "key", ":=", "apiKey", "\n", "mu", ".", "RUnlock", "(", ")", "\n", "if", "apiKey", "!=", "\"", "\"", "{", "return", "key", ",", "nil", "\n", "}", "\n", "mu", ".", "Lock", "(", ")", "\n", "defer", "mu", ".", "Unlock", "(", ")", "\n\n", "dir", ",", "err", ":=", "osutil", ".", "PerkeepConfigDir", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n", "slurp", ",", "err", ":=", "wkfs", ".", "ReadFile", "(", "filepath", ".", "Join", "(", "dir", ",", "apiKeyName", ")", ")", "\n", "if", "os", ".", "IsNotExist", "(", "err", ")", "{", "return", "\"", "\"", ",", "ErrNoGoogleKey", "\n", "}", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n", "key", "=", "strings", ".", "TrimSpace", "(", "string", "(", "slurp", ")", ")", "\n", "if", "key", "==", "\"", "\"", "{", "return", "\"", "\"", ",", "ErrNoGoogleKey", "\n", "}", "\n", "apiKey", "=", "key", "\n", "return", "key", ",", "nil", "\n", "}" ]
// GetAPIKey returns the Google geocoding API key stored in the Perkeep // configuration directory as google-geocode.key.
[ "GetAPIKey", "returns", "the", "Google", "geocoding", "API", "key", "stored", "in", "the", "Perkeep", "configuration", "directory", "as", "google", "-", "geocode", ".", "key", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/geocode/geocode.go#L87-L114
train
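A possible call site for GetAPIKey, treating a missing google-geocode.key as a soft failure rather than a fatal error. The import path is assumed to be perkeep.org/internal/geocode; adjust if the module layout differs.

package main

import (
	"fmt"
	"log"

	"perkeep.org/internal/geocode" // assumed import path for the package above
)

func main() {
	key, err := geocode.GetAPIKey()
	if err == geocode.ErrNoGoogleKey {
		// No google-geocode.key in the Perkeep config dir; geocoding is
		// simply unavailable, which callers can tolerate.
		fmt.Println("geocoding disabled: no API key configured")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded geocode API key of length", len(key))
}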
perkeep/perkeep
internal/geocode/geocode.go
Lookup
func Lookup(ctx context.Context, address string) ([]Rect, error) { if AltLookupFn != nil { return AltLookupFn(ctx, address) } mu.RLock() rects, ok := cache[address] mu.RUnlock() if ok { return rects, nil } key, err := GetAPIKey() if err != nil { return nil, err } rectsi, err := sf.Do(address, func() (interface{}, error) { // TODO: static data files from OpenStreetMap, Wikipedia, etc? urlStr := "https://maps.googleapis.com/maps/api/geocode/json?address=" + url.QueryEscape(address) + "&sensor=false&key=" + url.QueryEscape(key) res, err := ctxhttp.Get(ctx, ctxutil.Client(ctx), urlStr) if err != nil { log.Printf("geocode: HTTP error doing Google lookup: %v", err) return nil, err } defer res.Body.Close() rects, err := decodeGoogleResponse(res.Body) if err != nil { log.Printf("geocode: error decoding Google geocode response for %q: %v", address, err) } else { log.Printf("geocode: Google lookup (%q) = %#v", address, rects) } if err == nil { mu.Lock() cache[address] = rects mu.Unlock() } return rects, err }) if err != nil { return nil, err } return rectsi.([]Rect), nil }
go
func Lookup(ctx context.Context, address string) ([]Rect, error) { if AltLookupFn != nil { return AltLookupFn(ctx, address) } mu.RLock() rects, ok := cache[address] mu.RUnlock() if ok { return rects, nil } key, err := GetAPIKey() if err != nil { return nil, err } rectsi, err := sf.Do(address, func() (interface{}, error) { // TODO: static data files from OpenStreetMap, Wikipedia, etc? urlStr := "https://maps.googleapis.com/maps/api/geocode/json?address=" + url.QueryEscape(address) + "&sensor=false&key=" + url.QueryEscape(key) res, err := ctxhttp.Get(ctx, ctxutil.Client(ctx), urlStr) if err != nil { log.Printf("geocode: HTTP error doing Google lookup: %v", err) return nil, err } defer res.Body.Close() rects, err := decodeGoogleResponse(res.Body) if err != nil { log.Printf("geocode: error decoding Google geocode response for %q: %v", address, err) } else { log.Printf("geocode: Google lookup (%q) = %#v", address, rects) } if err == nil { mu.Lock() cache[address] = rects mu.Unlock() } return rects, err }) if err != nil { return nil, err } return rectsi.([]Rect), nil }
[ "func", "Lookup", "(", "ctx", "context", ".", "Context", ",", "address", "string", ")", "(", "[", "]", "Rect", ",", "error", ")", "{", "if", "AltLookupFn", "!=", "nil", "{", "return", "AltLookupFn", "(", "ctx", ",", "address", ")", "\n", "}", "\n\n", "mu", ".", "RLock", "(", ")", "\n", "rects", ",", "ok", ":=", "cache", "[", "address", "]", "\n", "mu", ".", "RUnlock", "(", ")", "\n", "if", "ok", "{", "return", "rects", ",", "nil", "\n", "}", "\n\n", "key", ",", "err", ":=", "GetAPIKey", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "rectsi", ",", "err", ":=", "sf", ".", "Do", "(", "address", ",", "func", "(", ")", "(", "interface", "{", "}", ",", "error", ")", "{", "// TODO: static data files from OpenStreetMap, Wikipedia, etc?", "urlStr", ":=", "\"", "\"", "+", "url", ".", "QueryEscape", "(", "address", ")", "+", "\"", "\"", "+", "url", ".", "QueryEscape", "(", "key", ")", "\n", "res", ",", "err", ":=", "ctxhttp", ".", "Get", "(", "ctx", ",", "ctxutil", ".", "Client", "(", "ctx", ")", ",", "urlStr", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "err", ")", "\n", "return", "nil", ",", "err", "\n", "}", "\n", "defer", "res", ".", "Body", ".", "Close", "(", ")", "\n", "rects", ",", "err", ":=", "decodeGoogleResponse", "(", "res", ".", "Body", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "address", ",", "err", ")", "\n", "}", "else", "{", "log", ".", "Printf", "(", "\"", "\"", ",", "address", ",", "rects", ")", "\n", "}", "\n", "if", "err", "==", "nil", "{", "mu", ".", "Lock", "(", ")", "\n", "cache", "[", "address", "]", "=", "rects", "\n", "mu", ".", "Unlock", "(", ")", "\n", "}", "\n", "return", "rects", ",", "err", "\n", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "return", "rectsi", ".", "(", "[", "]", "Rect", ")", ",", "nil", "\n", "}" ]
// Lookup returns rectangles for the given address. Currently the only // implementation is the Google geocoding service.
[ "Lookup", "returns", "rectangles", "for", "the", "given", "address", ".", "Currently", "the", "only", "implementation", "is", "the", "Google", "geocoding", "service", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/internal/geocode/geocode.go#L120-L163
train
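Lookup needs only a context and an address; caching and singleflight deduplication happen inside, and a configured API key (or an AltLookupFn stub in tests) is required. A hedged usage sketch, again assuming the perkeep.org/internal/geocode import path and an arbitrary example address:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"perkeep.org/internal/geocode" // assumed import path for the package above
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	rects, err := geocode.Lookup(ctx, "Mountain View, CA") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	// Repeated lookups for the same address hit the in-memory cache and,
	// when concurrent, are coalesced by the singleflight group.
	fmt.Printf("%d rect(s): %#v\n", len(rects), rects)
}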
perkeep/perkeep
website/pk-web/pkweb.go
htmlFmt
func htmlFmt(w io.Writer, format string, x ...interface{}) string { writeAny(w, true, x[0]) return "" }
go
func htmlFmt(w io.Writer, format string, x ...interface{}) string { writeAny(w, true, x[0]) return "" }
[ "func", "htmlFmt", "(", "w", "io", ".", "Writer", ",", "format", "string", ",", "x", "...", "interface", "{", "}", ")", "string", "{", "writeAny", "(", "w", ",", "true", ",", "x", "[", "0", "]", ")", "\n", "return", "\"", "\"", "\n", "}" ]
// Template formatter for "html" format.
[ "Template", "formatter", "for", "html", "format", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L133-L136
train
perkeep/perkeep
website/pk-web/pkweb.go
htmlEscFmt
func htmlEscFmt(w io.Writer, format string, x ...interface{}) string { var buf bytes.Buffer writeAny(&buf, false, x[0]) template.HTMLEscape(w, buf.Bytes()) return "" }
go
func htmlEscFmt(w io.Writer, format string, x ...interface{}) string { var buf bytes.Buffer writeAny(&buf, false, x[0]) template.HTMLEscape(w, buf.Bytes()) return "" }
[ "func", "htmlEscFmt", "(", "w", "io", ".", "Writer", ",", "format", "string", ",", "x", "...", "interface", "{", "}", ")", "string", "{", "var", "buf", "bytes", ".", "Buffer", "\n", "writeAny", "(", "&", "buf", ",", "false", ",", "x", "[", "0", "]", ")", "\n", "template", ".", "HTMLEscape", "(", "w", ",", "buf", ".", "Bytes", "(", ")", ")", "\n", "return", "\"", "\"", "\n", "}" ]
// Template formatter for "htmlesc" format.
[ "Template", "formatter", "for", "htmlesc", "format", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L139-L144
train
perkeep/perkeep
website/pk-web/pkweb.go
writeAny
func writeAny(w io.Writer, html bool, x interface{}) { switch v := x.(type) { case []byte: writeText(w, v, html) case string: writeText(w, []byte(v), html) default: if html { var buf bytes.Buffer fmt.Fprint(&buf, x) writeText(w, buf.Bytes(), true) } else { fmt.Fprint(w, x) } } }
go
func writeAny(w io.Writer, html bool, x interface{}) { switch v := x.(type) { case []byte: writeText(w, v, html) case string: writeText(w, []byte(v), html) default: if html { var buf bytes.Buffer fmt.Fprint(&buf, x) writeText(w, buf.Bytes(), true) } else { fmt.Fprint(w, x) } } }
[ "func", "writeAny", "(", "w", "io", ".", "Writer", ",", "html", "bool", ",", "x", "interface", "{", "}", ")", "{", "switch", "v", ":=", "x", ".", "(", "type", ")", "{", "case", "[", "]", "byte", ":", "writeText", "(", "w", ",", "v", ",", "html", ")", "\n", "case", "string", ":", "writeText", "(", "w", ",", "[", "]", "byte", "(", "v", ")", ",", "html", ")", "\n", "default", ":", "if", "html", "{", "var", "buf", "bytes", ".", "Buffer", "\n", "fmt", ".", "Fprint", "(", "&", "buf", ",", "x", ")", "\n", "writeText", "(", "w", ",", "buf", ".", "Bytes", "(", ")", ",", "true", ")", "\n", "}", "else", "{", "fmt", ".", "Fprint", "(", "w", ",", "x", ")", "\n", "}", "\n", "}", "\n", "}" ]
// Write anything to w; optionally html-escaped.
[ "Write", "anything", "to", "w", ";", "optionally", "html", "-", "escaped", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L147-L162
train
perkeep/perkeep
website/pk-web/pkweb.go
writeText
func writeText(w io.Writer, text []byte, html bool) { if html { template.HTMLEscape(w, text) return } w.Write(text) }
go
func writeText(w io.Writer, text []byte, html bool) { if html { template.HTMLEscape(w, text) return } w.Write(text) }
[ "func", "writeText", "(", "w", "io", ".", "Writer", ",", "text", "[", "]", "byte", ",", "html", "bool", ")", "{", "if", "html", "{", "template", ".", "HTMLEscape", "(", "w", ",", "text", ")", "\n", "return", "\n", "}", "\n", "w", ".", "Write", "(", "text", ")", "\n", "}" ]
// Write text to w; optionally html-escaped.
[ "Write", "text", "to", "w", ";", "optionally", "html", "-", "escaped", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L165-L171
train
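writeAny and writeText funnel all HTML output through template.HTMLEscape. A tiny demonstration of that escaping path, with an invented helper name:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// escapeIf mirrors writeText: escape for HTML when asked, otherwise pass through.
func escapeIf(w *bytes.Buffer, text []byte, html bool) {
	if html {
		template.HTMLEscape(w, text)
		return
	}
	w.Write(text)
}

func main() {
	var plain, escaped bytes.Buffer
	escapeIf(&plain, []byte(`<b>"hi" & bye</b>`), false)
	escapeIf(&escaped, []byte(`<b>"hi" & bye</b>`), true)
	fmt.Println(plain.String())   // <b>"hi" & bye</b>
	fmt.Println(escaped.String()) // &lt;b&gt;&#34;hi&#34; &amp; bye&lt;/b&gt;
}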
perkeep/perkeep
website/pk-web/pkweb.go
redirectPath
func redirectPath(u *url.URL) string { // Redirect old gitweb URLs to gerrit. Example: // /code/?p=camlistore.git;a=commit;h=b0d2a8f0e5f27bbfc025a96ec3c7896b42d198ed if strings.HasPrefix(u.Path, "/code/") { m := gitwebCommit.FindStringSubmatch(u.RawQuery) if len(m) == 2 { return gerritURLPrefix + m[1] } } if strings.HasPrefix(u.Path, "/gw/") { path := strings.TrimPrefix(u.Path, "/gw/") if commitHash.MatchString(path) { // Assume it's a commit return gerritURLPrefix + path } return gerritURLPrefix + "master/" + path } if strings.HasPrefix(u.Path, "/docs/") { return "/doc/" + strings.TrimPrefix(u.Path, "/docs/") } // strip directory index files for _, x := range indexFiles { if strings.HasSuffix(u.Path, "/"+x) { return strings.TrimSuffix(u.Path, x) } } // strip common file extensions for _, x := range fileExtensions { if strings.HasSuffix(u.Path, x) { return strings.TrimSuffix(u.Path, x) } } return "" }
go
func redirectPath(u *url.URL) string { // Redirect old gitweb URLs to gerrit. Example: // /code/?p=camlistore.git;a=commit;h=b0d2a8f0e5f27bbfc025a96ec3c7896b42d198ed if strings.HasPrefix(u.Path, "/code/") { m := gitwebCommit.FindStringSubmatch(u.RawQuery) if len(m) == 2 { return gerritURLPrefix + m[1] } } if strings.HasPrefix(u.Path, "/gw/") { path := strings.TrimPrefix(u.Path, "/gw/") if commitHash.MatchString(path) { // Assume it's a commit return gerritURLPrefix + path } return gerritURLPrefix + "master/" + path } if strings.HasPrefix(u.Path, "/docs/") { return "/doc/" + strings.TrimPrefix(u.Path, "/docs/") } // strip directory index files for _, x := range indexFiles { if strings.HasSuffix(u.Path, "/"+x) { return strings.TrimSuffix(u.Path, x) } } // strip common file extensions for _, x := range fileExtensions { if strings.HasSuffix(u.Path, x) { return strings.TrimSuffix(u.Path, x) } } return "" }
[ "func", "redirectPath", "(", "u", "*", "url", ".", "URL", ")", "string", "{", "// Redirect old gitweb URLs to gerrit. Example:", "// /code/?p=camlistore.git;a=commit;h=b0d2a8f0e5f27bbfc025a96ec3c7896b42d198ed", "if", "strings", ".", "HasPrefix", "(", "u", ".", "Path", ",", "\"", "\"", ")", "{", "m", ":=", "gitwebCommit", ".", "FindStringSubmatch", "(", "u", ".", "RawQuery", ")", "\n", "if", "len", "(", "m", ")", "==", "2", "{", "return", "gerritURLPrefix", "+", "m", "[", "1", "]", "\n", "}", "\n", "}", "\n\n", "if", "strings", ".", "HasPrefix", "(", "u", ".", "Path", ",", "\"", "\"", ")", "{", "path", ":=", "strings", ".", "TrimPrefix", "(", "u", ".", "Path", ",", "\"", "\"", ")", "\n", "if", "commitHash", ".", "MatchString", "(", "path", ")", "{", "// Assume it's a commit", "return", "gerritURLPrefix", "+", "path", "\n", "}", "\n", "return", "gerritURLPrefix", "+", "\"", "\"", "+", "path", "\n", "}", "\n\n", "if", "strings", ".", "HasPrefix", "(", "u", ".", "Path", ",", "\"", "\"", ")", "{", "return", "\"", "\"", "+", "strings", ".", "TrimPrefix", "(", "u", ".", "Path", ",", "\"", "\"", ")", "\n", "}", "\n\n", "// strip directory index files", "for", "_", ",", "x", ":=", "range", "indexFiles", "{", "if", "strings", ".", "HasSuffix", "(", "u", ".", "Path", ",", "\"", "\"", "+", "x", ")", "{", "return", "strings", ".", "TrimSuffix", "(", "u", ".", "Path", ",", "x", ")", "\n", "}", "\n", "}", "\n\n", "// strip common file extensions", "for", "_", ",", "x", ":=", "range", "fileExtensions", "{", "if", "strings", ".", "HasSuffix", "(", "u", ".", "Path", ",", "x", ")", "{", "return", "strings", ".", "TrimSuffix", "(", "u", ".", "Path", ",", "x", ")", "\n", "}", "\n", "}", "\n\n", "return", "\"", "\"", "\n", "}" ]
// redirectPath returns the destination to redirect u to. An empty return value means don't redirect.
[ "empty", "return", "value", "means", "don", "t", "redirect", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L269-L307
train
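The /code/ branch of redirectPath hinges on pulling the commit hash out of the old gitweb query string. A self-contained sketch of that extraction; the regexp and the gerrit prefix below are hypothetical stand-ins for the package-level gitwebCommit and gerritURLPrefix, which are defined elsewhere in pkweb.go:

package main

import (
	"fmt"
	"net/url"
	"regexp"
	"strings"
)

// gitwebCommitSketch is a hypothetical stand-in for pkweb's gitwebCommit pattern.
var gitwebCommitSketch = regexp.MustCompile(`^p=camlistore.git;a=commit;h=([0-9a-f]{6,40})$`)

// oldGitwebTarget returns the gerrit target for an old /code/ gitweb URL,
// or "" if the URL doesn't match, mirroring the first branch of redirectPath.
func oldGitwebTarget(u *url.URL, gerritPrefix string) string {
	if !strings.HasPrefix(u.Path, "/code/") {
		return ""
	}
	if m := gitwebCommitSketch.FindStringSubmatch(u.RawQuery); len(m) == 2 {
		return gerritPrefix + m[1]
	}
	return ""
}

func main() {
	u, _ := url.Parse("/code/?p=camlistore.git;a=commit;h=b0d2a8f0e5f27bbfc025a96ec3c7896b42d198ed")
	fmt.Println(oldGitwebTarget(u, "https://example.org/gerrit/")) // hypothetical prefix
}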
perkeep/perkeep
website/pk-web/pkweb.go
serveFile
func serveFile(w http.ResponseWriter, r *http.Request, absPath string) { if !strings.HasSuffix(absPath, ".html") && !strings.HasSuffix(absPath, ".md") { http.ServeFile(w, r, absPath) return } data, err := ioutil.ReadFile(absPath) if err != nil { serveError(w, r, absPath, err) return } // AutoHeadingIDs is the only extension missing data = blackfriday.Run(data, blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.AutoHeadingIDs)) title := "" if m := h1TitlePattern.FindSubmatch(data); len(m) > 1 { title = string(m[1]) } servePage(w, r, pageParams{ title: title, content: data, }) }
go
func serveFile(w http.ResponseWriter, r *http.Request, absPath string) { if !strings.HasSuffix(absPath, ".html") && !strings.HasSuffix(absPath, ".md") { http.ServeFile(w, r, absPath) return } data, err := ioutil.ReadFile(absPath) if err != nil { serveError(w, r, absPath, err) return } // AutoHeadingIDs is the only extension missing data = blackfriday.Run(data, blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.AutoHeadingIDs)) title := "" if m := h1TitlePattern.FindSubmatch(data); len(m) > 1 { title = string(m[1]) } servePage(w, r, pageParams{ title: title, content: data, }) }
[ "func", "serveFile", "(", "w", "http", ".", "ResponseWriter", ",", "r", "*", "http", ".", "Request", ",", "absPath", "string", ")", "{", "if", "!", "strings", ".", "HasSuffix", "(", "absPath", ",", "\"", "\"", ")", "&&", "!", "strings", ".", "HasSuffix", "(", "absPath", ",", "\"", "\"", ")", "{", "http", ".", "ServeFile", "(", "w", ",", "r", ",", "absPath", ")", "\n", "return", "\n", "}", "\n\n", "data", ",", "err", ":=", "ioutil", ".", "ReadFile", "(", "absPath", ")", "\n", "if", "err", "!=", "nil", "{", "serveError", "(", "w", ",", "r", ",", "absPath", ",", "err", ")", "\n", "return", "\n", "}", "\n\n", "// AutoHeadingIDs is the only extension missing", "data", "=", "blackfriday", ".", "Run", "(", "data", ",", "blackfriday", ".", "WithExtensions", "(", "blackfriday", ".", "CommonExtensions", "|", "blackfriday", ".", "AutoHeadingIDs", ")", ")", "\n\n", "title", ":=", "\"", "\"", "\n", "if", "m", ":=", "h1TitlePattern", ".", "FindSubmatch", "(", "data", ")", ";", "len", "(", "m", ")", ">", "1", "{", "title", "=", "string", "(", "m", "[", "1", "]", ")", "\n", "}", "\n\n", "servePage", "(", "w", ",", "r", ",", "pageParams", "{", "title", ":", "title", ",", "content", ":", "data", ",", "}", ")", "\n", "}" ]
// serveFile serves a file from disk, converting any markdown to HTML.
[ "serveFile", "serves", "a", "file", "from", "disk", "converting", "any", "markdown", "to", "HTML", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L436-L460
train
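For .md files, serveFile renders with blackfriday and then scrapes the page title from the first <h1> of the generated HTML. A trimmed-down sketch of those two steps; the import path is assumed to be the blackfriday v2 module, and the regexp is a hypothetical stand-in for h1TitlePattern:

package main

import (
	"fmt"
	"regexp"

	"github.com/russross/blackfriday/v2" // assumed import path for the blackfriday v2 API used above
)

// h1TitleSketch is a hypothetical stand-in for pkweb's h1TitlePattern.
var h1TitleSketch = regexp.MustCompile(`<h1[^>]*>(.+?)</h1>`)

// renderDoc converts markdown to HTML and pulls the page title out of the
// first <h1>, the same two steps serveFile performs for .md files.
func renderDoc(md []byte) (title string, html []byte) {
	html = blackfriday.Run(md, blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.AutoHeadingIDs))
	if m := h1TitleSketch.FindSubmatch(html); len(m) > 1 {
		title = string(m[1])
	}
	return title, html
}

func main() {
	title, html := renderDoc([]byte("# Overview\n\nSome *docs*.\n"))
	fmt.Println(title)
	fmt.Println(string(html))
}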
perkeep/perkeep
website/pk-web/pkweb.go
runAsChild
func runAsChild(res string) { cmdName, err := exec.LookPath(res) if err != nil { log.Fatalf("Could not find %v in $PATH: %v", res, err) } cmd := exec.Command(cmdName) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout log.Printf("Running %v", res) if err := cmd.Start(); err != nil { log.Fatalf("Program %v failed to start: %v", res, err) } go func() { if err := cmd.Wait(); err != nil { log.Fatalf("Program %s did not end successfully: %v", res, err) } }() }
go
func runAsChild(res string) { cmdName, err := exec.LookPath(res) if err != nil { log.Fatalf("Could not find %v in $PATH: %v", res, err) } cmd := exec.Command(cmdName) cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout log.Printf("Running %v", res) if err := cmd.Start(); err != nil { log.Fatalf("Program %v failed to start: %v", res, err) } go func() { if err := cmd.Wait(); err != nil { log.Fatalf("Program %s did not end successfully: %v", res, err) } }() }
[ "func", "runAsChild", "(", "res", "string", ")", "{", "cmdName", ",", "err", ":=", "exec", ".", "LookPath", "(", "res", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Fatalf", "(", "\"", "\"", ",", "res", ",", "err", ")", "\n", "}", "\n", "cmd", ":=", "exec", ".", "Command", "(", "cmdName", ")", "\n", "cmd", ".", "Stderr", "=", "os", ".", "Stderr", "\n", "cmd", ".", "Stdout", "=", "os", ".", "Stdout", "\n", "log", ".", "Printf", "(", "\"", "\"", ",", "res", ")", "\n", "if", "err", ":=", "cmd", ".", "Start", "(", ")", ";", "err", "!=", "nil", "{", "log", ".", "Fatalf", "(", "\"", "\"", ",", "res", ",", "err", ")", "\n", "}", "\n", "go", "func", "(", ")", "{", "if", "err", ":=", "cmd", ".", "Wait", "(", ")", ";", "err", "!=", "nil", "{", "log", ".", "Fatalf", "(", "\"", "\"", ",", "res", ",", "err", ")", "\n", "}", "\n", "}", "(", ")", "\n", "}" ]
// runAsChild runs res as a child process and // does not wait for it to finish.
[ "runAsChild", "runs", "res", "as", "a", "child", "process", "and", "does", "not", "wait", "for", "it", "to", "finish", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L490-L507
train
perkeep/perkeep
website/pk-web/pkweb.go
runDemoBlobServerContainer
func runDemoBlobServerContainer(name string) error { removeContainer(name) cmd := exec.Command("docker", "run", "--rm", "--name="+name, "-e", "CAMLI_ROOT="+prodSrcDir+"/website/blobserver-example/root", "-e", "CAMLI_PASSWORD="+randHex(20), "-v", pkSrcDir()+":"+prodSrcDir, "--net=host", "--workdir="+prodSrcDir, "camlistore/demoblobserver", "camlistored", "--openbrowser=false", "--listen=:3179", "--configfile="+prodSrcDir+"/website/blobserver-example/example-blobserver-config.json") stderr := &writerutil.PrefixSuffixSaver{N: 32 << 10} cmd.Stderr = stderr if err := cmd.Run(); err != nil { return fmt.Errorf("failed to run demo blob server: %v, stderr: %v", err, string(stderr.Bytes())) } return nil }
go
func runDemoBlobServerContainer(name string) error { removeContainer(name) cmd := exec.Command("docker", "run", "--rm", "--name="+name, "-e", "CAMLI_ROOT="+prodSrcDir+"/website/blobserver-example/root", "-e", "CAMLI_PASSWORD="+randHex(20), "-v", pkSrcDir()+":"+prodSrcDir, "--net=host", "--workdir="+prodSrcDir, "camlistore/demoblobserver", "camlistored", "--openbrowser=false", "--listen=:3179", "--configfile="+prodSrcDir+"/website/blobserver-example/example-blobserver-config.json") stderr := &writerutil.PrefixSuffixSaver{N: 32 << 10} cmd.Stderr = stderr if err := cmd.Run(); err != nil { return fmt.Errorf("failed to run demo blob server: %v, stderr: %v", err, string(stderr.Bytes())) } return nil }
[ "func", "runDemoBlobServerContainer", "(", "name", "string", ")", "error", "{", "removeContainer", "(", "name", ")", "\n", "cmd", ":=", "exec", ".", "Command", "(", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", "+", "name", ",", "\"", "\"", ",", "\"", "\"", "+", "prodSrcDir", "+", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", "+", "randHex", "(", "20", ")", ",", "\"", "\"", ",", "pkSrcDir", "(", ")", "+", "\"", "\"", "+", "prodSrcDir", ",", "\"", "\"", ",", "\"", "\"", "+", "prodSrcDir", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", "+", "prodSrcDir", "+", "\"", "\"", ")", "\n", "stderr", ":=", "&", "writerutil", ".", "PrefixSuffixSaver", "{", "N", ":", "32", "<<", "10", "}", "\n", "cmd", ".", "Stderr", "=", "stderr", "\n", "if", "err", ":=", "cmd", ".", "Run", "(", ")", ";", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ",", "string", "(", "stderr", ".", "Bytes", "(", ")", ")", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// runDemoBlobServerContainer runs the demo blobserver as name in a docker // container. It is not run in daemon mode, so it never returns if successful.
[ "runDemoBlobServerContainer", "runs", "the", "demo", "blobserver", "as", "name", "in", "a", "docker", "container", ".", "It", "is", "not", "run", "in", "daemon", "mode", "so", "it", "never", "returns", "if", "successful", "." ]
e28bbbd1588d64df8ab7a82393afd39d64c061f7
https://github.com/perkeep/perkeep/blob/e28bbbd1588d64df8ab7a82393afd39d64c061f7/website/pk-web/pkweb.go#L704-L725
train
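The PrefixSuffixSaver used above keeps only the first and last N bytes written to it, so the error message stays bounded even when the docker child is noisy. A sketch of the same stderr wiring around a generic command; the writerutil import path is assumed, and the shell command is invented:

package main

import (
	"fmt"
	"os/exec"

	"perkeep.org/internal/writerutil" // assumed import path for the PrefixSuffixSaver used above
)

func main() {
	// Keep at most the first and last 32 KiB of stderr, like
	// runDemoBlobServerContainer does for the docker child process.
	stderr := &writerutil.PrefixSuffixSaver{N: 32 << 10}
	cmd := exec.Command("sh", "-c", "echo some diagnostic output >&2; exit 1") // hypothetical child command
	cmd.Stderr = stderr
	if err := cmd.Run(); err != nil {
		fmt.Printf("child failed: %v, stderr: %s\n", err, stderr.Bytes())
	}
}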