Dataset columns:
repo: string, lengths 5–54
path: string, lengths 4–155
func_name: string, lengths 1–118
original_string: string, lengths 52–85.5k
language: string, 1 distinct value
code: string, lengths 52–85.5k
code_tokens: sequence
docstring: string, lengths 6–2.61k
docstring_tokens: sequence
sha: string, length 40
url: string, lengths 85–252
partition: string, 1 distinct value
btcsuite/btcd
database/ffldb/dbcache.go
flush
func (c *dbCache) flush() error {
	c.lastFlush = time.Now()

	// Sync the current write file associated with the block store. This is
	// necessary before writing the metadata to prevent the case where the
	// metadata contains information about a block which actually hasn't
	// been written yet in unexpected shutdown scenarios.
	if err := c.store.syncBlocks(); err != nil {
		return err
	}

	// Since the cached keys to be added and removed use an immutable treap,
	// a snapshot is simply obtaining the root of the tree under the lock
	// which is used to atomically swap the root.
	c.cacheLock.RLock()
	cachedKeys := c.cachedKeys
	cachedRemove := c.cachedRemove
	c.cacheLock.RUnlock()

	// Nothing to do if there is no data to flush.
	if cachedKeys.Len() == 0 && cachedRemove.Len() == 0 {
		return nil
	}

	// Perform all leveldb updates using an atomic transaction.
	if err := c.commitTreaps(cachedKeys, cachedRemove); err != nil {
		return err
	}

	// Clear the cache since it has been flushed.
	c.cacheLock.Lock()
	c.cachedKeys = treap.NewImmutable()
	c.cachedRemove = treap.NewImmutable()
	c.cacheLock.Unlock()

	return nil
}
go
[ "func", "(", "c", "*", "dbCache", ")", "flush", "(", ")", "error", "{", "c", ".", "lastFlush", "=", "time", ".", "Now", "(", ")", "\n\n", "// Sync the current write file associated with the block store. This is", "// necessary before writing the metadata to prevent the case where the", "// metadata contains information about a block which actually hasn't", "// been written yet in unexpected shutdown scenarios.", "if", "err", ":=", "c", ".", "store", ".", "syncBlocks", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Since the cached keys to be added and removed use an immutable treap,", "// a snapshot is simply obtaining the root of the tree under the lock", "// which is used to atomically swap the root.", "c", ".", "cacheLock", ".", "RLock", "(", ")", "\n", "cachedKeys", ":=", "c", ".", "cachedKeys", "\n", "cachedRemove", ":=", "c", ".", "cachedRemove", "\n", "c", ".", "cacheLock", ".", "RUnlock", "(", ")", "\n\n", "// Nothing to do if there is no data to flush.", "if", "cachedKeys", ".", "Len", "(", ")", "==", "0", "&&", "cachedRemove", ".", "Len", "(", ")", "==", "0", "{", "return", "nil", "\n", "}", "\n\n", "// Perform all leveldb updates using an atomic transaction.", "if", "err", ":=", "c", ".", "commitTreaps", "(", "cachedKeys", ",", "cachedRemove", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Clear the cache since it has been flushed.", "c", ".", "cacheLock", ".", "Lock", "(", ")", "\n", "c", ".", "cachedKeys", "=", "treap", ".", "NewImmutable", "(", ")", "\n", "c", ".", "cachedRemove", "=", "treap", ".", "NewImmutable", "(", ")", "\n", "c", ".", "cacheLock", ".", "Unlock", "(", ")", "\n\n", "return", "nil", "\n", "}" ]
// flush flushes the database cache to persistent storage. This involves syncing
// the block store and replaying all transactions that have been applied to the
// cache to the underlying database.
//
// This function MUST be called with the database write lock held.
[ "flush", "flushes", "the", "database", "cache", "to", "persistent", "storage", ".", "This", "involes", "syncing", "the", "block", "store", "and", "replaying", "all", "transactions", "that", "have", "been", "applied", "to", "the", "cache", "to", "the", "underlying", "database", ".", "This", "function", "MUST", "be", "called", "with", "the", "database", "write", "lock", "held", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/database/ffldb/dbcache.go#L488-L524
train
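The flush above (and commitTx further down) relies on a copy-on-write idiom: the immutable treap roots are snapshotted under a read lock, work happens without any lock held, and fresh roots are swapped in under the write lock. The following is a minimal, hypothetical sketch of that same pattern using a plain map in place of the treap; none of these names come from btcd.

package main

import (
	"fmt"
	"sync"
)

// cowCache illustrates the snapshot-then-swap idiom used by dbCache.flush:
// the published version is only ever read or replaced under the lock, and a
// new version is built off to the side without holding it.
type cowCache struct {
	mu   sync.RWMutex
	data map[string]string // treated as immutable once published
}

func (c *cowCache) commit(pending map[string]string) {
	// Snapshot the currently published version under the read lock.
	c.mu.RLock()
	cur := c.data
	c.mu.RUnlock()

	// Build a new version without holding any lock.
	next := make(map[string]string, len(cur)+len(pending))
	for k, v := range cur {
		next[k] = v
	}
	for k, v := range pending {
		next[k] = v
	}

	// Atomically publish the new version under the write lock.
	c.mu.Lock()
	c.data = next
	c.mu.Unlock()
}

func main() {
	c := &cowCache{data: map[string]string{}}
	c.commit(map[string]string{"a": "1"})
	fmt.Println(c.data["a"]) // prints 1
}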
btcsuite/btcd
database/ffldb/dbcache.go
needsFlush
func (c *dbCache) needsFlush(tx *transaction) bool {
	// A flush is needed when more time has elapsed than the configured
	// flush interval.
	if time.Since(c.lastFlush) > c.flushInterval {
		return true
	}

	// A flush is needed when the size of the database cache exceeds the
	// specified max cache size. The total calculated size is multiplied by
	// 1.5 here to account for additional memory consumption that will be
	// needed during the flush as well as old nodes in the cache that are
	// referenced by the snapshot used by the transaction.
	snap := tx.snapshot
	totalSize := snap.pendingKeys.Size() + snap.pendingRemove.Size()
	totalSize = uint64(float64(totalSize) * 1.5)
	return totalSize > c.maxSize
}
go
[ "func", "(", "c", "*", "dbCache", ")", "needsFlush", "(", "tx", "*", "transaction", ")", "bool", "{", "// A flush is needed when more time has elapsed than the configured", "// flush interval.", "if", "time", ".", "Since", "(", "c", ".", "lastFlush", ")", ">", "c", ".", "flushInterval", "{", "return", "true", "\n", "}", "\n\n", "// A flush is needed when the size of the database cache exceeds the", "// specified max cache size. The total calculated size is multiplied by", "// 1.5 here to account for additional memory consumption that will be", "// needed during the flush as well as old nodes in the cache that are", "// referenced by the snapshot used by the transaction.", "snap", ":=", "tx", ".", "snapshot", "\n", "totalSize", ":=", "snap", ".", "pendingKeys", ".", "Size", "(", ")", "+", "snap", ".", "pendingRemove", ".", "Size", "(", ")", "\n", "totalSize", "=", "uint64", "(", "float64", "(", "totalSize", ")", "*", "1.5", ")", "\n", "return", "totalSize", ">", "c", ".", "maxSize", "\n", "}" ]
// needsFlush returns whether or not the database cache needs to be flushed to
// persistent storage based on its current size, whether or not adding all of
// the entries in the passed database transaction would cause it to exceed the
// configured limit, and how much time has elapsed since the last time the cache
// was flushed.
//
// This function MUST be called with the database write lock held.
[ "needsFlush", "returns", "whether", "or", "not", "the", "database", "cache", "needs", "to", "be", "flushed", "to", "persistent", "storage", "based", "on", "its", "current", "size", "whether", "or", "not", "adding", "all", "of", "the", "entries", "in", "the", "passed", "database", "transaction", "would", "cause", "it", "to", "exceed", "the", "configured", "limit", "and", "how", "much", "time", "has", "elapsed", "since", "the", "last", "time", "the", "cache", "was", "flushed", ".", "This", "function", "MUST", "be", "called", "with", "the", "database", "write", "lock", "held", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/database/ffldb/dbcache.go#L533-L549
train
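needsFlush combines a time-based trigger with a size-based one that includes a 1.5x head-room factor. A standalone sketch of that heuristic follows; the function and variable names are hypothetical, and only the two conditions mirror the code above.

package main

import (
	"fmt"
	"time"
)

// shouldFlush reports whether a flush is due: either the flush interval has
// elapsed, or the pending data (scaled by 1.5 to allow for flush-time
// overhead) exceeds the configured maximum cache size.
func shouldFlush(lastFlush time.Time, interval time.Duration, pendingBytes, maxSize uint64) bool {
	if time.Since(lastFlush) > interval {
		return true
	}
	return uint64(float64(pendingBytes)*1.5) > maxSize
}

func main() {
	stale := time.Now().Add(-2 * time.Minute)
	fmt.Println(shouldFlush(stale, time.Minute, 10<<20, 100<<20))      // true: interval elapsed
	fmt.Println(shouldFlush(time.Now(), time.Minute, 80<<20, 100<<20)) // true: 1.5*80MiB > 100MiB
	fmt.Println(shouldFlush(time.Now(), time.Minute, 10<<20, 100<<20)) // false
}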
btcsuite/btcd
database/ffldb/dbcache.go
commitTx
func (c *dbCache) commitTx(tx *transaction) error {
	// Flush the cache and write the current transaction directly to the
	// database if a flush is needed.
	if c.needsFlush(tx) {
		if err := c.flush(); err != nil {
			return err
		}

		// Perform all leveldb updates using an atomic transaction.
		err := c.commitTreaps(tx.pendingKeys, tx.pendingRemove)
		if err != nil {
			return err
		}

		// Clear the transaction entries since they have been committed.
		tx.pendingKeys = nil
		tx.pendingRemove = nil
		return nil
	}

	// At this point a database flush is not needed, so atomically commit
	// the transaction to the cache.

	// Since the cached keys to be added and removed use an immutable treap,
	// a snapshot is simply obtaining the root of the tree under the lock
	// which is used to atomically swap the root.
	c.cacheLock.RLock()
	newCachedKeys := c.cachedKeys
	newCachedRemove := c.cachedRemove
	c.cacheLock.RUnlock()

	// Apply every key to add in the database transaction to the cache.
	tx.pendingKeys.ForEach(func(k, v []byte) bool {
		newCachedRemove = newCachedRemove.Delete(k)
		newCachedKeys = newCachedKeys.Put(k, v)
		return true
	})
	tx.pendingKeys = nil

	// Apply every key to remove in the database transaction to the cache.
	tx.pendingRemove.ForEach(func(k, v []byte) bool {
		newCachedKeys = newCachedKeys.Delete(k)
		newCachedRemove = newCachedRemove.Put(k, nil)
		return true
	})
	tx.pendingRemove = nil

	// Atomically replace the immutable treaps which hold the cached keys to
	// add and delete.
	c.cacheLock.Lock()
	c.cachedKeys = newCachedKeys
	c.cachedRemove = newCachedRemove
	c.cacheLock.Unlock()
	return nil
}
go
[ "func", "(", "c", "*", "dbCache", ")", "commitTx", "(", "tx", "*", "transaction", ")", "error", "{", "// Flush the cache and write the current transaction directly to the", "// database if a flush is needed.", "if", "c", ".", "needsFlush", "(", "tx", ")", "{", "if", "err", ":=", "c", ".", "flush", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Perform all leveldb updates using an atomic transaction.", "err", ":=", "c", ".", "commitTreaps", "(", "tx", ".", "pendingKeys", ",", "tx", ".", "pendingRemove", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Clear the transaction entries since they have been committed.", "tx", ".", "pendingKeys", "=", "nil", "\n", "tx", ".", "pendingRemove", "=", "nil", "\n", "return", "nil", "\n", "}", "\n\n", "// At this point a database flush is not needed, so atomically commit", "// the transaction to the cache.", "// Since the cached keys to be added and removed use an immutable treap,", "// a snapshot is simply obtaining the root of the tree under the lock", "// which is used to atomically swap the root.", "c", ".", "cacheLock", ".", "RLock", "(", ")", "\n", "newCachedKeys", ":=", "c", ".", "cachedKeys", "\n", "newCachedRemove", ":=", "c", ".", "cachedRemove", "\n", "c", ".", "cacheLock", ".", "RUnlock", "(", ")", "\n\n", "// Apply every key to add in the database transaction to the cache.", "tx", ".", "pendingKeys", ".", "ForEach", "(", "func", "(", "k", ",", "v", "[", "]", "byte", ")", "bool", "{", "newCachedRemove", "=", "newCachedRemove", ".", "Delete", "(", "k", ")", "\n", "newCachedKeys", "=", "newCachedKeys", ".", "Put", "(", "k", ",", "v", ")", "\n", "return", "true", "\n", "}", ")", "\n", "tx", ".", "pendingKeys", "=", "nil", "\n\n", "// Apply every key to remove in the database transaction to the cache.", "tx", ".", "pendingRemove", ".", "ForEach", "(", "func", "(", "k", ",", "v", "[", "]", "byte", ")", "bool", "{", "newCachedKeys", "=", "newCachedKeys", ".", "Delete", "(", "k", ")", "\n", "newCachedRemove", "=", "newCachedRemove", ".", "Put", "(", "k", ",", "nil", ")", "\n", "return", "true", "\n", "}", ")", "\n", "tx", ".", "pendingRemove", "=", "nil", "\n\n", "// Atomically replace the immutable treaps which hold the cached keys to", "// add and delete.", "c", ".", "cacheLock", ".", "Lock", "(", ")", "\n", "c", ".", "cachedKeys", "=", "newCachedKeys", "\n", "c", ".", "cachedRemove", "=", "newCachedRemove", "\n", "c", ".", "cacheLock", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}" ]
// commitTx atomically adds all of the pending keys to add and remove into the
// database cache. When adding the pending keys would cause the size of the
// cache to exceed the max cache size, or the time since the last flush exceeds
// the configured flush interval, the cache will be flushed to the underlying
// persistent database.
//
// This is an atomic operation with respect to the cache in that either all of
// the pending keys to add and remove in the transaction will be applied or none
// of them will.
//
// The database cache itself might be flushed to the underlying persistent
// database even if the transaction fails to apply, but it will only be the
// state of the cache without the transaction applied.
//
// This function MUST be called during a database write transaction which in
// turn implies the database write lock will be held.
[ "commitTx", "atomically", "adds", "all", "of", "the", "pending", "keys", "to", "add", "and", "remove", "into", "the", "database", "cache", ".", "When", "adding", "the", "pending", "keys", "would", "cause", "the", "size", "of", "the", "cache", "to", "exceed", "the", "max", "cache", "size", "or", "the", "time", "since", "the", "last", "flush", "exceeds", "the", "configured", "flush", "interval", "the", "cache", "will", "be", "flushed", "to", "the", "underlying", "persistent", "database", ".", "This", "is", "an", "atomic", "operation", "with", "respect", "to", "the", "cache", "in", "that", "either", "all", "of", "the", "pending", "keys", "to", "add", "and", "remove", "in", "the", "transaction", "will", "be", "applied", "or", "none", "of", "them", "will", ".", "The", "database", "cache", "itself", "might", "be", "flushed", "to", "the", "underlying", "persistent", "database", "even", "if", "the", "transaction", "fails", "to", "apply", "but", "it", "will", "only", "be", "the", "state", "of", "the", "cache", "without", "the", "transaction", "applied", ".", "This", "function", "MUST", "be", "called", "during", "a", "database", "write", "transaction", "which", "in", "turn", "implies", "the", "database", "write", "lock", "will", "be", "held", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/database/ffldb/dbcache.go#L567-L621
train
btcsuite/btcd
database/ffldb/dbcache.go
Close
func (c *dbCache) Close() error {
	// Flush any outstanding cached entries to disk.
	if err := c.flush(); err != nil {
		// Even if there is an error while flushing, attempt to close
		// the underlying database. The error is ignored since it would
		// mask the flush error.
		_ = c.ldb.Close()
		return err
	}

	// Close the underlying leveldb database.
	if err := c.ldb.Close(); err != nil {
		str := "failed to close underlying leveldb database"
		return convertErr(str, err)
	}

	return nil
}
go
[ "func", "(", "c", "*", "dbCache", ")", "Close", "(", ")", "error", "{", "// Flush any outstanding cached entries to disk.", "if", "err", ":=", "c", ".", "flush", "(", ")", ";", "err", "!=", "nil", "{", "// Even if there is an error while flushing, attempt to close", "// the underlying database. The error is ignored since it would", "// mask the flush error.", "_", "=", "c", ".", "ldb", ".", "Close", "(", ")", "\n", "return", "err", "\n", "}", "\n\n", "// Close the underlying leveldb database.", "if", "err", ":=", "c", ".", "ldb", ".", "Close", "(", ")", ";", "err", "!=", "nil", "{", "str", ":=", "\"", "\"", "\n", "return", "convertErr", "(", "str", ",", "err", ")", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// Close cleanly shuts down the database cache by syncing all data and closing
// the underlying leveldb database.
//
// This function MUST be called with the database write lock held.
[ "Close", "cleanly", "shuts", "down", "the", "database", "cache", "by", "syncing", "all", "data", "and", "closing", "the", "underlying", "leveldb", "database", ".", "This", "function", "MUST", "be", "called", "with", "the", "database", "write", "lock", "held", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/database/ffldb/dbcache.go#L627-L644
train
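Close gives the flush error precedence over the secondary close error so the primary failure is not masked. A generic sketch of that precedence pattern, using io.Closer and names that are not from btcd:

package main

import (
	"errors"
	"fmt"
	"io"
)

// closeAfter runs work and then closes c, preferring the work error over the
// close error, the same precedence Close above gives the flush error over
// ldb.Close.
func closeAfter(c io.Closer, work func() error) error {
	if err := work(); err != nil {
		_ = c.Close() // best effort; keep the primary error
		return err
	}
	return c.Close()
}

type nopCloser struct{}

func (nopCloser) Close() error { return nil }

func main() {
	err := closeAfter(nopCloser{}, func() error { return errors.New("flush failed") })
	fmt.Println(err) // flush failed
}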
btcsuite/btcd
database/ffldb/dbcache.go
newDbCache
func newDbCache(ldb *leveldb.DB, store *blockStore, maxSize uint64, flushIntervalSecs uint32) *dbCache {
	return &dbCache{
		ldb:           ldb,
		store:         store,
		maxSize:       maxSize,
		flushInterval: time.Second * time.Duration(flushIntervalSecs),
		lastFlush:     time.Now(),
		cachedKeys:    treap.NewImmutable(),
		cachedRemove:  treap.NewImmutable(),
	}
}
go
[ "func", "newDbCache", "(", "ldb", "*", "leveldb", ".", "DB", ",", "store", "*", "blockStore", ",", "maxSize", "uint64", ",", "flushIntervalSecs", "uint32", ")", "*", "dbCache", "{", "return", "&", "dbCache", "{", "ldb", ":", "ldb", ",", "store", ":", "store", ",", "maxSize", ":", "maxSize", ",", "flushInterval", ":", "time", ".", "Second", "*", "time", ".", "Duration", "(", "flushIntervalSecs", ")", ",", "lastFlush", ":", "time", ".", "Now", "(", ")", ",", "cachedKeys", ":", "treap", ".", "NewImmutable", "(", ")", ",", "cachedRemove", ":", "treap", ".", "NewImmutable", "(", ")", ",", "}", "\n", "}" ]
// newDbCache returns a new database cache instance backed by the provided
// leveldb instance. The cache will be flushed to leveldb when its size exceeds
// the provided max size or when it has been longer than the provided interval
// since the last flush.
[ "newDbCache", "returns", "a", "new", "database", "cache", "instance", "backed", "by", "the", "provided", "leveldb", "instance", ".", "The", "cache", "will", "be", "flushed", "to", "leveldb", "when", "the", "max", "size", "exceeds", "the", "provided", "value", "or", "it", "has", "been", "longer", "than", "the", "provided", "interval", "since", "the", "last", "flush", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/database/ffldb/dbcache.go#L650-L660
train
btcsuite/btcd
blockchain/notifications.go
String
func (n NotificationType) String() string {
	if s, ok := notificationTypeStrings[n]; ok {
		return s
	}
	return fmt.Sprintf("Unknown Notification Type (%d)", int(n))
}
go
[ "func", "(", "n", "NotificationType", ")", "String", "(", ")", "string", "{", "if", "s", ",", "ok", ":=", "notificationTypeStrings", "[", "n", "]", ";", "ok", "{", "return", "s", "\n", "}", "\n", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "int", "(", "n", ")", ")", "\n", "}" ]
// String returns the NotificationType in human-readable form.
[ "String", "returns", "the", "NotificationType", "in", "human", "-", "readable", "form", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/notifications.go#L43-L48
train
btcsuite/btcd
blockchain/notifications.go
Subscribe
func (b *BlockChain) Subscribe(callback NotificationCallback) {
	b.notificationsLock.Lock()
	b.notifications = append(b.notifications, callback)
	b.notificationsLock.Unlock()
}
go
[ "func", "(", "b", "*", "BlockChain", ")", "Subscribe", "(", "callback", "NotificationCallback", ")", "{", "b", ".", "notificationsLock", ".", "Lock", "(", ")", "\n", "b", ".", "notifications", "=", "append", "(", "b", ".", "notifications", ",", "callback", ")", "\n", "b", ".", "notificationsLock", ".", "Unlock", "(", ")", "\n", "}" ]
// Subscribe to block chain notifications. Registers a callback to be executed
// when various events take place. See the documentation on Notification and
// NotificationType for details on the types and contents of notifications.
[ "Subscribe", "to", "block", "chain", "notifications", ".", "Registers", "a", "callback", "to", "be", "executed", "when", "various", "events", "take", "place", ".", "See", "the", "documentation", "on", "Notification", "and", "NotificationType", "for", "details", "on", "the", "types", "and", "contents", "of", "notifications", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/notifications.go#L64-L68
train
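A hedged usage sketch for Subscribe, assuming an already-constructed *blockchain.BlockChain. The Notification type, the NotificationCallback signature, and the NTBlockConnected constant come from the blockchain package; watchBlocks itself and the logging are purely illustrative.

package example

import (
	"log"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcutil"
)

// watchBlocks registers a callback that logs every block connected to the
// main chain. The callback receives all notification types, so it filters
// on Type before using Data.
func watchBlocks(chain *blockchain.BlockChain) {
	chain.Subscribe(func(n *blockchain.Notification) {
		if n.Type != blockchain.NTBlockConnected {
			return
		}
		block, ok := n.Data.(*btcutil.Block)
		if !ok {
			return
		}
		log.Printf("connected block %v (height %d)", block.Hash(), block.Height())
	})
}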
btcsuite/btcd
blockchain/notifications.go
sendNotification
func (b *BlockChain) sendNotification(typ NotificationType, data interface{}) {
	// Generate and send the notification.
	n := Notification{Type: typ, Data: data}
	b.notificationsLock.RLock()
	for _, callback := range b.notifications {
		callback(&n)
	}
	b.notificationsLock.RUnlock()
}
go
[ "func", "(", "b", "*", "BlockChain", ")", "sendNotification", "(", "typ", "NotificationType", ",", "data", "interface", "{", "}", ")", "{", "// Generate and send the notification.", "n", ":=", "Notification", "{", "Type", ":", "typ", ",", "Data", ":", "data", "}", "\n", "b", ".", "notificationsLock", ".", "RLock", "(", ")", "\n", "for", "_", ",", "callback", ":=", "range", "b", ".", "notifications", "{", "callback", "(", "&", "n", ")", "\n", "}", "\n", "b", ".", "notificationsLock", ".", "RUnlock", "(", ")", "\n", "}" ]
// sendNotification sends a notification with the passed type and data if the
// caller requested notifications by providing a callback function in the call
// to New.
[ "sendNotification", "sends", "a", "notification", "with", "the", "passed", "type", "and", "data", "if", "the", "caller", "requested", "notifications", "by", "providing", "a", "callback", "function", "in", "the", "call", "to", "New", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/notifications.go#L73-L81
train
btcsuite/btcd
btcjson/cmdinfo.go
CmdMethod
func CmdMethod(cmd interface{}) (string, error) {
	// Look up the cmd type and error out if not registered.
	rt := reflect.TypeOf(cmd)
	registerLock.RLock()
	method, ok := concreteTypeToMethod[rt]
	registerLock.RUnlock()
	if !ok {
		str := fmt.Sprintf("%q is not registered", method)
		return "", makeError(ErrUnregisteredMethod, str)
	}

	return method, nil
}
go
[ "func", "CmdMethod", "(", "cmd", "interface", "{", "}", ")", "(", "string", ",", "error", ")", "{", "// Look up the cmd type and error out if not registered.", "rt", ":=", "reflect", ".", "TypeOf", "(", "cmd", ")", "\n", "registerLock", ".", "RLock", "(", ")", "\n", "method", ",", "ok", ":=", "concreteTypeToMethod", "[", "rt", "]", "\n", "registerLock", ".", "RUnlock", "(", ")", "\n", "if", "!", "ok", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "method", ")", "\n", "return", "\"", "\"", ",", "makeError", "(", "ErrUnregisteredMethod", ",", "str", ")", "\n", "}", "\n\n", "return", "method", ",", "nil", "\n", "}" ]
// CmdMethod returns the method for the passed command. The provided command
// type must be a registered type. All commands provided by this package are
// registered by default.
[ "CmdMethod", "returns", "the", "method", "for", "the", "passed", "command", ".", "The", "provided", "command", "type", "must", "be", "a", "registered", "type", ".", "All", "commands", "provided", "by", "this", "package", "are", "registered", "by", "default", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/btcjson/cmdinfo.go#L16-L28
train
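A hedged example of calling CmdMethod from outside the package; NewGetBlockCountCmd is one of the command constructors btcjson registers by default, and the wrapper function is illustrative only.

package example

import (
	"fmt"

	"github.com/btcsuite/btcd/btcjson"
)

// printMethod resolves the JSON-RPC method name for a registered command
// struct; for *btcjson.GetBlockCountCmd this prints "getblockcount".
func printMethod() error {
	method, err := btcjson.CmdMethod(btcjson.NewGetBlockCountCmd())
	if err != nil {
		return err
	}
	fmt.Println(method)
	return nil
}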
btcsuite/btcd
btcjson/cmdinfo.go
MethodUsageFlags
func MethodUsageFlags(method string) (UsageFlag, error) {
	// Look up details about the provided method and error out if not
	// registered.
	registerLock.RLock()
	info, ok := methodToInfo[method]
	registerLock.RUnlock()
	if !ok {
		str := fmt.Sprintf("%q is not registered", method)
		return 0, makeError(ErrUnregisteredMethod, str)
	}

	return info.flags, nil
}
go
[ "func", "MethodUsageFlags", "(", "method", "string", ")", "(", "UsageFlag", ",", "error", ")", "{", "// Look up details about the provided method and error out if not", "// registered.", "registerLock", ".", "RLock", "(", ")", "\n", "info", ",", "ok", ":=", "methodToInfo", "[", "method", "]", "\n", "registerLock", ".", "RUnlock", "(", ")", "\n", "if", "!", "ok", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "method", ")", "\n", "return", "0", ",", "makeError", "(", "ErrUnregisteredMethod", ",", "str", ")", "\n", "}", "\n\n", "return", "info", ".", "flags", ",", "nil", "\n", "}" ]
// MethodUsageFlags returns the usage flags for the passed command method. The
// provided method must be associated with a registered type. All commands
// provided by this package are registered by default.
[ "MethodUsageFlags", "returns", "the", "usage", "flags", "for", "the", "passed", "command", "method", ".", "The", "provided", "method", "must", "be", "associated", "with", "a", "registered", "type", ".", "All", "commands", "provided", "by", "this", "package", "are", "registered", "by", "default", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/btcjson/cmdinfo.go#L33-L45
train
btcsuite/btcd
btcjson/cmdinfo.go
subArrayUsage
func subArrayUsage(arrayType reflect.Type, fieldName string) string {
	// Convert plural field names to singular. Only works for English.
	singularFieldName := fieldName
	if strings.HasSuffix(fieldName, "ies") {
		singularFieldName = strings.TrimSuffix(fieldName, "ies")
		singularFieldName = singularFieldName + "y"
	} else if strings.HasSuffix(fieldName, "es") {
		singularFieldName = strings.TrimSuffix(fieldName, "es")
	} else if strings.HasSuffix(fieldName, "s") {
		singularFieldName = strings.TrimSuffix(fieldName, "s")
	}

	elemType := arrayType.Elem()
	switch elemType.Kind() {
	case reflect.String:
		return fmt.Sprintf("[%q,...]", singularFieldName)

	case reflect.Struct:
		return fmt.Sprintf("[%s,...]", subStructUsage(elemType))
	}

	// Fall back to simply showing the field name in array syntax.
	return fmt.Sprintf(`[%s,...]`, singularFieldName)
}
go
[ "func", "subArrayUsage", "(", "arrayType", "reflect", ".", "Type", ",", "fieldName", "string", ")", "string", "{", "// Convert plural field names to singular. Only works for English.", "singularFieldName", ":=", "fieldName", "\n", "if", "strings", ".", "HasSuffix", "(", "fieldName", ",", "\"", "\"", ")", "{", "singularFieldName", "=", "strings", ".", "TrimSuffix", "(", "fieldName", ",", "\"", "\"", ")", "\n", "singularFieldName", "=", "singularFieldName", "+", "\"", "\"", "\n", "}", "else", "if", "strings", ".", "HasSuffix", "(", "fieldName", ",", "\"", "\"", ")", "{", "singularFieldName", "=", "strings", ".", "TrimSuffix", "(", "fieldName", ",", "\"", "\"", ")", "\n", "}", "else", "if", "strings", ".", "HasSuffix", "(", "fieldName", ",", "\"", "\"", ")", "{", "singularFieldName", "=", "strings", ".", "TrimSuffix", "(", "fieldName", ",", "\"", "\"", ")", "\n", "}", "\n\n", "elemType", ":=", "arrayType", ".", "Elem", "(", ")", "\n", "switch", "elemType", ".", "Kind", "(", ")", "{", "case", "reflect", ".", "String", ":", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "singularFieldName", ")", "\n\n", "case", "reflect", ".", "Struct", ":", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "subStructUsage", "(", "elemType", ")", ")", "\n", "}", "\n\n", "// Fall back to simply showing the field name in array syntax.", "return", "fmt", ".", "Sprintf", "(", "`[%s,...]`", ",", "singularFieldName", ")", "\n", "}" ]
// subArrayUsage returns a string for use in the one-line usage for the given
// array or slice. It also contains logic to convert plural field names to
// singular so the generated usage string reads better.
[ "subArrayUsage", "returns", "a", "string", "for", "use", "in", "the", "one", "-", "line", "usage", "for", "the", "given", "array", "or", "slice", ".", "It", "also", "contains", "logic", "to", "convert", "plural", "field", "names", "to", "singular", "so", "the", "generated", "usage", "string", "reads", "better", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/btcjson/cmdinfo.go#L101-L124
train
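The plural-to-singular heuristic above is easy to exercise on its own. A small hypothetical sketch of just that conversion:

package main

import (
	"fmt"
	"strings"
)

// singular applies the same English-only suffix rules as subArrayUsage:
// "ies" becomes "y", otherwise a trailing "es" or "s" is trimmed.
func singular(name string) string {
	switch {
	case strings.HasSuffix(name, "ies"):
		return strings.TrimSuffix(name, "ies") + "y"
	case strings.HasSuffix(name, "es"):
		return strings.TrimSuffix(name, "es")
	case strings.HasSuffix(name, "s"):
		return strings.TrimSuffix(name, "s")
	}
	return name
}

func main() {
	fmt.Println(singular("entries"), singular("addresses"), singular("keys")) // entry address key
}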
btcsuite/btcd
btcjson/cmdinfo.go
fieldUsage
func fieldUsage(structField reflect.StructField, defaultVal *reflect.Value) string {
	// When the field has a jsonrpcusage struct tag specified use that
	// instead of automatically generating it.
	if tag := structField.Tag.Get("jsonrpcusage"); tag != "" {
		return tag
	}

	// Indirect the pointer if needed.
	fieldType := structField.Type
	if fieldType.Kind() == reflect.Ptr {
		fieldType = fieldType.Elem()
	}

	// When there is a default value, it must also be a pointer due to the
	// rules enforced by RegisterCmd.
	if defaultVal != nil {
		indirect := defaultVal.Elem()
		defaultVal = &indirect
	}

	// Handle certain types uniquely to provide nicer usage.
	fieldName := strings.ToLower(structField.Name)
	switch fieldType.Kind() {
	case reflect.String:
		if defaultVal != nil {
			return fmt.Sprintf("%s=%q", fieldName, defaultVal.Interface())
		}

		return fmt.Sprintf("%q", fieldName)

	case reflect.Array, reflect.Slice:
		return subArrayUsage(fieldType, fieldName)

	case reflect.Struct:
		return subStructUsage(fieldType)
	}

	// Simply return the field name when none of the above special cases
	// apply.
	if defaultVal != nil {
		return fmt.Sprintf("%s=%v", fieldName, defaultVal.Interface())
	}
	return fieldName
}
go
[ "func", "fieldUsage", "(", "structField", "reflect", ".", "StructField", ",", "defaultVal", "*", "reflect", ".", "Value", ")", "string", "{", "// When the field has a jsonrpcusage struct tag specified use that", "// instead of automatically generating it.", "if", "tag", ":=", "structField", ".", "Tag", ".", "Get", "(", "\"", "\"", ")", ";", "tag", "!=", "\"", "\"", "{", "return", "tag", "\n", "}", "\n\n", "// Indirect the pointer if needed.", "fieldType", ":=", "structField", ".", "Type", "\n", "if", "fieldType", ".", "Kind", "(", ")", "==", "reflect", ".", "Ptr", "{", "fieldType", "=", "fieldType", ".", "Elem", "(", ")", "\n", "}", "\n\n", "// When there is a default value, it must also be a pointer due to the", "// rules enforced by RegisterCmd.", "if", "defaultVal", "!=", "nil", "{", "indirect", ":=", "defaultVal", ".", "Elem", "(", ")", "\n", "defaultVal", "=", "&", "indirect", "\n", "}", "\n\n", "// Handle certain types uniquely to provide nicer usage.", "fieldName", ":=", "strings", ".", "ToLower", "(", "structField", ".", "Name", ")", "\n", "switch", "fieldType", ".", "Kind", "(", ")", "{", "case", "reflect", ".", "String", ":", "if", "defaultVal", "!=", "nil", "{", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "fieldName", ",", "defaultVal", ".", "Interface", "(", ")", ")", "\n", "}", "\n\n", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "fieldName", ")", "\n\n", "case", "reflect", ".", "Array", ",", "reflect", ".", "Slice", ":", "return", "subArrayUsage", "(", "fieldType", ",", "fieldName", ")", "\n\n", "case", "reflect", ".", "Struct", ":", "return", "subStructUsage", "(", "fieldType", ")", "\n", "}", "\n\n", "// Simply return the field name when none of the above special cases", "// apply.", "if", "defaultVal", "!=", "nil", "{", "return", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "fieldName", ",", "defaultVal", ".", "Interface", "(", ")", ")", "\n", "}", "\n", "return", "fieldName", "\n", "}" ]
// fieldUsage returns a string for use in the one-line usage for the struct
// field of a command.
//
// Any fields that include a jsonrpcusage struct tag will use that instead of
// being automatically generated.
[ "fieldUsage", "returns", "a", "string", "for", "use", "in", "the", "one", "-", "line", "usage", "for", "the", "struct", "field", "of", "a", "command", ".", "Any", "fields", "that", "include", "a", "jsonrpcusage", "struct", "tag", "will", "use", "that", "instead", "of", "being", "automatically", "generated", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/btcjson/cmdinfo.go#L131-L175
train
btcsuite/btcd
btcjson/cmdinfo.go
methodUsageText
func methodUsageText(rtp reflect.Type, defaults map[int]reflect.Value, method string) string {
	// Generate the individual usage for each field in the command. Several
	// simplifying assumptions are made here because the RegisterCmd
	// function has already rigorously enforced the layout.
	rt := rtp.Elem()
	numFields := rt.NumField()
	reqFieldUsages := make([]string, 0, numFields)
	optFieldUsages := make([]string, 0, numFields)
	for i := 0; i < numFields; i++ {
		rtf := rt.Field(i)
		var isOptional bool
		if kind := rtf.Type.Kind(); kind == reflect.Ptr {
			isOptional = true
		}

		var defaultVal *reflect.Value
		if defVal, ok := defaults[i]; ok {
			defaultVal = &defVal
		}

		// Add human-readable usage to the appropriate slice that is
		// later used to generate the one-line usage.
		usage := fieldUsage(rtf, defaultVal)
		if isOptional {
			optFieldUsages = append(optFieldUsages, usage)
		} else {
			reqFieldUsages = append(reqFieldUsages, usage)
		}
	}

	// Generate and return the one-line usage string.
	usageStr := method
	if len(reqFieldUsages) > 0 {
		usageStr += " " + strings.Join(reqFieldUsages, " ")
	}
	if len(optFieldUsages) > 0 {
		usageStr += fmt.Sprintf(" (%s)", strings.Join(optFieldUsages, " "))
	}
	return usageStr
}
go
[ "func", "methodUsageText", "(", "rtp", "reflect", ".", "Type", ",", "defaults", "map", "[", "int", "]", "reflect", ".", "Value", ",", "method", "string", ")", "string", "{", "// Generate the individual usage for each field in the command. Several", "// simplifying assumptions are made here because the RegisterCmd", "// function has already rigorously enforced the layout.", "rt", ":=", "rtp", ".", "Elem", "(", ")", "\n", "numFields", ":=", "rt", ".", "NumField", "(", ")", "\n", "reqFieldUsages", ":=", "make", "(", "[", "]", "string", ",", "0", ",", "numFields", ")", "\n", "optFieldUsages", ":=", "make", "(", "[", "]", "string", ",", "0", ",", "numFields", ")", "\n", "for", "i", ":=", "0", ";", "i", "<", "numFields", ";", "i", "++", "{", "rtf", ":=", "rt", ".", "Field", "(", "i", ")", "\n", "var", "isOptional", "bool", "\n", "if", "kind", ":=", "rtf", ".", "Type", ".", "Kind", "(", ")", ";", "kind", "==", "reflect", ".", "Ptr", "{", "isOptional", "=", "true", "\n", "}", "\n\n", "var", "defaultVal", "*", "reflect", ".", "Value", "\n", "if", "defVal", ",", "ok", ":=", "defaults", "[", "i", "]", ";", "ok", "{", "defaultVal", "=", "&", "defVal", "\n", "}", "\n\n", "// Add human-readable usage to the appropriate slice that is", "// later used to generate the one-line usage.", "usage", ":=", "fieldUsage", "(", "rtf", ",", "defaultVal", ")", "\n", "if", "isOptional", "{", "optFieldUsages", "=", "append", "(", "optFieldUsages", ",", "usage", ")", "\n", "}", "else", "{", "reqFieldUsages", "=", "append", "(", "reqFieldUsages", ",", "usage", ")", "\n", "}", "\n", "}", "\n\n", "// Generate and return the one-line usage string.", "usageStr", ":=", "method", "\n", "if", "len", "(", "reqFieldUsages", ")", ">", "0", "{", "usageStr", "+=", "\"", "\"", "+", "strings", ".", "Join", "(", "reqFieldUsages", ",", "\"", "\"", ")", "\n", "}", "\n", "if", "len", "(", "optFieldUsages", ")", ">", "0", "{", "usageStr", "+=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "strings", ".", "Join", "(", "optFieldUsages", ",", "\"", "\"", ")", ")", "\n", "}", "\n", "return", "usageStr", "\n", "}" ]
// methodUsageText returns a one-line usage string for the provided command and
// method info. This is the main workhorse for the exported MethodUsageText
// function.
[ "methodUsageText", "returns", "a", "one", "-", "line", "usage", "string", "for", "the", "provided", "command", "and", "method", "info", ".", "This", "is", "the", "main", "work", "horse", "for", "the", "exported", "MethodUsageText", "function", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/btcjson/cmdinfo.go#L180-L219
train
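methodUsageText leans on the convention that pointer-typed struct fields are optional parameters. A hypothetical sketch of just that classification step, separate from the usage-string assembly:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// optionalFields returns the lowercased names of pointer-typed fields of the
// struct that cmd points to, mirroring the "pointer means optional" rule used
// by methodUsageText.
func optionalFields(cmd interface{}) []string {
	rt := reflect.TypeOf(cmd).Elem()
	var opt []string
	for i := 0; i < rt.NumField(); i++ {
		f := rt.Field(i)
		if f.Type.Kind() == reflect.Ptr {
			opt = append(opt, strings.ToLower(f.Name))
		}
	}
	return opt
}

func main() {
	type demoCmd struct {
		Hash    string
		Verbose *bool
	}
	fmt.Println(optionalFields(&demoCmd{})) // [verbose]
}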
btcsuite/btcd
btcjson/cmdinfo.go
MethodUsageText
func MethodUsageText(method string) (string, error) {
	// Look up details about the provided method and error out if not
	// registered.
	registerLock.RLock()
	rtp, ok := methodToConcreteType[method]
	info := methodToInfo[method]
	registerLock.RUnlock()
	if !ok {
		str := fmt.Sprintf("%q is not registered", method)
		return "", makeError(ErrUnregisteredMethod, str)
	}

	// When the usage for this method has already been generated, simply
	// return it.
	if info.usage != "" {
		return info.usage, nil
	}

	// Generate and store the usage string for future calls and return it.
	usage := methodUsageText(rtp, info.defaults, method)
	registerLock.Lock()
	info.usage = usage
	methodToInfo[method] = info
	registerLock.Unlock()
	return usage, nil
}
go
[ "func", "MethodUsageText", "(", "method", "string", ")", "(", "string", ",", "error", ")", "{", "// Look up details about the provided method and error out if not", "// registered.", "registerLock", ".", "RLock", "(", ")", "\n", "rtp", ",", "ok", ":=", "methodToConcreteType", "[", "method", "]", "\n", "info", ":=", "methodToInfo", "[", "method", "]", "\n", "registerLock", ".", "RUnlock", "(", ")", "\n", "if", "!", "ok", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "method", ")", "\n", "return", "\"", "\"", ",", "makeError", "(", "ErrUnregisteredMethod", ",", "str", ")", "\n", "}", "\n\n", "// When the usage for this method has already been generated, simply", "// return it.", "if", "info", ".", "usage", "!=", "\"", "\"", "{", "return", "info", ".", "usage", ",", "nil", "\n", "}", "\n\n", "// Generate and store the usage string for future calls and return it.", "usage", ":=", "methodUsageText", "(", "rtp", ",", "info", ".", "defaults", ",", "method", ")", "\n", "registerLock", ".", "Lock", "(", ")", "\n", "info", ".", "usage", "=", "usage", "\n", "methodToInfo", "[", "method", "]", "=", "info", "\n", "registerLock", ".", "Unlock", "(", ")", "\n", "return", "usage", ",", "nil", "\n", "}" ]
// MethodUsageText returns a one-line usage string for the provided method. The
// provided method must be associated with a registered type. All commands
// provided by this package are registered by default.
[ "MethodUsageText", "returns", "a", "one", "-", "line", "usage", "string", "for", "the", "provided", "method", ".", "The", "provided", "method", "must", "be", "associated", "with", "a", "registered", "type", ".", "All", "commands", "provided", "by", "this", "package", "are", "registered", "by", "default", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/btcjson/cmdinfo.go#L224-L249
train
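A hedged usage sketch combining MethodUsageText with MethodUsageFlags for a method the package registers by default ("getblock"); the wrapper function itself is illustrative.

package example

import (
	"fmt"

	"github.com/btcsuite/btcd/btcjson"
)

// printGetBlockUsage prints the generated one-line usage and the usage flags
// for the built-in "getblock" command.
func printGetBlockUsage() error {
	usage, err := btcjson.MethodUsageText("getblock")
	if err != nil {
		return err
	}
	flags, err := btcjson.MethodUsageFlags("getblock")
	if err != nil {
		return err
	}
	fmt.Printf("%s (flags: %v)\n", usage, flags)
	return nil
}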
btcsuite/btcd
btcjson/error.go
makeError
func makeError(c ErrorCode, desc string) Error {
	return Error{ErrorCode: c, Description: desc}
}
go
[ "func", "makeError", "(", "c", "ErrorCode", ",", "desc", "string", ")", "Error", "{", "return", "Error", "{", "ErrorCode", ":", "c", ",", "Description", ":", "desc", "}", "\n", "}" ]
// makeError creates an Error given a set of arguments.
[ "makeError", "creates", "an", "Error", "given", "a", "set", "of", "arguments", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/btcjson/error.go#L109-L111
train
btcsuite/btcd
blockchain/indexers/cfindex.go
dbFetchFilterIdxEntry
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Get(h[:]), nil
}
go
[ "func", "dbFetchFilterIdxEntry", "(", "dbTx", "database", ".", "Tx", ",", "key", "[", "]", "byte", ",", "h", "*", "chainhash", ".", "Hash", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "idx", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "cfIndexParentBucketKey", ")", ".", "Bucket", "(", "key", ")", "\n", "return", "idx", ".", "Get", "(", "h", "[", ":", "]", ")", ",", "nil", "\n", "}" ]
// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
// An entry's absence is not considered an error.
[ "dbFetchFilterIdxEntry", "retrieves", "a", "data", "blob", "from", "the", "filter", "index", "database", ".", "An", "entry", "s", "absence", "is", "not", "considered", "an", "error", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L60-L63
train
btcsuite/btcd
blockchain/indexers/cfindex.go
dbStoreFilterIdxEntry
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash, f []byte) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Put(h[:], f)
}
go
[ "func", "dbStoreFilterIdxEntry", "(", "dbTx", "database", ".", "Tx", ",", "key", "[", "]", "byte", ",", "h", "*", "chainhash", ".", "Hash", ",", "f", "[", "]", "byte", ")", "error", "{", "idx", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "cfIndexParentBucketKey", ")", ".", "Bucket", "(", "key", ")", "\n", "return", "idx", ".", "Put", "(", "h", "[", ":", "]", ",", "f", ")", "\n", "}" ]
// dbStoreFilterIdxEntry stores a data blob in the filter index database.
[ "dbStoreFilterIdxEntry", "stores", "a", "data", "blob", "in", "the", "filter", "index", "database", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L66-L69
train
btcsuite/btcd
blockchain/indexers/cfindex.go
dbDeleteFilterIdxEntry
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Delete(h[:])
}
go
[ "func", "dbDeleteFilterIdxEntry", "(", "dbTx", "database", ".", "Tx", ",", "key", "[", "]", "byte", ",", "h", "*", "chainhash", ".", "Hash", ")", "error", "{", "idx", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "cfIndexParentBucketKey", ")", ".", "Bucket", "(", "key", ")", "\n", "return", "idx", ".", "Delete", "(", "h", "[", ":", "]", ")", "\n", "}" ]
// dbDeleteFilterIdxEntry deletes a data blob from the filter index database.
[ "dbDeleteFilterIdxEntry", "deletes", "a", "data", "blob", "from", "the", "filter", "index", "database", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L72-L75
train
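The three helpers above share one pattern: resolve a child bucket of the cfindex parent bucket inside a database transaction, then Get, Put, or Delete by block hash. A hedged sketch of driving that pattern from a managed read-write transaction; db is assumed to be an open database.DB, and the bucket is assumed to already exist, as the cfindex buckets do once the index is created.

package example

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
)

// storeAndFetch writes a filter blob keyed by block hash into the given
// bucket and reads it back within a single managed transaction, mirroring
// the dbStoreFilterIdxEntry/dbFetchFilterIdxEntry shape above.
func storeAndFetch(db database.DB, bucketKey []byte, h *chainhash.Hash, blob []byte) ([]byte, error) {
	var out []byte
	err := db.Update(func(dbTx database.Tx) error {
		idx := dbTx.Metadata().Bucket(bucketKey) // assumed to exist
		if err := idx.Put(h[:], blob); err != nil {
			return err
		}
		out = idx.Get(h[:])
		return nil
	})
	return out, err
}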
btcsuite/btcd
blockchain/indexers/cfindex.go
storeFilter
func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter, filterType wire.FilterType) error {
	if uint8(filterType) > maxFilterType {
		return errors.New("unsupported filter type")
	}

	// Figure out which buckets to use.
	fkey := cfIndexKeys[filterType]
	hkey := cfHeaderKeys[filterType]
	hashkey := cfHashKeys[filterType]

	// Start by storing the filter.
	h := block.Hash()
	filterBytes, err := f.NBytes()
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
	if err != nil {
		return err
	}

	// Next store the filter hash.
	filterHash, err := builder.GetFilterHash(f)
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
	if err != nil {
		return err
	}

	// Then fetch the previous block's filter header.
	var prevHeader *chainhash.Hash
	ph := &block.MsgBlock().Header.PrevBlock
	if ph.IsEqual(&zeroHash) {
		prevHeader = &zeroHash
	} else {
		pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
		if err != nil {
			return err
		}

		// Construct the new block's filter header, and store it.
		prevHeader, err = chainhash.NewHash(pfh)
		if err != nil {
			return err
		}
	}

	fh, err := builder.MakeHeaderForFilter(f, *prevHeader)
	if err != nil {
		return err
	}
	return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}
go
[ "func", "storeFilter", "(", "dbTx", "database", ".", "Tx", ",", "block", "*", "btcutil", ".", "Block", ",", "f", "*", "gcs", ".", "Filter", ",", "filterType", "wire", ".", "FilterType", ")", "error", "{", "if", "uint8", "(", "filterType", ")", ">", "maxFilterType", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n\n", "// Figure out which buckets to use.", "fkey", ":=", "cfIndexKeys", "[", "filterType", "]", "\n", "hkey", ":=", "cfHeaderKeys", "[", "filterType", "]", "\n", "hashkey", ":=", "cfHashKeys", "[", "filterType", "]", "\n\n", "// Start by storing the filter.", "h", ":=", "block", ".", "Hash", "(", ")", "\n", "filterBytes", ",", "err", ":=", "f", ".", "NBytes", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "err", "=", "dbStoreFilterIdxEntry", "(", "dbTx", ",", "fkey", ",", "h", ",", "filterBytes", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Next store the filter hash.", "filterHash", ",", "err", ":=", "builder", ".", "GetFilterHash", "(", "f", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "err", "=", "dbStoreFilterIdxEntry", "(", "dbTx", ",", "hashkey", ",", "h", ",", "filterHash", "[", ":", "]", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Then fetch the previous block's filter header.", "var", "prevHeader", "*", "chainhash", ".", "Hash", "\n", "ph", ":=", "&", "block", ".", "MsgBlock", "(", ")", ".", "Header", ".", "PrevBlock", "\n", "if", "ph", ".", "IsEqual", "(", "&", "zeroHash", ")", "{", "prevHeader", "=", "&", "zeroHash", "\n", "}", "else", "{", "pfh", ",", "err", ":=", "dbFetchFilterIdxEntry", "(", "dbTx", ",", "hkey", ",", "ph", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Construct the new block's filter header, and store it.", "prevHeader", ",", "err", "=", "chainhash", ".", "NewHash", "(", "pfh", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "fh", ",", "err", ":=", "builder", ".", "MakeHeaderForFilter", "(", "f", ",", "*", "prevHeader", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "return", "dbStoreFilterIdxEntry", "(", "dbTx", ",", "hkey", ",", "h", ",", "fh", "[", ":", "]", ")", "\n", "}" ]
// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
[ "storeFilter", "stores", "a", "given", "filter", "and", "performs", "the", "steps", "needed", "to", "generate", "the", "filter", "s", "header", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L152-L207
train
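storeFilter commits each block's filter to a header chain: a block's filter header is derived from its filter and the previous block's filter header, with the all-zero hash standing in before the genesis block. A hedged sketch of computing such a header chain for a slice of filters, using MakeHeaderForFilter from the builder package as the code above does; the wrapper function is illustrative.

package example

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcutil/gcs"
	"github.com/btcsuite/btcutil/gcs/builder"
)

// chainHeaders computes the committed filter header for each filter in order,
// chaining each header onto the previous one and starting from the all-zero
// header, mirroring how storeFilter handles the genesis block's previous header.
func chainHeaders(filters []*gcs.Filter) ([]chainhash.Hash, error) {
	headers := make([]chainhash.Hash, len(filters))
	prev := chainhash.Hash{} // zero hash before the first filter
	for i, f := range filters {
		h, err := builder.MakeHeaderForFilter(f, prev)
		if err != nil {
			return nil, err
		}
		headers[i] = h
		prev = h
	}
	return headers, nil
}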
btcsuite/btcd
blockchain/indexers/cfindex.go
ConnectBlock
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, stxos []blockchain.SpentTxOut) error {
	prevScripts := make([][]byte, len(stxos))
	for i, stxo := range stxos {
		prevScripts[i] = stxo.PkScript
	}

	f, err := builder.BuildBasicFilter(block.MsgBlock(), prevScripts)
	if err != nil {
		return err
	}

	return storeFilter(dbTx, block, f, wire.GCSFilterRegular)
}
go
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, stxos []blockchain.SpentTxOut) error { prevScripts := make([][]byte, len(stxos)) for i, stxo := range stxos { prevScripts[i] = stxo.PkScript } f, err := builder.BuildBasicFilter(block.MsgBlock(), prevScripts) if err != nil { return err } return storeFilter(dbTx, block, f, wire.GCSFilterRegular) }
[ "func", "(", "idx", "*", "CfIndex", ")", "ConnectBlock", "(", "dbTx", "database", ".", "Tx", ",", "block", "*", "btcutil", ".", "Block", ",", "stxos", "[", "]", "blockchain", ".", "SpentTxOut", ")", "error", "{", "prevScripts", ":=", "make", "(", "[", "]", "[", "]", "byte", ",", "len", "(", "stxos", ")", ")", "\n", "for", "i", ",", "stxo", ":=", "range", "stxos", "{", "prevScripts", "[", "i", "]", "=", "stxo", ".", "PkScript", "\n", "}", "\n\n", "f", ",", "err", ":=", "builder", ".", "BuildBasicFilter", "(", "block", ".", "MsgBlock", "(", ")", ",", "prevScripts", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "return", "storeFilter", "(", "dbTx", ",", "block", ",", "f", ",", "wire", ".", "GCSFilterRegular", ")", "\n", "}" ]
// ConnectBlock is invoked by the index manager when a new block has been // connected to the main chain. This indexer adds a hash-to-cf mapping for // every passed block. This is part of the Indexer interface.
[ "ConnectBlock", "is", "invoked", "by", "the", "index", "manager", "when", "a", "new", "block", "has", "been", "connected", "to", "the", "main", "chain", ".", "This", "indexer", "adds", "a", "hash", "-", "to", "-", "cf", "mapping", "for", "every", "passed", "block", ".", "This", "is", "part", "of", "the", "Indexer", "interface", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L212-L226
train
btcsuite/btcd
blockchain/indexers/cfindex.go
DisconnectBlock
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, _ []blockchain.SpentTxOut) error { for _, key := range cfIndexKeys { err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()) if err != nil { return err } } for _, key := range cfHeaderKeys { err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()) if err != nil { return err } } for _, key := range cfHashKeys { err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()) if err != nil { return err } } return nil }
go
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, _ []blockchain.SpentTxOut) error { for _, key := range cfIndexKeys { err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()) if err != nil { return err } } for _, key := range cfHeaderKeys { err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()) if err != nil { return err } } for _, key := range cfHashKeys { err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()) if err != nil { return err } } return nil }
[ "func", "(", "idx", "*", "CfIndex", ")", "DisconnectBlock", "(", "dbTx", "database", ".", "Tx", ",", "block", "*", "btcutil", ".", "Block", ",", "_", "[", "]", "blockchain", ".", "SpentTxOut", ")", "error", "{", "for", "_", ",", "key", ":=", "range", "cfIndexKeys", "{", "err", ":=", "dbDeleteFilterIdxEntry", "(", "dbTx", ",", "key", ",", "block", ".", "Hash", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "for", "_", ",", "key", ":=", "range", "cfHeaderKeys", "{", "err", ":=", "dbDeleteFilterIdxEntry", "(", "dbTx", ",", "key", ",", "block", ".", "Hash", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "for", "_", ",", "key", ":=", "range", "cfHashKeys", "{", "err", ":=", "dbDeleteFilterIdxEntry", "(", "dbTx", ",", "key", ",", "block", ".", "Hash", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// DisconnectBlock is invoked by the index manager when a block has been // disconnected from the main chain. This indexer removes the hash-to-cf // mapping for every passed block. This is part of the Indexer interface.
[ "DisconnectBlock", "is", "invoked", "by", "the", "index", "manager", "when", "a", "block", "has", "been", "disconnected", "from", "the", "main", "chain", ".", "This", "indexer", "removes", "the", "hash", "-", "to", "-", "cf", "mapping", "for", "every", "passed", "block", ".", "This", "is", "part", "of", "the", "Indexer", "interface", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L231-L256
train
btcsuite/btcd
blockchain/indexers/cfindex.go
FilterByBlockHash
func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfIndexKeys, filterType, h) }
go
func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfIndexKeys, filterType, h) }
[ "func", "(", "idx", "*", "CfIndex", ")", "FilterByBlockHash", "(", "h", "*", "chainhash", ".", "Hash", ",", "filterType", "wire", ".", "FilterType", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "return", "idx", ".", "entryByBlockHash", "(", "cfIndexKeys", ",", "filterType", ",", "h", ")", "\n", "}" ]
// FilterByBlockHash returns the serialized contents of a block's basic or // committed filter.
[ "FilterByBlockHash", "returns", "the", "serialized", "contents", "of", "a", "block", "s", "basic", "or", "committed", "filter", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L303-L306
train
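A hedged caller sketch for the lookup above: reading a block's regular (basic) compact filter from an already-built index. The package name, the helper name, and the zero-length check are assumptions; this excerpt does not show whether a missing entry surfaces as an error or as an empty slice.

package cfreader

import (
	"fmt"

	"github.com/btcsuite/btcd/blockchain/indexers"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

// fetchRegularFilter reads a block's basic (regular) compact filter from an
// assumed caught-up CF index. cfIdx and blockHash are assumed to be a valid
// *CfIndex and a main-chain block hash, respectively.
func fetchRegularFilter(cfIdx *indexers.CfIndex, blockHash *chainhash.Hash) ([]byte, error) {
	filterBytes, err := cfIdx.FilterByBlockHash(blockHash, wire.GCSFilterRegular)
	if err != nil {
		return nil, err
	}
	if len(filterBytes) == 0 {
		// Defensive assumption: treat an empty result as "not indexed".
		return nil, fmt.Errorf("no filter indexed for block %v", blockHash)
	}
	return filterBytes, nil
}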
btcsuite/btcd
blockchain/indexers/cfindex.go
FiltersByBlockHashes
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*chainhash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes) }
go
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*chainhash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes) }
[ "func", "(", "idx", "*", "CfIndex", ")", "FiltersByBlockHashes", "(", "blockHashes", "[", "]", "*", "chainhash", ".", "Hash", ",", "filterType", "wire", ".", "FilterType", ")", "(", "[", "]", "[", "]", "byte", ",", "error", ")", "{", "return", "idx", ".", "entriesByBlockHashes", "(", "cfIndexKeys", ",", "filterType", ",", "blockHashes", ")", "\n", "}" ]
// FiltersByBlockHashes returns the serialized contents of a block's basic or // committed filter for a set of blocks by hash.
[ "FiltersByBlockHashes", "returns", "the", "serialized", "contents", "of", "a", "block", "s", "basic", "or", "committed", "filter", "for", "a", "set", "of", "blocks", "by", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L310-L313
train
btcsuite/btcd
blockchain/indexers/cfindex.go
FilterHeaderByBlockHash
func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfHeaderKeys, filterType, h) }
go
func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfHeaderKeys, filterType, h) }
[ "func", "(", "idx", "*", "CfIndex", ")", "FilterHeaderByBlockHash", "(", "h", "*", "chainhash", ".", "Hash", ",", "filterType", "wire", ".", "FilterType", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "return", "idx", ".", "entryByBlockHash", "(", "cfHeaderKeys", ",", "filterType", ",", "h", ")", "\n", "}" ]
// FilterHeaderByBlockHash returns the serialized contents of a block's basic // committed filter header.
[ "FilterHeaderByBlockHash", "returns", "the", "serialized", "contents", "of", "a", "block", "s", "basic", "committed", "filter", "header", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L317-L320
train
btcsuite/btcd
blockchain/indexers/cfindex.go
FilterHeadersByBlockHashes
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*chainhash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes) }
go
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*chainhash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes) }
[ "func", "(", "idx", "*", "CfIndex", ")", "FilterHeadersByBlockHashes", "(", "blockHashes", "[", "]", "*", "chainhash", ".", "Hash", ",", "filterType", "wire", ".", "FilterType", ")", "(", "[", "]", "[", "]", "byte", ",", "error", ")", "{", "return", "idx", ".", "entriesByBlockHashes", "(", "cfHeaderKeys", ",", "filterType", ",", "blockHashes", ")", "\n", "}" ]
// FilterHeadersByBlockHashes returns the serialized contents of a block's // basic committed filter header for a set of blocks by hash.
[ "FilterHeadersByBlockHashes", "returns", "the", "serialized", "contents", "of", "a", "block", "s", "basic", "committed", "filter", "header", "for", "a", "set", "of", "blocks", "by", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L324-L327
train
btcsuite/btcd
blockchain/indexers/cfindex.go
FilterHashByBlockHash
func (idx *CfIndex) FilterHashByBlockHash(h *chainhash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfHashKeys, filterType, h) }
go
func (idx *CfIndex) FilterHashByBlockHash(h *chainhash.Hash, filterType wire.FilterType) ([]byte, error) { return idx.entryByBlockHash(cfHashKeys, filterType, h) }
[ "func", "(", "idx", "*", "CfIndex", ")", "FilterHashByBlockHash", "(", "h", "*", "chainhash", ".", "Hash", ",", "filterType", "wire", ".", "FilterType", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "return", "idx", ".", "entryByBlockHash", "(", "cfHashKeys", ",", "filterType", ",", "h", ")", "\n", "}" ]
// FilterHashByBlockHash returns the serialized contents of a block's basic // committed filter hash.
[ "FilterHashByBlockHash", "returns", "the", "serialized", "contents", "of", "a", "block", "s", "basic", "committed", "filter", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L331-L334
train
btcsuite/btcd
blockchain/indexers/cfindex.go
FilterHashesByBlockHashes
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*chainhash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes) }
go
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*chainhash.Hash, filterType wire.FilterType) ([][]byte, error) { return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes) }
[ "func", "(", "idx", "*", "CfIndex", ")", "FilterHashesByBlockHashes", "(", "blockHashes", "[", "]", "*", "chainhash", ".", "Hash", ",", "filterType", "wire", ".", "FilterType", ")", "(", "[", "]", "[", "]", "byte", ",", "error", ")", "{", "return", "idx", ".", "entriesByBlockHashes", "(", "cfHashKeys", ",", "filterType", ",", "blockHashes", ")", "\n", "}" ]
// FilterHashesByBlockHashes returns the serialized contents of a block's basic // committed filter hash for a set of blocks by hash.
[ "FilterHashesByBlockHashes", "returns", "the", "serialized", "contents", "of", "a", "block", "s", "basic", "committed", "filter", "hash", "for", "a", "set", "of", "blocks", "by", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L338-L341
train
btcsuite/btcd
blockchain/indexers/cfindex.go
NewCfIndex
func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex { return &CfIndex{db: db, chainParams: chainParams} }
go
func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex { return &CfIndex{db: db, chainParams: chainParams} }
[ "func", "NewCfIndex", "(", "db", "database", ".", "DB", ",", "chainParams", "*", "chaincfg", ".", "Params", ")", "*", "CfIndex", "{", "return", "&", "CfIndex", "{", "db", ":", "db", ",", "chainParams", ":", "chainParams", "}", "\n", "}" ]
// NewCfIndex returns a new instance of an indexer that is used to create a // mapping of the hashes of all blocks in the blockchain to their respective // committed filters. // // It implements the Indexer interface which plugs into the IndexManager that // in turn is used by the blockchain package. This allows the index to be // seamlessly maintained along with the chain.
[ "NewCfIndex", "returns", "a", "new", "instance", "of", "an", "indexer", "that", "is", "used", "to", "create", "a", "mapping", "of", "the", "hashes", "of", "all", "blocks", "in", "the", "blockchain", "to", "their", "respective", "committed", "filters", ".", "It", "implements", "the", "Indexer", "interface", "which", "plugs", "into", "the", "IndexManager", "that", "in", "turn", "is", "used", "by", "the", "blockchain", "package", ".", "This", "allows", "the", "index", "to", "be", "seamlessly", "maintained", "along", "with", "the", "chain", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L350-L352
train
btcsuite/btcd
blockchain/indexers/cfindex.go
DropCfIndex
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error { return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt) }
go
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error { return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt) }
[ "func", "DropCfIndex", "(", "db", "database", ".", "DB", ",", "interrupt", "<-", "chan", "struct", "{", "}", ")", "error", "{", "return", "dropIndex", "(", "db", ",", "cfIndexParentBucketKey", ",", "cfIndexName", ",", "interrupt", ")", "\n", "}" ]
// DropCfIndex drops the CF index from the provided database if it exists.
[ "DropCfIndex", "drops", "the", "CF", "index", "from", "the", "provided", "database", "if", "it", "exists", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/indexers/cfindex.go#L355-L357
train
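A sketch of wiring the two entry points above together, assuming the ffldb driver's Create arguments (database path plus network) and mainnet parameters; in a real node the indexer returned by NewCfIndex would be handed to the blockchain package's index manager, which is outside this excerpt.

package main

import (
	"log"

	"github.com/btcsuite/btcd/blockchain/indexers"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/ffldb"
)

func main() {
	// Assumption: the ffldb driver accepts a path and a network here.
	db, err := database.Create("ffldb", "/tmp/cfindex-demo", chaincfg.MainNetParams.Net)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create the committed-filter indexer. Normally it would be registered
	// with the blockchain package's index manager so it is maintained as
	// blocks connect and disconnect.
	cfIdx := indexers.NewCfIndex(db, &chaincfg.MainNetParams)
	_ = cfIdx

	// Dropping the index removes its buckets; the channel allows the
	// potentially long-running deletion to be interrupted.
	interrupt := make(chan struct{})
	if err := indexers.DropCfIndex(db, interrupt); err != nil {
		log.Fatal(err)
	}
}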
btcsuite/btcd
blockchain/chainio.go
isDbBucketNotFoundErr
func isDbBucketNotFoundErr(err error) bool { dbErr, ok := err.(database.Error) return ok && dbErr.ErrorCode == database.ErrBucketNotFound }
go
func isDbBucketNotFoundErr(err error) bool { dbErr, ok := err.(database.Error) return ok && dbErr.ErrorCode == database.ErrBucketNotFound }
[ "func", "isDbBucketNotFoundErr", "(", "err", "error", ")", "bool", "{", "dbErr", ",", "ok", ":=", "err", ".", "(", "database", ".", "Error", ")", "\n", "return", "ok", "&&", "dbErr", ".", "ErrorCode", "==", "database", ".", "ErrBucketNotFound", "\n", "}" ]
// isDbBucketNotFoundErr returns whether or not the passed error is a // database.Error with an error code of database.ErrBucketNotFound.
[ "isDbBucketNotFoundErr", "returns", "whether", "or", "not", "the", "passed", "error", "is", "a", "database", ".", "Error", "with", "an", "error", "code", "of", "database", ".", "ErrBucketNotFound", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L109-L112
train
btcsuite/btcd
blockchain/chainio.go
dbFetchVersion
func dbFetchVersion(dbTx database.Tx, key []byte) uint32 { serialized := dbTx.Metadata().Get(key) if serialized == nil { return 0 } return byteOrder.Uint32(serialized[:]) }
go
func dbFetchVersion(dbTx database.Tx, key []byte) uint32 { serialized := dbTx.Metadata().Get(key) if serialized == nil { return 0 } return byteOrder.Uint32(serialized[:]) }
[ "func", "dbFetchVersion", "(", "dbTx", "database", ".", "Tx", ",", "key", "[", "]", "byte", ")", "uint32", "{", "serialized", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Get", "(", "key", ")", "\n", "if", "serialized", "==", "nil", "{", "return", "0", "\n", "}", "\n\n", "return", "byteOrder", ".", "Uint32", "(", "serialized", "[", ":", "]", ")", "\n", "}" ]
// dbFetchVersion fetches an individual version with the given key from the // metadata bucket. It is primarily used to track versions on entities such as // buckets. It returns zero if the provided key does not exist.
[ "dbFetchVersion", "fetches", "an", "individual", "version", "with", "the", "given", "key", "from", "the", "metadata", "bucket", ".", "It", "is", "primarily", "used", "to", "track", "versions", "on", "entities", "such", "as", "buckets", ".", "It", "returns", "zero", "if", "the", "provided", "key", "does", "not", "exist", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L117-L124
train
btcsuite/btcd
blockchain/chainio.go
dbPutVersion
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error { var serialized [4]byte byteOrder.PutUint32(serialized[:], version) return dbTx.Metadata().Put(key, serialized[:]) }
go
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error { var serialized [4]byte byteOrder.PutUint32(serialized[:], version) return dbTx.Metadata().Put(key, serialized[:]) }
[ "func", "dbPutVersion", "(", "dbTx", "database", ".", "Tx", ",", "key", "[", "]", "byte", ",", "version", "uint32", ")", "error", "{", "var", "serialized", "[", "4", "]", "byte", "\n", "byteOrder", ".", "PutUint32", "(", "serialized", "[", ":", "]", ",", "version", ")", "\n", "return", "dbTx", ".", "Metadata", "(", ")", ".", "Put", "(", "key", ",", "serialized", "[", ":", "]", ")", "\n", "}" ]
// dbPutVersion uses an existing database transaction to update the provided // key in the metadata bucket to the given version. It is primarily used to // track versions on entities such as buckets.
[ "dbPutVersion", "uses", "an", "existing", "database", "transaction", "to", "update", "the", "provided", "key", "in", "the", "metadata", "bucket", "to", "the", "given", "version", ".", "It", "is", "primarily", "used", "to", "track", "versions", "on", "entities", "such", "as", "buckets", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L129-L133
train
btcsuite/btcd
blockchain/chainio.go
dbFetchOrCreateVersion
func dbFetchOrCreateVersion(dbTx database.Tx, key []byte, defaultVersion uint32) (uint32, error) { version := dbFetchVersion(dbTx, key) if version == 0 { version = defaultVersion err := dbPutVersion(dbTx, key, version) if err != nil { return 0, err } } return version, nil }
go
func dbFetchOrCreateVersion(dbTx database.Tx, key []byte, defaultVersion uint32) (uint32, error) { version := dbFetchVersion(dbTx, key) if version == 0 { version = defaultVersion err := dbPutVersion(dbTx, key, version) if err != nil { return 0, err } } return version, nil }
[ "func", "dbFetchOrCreateVersion", "(", "dbTx", "database", ".", "Tx", ",", "key", "[", "]", "byte", ",", "defaultVersion", "uint32", ")", "(", "uint32", ",", "error", ")", "{", "version", ":=", "dbFetchVersion", "(", "dbTx", ",", "key", ")", "\n", "if", "version", "==", "0", "{", "version", "=", "defaultVersion", "\n", "err", ":=", "dbPutVersion", "(", "dbTx", ",", "key", ",", "version", ")", "\n", "if", "err", "!=", "nil", "{", "return", "0", ",", "err", "\n", "}", "\n", "}", "\n\n", "return", "version", ",", "nil", "\n", "}" ]
// dbFetchOrCreateVersion uses an existing database transaction to attempt to // fetch the provided key from the metadata bucket as a version and in the case // it doesn't exist, it adds the entry with the provided default version and // returns that. This is useful during upgrades to automatically handle loading // and adding version keys as necessary.
[ "dbFetchOrCreateVersion", "uses", "an", "existing", "database", "transaction", "to", "attempt", "to", "fetch", "the", "provided", "key", "from", "the", "metadata", "bucket", "as", "a", "version", "and", "in", "the", "case", "it", "doesn", "t", "exist", "it", "adds", "the", "entry", "with", "the", "provided", "default", "version", "and", "returns", "that", ".", "This", "is", "useful", "during", "upgrades", "to", "automatically", "handle", "loading", "and", "adding", "version", "keys", "as", "necessary", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L140-L151
train
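A self-contained sketch of the fetch/put/fetch-or-create version helpers above, using an in-memory map as a stand-in for the metadata bucket. The 4-byte little-endian encoding mirrors dbPutVersion under the assumption that the package's byteOrder is little-endian; the key name is arbitrary.

package main

import (
	"encoding/binary"
	"fmt"
)

// metadata is an in-memory stand-in for the metadata bucket: key -> value.
type metadata map[string][]byte

// fetchVersion mirrors dbFetchVersion: a missing key reads as version 0.
func fetchVersion(m metadata, key string) uint32 {
	serialized, ok := m[key]
	if !ok {
		return 0
	}
	return binary.LittleEndian.Uint32(serialized)
}

// putVersion mirrors dbPutVersion: the version is stored as 4 bytes.
func putVersion(m metadata, key string, version uint32) {
	var serialized [4]byte
	binary.LittleEndian.PutUint32(serialized[:], version)
	m[key] = serialized[:]
}

// fetchOrCreateVersion mirrors dbFetchOrCreateVersion: an absent key is
// initialized to the default version, then the stored value is returned.
func fetchOrCreateVersion(m metadata, key string, defaultVersion uint32) uint32 {
	version := fetchVersion(m, key)
	if version == 0 {
		version = defaultVersion
		putVersion(m, key, version)
	}
	return version
}

func main() {
	m := metadata{}
	fmt.Println(fetchOrCreateVersion(m, "examplebucketversion", 2)) // 2 (created)
	fmt.Println(fetchOrCreateVersion(m, "examplebucketversion", 9)) // 2 (already stored)
}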
btcsuite/btcd
blockchain/chainio.go
FetchSpendJournal
func (b *BlockChain) FetchSpendJournal(targetBlock *btcutil.Block) ([]SpentTxOut, error) { b.chainLock.RLock() defer b.chainLock.RUnlock() var spendEntries []SpentTxOut err := b.db.View(func(dbTx database.Tx) error { var err error spendEntries, err = dbFetchSpendJournalEntry(dbTx, targetBlock) return err }) if err != nil { return nil, err } return spendEntries, nil }
go
func (b *BlockChain) FetchSpendJournal(targetBlock *btcutil.Block) ([]SpentTxOut, error) { b.chainLock.RLock() defer b.chainLock.RUnlock() var spendEntries []SpentTxOut err := b.db.View(func(dbTx database.Tx) error { var err error spendEntries, err = dbFetchSpendJournalEntry(dbTx, targetBlock) return err }) if err != nil { return nil, err } return spendEntries, nil }
[ "func", "(", "b", "*", "BlockChain", ")", "FetchSpendJournal", "(", "targetBlock", "*", "btcutil", ".", "Block", ")", "(", "[", "]", "SpentTxOut", ",", "error", ")", "{", "b", ".", "chainLock", ".", "RLock", "(", ")", "\n", "defer", "b", ".", "chainLock", ".", "RUnlock", "(", ")", "\n\n", "var", "spendEntries", "[", "]", "SpentTxOut", "\n", "err", ":=", "b", ".", "db", ".", "View", "(", "func", "(", "dbTx", "database", ".", "Tx", ")", "error", "{", "var", "err", "error", "\n\n", "spendEntries", ",", "err", "=", "dbFetchSpendJournalEntry", "(", "dbTx", ",", "targetBlock", ")", "\n", "return", "err", "\n", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "return", "spendEntries", ",", "nil", "\n", "}" ]
// FetchSpendJournal attempts to retrieve the spend journal, or the set of // outputs spent for the target block. This provides a view of all the outputs // that will be consumed once the target block is connected to the end of the // main chain. // // This function is safe for concurrent access.
[ "FetchSpendJournal", "attempts", "to", "retrieve", "the", "spend", "journal", "or", "the", "set", "of", "outputs", "spent", "for", "the", "target", "block", ".", "This", "provides", "a", "view", "of", "all", "the", "outputs", "that", "will", "be", "consumed", "once", "the", "target", "block", "is", "connected", "to", "the", "end", "of", "the", "main", "chain", ".", "This", "function", "is", "safe", "for", "concurrent", "access", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L262-L278
train
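A short caller sketch for FetchSpendJournal, assuming an initialized *blockchain.BlockChain and a block for which a spend journal entry exists (per the docstring, the block being connected to the main chain tip); the summary logic is illustrative only.

package spendreport

import (
	"fmt"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcutil"
)

// summarizeSpends pairs a block with its spend journal view: every output the
// block's transactions consume when the block is connected.
func summarizeSpends(chain *blockchain.BlockChain, block *btcutil.Block) error {
	stxos, err := chain.FetchSpendJournal(block)
	if err != nil {
		return err
	}

	// Tally the total value of the spent outputs.
	var total int64
	for _, stxo := range stxos {
		total += stxo.Amount
	}
	fmt.Printf("block %v spends %d outputs worth %d satoshi\n",
		block.Hash(), len(stxos), total)
	return nil
}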
btcsuite/btcd
blockchain/chainio.go
spentTxOutHeaderCode
func spentTxOutHeaderCode(stxo *SpentTxOut) uint64 { // As described in the serialization format comments, the header code // encodes the height shifted over one bit and the coinbase flag in the // lowest bit. headerCode := uint64(stxo.Height) << 1 if stxo.IsCoinBase { headerCode |= 0x01 } return headerCode }
go
func spentTxOutHeaderCode(stxo *SpentTxOut) uint64 { // As described in the serialization format comments, the header code // encodes the height shifted over one bit and the coinbase flag in the // lowest bit. headerCode := uint64(stxo.Height) << 1 if stxo.IsCoinBase { headerCode |= 0x01 } return headerCode }
[ "func", "spentTxOutHeaderCode", "(", "stxo", "*", "SpentTxOut", ")", "uint64", "{", "// As described in the serialization format comments, the header code", "// encodes the height shifted over one bit and the coinbase flag in the", "// lowest bit.", "headerCode", ":=", "uint64", "(", "stxo", ".", "Height", ")", "<<", "1", "\n", "if", "stxo", ".", "IsCoinBase", "{", "headerCode", "|=", "0x01", "\n", "}", "\n\n", "return", "headerCode", "\n", "}" ]
// spentTxOutHeaderCode returns the calculated header code to be used when // serializing the provided stxo entry.
[ "spentTxOutHeaderCode", "returns", "the", "calculated", "header", "code", "to", "be", "used", "when", "serializing", "the", "provided", "stxo", "entry", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L282-L292
train
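The header code above is a simple bit-packing: the height shifted left one bit, with the coinbase flag in bit 0. A worked round trip, using only the standard library:

package main

import "fmt"

func main() {
	// Encode: height in bits 1..x, coinbase flag in bit 0.
	const height uint64 = 100000
	const isCoinBase = true

	code := height << 1
	if isCoinBase {
		code |= 0x01
	}
	fmt.Printf("header code: %d (0x%x)\n", code, code) // 200001 (0x30d41)

	// Decode: reverse the packing.
	fmt.Println("coinbase:", code&0x01 != 0) // true
	fmt.Println("height:  ", code>>1)        // 100000
}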
btcsuite/btcd
blockchain/chainio.go
spentTxOutSerializeSize
func spentTxOutSerializeSize(stxo *SpentTxOut) int { size := serializeSizeVLQ(spentTxOutHeaderCode(stxo)) if stxo.Height > 0 { // The legacy v1 spend journal format conditionally tracked the // containing transaction version when the height was non-zero, // so this is required for backwards compat. size += serializeSizeVLQ(0) } return size + compressedTxOutSize(uint64(stxo.Amount), stxo.PkScript) }
go
func spentTxOutSerializeSize(stxo *SpentTxOut) int { size := serializeSizeVLQ(spentTxOutHeaderCode(stxo)) if stxo.Height > 0 { // The legacy v1 spend journal format conditionally tracked the // containing transaction version when the height was non-zero, // so this is required for backwards compat. size += serializeSizeVLQ(0) } return size + compressedTxOutSize(uint64(stxo.Amount), stxo.PkScript) }
[ "func", "spentTxOutSerializeSize", "(", "stxo", "*", "SpentTxOut", ")", "int", "{", "size", ":=", "serializeSizeVLQ", "(", "spentTxOutHeaderCode", "(", "stxo", ")", ")", "\n", "if", "stxo", ".", "Height", ">", "0", "{", "// The legacy v1 spend journal format conditionally tracked the", "// containing transaction version when the height was non-zero,", "// so this is required for backwards compat.", "size", "+=", "serializeSizeVLQ", "(", "0", ")", "\n", "}", "\n", "return", "size", "+", "compressedTxOutSize", "(", "uint64", "(", "stxo", ".", "Amount", ")", ",", "stxo", ".", "PkScript", ")", "\n", "}" ]
// spentTxOutSerializeSize returns the number of bytes it would take to // serialize the passed stxo according to the format described above.
[ "spentTxOutSerializeSize", "returns", "the", "number", "of", "bytes", "it", "would", "take", "to", "serialize", "the", "passed", "stxo", "according", "to", "the", "format", "described", "above", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L296-L305
train
btcsuite/btcd
blockchain/chainio.go
putSpentTxOut
func putSpentTxOut(target []byte, stxo *SpentTxOut) int { headerCode := spentTxOutHeaderCode(stxo) offset := putVLQ(target, headerCode) if stxo.Height > 0 { // The legacy v1 spend journal format conditionally tracked the // containing transaction version when the height was non-zero, // so this is required for backwards compat. offset += putVLQ(target[offset:], 0) } return offset + putCompressedTxOut(target[offset:], uint64(stxo.Amount), stxo.PkScript) }
go
func putSpentTxOut(target []byte, stxo *SpentTxOut) int { headerCode := spentTxOutHeaderCode(stxo) offset := putVLQ(target, headerCode) if stxo.Height > 0 { // The legacy v1 spend journal format conditionally tracked the // containing transaction version when the height was non-zero, // so this is required for backwards compat. offset += putVLQ(target[offset:], 0) } return offset + putCompressedTxOut(target[offset:], uint64(stxo.Amount), stxo.PkScript) }
[ "func", "putSpentTxOut", "(", "target", "[", "]", "byte", ",", "stxo", "*", "SpentTxOut", ")", "int", "{", "headerCode", ":=", "spentTxOutHeaderCode", "(", "stxo", ")", "\n", "offset", ":=", "putVLQ", "(", "target", ",", "headerCode", ")", "\n", "if", "stxo", ".", "Height", ">", "0", "{", "// The legacy v1 spend journal format conditionally tracked the", "// containing transaction version when the height was non-zero,", "// so this is required for backwards compat.", "offset", "+=", "putVLQ", "(", "target", "[", "offset", ":", "]", ",", "0", ")", "\n", "}", "\n", "return", "offset", "+", "putCompressedTxOut", "(", "target", "[", "offset", ":", "]", ",", "uint64", "(", "stxo", ".", "Amount", ")", ",", "stxo", ".", "PkScript", ")", "\n", "}" ]
// putSpentTxOut serializes the passed stxo according to the format described // above directly into the passed target byte slice. The target byte slice must // be at least large enough to handle the number of bytes returned by the // spentTxOutSerializeSize function or it will panic.
[ "putSpentTxOut", "serializes", "the", "passed", "stxo", "according", "to", "the", "format", "described", "above", "directly", "into", "the", "passed", "target", "byte", "slice", ".", "The", "target", "byte", "slice", "must", "be", "at", "least", "large", "enough", "to", "handle", "the", "number", "of", "bytes", "returned", "by", "the", "spentTxOutSerializeSize", "function", "or", "it", "will", "panic", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L311-L322
train
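The buffer-sizing contract in the putSpentTxOut docstring is easy to get wrong, so here is a tiny sketch of the intended call pattern; it is assumed to live in the same package as the two helpers above, and the helper name is hypothetical.

// serializeSingleStxo shows the caller contract: the destination buffer must
// be sized with spentTxOutSerializeSize first, otherwise putSpentTxOut would
// write past the end of the slice and panic.
func serializeSingleStxo(stxo *SpentTxOut) []byte {
	serialized := make([]byte, spentTxOutSerializeSize(stxo))
	_ = putSpentTxOut(serialized, stxo)
	return serialized
}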
btcsuite/btcd
blockchain/chainio.go
decodeSpentTxOut
func decodeSpentTxOut(serialized []byte, stxo *SpentTxOut) (int, error) { // Ensure there are bytes to decode. if len(serialized) == 0 { return 0, errDeserialize("no serialized bytes") } // Deserialize the header code. code, offset := deserializeVLQ(serialized) if offset >= len(serialized) { return offset, errDeserialize("unexpected end of data after " + "header code") } // Decode the header code. // // Bit 0 indicates containing transaction is a coinbase. // Bits 1-x encode height of containing transaction. stxo.IsCoinBase = code&0x01 != 0 stxo.Height = int32(code >> 1) if stxo.Height > 0 { // The legacy v1 spend journal format conditionally tracked the // containing transaction version when the height was non-zero, // so this is required for backwards compat. _, bytesRead := deserializeVLQ(serialized[offset:]) offset += bytesRead if offset >= len(serialized) { return offset, errDeserialize("unexpected end of data " + "after reserved") } } // Decode the compressed txout. amount, pkScript, bytesRead, err := decodeCompressedTxOut( serialized[offset:]) offset += bytesRead if err != nil { return offset, errDeserialize(fmt.Sprintf("unable to decode "+ "txout: %v", err)) } stxo.Amount = int64(amount) stxo.PkScript = pkScript return offset, nil }
go
func decodeSpentTxOut(serialized []byte, stxo *SpentTxOut) (int, error) { // Ensure there are bytes to decode. if len(serialized) == 0 { return 0, errDeserialize("no serialized bytes") } // Deserialize the header code. code, offset := deserializeVLQ(serialized) if offset >= len(serialized) { return offset, errDeserialize("unexpected end of data after " + "header code") } // Decode the header code. // // Bit 0 indicates containing transaction is a coinbase. // Bits 1-x encode height of containing transaction. stxo.IsCoinBase = code&0x01 != 0 stxo.Height = int32(code >> 1) if stxo.Height > 0 { // The legacy v1 spend journal format conditionally tracked the // containing transaction version when the height was non-zero, // so this is required for backwards compat. _, bytesRead := deserializeVLQ(serialized[offset:]) offset += bytesRead if offset >= len(serialized) { return offset, errDeserialize("unexpected end of data " + "after reserved") } } // Decode the compressed txout. amount, pkScript, bytesRead, err := decodeCompressedTxOut( serialized[offset:]) offset += bytesRead if err != nil { return offset, errDeserialize(fmt.Sprintf("unable to decode "+ "txout: %v", err)) } stxo.Amount = int64(amount) stxo.PkScript = pkScript return offset, nil }
[ "func", "decodeSpentTxOut", "(", "serialized", "[", "]", "byte", ",", "stxo", "*", "SpentTxOut", ")", "(", "int", ",", "error", ")", "{", "// Ensure there are bytes to decode.", "if", "len", "(", "serialized", ")", "==", "0", "{", "return", "0", ",", "errDeserialize", "(", "\"", "\"", ")", "\n", "}", "\n\n", "// Deserialize the header code.", "code", ",", "offset", ":=", "deserializeVLQ", "(", "serialized", ")", "\n", "if", "offset", ">=", "len", "(", "serialized", ")", "{", "return", "offset", ",", "errDeserialize", "(", "\"", "\"", "+", "\"", "\"", ")", "\n", "}", "\n\n", "// Decode the header code.", "//", "// Bit 0 indicates containing transaction is a coinbase.", "// Bits 1-x encode height of containing transaction.", "stxo", ".", "IsCoinBase", "=", "code", "&", "0x01", "!=", "0", "\n", "stxo", ".", "Height", "=", "int32", "(", "code", ">>", "1", ")", "\n", "if", "stxo", ".", "Height", ">", "0", "{", "// The legacy v1 spend journal format conditionally tracked the", "// containing transaction version when the height was non-zero,", "// so this is required for backwards compat.", "_", ",", "bytesRead", ":=", "deserializeVLQ", "(", "serialized", "[", "offset", ":", "]", ")", "\n", "offset", "+=", "bytesRead", "\n", "if", "offset", ">=", "len", "(", "serialized", ")", "{", "return", "offset", ",", "errDeserialize", "(", "\"", "\"", "+", "\"", "\"", ")", "\n", "}", "\n", "}", "\n\n", "// Decode the compressed txout.", "amount", ",", "pkScript", ",", "bytesRead", ",", "err", ":=", "decodeCompressedTxOut", "(", "serialized", "[", "offset", ":", "]", ")", "\n", "offset", "+=", "bytesRead", "\n", "if", "err", "!=", "nil", "{", "return", "offset", ",", "errDeserialize", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "err", ")", ")", "\n", "}", "\n", "stxo", ".", "Amount", "=", "int64", "(", "amount", ")", "\n", "stxo", ".", "PkScript", "=", "pkScript", "\n", "return", "offset", ",", "nil", "\n", "}" ]
// decodeSpentTxOut decodes the passed serialized stxo entry, possibly followed // by other data, into the passed stxo struct. It returns the number of bytes // read.
[ "decodeSpentTxOut", "decodes", "the", "passed", "serialized", "stxo", "entry", "possibly", "followed", "by", "other", "data", "into", "the", "passed", "stxo", "struct", ".", "It", "returns", "the", "number", "of", "bytes", "read", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L327-L369
train
btcsuite/btcd
blockchain/chainio.go
deserializeSpendJournalEntry
func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]SpentTxOut, error) { // Calculate the total number of stxos. var numStxos int for _, tx := range txns { numStxos += len(tx.TxIn) } // When a block has no spent txouts there is nothing to serialize. if len(serialized) == 0 { // Ensure the block actually has no stxos. This should never // happen unless there is database corruption or an empty entry // erroneously made its way into the database. if numStxos != 0 { return nil, AssertError(fmt.Sprintf("mismatched spend "+ "journal serialization - no serialization for "+ "expected %d stxos", numStxos)) } return nil, nil } // Loop backwards through all transactions so everything is read in // reverse order to match the serialization order. stxoIdx := numStxos - 1 offset := 0 stxos := make([]SpentTxOut, numStxos) for txIdx := len(txns) - 1; txIdx > -1; txIdx-- { tx := txns[txIdx] // Loop backwards through all of the transaction inputs and read // the associated stxo. for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- { txIn := tx.TxIn[txInIdx] stxo := &stxos[stxoIdx] stxoIdx-- n, err := decodeSpentTxOut(serialized[offset:], stxo) offset += n if err != nil { return nil, errDeserialize(fmt.Sprintf("unable "+ "to decode stxo for %v: %v", txIn.PreviousOutPoint, err)) } } } return stxos, nil }
go
func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]SpentTxOut, error) { // Calculate the total number of stxos. var numStxos int for _, tx := range txns { numStxos += len(tx.TxIn) } // When a block has no spent txouts there is nothing to serialize. if len(serialized) == 0 { // Ensure the block actually has no stxos. This should never // happen unless there is database corruption or an empty entry // erroneously made its way into the database. if numStxos != 0 { return nil, AssertError(fmt.Sprintf("mismatched spend "+ "journal serialization - no serialization for "+ "expected %d stxos", numStxos)) } return nil, nil } // Loop backwards through all transactions so everything is read in // reverse order to match the serialization order. stxoIdx := numStxos - 1 offset := 0 stxos := make([]SpentTxOut, numStxos) for txIdx := len(txns) - 1; txIdx > -1; txIdx-- { tx := txns[txIdx] // Loop backwards through all of the transaction inputs and read // the associated stxo. for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- { txIn := tx.TxIn[txInIdx] stxo := &stxos[stxoIdx] stxoIdx-- n, err := decodeSpentTxOut(serialized[offset:], stxo) offset += n if err != nil { return nil, errDeserialize(fmt.Sprintf("unable "+ "to decode stxo for %v: %v", txIn.PreviousOutPoint, err)) } } } return stxos, nil }
[ "func", "deserializeSpendJournalEntry", "(", "serialized", "[", "]", "byte", ",", "txns", "[", "]", "*", "wire", ".", "MsgTx", ")", "(", "[", "]", "SpentTxOut", ",", "error", ")", "{", "// Calculate the total number of stxos.", "var", "numStxos", "int", "\n", "for", "_", ",", "tx", ":=", "range", "txns", "{", "numStxos", "+=", "len", "(", "tx", ".", "TxIn", ")", "\n", "}", "\n\n", "// When a block has no spent txouts there is nothing to serialize.", "if", "len", "(", "serialized", ")", "==", "0", "{", "// Ensure the block actually has no stxos. This should never", "// happen unless there is database corruption or an empty entry", "// erroneously made its way into the database.", "if", "numStxos", "!=", "0", "{", "return", "nil", ",", "AssertError", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", "+", "\"", "\"", ",", "numStxos", ")", ")", "\n", "}", "\n\n", "return", "nil", ",", "nil", "\n", "}", "\n\n", "// Loop backwards through all transactions so everything is read in", "// reverse order to match the serialization order.", "stxoIdx", ":=", "numStxos", "-", "1", "\n", "offset", ":=", "0", "\n", "stxos", ":=", "make", "(", "[", "]", "SpentTxOut", ",", "numStxos", ")", "\n", "for", "txIdx", ":=", "len", "(", "txns", ")", "-", "1", ";", "txIdx", ">", "-", "1", ";", "txIdx", "--", "{", "tx", ":=", "txns", "[", "txIdx", "]", "\n\n", "// Loop backwards through all of the transaction inputs and read", "// the associated stxo.", "for", "txInIdx", ":=", "len", "(", "tx", ".", "TxIn", ")", "-", "1", ";", "txInIdx", ">", "-", "1", ";", "txInIdx", "--", "{", "txIn", ":=", "tx", ".", "TxIn", "[", "txInIdx", "]", "\n", "stxo", ":=", "&", "stxos", "[", "stxoIdx", "]", "\n", "stxoIdx", "--", "\n\n", "n", ",", "err", ":=", "decodeSpentTxOut", "(", "serialized", "[", "offset", ":", "]", ",", "stxo", ")", "\n", "offset", "+=", "n", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "errDeserialize", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "txIn", ".", "PreviousOutPoint", ",", "err", ")", ")", "\n", "}", "\n", "}", "\n", "}", "\n\n", "return", "stxos", ",", "nil", "\n", "}" ]
// deserializeSpendJournalEntry decodes the passed serialized byte slice into a // slice of spent txouts according to the format described in detail above. // // Since the serialization format is not self describing, as noted in the // format comments, this function also requires the transactions that spend the // txouts.
[ "deserializeSpendJournalEntry", "decodes", "the", "passed", "serialized", "byte", "slice", "into", "a", "slice", "of", "spent", "txouts", "according", "to", "the", "format", "described", "in", "detail", "above", ".", "Since", "the", "serialization", "format", "is", "not", "self", "describing", "as", "noted", "in", "the", "format", "comments", "this", "function", "also", "requires", "the", "transactions", "that", "spend", "the", "txouts", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L377-L424
train
btcsuite/btcd
blockchain/chainio.go
serializeSpendJournalEntry
func serializeSpendJournalEntry(stxos []SpentTxOut) []byte { if len(stxos) == 0 { return nil } // Calculate the size needed to serialize the entire journal entry. var size int for i := range stxos { size += spentTxOutSerializeSize(&stxos[i]) } serialized := make([]byte, size) // Serialize each individual stxo directly into the slice in reverse // order one after the other. var offset int for i := len(stxos) - 1; i > -1; i-- { offset += putSpentTxOut(serialized[offset:], &stxos[i]) } return serialized }
go
func serializeSpendJournalEntry(stxos []SpentTxOut) []byte { if len(stxos) == 0 { return nil } // Calculate the size needed to serialize the entire journal entry. var size int for i := range stxos { size += spentTxOutSerializeSize(&stxos[i]) } serialized := make([]byte, size) // Serialize each individual stxo directly into the slice in reverse // order one after the other. var offset int for i := len(stxos) - 1; i > -1; i-- { offset += putSpentTxOut(serialized[offset:], &stxos[i]) } return serialized }
[ "func", "serializeSpendJournalEntry", "(", "stxos", "[", "]", "SpentTxOut", ")", "[", "]", "byte", "{", "if", "len", "(", "stxos", ")", "==", "0", "{", "return", "nil", "\n", "}", "\n\n", "// Calculate the size needed to serialize the entire journal entry.", "var", "size", "int", "\n", "for", "i", ":=", "range", "stxos", "{", "size", "+=", "spentTxOutSerializeSize", "(", "&", "stxos", "[", "i", "]", ")", "\n", "}", "\n", "serialized", ":=", "make", "(", "[", "]", "byte", ",", "size", ")", "\n\n", "// Serialize each individual stxo directly into the slice in reverse", "// order one after the other.", "var", "offset", "int", "\n", "for", "i", ":=", "len", "(", "stxos", ")", "-", "1", ";", "i", ">", "-", "1", ";", "i", "--", "{", "offset", "+=", "putSpentTxOut", "(", "serialized", "[", "offset", ":", "]", ",", "&", "stxos", "[", "i", "]", ")", "\n", "}", "\n\n", "return", "serialized", "\n", "}" ]
// serializeSpendJournalEntry serializes all of the passed spent txouts into a // single byte slice according to the format described in detail above.
[ "serializeSpendJournalEntry", "serializes", "all", "of", "the", "passed", "spent", "txouts", "into", "a", "single", "byte", "slice", "according", "to", "the", "format", "described", "in", "detail", "above", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L428-L448
train
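Both journal helpers above walk the entries backwards: serialization writes the last-spent output first, and deserialization fills its result slice from the end while reading the data front to back, so the decoded slice ends up in forward spend order. A toy round trip (one byte per entry, purely to show the index bookkeeping):

package main

import "fmt"

func main() {
	spendOrder := []byte{10, 20, 30, 40} // stxos in the order they were spent

	// Serialize in reverse: the last spent entry is written first.
	serialized := make([]byte, 0, len(spendOrder))
	for i := len(spendOrder) - 1; i > -1; i-- {
		serialized = append(serialized, spendOrder[i])
	}
	fmt.Println(serialized) // [40 30 20 10]

	// Deserialize by reading front to back while filling the result slice
	// from its end toward its start, mirroring the backward transaction walk.
	decoded := make([]byte, len(spendOrder))
	idx := len(decoded) - 1
	for _, b := range serialized {
		decoded[idx] = b
		idx--
	}
	fmt.Println(decoded) // [10 20 30 40]
}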
btcsuite/btcd
blockchain/chainio.go
dbPutSpendJournalEntry
func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash, stxos []SpentTxOut) error { spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) serialized := serializeSpendJournalEntry(stxos) return spendBucket.Put(blockHash[:], serialized) }
go
func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash, stxos []SpentTxOut) error { spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) serialized := serializeSpendJournalEntry(stxos) return spendBucket.Put(blockHash[:], serialized) }
[ "func", "dbPutSpendJournalEntry", "(", "dbTx", "database", ".", "Tx", ",", "blockHash", "*", "chainhash", ".", "Hash", ",", "stxos", "[", "]", "SpentTxOut", ")", "error", "{", "spendBucket", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "spendJournalBucketName", ")", "\n", "serialized", ":=", "serializeSpendJournalEntry", "(", "stxos", ")", "\n", "return", "spendBucket", ".", "Put", "(", "blockHash", "[", ":", "]", ",", "serialized", ")", "\n", "}" ]
// dbPutSpendJournalEntry uses an existing database transaction to update the // spend journal entry for the given block hash using the provided slice of // spent txouts. The spent txouts slice must contain an entry for every txout // the transactions in the block spend in the order they are spent.
[ "dbPutSpendJournalEntry", "uses", "an", "existing", "database", "transaction", "to", "update", "the", "spend", "journal", "entry", "for", "the", "given", "block", "hash", "using", "the", "provided", "slice", "of", "spent", "txouts", ".", "The", "spent", "txouts", "slice", "must", "contain", "an", "entry", "for", "every", "txout", "the", "transactions", "in", "the", "block", "spend", "in", "the", "order", "they", "are", "spent", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L484-L488
train
btcsuite/btcd
blockchain/chainio.go
dbRemoveSpendJournalEntry
func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) error { spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) return spendBucket.Delete(blockHash[:]) }
go
func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) error { spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) return spendBucket.Delete(blockHash[:]) }
[ "func", "dbRemoveSpendJournalEntry", "(", "dbTx", "database", ".", "Tx", ",", "blockHash", "*", "chainhash", ".", "Hash", ")", "error", "{", "spendBucket", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "spendJournalBucketName", ")", "\n", "return", "spendBucket", ".", "Delete", "(", "blockHash", "[", ":", "]", ")", "\n", "}" ]
// dbRemoveSpendJournalEntry uses an existing database transaction to remove the // spend journal entry for the passed block hash.
[ "dbRemoveSpendJournalEntry", "uses", "an", "existing", "database", "transaction", "to", "remove", "the", "spend", "journal", "entry", "for", "the", "passed", "block", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L492-L495
train
btcsuite/btcd
blockchain/chainio.go
outpointKey
func outpointKey(outpoint wire.OutPoint) *[]byte { // A VLQ employs an MSB encoding, so they are useful not only to reduce // the amount of storage space, but also so iteration of utxos when // doing byte-wise comparisons will produce them in order. key := outpointKeyPool.Get().(*[]byte) idx := uint64(outpoint.Index) *key = (*key)[:chainhash.HashSize+serializeSizeVLQ(idx)] copy(*key, outpoint.Hash[:]) putVLQ((*key)[chainhash.HashSize:], idx) return key }
go
func outpointKey(outpoint wire.OutPoint) *[]byte { // A VLQ employs an MSB encoding, so they are useful not only to reduce // the amount of storage space, but also so iteration of utxos when // doing byte-wise comparisons will produce them in order. key := outpointKeyPool.Get().(*[]byte) idx := uint64(outpoint.Index) *key = (*key)[:chainhash.HashSize+serializeSizeVLQ(idx)] copy(*key, outpoint.Hash[:]) putVLQ((*key)[chainhash.HashSize:], idx) return key }
[ "func", "outpointKey", "(", "outpoint", "wire", ".", "OutPoint", ")", "*", "[", "]", "byte", "{", "// A VLQ employs an MSB encoding, so they are useful not only to reduce", "// the amount of storage space, but also so iteration of utxos when", "// doing byte-wise comparisons will produce them in order.", "key", ":=", "outpointKeyPool", ".", "Get", "(", ")", ".", "(", "*", "[", "]", "byte", ")", "\n", "idx", ":=", "uint64", "(", "outpoint", ".", "Index", ")", "\n", "*", "key", "=", "(", "*", "key", ")", "[", ":", "chainhash", ".", "HashSize", "+", "serializeSizeVLQ", "(", "idx", ")", "]", "\n", "copy", "(", "*", "key", ",", "outpoint", ".", "Hash", "[", ":", "]", ")", "\n", "putVLQ", "(", "(", "*", "key", ")", "[", "chainhash", ".", "HashSize", ":", "]", ",", "idx", ")", "\n", "return", "key", "\n", "}" ]
// outpointKey returns a key suitable for use as a database key in the utxo set // while making use of a free list. A new buffer is allocated if there are not // already any available on the free list. The returned byte slice should be // returned to the free list by using the recycleOutpointKey function when the // caller is done with it _unless_ the slice will need to live for longer than // the caller can calculate such as when used to write to the database.
[ "outpointKey", "returns", "a", "key", "suitable", "for", "use", "as", "a", "database", "key", "in", "the", "utxo", "set", "while", "making", "use", "of", "a", "free", "list", ".", "A", "new", "buffer", "is", "allocated", "if", "there", "are", "not", "already", "any", "available", "on", "the", "free", "list", ".", "The", "returned", "byte", "slice", "should", "be", "returned", "to", "the", "free", "list", "by", "using", "the", "recycleOutpointKey", "function", "when", "the", "caller", "is", "done", "with", "it", "_unless_", "the", "slice", "will", "need", "to", "live", "for", "longer", "than", "the", "caller", "can", "calculate", "such", "as", "when", "used", "to", "write", "to", "the", "database", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L594-L604
train
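A simplified sketch of the free-list idea behind outpointKey, using sync.Pool and a fixed 4-byte big-endian index in place of the real VLQ suffix; the sizes and names here are illustrative, not btcd's. Like the MSB-first VLQ, a big-endian fixed-width index keeps keys sorting byte-wise in (hash, index) order, which is what makes seeking by hash prefix work.

package main

import (
	"encoding/binary"
	"fmt"
	"sync"
)

const hashSize = 32

// keyPool is a stand-in for outpointKeyPool: reusing key buffers avoids one
// allocation per UTXO lookup.
var keyPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, hashSize+4)
		return &b
	},
}

// outpointKeySimple builds a <hash><index> key from a pooled buffer.
func outpointKeySimple(hash [hashSize]byte, index uint32) *[]byte {
	key := keyPool.Get().(*[]byte)
	*key = (*key)[:hashSize+4]
	copy(*key, hash[:])
	binary.BigEndian.PutUint32((*key)[hashSize:], index)
	return key
}

// recycleKey returns a key buffer to the pool once the caller is done with it.
func recycleKey(key *[]byte) {
	keyPool.Put(key)
}

func main() {
	var h [hashSize]byte
	h[0] = 0xab

	key := outpointKeySimple(h, 7)
	fmt.Printf("% x\n", *key) // ab 00 ... 00 followed by 00 00 00 07
	recycleKey(key)           // safe here: the key is not retained afterwards
}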
btcsuite/btcd
blockchain/chainio.go
utxoEntryHeaderCode
func utxoEntryHeaderCode(entry *UtxoEntry) (uint64, error) { if entry.IsSpent() { return 0, AssertError("attempt to serialize spent utxo header") } // As described in the serialization format comments, the header code // encodes the height shifted over one bit and the coinbase flag in the // lowest bit. headerCode := uint64(entry.BlockHeight()) << 1 if entry.IsCoinBase() { headerCode |= 0x01 } return headerCode, nil }
go
func utxoEntryHeaderCode(entry *UtxoEntry) (uint64, error) { if entry.IsSpent() { return 0, AssertError("attempt to serialize spent utxo header") } // As described in the serialization format comments, the header code // encodes the height shifted over one bit and the coinbase flag in the // lowest bit. headerCode := uint64(entry.BlockHeight()) << 1 if entry.IsCoinBase() { headerCode |= 0x01 } return headerCode, nil }
[ "func", "utxoEntryHeaderCode", "(", "entry", "*", "UtxoEntry", ")", "(", "uint64", ",", "error", ")", "{", "if", "entry", ".", "IsSpent", "(", ")", "{", "return", "0", ",", "AssertError", "(", "\"", "\"", ")", "\n", "}", "\n\n", "// As described in the serialization format comments, the header code", "// encodes the height shifted over one bit and the coinbase flag in the", "// lowest bit.", "headerCode", ":=", "uint64", "(", "entry", ".", "BlockHeight", "(", ")", ")", "<<", "1", "\n", "if", "entry", ".", "IsCoinBase", "(", ")", "{", "headerCode", "|=", "0x01", "\n", "}", "\n\n", "return", "headerCode", ",", "nil", "\n", "}" ]
// utxoEntryHeaderCode returns the calculated header code to be used when // serializing the provided utxo entry.
[ "utxoEntryHeaderCode", "returns", "the", "calculated", "header", "code", "to", "be", "used", "when", "serializing", "the", "provided", "utxo", "entry", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L614-L628
train
btcsuite/btcd
blockchain/chainio.go
serializeUtxoEntry
func serializeUtxoEntry(entry *UtxoEntry) ([]byte, error) { // Spent outputs have no serialization. if entry.IsSpent() { return nil, nil } // Encode the header code. headerCode, err := utxoEntryHeaderCode(entry) if err != nil { return nil, err } // Calculate the size needed to serialize the entry. size := serializeSizeVLQ(headerCode) + compressedTxOutSize(uint64(entry.Amount()), entry.PkScript()) // Serialize the header code followed by the compressed unspent // transaction output. serialized := make([]byte, size) offset := putVLQ(serialized, headerCode) offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()), entry.PkScript()) return serialized, nil }
go
func serializeUtxoEntry(entry *UtxoEntry) ([]byte, error) { // Spent outputs have no serialization. if entry.IsSpent() { return nil, nil } // Encode the header code. headerCode, err := utxoEntryHeaderCode(entry) if err != nil { return nil, err } // Calculate the size needed to serialize the entry. size := serializeSizeVLQ(headerCode) + compressedTxOutSize(uint64(entry.Amount()), entry.PkScript()) // Serialize the header code followed by the compressed unspent // transaction output. serialized := make([]byte, size) offset := putVLQ(serialized, headerCode) offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()), entry.PkScript()) return serialized, nil }
[ "func", "serializeUtxoEntry", "(", "entry", "*", "UtxoEntry", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "// Spent outputs have no serialization.", "if", "entry", ".", "IsSpent", "(", ")", "{", "return", "nil", ",", "nil", "\n", "}", "\n\n", "// Encode the header code.", "headerCode", ",", "err", ":=", "utxoEntryHeaderCode", "(", "entry", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "// Calculate the size needed to serialize the entry.", "size", ":=", "serializeSizeVLQ", "(", "headerCode", ")", "+", "compressedTxOutSize", "(", "uint64", "(", "entry", ".", "Amount", "(", ")", ")", ",", "entry", ".", "PkScript", "(", ")", ")", "\n\n", "// Serialize the header code followed by the compressed unspent", "// transaction output.", "serialized", ":=", "make", "(", "[", "]", "byte", ",", "size", ")", "\n", "offset", ":=", "putVLQ", "(", "serialized", ",", "headerCode", ")", "\n", "offset", "+=", "putCompressedTxOut", "(", "serialized", "[", "offset", ":", "]", ",", "uint64", "(", "entry", ".", "Amount", "(", ")", ")", ",", "entry", ".", "PkScript", "(", ")", ")", "\n\n", "return", "serialized", ",", "nil", "\n", "}" ]
// serializeUtxoEntry returns the entry serialized to a format that is suitable // for long-term storage. The format is described in detail above.
[ "serializeUtxoEntry", "returns", "the", "entry", "serialized", "to", "a", "format", "that", "is", "suitable", "for", "long", "-", "term", "storage", ".", "The", "format", "is", "described", "in", "detail", "above", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L632-L656
train
btcsuite/btcd
blockchain/chainio.go
deserializeUtxoEntry
func deserializeUtxoEntry(serialized []byte) (*UtxoEntry, error) { // Deserialize the header code. code, offset := deserializeVLQ(serialized) if offset >= len(serialized) { return nil, errDeserialize("unexpected end of data after header") } // Decode the header code. // // Bit 0 indicates whether the containing transaction is a coinbase. // Bits 1-x encode height of containing transaction. isCoinBase := code&0x01 != 0 blockHeight := int32(code >> 1) // Decode the compressed unspent transaction output. amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:]) if err != nil { return nil, errDeserialize(fmt.Sprintf("unable to decode "+ "utxo: %v", err)) } entry := &UtxoEntry{ amount: int64(amount), pkScript: pkScript, blockHeight: blockHeight, packedFlags: 0, } if isCoinBase { entry.packedFlags |= tfCoinBase } return entry, nil }
go
func deserializeUtxoEntry(serialized []byte) (*UtxoEntry, error) { // Deserialize the header code. code, offset := deserializeVLQ(serialized) if offset >= len(serialized) { return nil, errDeserialize("unexpected end of data after header") } // Decode the header code. // // Bit 0 indicates whether the containing transaction is a coinbase. // Bits 1-x encode height of containing transaction. isCoinBase := code&0x01 != 0 blockHeight := int32(code >> 1) // Decode the compressed unspent transaction output. amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:]) if err != nil { return nil, errDeserialize(fmt.Sprintf("unable to decode "+ "utxo: %v", err)) } entry := &UtxoEntry{ amount: int64(amount), pkScript: pkScript, blockHeight: blockHeight, packedFlags: 0, } if isCoinBase { entry.packedFlags |= tfCoinBase } return entry, nil }
[ "func", "deserializeUtxoEntry", "(", "serialized", "[", "]", "byte", ")", "(", "*", "UtxoEntry", ",", "error", ")", "{", "// Deserialize the header code.", "code", ",", "offset", ":=", "deserializeVLQ", "(", "serialized", ")", "\n", "if", "offset", ">=", "len", "(", "serialized", ")", "{", "return", "nil", ",", "errDeserialize", "(", "\"", "\"", ")", "\n", "}", "\n\n", "// Decode the header code.", "//", "// Bit 0 indicates whether the containing transaction is a coinbase.", "// Bits 1-x encode height of containing transaction.", "isCoinBase", ":=", "code", "&", "0x01", "!=", "0", "\n", "blockHeight", ":=", "int32", "(", "code", ">>", "1", ")", "\n\n", "// Decode the compressed unspent transaction output.", "amount", ",", "pkScript", ",", "_", ",", "err", ":=", "decodeCompressedTxOut", "(", "serialized", "[", "offset", ":", "]", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "errDeserialize", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "err", ")", ")", "\n", "}", "\n\n", "entry", ":=", "&", "UtxoEntry", "{", "amount", ":", "int64", "(", "amount", ")", ",", "pkScript", ":", "pkScript", ",", "blockHeight", ":", "blockHeight", ",", "packedFlags", ":", "0", ",", "}", "\n", "if", "isCoinBase", "{", "entry", ".", "packedFlags", "|=", "tfCoinBase", "\n", "}", "\n\n", "return", "entry", ",", "nil", "\n", "}" ]
// deserializeUtxoEntry decodes a utxo entry from the passed serialized byte // slice into a new UtxoEntry using a format that is suitable for long-term // storage. The format is described in detail above.
[ "deserializeUtxoEntry", "decodes", "a", "utxo", "entry", "from", "the", "passed", "serialized", "byte", "slice", "into", "a", "new", "UtxoEntry", "using", "a", "format", "that", "is", "suitable", "for", "long", "-", "term", "storage", ".", "The", "format", "is", "described", "in", "detail", "above", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L661-L693
train
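The header code described in the two utxo entry records above packs the coinbase flag and the containing block height into a single integer before VLQ encoding. The following standalone sketch uses no btcd imports; the helper name packHeaderCode is made up for illustration and only mirrors the documented bit layout:

package main

import "fmt"

// packHeaderCode mirrors the documented layout: bit 0 holds the coinbase
// flag and the remaining bits hold the containing block height.
func packHeaderCode(isCoinBase bool, blockHeight int32) uint64 {
    code := uint64(blockHeight) << 1
    if isCoinBase {
        code |= 0x01
    }
    return code
}

func main() {
    code := packHeaderCode(true, 500000)
    // Decode exactly as deserializeUtxoEntry does with the header code.
    fmt.Println(code&0x01 != 0, int32(code>>1)) // true 500000
}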
btcsuite/btcd
blockchain/chainio.go
dbFetchUtxoEntryByHash
func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) { // Attempt to find an entry by seeking for the hash along with a zero // index. Due to the fact the keys are serialized as <hash><index>, // where the index uses an MSB encoding, if there are any entries for // the hash at all, one will be found. cursor := dbTx.Metadata().Bucket(utxoSetBucketName).Cursor() key := outpointKey(wire.OutPoint{Hash: *hash, Index: 0}) ok := cursor.Seek(*key) recycleOutpointKey(key) if !ok { return nil, nil } // An entry was found, but it could just be an entry with the next // highest hash after the requested one, so make sure the hashes // actually match. cursorKey := cursor.Key() if len(cursorKey) < chainhash.HashSize { return nil, nil } if !bytes.Equal(hash[:], cursorKey[:chainhash.HashSize]) { return nil, nil } return deserializeUtxoEntry(cursor.Value()) }
go
func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) { // Attempt to find an entry by seeking for the hash along with a zero // index. Due to the fact the keys are serialized as <hash><index>, // where the index uses an MSB encoding, if there are any entries for // the hash at all, one will be found. cursor := dbTx.Metadata().Bucket(utxoSetBucketName).Cursor() key := outpointKey(wire.OutPoint{Hash: *hash, Index: 0}) ok := cursor.Seek(*key) recycleOutpointKey(key) if !ok { return nil, nil } // An entry was found, but it could just be an entry with the next // highest hash after the requested one, so make sure the hashes // actually match. cursorKey := cursor.Key() if len(cursorKey) < chainhash.HashSize { return nil, nil } if !bytes.Equal(hash[:], cursorKey[:chainhash.HashSize]) { return nil, nil } return deserializeUtxoEntry(cursor.Value()) }
[ "func", "dbFetchUtxoEntryByHash", "(", "dbTx", "database", ".", "Tx", ",", "hash", "*", "chainhash", ".", "Hash", ")", "(", "*", "UtxoEntry", ",", "error", ")", "{", "// Attempt to find an entry by seeking for the hash along with a zero", "// index. Due to the fact the keys are serialized as <hash><index>,", "// where the index uses an MSB encoding, if there are any entries for", "// the hash at all, one will be found.", "cursor", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "utxoSetBucketName", ")", ".", "Cursor", "(", ")", "\n", "key", ":=", "outpointKey", "(", "wire", ".", "OutPoint", "{", "Hash", ":", "*", "hash", ",", "Index", ":", "0", "}", ")", "\n", "ok", ":=", "cursor", ".", "Seek", "(", "*", "key", ")", "\n", "recycleOutpointKey", "(", "key", ")", "\n", "if", "!", "ok", "{", "return", "nil", ",", "nil", "\n", "}", "\n\n", "// An entry was found, but it could just be an entry with the next", "// highest hash after the requested one, so make sure the hashes", "// actually match.", "cursorKey", ":=", "cursor", ".", "Key", "(", ")", "\n", "if", "len", "(", "cursorKey", ")", "<", "chainhash", ".", "HashSize", "{", "return", "nil", ",", "nil", "\n", "}", "\n", "if", "!", "bytes", ".", "Equal", "(", "hash", "[", ":", "]", ",", "cursorKey", "[", ":", "chainhash", ".", "HashSize", "]", ")", "{", "return", "nil", ",", "nil", "\n", "}", "\n\n", "return", "deserializeUtxoEntry", "(", "cursor", ".", "Value", "(", ")", ")", "\n", "}" ]
// dbFetchUtxoEntryByHash attempts to find and fetch a utxo for the given hash. // It uses a cursor and seek to try and do this as efficiently as possible. // // When there are no entries for the provided hash, nil will be returned for // both the entry and the error.
[ "dbFetchUtxoEntryByHash", "attempts", "to", "find", "and", "fetch", "a", "utxo", "for", "the", "given", "hash", ".", "It", "uses", "a", "cursor", "and", "seek", "to", "try", "and", "do", "this", "as", "efficiently", "as", "possible", ".", "When", "there", "are", "no", "entries", "for", "the", "provided", "hash", "nil", "will", "be", "returned", "for", "both", "the", "entry", "and", "the", "error", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L700-L725
train
btcsuite/btcd
blockchain/chainio.go
dbFetchUtxoEntry
func dbFetchUtxoEntry(dbTx database.Tx, outpoint wire.OutPoint) (*UtxoEntry, error) { // Fetch the unspent transaction output information for the passed // transaction output. Return now when there is no entry. key := outpointKey(outpoint) utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) serializedUtxo := utxoBucket.Get(*key) recycleOutpointKey(key) if serializedUtxo == nil { return nil, nil } // A non-nil zero-length entry means there is an entry in the database // for a spent transaction output which should never be the case. if len(serializedUtxo) == 0 { return nil, AssertError(fmt.Sprintf("database contains entry "+ "for spent tx output %v", outpoint)) } // Deserialize the utxo entry and return it. entry, err := deserializeUtxoEntry(serializedUtxo) if err != nil { // Ensure any deserialization errors are returned as database // corruption errors. if isDeserializeErr(err) { return nil, database.Error{ ErrorCode: database.ErrCorruption, Description: fmt.Sprintf("corrupt utxo entry "+ "for %v: %v", outpoint, err), } } return nil, err } return entry, nil }
go
func dbFetchUtxoEntry(dbTx database.Tx, outpoint wire.OutPoint) (*UtxoEntry, error) { // Fetch the unspent transaction output information for the passed // transaction output. Return now when there is no entry. key := outpointKey(outpoint) utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) serializedUtxo := utxoBucket.Get(*key) recycleOutpointKey(key) if serializedUtxo == nil { return nil, nil } // A non-nil zero-length entry means there is an entry in the database // for a spent transaction output which should never be the case. if len(serializedUtxo) == 0 { return nil, AssertError(fmt.Sprintf("database contains entry "+ "for spent tx output %v", outpoint)) } // Deserialize the utxo entry and return it. entry, err := deserializeUtxoEntry(serializedUtxo) if err != nil { // Ensure any deserialization errors are returned as database // corruption errors. if isDeserializeErr(err) { return nil, database.Error{ ErrorCode: database.ErrCorruption, Description: fmt.Sprintf("corrupt utxo entry "+ "for %v: %v", outpoint, err), } } return nil, err } return entry, nil }
[ "func", "dbFetchUtxoEntry", "(", "dbTx", "database", ".", "Tx", ",", "outpoint", "wire", ".", "OutPoint", ")", "(", "*", "UtxoEntry", ",", "error", ")", "{", "// Fetch the unspent transaction output information for the passed", "// transaction output. Return now when there is no entry.", "key", ":=", "outpointKey", "(", "outpoint", ")", "\n", "utxoBucket", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "utxoSetBucketName", ")", "\n", "serializedUtxo", ":=", "utxoBucket", ".", "Get", "(", "*", "key", ")", "\n", "recycleOutpointKey", "(", "key", ")", "\n", "if", "serializedUtxo", "==", "nil", "{", "return", "nil", ",", "nil", "\n", "}", "\n\n", "// A non-nil zero-length entry means there is an entry in the database", "// for a spent transaction output which should never be the case.", "if", "len", "(", "serializedUtxo", ")", "==", "0", "{", "return", "nil", ",", "AssertError", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "outpoint", ")", ")", "\n", "}", "\n\n", "// Deserialize the utxo entry and return it.", "entry", ",", "err", ":=", "deserializeUtxoEntry", "(", "serializedUtxo", ")", "\n", "if", "err", "!=", "nil", "{", "// Ensure any deserialization errors are returned as database", "// corruption errors.", "if", "isDeserializeErr", "(", "err", ")", "{", "return", "nil", ",", "database", ".", "Error", "{", "ErrorCode", ":", "database", ".", "ErrCorruption", ",", "Description", ":", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "outpoint", ",", "err", ")", ",", "}", "\n", "}", "\n\n", "return", "nil", ",", "err", "\n", "}", "\n\n", "return", "entry", ",", "nil", "\n", "}" ]
// dbFetchUtxoEntry uses an existing database transaction to fetch the specified // transaction output from the utxo set. // // When there is no entry for the provided output, nil will be returned for both // the entry and the error.
[ "dbFetchUtxoEntry", "uses", "an", "existing", "database", "transaction", "to", "fetch", "the", "specified", "transaction", "output", "from", "the", "utxo", "set", ".", "When", "there", "is", "no", "entry", "for", "the", "provided", "output", "nil", "will", "be", "returned", "for", "both", "the", "entry", "and", "the", "error", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L732-L767
train
btcsuite/btcd
blockchain/chainio.go
dbPutUtxoView
func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error { utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) for outpoint, entry := range view.entries { // No need to update the database if the entry was not modified. if entry == nil || !entry.isModified() { continue } // Remove the utxo entry if it is spent. if entry.IsSpent() { key := outpointKey(outpoint) err := utxoBucket.Delete(*key) recycleOutpointKey(key) if err != nil { return err } continue } // Serialize and store the utxo entry. serialized, err := serializeUtxoEntry(entry) if err != nil { return err } key := outpointKey(outpoint) err = utxoBucket.Put(*key, serialized) // NOTE: The key is intentionally not recycled here since the // database interface contract prohibits modifications. It will // be garbage collected normally when the database is done with // it. if err != nil { return err } } return nil }
go
func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error { utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) for outpoint, entry := range view.entries { // No need to update the database if the entry was not modified. if entry == nil || !entry.isModified() { continue } // Remove the utxo entry if it is spent. if entry.IsSpent() { key := outpointKey(outpoint) err := utxoBucket.Delete(*key) recycleOutpointKey(key) if err != nil { return err } continue } // Serialize and store the utxo entry. serialized, err := serializeUtxoEntry(entry) if err != nil { return err } key := outpointKey(outpoint) err = utxoBucket.Put(*key, serialized) // NOTE: The key is intentionally not recycled here since the // database interface contract prohibits modifications. It will // be garbage collected normally when the database is done with // it. if err != nil { return err } } return nil }
[ "func", "dbPutUtxoView", "(", "dbTx", "database", ".", "Tx", ",", "view", "*", "UtxoViewpoint", ")", "error", "{", "utxoBucket", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "utxoSetBucketName", ")", "\n", "for", "outpoint", ",", "entry", ":=", "range", "view", ".", "entries", "{", "// No need to update the database if the entry was not modified.", "if", "entry", "==", "nil", "||", "!", "entry", ".", "isModified", "(", ")", "{", "continue", "\n", "}", "\n\n", "// Remove the utxo entry if it is spent.", "if", "entry", ".", "IsSpent", "(", ")", "{", "key", ":=", "outpointKey", "(", "outpoint", ")", "\n", "err", ":=", "utxoBucket", ".", "Delete", "(", "*", "key", ")", "\n", "recycleOutpointKey", "(", "key", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "continue", "\n", "}", "\n\n", "// Serialize and store the utxo entry.", "serialized", ",", "err", ":=", "serializeUtxoEntry", "(", "entry", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "key", ":=", "outpointKey", "(", "outpoint", ")", "\n", "err", "=", "utxoBucket", ".", "Put", "(", "*", "key", ",", "serialized", ")", "\n", "// NOTE: The key is intentionally not recycled here since the", "// database interface contract prohibits modifications. It will", "// be garbage collected normally when the database is done with", "// it.", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// dbPutUtxoView uses an existing database transaction to update the utxo set // in the database based on the provided utxo view contents and state. In // particular, only the entries that have been marked as modified are written // to the database.
[ "dbPutUtxoView", "uses", "an", "existing", "database", "transaction", "to", "update", "the", "utxo", "set", "in", "the", "database", "based", "on", "the", "provided", "utxo", "view", "contents", "and", "state", ".", "In", "particular", "only", "the", "entries", "that", "have", "been", "marked", "as", "modified", "are", "written", "to", "the", "database", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L773-L810
train
btcsuite/btcd
blockchain/chainio.go
dbFetchHeightByHash
func dbFetchHeightByHash(dbTx database.Tx, hash *chainhash.Hash) (int32, error) { meta := dbTx.Metadata() hashIndex := meta.Bucket(hashIndexBucketName) serializedHeight := hashIndex.Get(hash[:]) if serializedHeight == nil { str := fmt.Sprintf("block %s is not in the main chain", hash) return 0, errNotInMainChain(str) } return int32(byteOrder.Uint32(serializedHeight)), nil }
go
func dbFetchHeightByHash(dbTx database.Tx, hash *chainhash.Hash) (int32, error) { meta := dbTx.Metadata() hashIndex := meta.Bucket(hashIndexBucketName) serializedHeight := hashIndex.Get(hash[:]) if serializedHeight == nil { str := fmt.Sprintf("block %s is not in the main chain", hash) return 0, errNotInMainChain(str) } return int32(byteOrder.Uint32(serializedHeight)), nil }
[ "func", "dbFetchHeightByHash", "(", "dbTx", "database", ".", "Tx", ",", "hash", "*", "chainhash", ".", "Hash", ")", "(", "int32", ",", "error", ")", "{", "meta", ":=", "dbTx", ".", "Metadata", "(", ")", "\n", "hashIndex", ":=", "meta", ".", "Bucket", "(", "hashIndexBucketName", ")", "\n", "serializedHeight", ":=", "hashIndex", ".", "Get", "(", "hash", "[", ":", "]", ")", "\n", "if", "serializedHeight", "==", "nil", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "hash", ")", "\n", "return", "0", ",", "errNotInMainChain", "(", "str", ")", "\n", "}", "\n\n", "return", "int32", "(", "byteOrder", ".", "Uint32", "(", "serializedHeight", ")", ")", ",", "nil", "\n", "}" ]
// dbFetchHeightByHash uses an existing database transaction to retrieve the // height for the provided hash from the index.
[ "dbFetchHeightByHash", "uses", "an", "existing", "database", "transaction", "to", "retrieve", "the", "height", "for", "the", "provided", "hash", "from", "the", "index", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L870-L880
train
btcsuite/btcd
blockchain/chainio.go
dbFetchHashByHeight
func dbFetchHashByHeight(dbTx database.Tx, height int32) (*chainhash.Hash, error) { var serializedHeight [4]byte byteOrder.PutUint32(serializedHeight[:], uint32(height)) meta := dbTx.Metadata() heightIndex := meta.Bucket(heightIndexBucketName) hashBytes := heightIndex.Get(serializedHeight[:]) if hashBytes == nil { str := fmt.Sprintf("no block at height %d exists", height) return nil, errNotInMainChain(str) } var hash chainhash.Hash copy(hash[:], hashBytes) return &hash, nil }
go
func dbFetchHashByHeight(dbTx database.Tx, height int32) (*chainhash.Hash, error) { var serializedHeight [4]byte byteOrder.PutUint32(serializedHeight[:], uint32(height)) meta := dbTx.Metadata() heightIndex := meta.Bucket(heightIndexBucketName) hashBytes := heightIndex.Get(serializedHeight[:]) if hashBytes == nil { str := fmt.Sprintf("no block at height %d exists", height) return nil, errNotInMainChain(str) } var hash chainhash.Hash copy(hash[:], hashBytes) return &hash, nil }
[ "func", "dbFetchHashByHeight", "(", "dbTx", "database", ".", "Tx", ",", "height", "int32", ")", "(", "*", "chainhash", ".", "Hash", ",", "error", ")", "{", "var", "serializedHeight", "[", "4", "]", "byte", "\n", "byteOrder", ".", "PutUint32", "(", "serializedHeight", "[", ":", "]", ",", "uint32", "(", "height", ")", ")", "\n\n", "meta", ":=", "dbTx", ".", "Metadata", "(", ")", "\n", "heightIndex", ":=", "meta", ".", "Bucket", "(", "heightIndexBucketName", ")", "\n", "hashBytes", ":=", "heightIndex", ".", "Get", "(", "serializedHeight", "[", ":", "]", ")", "\n", "if", "hashBytes", "==", "nil", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "height", ")", "\n", "return", "nil", ",", "errNotInMainChain", "(", "str", ")", "\n", "}", "\n\n", "var", "hash", "chainhash", ".", "Hash", "\n", "copy", "(", "hash", "[", ":", "]", ",", "hashBytes", ")", "\n", "return", "&", "hash", ",", "nil", "\n", "}" ]
// dbFetchHashByHeight uses an existing database transaction to retrieve the // hash for the provided height from the index.
[ "dbFetchHashByHeight", "uses", "an", "existing", "database", "transaction", "to", "retrieve", "the", "hash", "for", "the", "provided", "height", "from", "the", "index", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L884-L899
train
btcsuite/btcd
blockchain/chainio.go
serializeBestChainState
func serializeBestChainState(state bestChainState) []byte { // Calculate the full size needed to serialize the chain state. workSumBytes := state.workSum.Bytes() workSumBytesLen := uint32(len(workSumBytes)) serializedLen := chainhash.HashSize + 4 + 8 + 4 + workSumBytesLen // Serialize the chain state. serializedData := make([]byte, serializedLen) copy(serializedData[0:chainhash.HashSize], state.hash[:]) offset := uint32(chainhash.HashSize) byteOrder.PutUint32(serializedData[offset:], state.height) offset += 4 byteOrder.PutUint64(serializedData[offset:], state.totalTxns) offset += 8 byteOrder.PutUint32(serializedData[offset:], workSumBytesLen) offset += 4 copy(serializedData[offset:], workSumBytes) return serializedData[:] }
go
func serializeBestChainState(state bestChainState) []byte { // Calculate the full size needed to serialize the chain state. workSumBytes := state.workSum.Bytes() workSumBytesLen := uint32(len(workSumBytes)) serializedLen := chainhash.HashSize + 4 + 8 + 4 + workSumBytesLen // Serialize the chain state. serializedData := make([]byte, serializedLen) copy(serializedData[0:chainhash.HashSize], state.hash[:]) offset := uint32(chainhash.HashSize) byteOrder.PutUint32(serializedData[offset:], state.height) offset += 4 byteOrder.PutUint64(serializedData[offset:], state.totalTxns) offset += 8 byteOrder.PutUint32(serializedData[offset:], workSumBytesLen) offset += 4 copy(serializedData[offset:], workSumBytes) return serializedData[:] }
[ "func", "serializeBestChainState", "(", "state", "bestChainState", ")", "[", "]", "byte", "{", "// Calculate the full size needed to serialize the chain state.", "workSumBytes", ":=", "state", ".", "workSum", ".", "Bytes", "(", ")", "\n", "workSumBytesLen", ":=", "uint32", "(", "len", "(", "workSumBytes", ")", ")", "\n", "serializedLen", ":=", "chainhash", ".", "HashSize", "+", "4", "+", "8", "+", "4", "+", "workSumBytesLen", "\n\n", "// Serialize the chain state.", "serializedData", ":=", "make", "(", "[", "]", "byte", ",", "serializedLen", ")", "\n", "copy", "(", "serializedData", "[", "0", ":", "chainhash", ".", "HashSize", "]", ",", "state", ".", "hash", "[", ":", "]", ")", "\n", "offset", ":=", "uint32", "(", "chainhash", ".", "HashSize", ")", "\n", "byteOrder", ".", "PutUint32", "(", "serializedData", "[", "offset", ":", "]", ",", "state", ".", "height", ")", "\n", "offset", "+=", "4", "\n", "byteOrder", ".", "PutUint64", "(", "serializedData", "[", "offset", ":", "]", ",", "state", ".", "totalTxns", ")", "\n", "offset", "+=", "8", "\n", "byteOrder", ".", "PutUint32", "(", "serializedData", "[", "offset", ":", "]", ",", "workSumBytesLen", ")", "\n", "offset", "+=", "4", "\n", "copy", "(", "serializedData", "[", "offset", ":", "]", ",", "workSumBytes", ")", "\n", "return", "serializedData", "[", ":", "]", "\n", "}" ]
// serializeBestChainState returns the serialization of the passed block best // chain state. This is data to be stored in the chain state bucket.
[ "serializeBestChainState", "returns", "the", "serialization", "of", "the", "passed", "block", "best", "chain", "state", ".", "This", "is", "data", "to", "be", "stored", "in", "the", "chain", "state", "bucket", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L929-L947
train
btcsuite/btcd
blockchain/chainio.go
deserializeBestChainState
func deserializeBestChainState(serializedData []byte) (bestChainState, error) { // Ensure the serialized data has enough bytes to properly deserialize // the hash, height, total transactions, and work sum length. if len(serializedData) < chainhash.HashSize+16 { return bestChainState{}, database.Error{ ErrorCode: database.ErrCorruption, Description: "corrupt best chain state", } } state := bestChainState{} copy(state.hash[:], serializedData[0:chainhash.HashSize]) offset := uint32(chainhash.HashSize) state.height = byteOrder.Uint32(serializedData[offset : offset+4]) offset += 4 state.totalTxns = byteOrder.Uint64(serializedData[offset : offset+8]) offset += 8 workSumBytesLen := byteOrder.Uint32(serializedData[offset : offset+4]) offset += 4 // Ensure the serialized data has enough bytes to deserialize the work // sum. if uint32(len(serializedData[offset:])) < workSumBytesLen { return bestChainState{}, database.Error{ ErrorCode: database.ErrCorruption, Description: "corrupt best chain state", } } workSumBytes := serializedData[offset : offset+workSumBytesLen] state.workSum = new(big.Int).SetBytes(workSumBytes) return state, nil }
go
func deserializeBestChainState(serializedData []byte) (bestChainState, error) { // Ensure the serialized data has enough bytes to properly deserialize // the hash, height, total transactions, and work sum length. if len(serializedData) < chainhash.HashSize+16 { return bestChainState{}, database.Error{ ErrorCode: database.ErrCorruption, Description: "corrupt best chain state", } } state := bestChainState{} copy(state.hash[:], serializedData[0:chainhash.HashSize]) offset := uint32(chainhash.HashSize) state.height = byteOrder.Uint32(serializedData[offset : offset+4]) offset += 4 state.totalTxns = byteOrder.Uint64(serializedData[offset : offset+8]) offset += 8 workSumBytesLen := byteOrder.Uint32(serializedData[offset : offset+4]) offset += 4 // Ensure the serialized data has enough bytes to deserialize the work // sum. if uint32(len(serializedData[offset:])) < workSumBytesLen { return bestChainState{}, database.Error{ ErrorCode: database.ErrCorruption, Description: "corrupt best chain state", } } workSumBytes := serializedData[offset : offset+workSumBytesLen] state.workSum = new(big.Int).SetBytes(workSumBytes) return state, nil }
[ "func", "deserializeBestChainState", "(", "serializedData", "[", "]", "byte", ")", "(", "bestChainState", ",", "error", ")", "{", "// Ensure the serialized data has enough bytes to properly deserialize", "// the hash, height, total transactions, and work sum length.", "if", "len", "(", "serializedData", ")", "<", "chainhash", ".", "HashSize", "+", "16", "{", "return", "bestChainState", "{", "}", ",", "database", ".", "Error", "{", "ErrorCode", ":", "database", ".", "ErrCorruption", ",", "Description", ":", "\"", "\"", ",", "}", "\n", "}", "\n\n", "state", ":=", "bestChainState", "{", "}", "\n", "copy", "(", "state", ".", "hash", "[", ":", "]", ",", "serializedData", "[", "0", ":", "chainhash", ".", "HashSize", "]", ")", "\n", "offset", ":=", "uint32", "(", "chainhash", ".", "HashSize", ")", "\n", "state", ".", "height", "=", "byteOrder", ".", "Uint32", "(", "serializedData", "[", "offset", ":", "offset", "+", "4", "]", ")", "\n", "offset", "+=", "4", "\n", "state", ".", "totalTxns", "=", "byteOrder", ".", "Uint64", "(", "serializedData", "[", "offset", ":", "offset", "+", "8", "]", ")", "\n", "offset", "+=", "8", "\n", "workSumBytesLen", ":=", "byteOrder", ".", "Uint32", "(", "serializedData", "[", "offset", ":", "offset", "+", "4", "]", ")", "\n", "offset", "+=", "4", "\n\n", "// Ensure the serialized data has enough bytes to deserialize the work", "// sum.", "if", "uint32", "(", "len", "(", "serializedData", "[", "offset", ":", "]", ")", ")", "<", "workSumBytesLen", "{", "return", "bestChainState", "{", "}", ",", "database", ".", "Error", "{", "ErrorCode", ":", "database", ".", "ErrCorruption", ",", "Description", ":", "\"", "\"", ",", "}", "\n", "}", "\n", "workSumBytes", ":=", "serializedData", "[", "offset", ":", "offset", "+", "workSumBytesLen", "]", "\n", "state", ".", "workSum", "=", "new", "(", "big", ".", "Int", ")", ".", "SetBytes", "(", "workSumBytes", ")", "\n\n", "return", "state", ",", "nil", "\n", "}" ]
// deserializeBestChainState deserializes the passed serialized best chain // state. This is data stored in the chain state bucket and is updated after // every block is connected or disconnected from the main chain.
[ "deserializeBestChainState", "deserializes", "the", "passed", "serialized", "best", "chain", "state", ".", "This", "is", "data", "stored", "in", "the", "chain", "state", "bucket", "and", "is", "updated", "after", "every", "block", "is", "connected", "or", "disconnected", "from", "the", "main", "chain", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L953-L985
train
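As a companion to the two chain state records above, here is a standalone sketch of the serialized layout they describe: a 32-byte block hash, a 4-byte height, an 8-byte transaction count, a 4-byte work sum length, and the work sum bytes. The little-endian byte order is an assumption mirroring the package-level byteOrder these functions read with Uint32/Uint64; all values are placeholders:

package main

import (
    "encoding/binary"
    "fmt"
    "math/big"
)

func main() {
    var hash [32]byte // stand-in for a real best block hash
    height := uint32(750000)
    totalTxns := uint64(123456789)
    workSum := big.NewInt(1 << 40)

    work := workSum.Bytes()
    buf := make([]byte, 32+4+8+4+len(work))
    copy(buf[0:32], hash[:])
    binary.LittleEndian.PutUint32(buf[32:], height)            // assumed byte order
    binary.LittleEndian.PutUint64(buf[36:], totalTxns)
    binary.LittleEndian.PutUint32(buf[44:], uint32(len(work))) // work sum length prefix
    copy(buf[48:], work)

    // deserializeBestChainState requires at least HashSize+16 = 48 bytes.
    fmt.Println(len(buf) >= 32+16, binary.LittleEndian.Uint64(buf[36:44]))
}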
btcsuite/btcd
blockchain/chainio.go
dbPutBestState
func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error { // Serialize the current best chain state. serializedData := serializeBestChainState(bestChainState{ hash: snapshot.Hash, height: uint32(snapshot.Height), totalTxns: snapshot.TotalTxns, workSum: workSum, }) // Store the current best chain state into the database. return dbTx.Metadata().Put(chainStateKeyName, serializedData) }
go
func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error { // Serialize the current best chain state. serializedData := serializeBestChainState(bestChainState{ hash: snapshot.Hash, height: uint32(snapshot.Height), totalTxns: snapshot.TotalTxns, workSum: workSum, }) // Store the current best chain state into the database. return dbTx.Metadata().Put(chainStateKeyName, serializedData) }
[ "func", "dbPutBestState", "(", "dbTx", "database", ".", "Tx", ",", "snapshot", "*", "BestState", ",", "workSum", "*", "big", ".", "Int", ")", "error", "{", "// Serialize the current best chain state.", "serializedData", ":=", "serializeBestChainState", "(", "bestChainState", "{", "hash", ":", "snapshot", ".", "Hash", ",", "height", ":", "uint32", "(", "snapshot", ".", "Height", ")", ",", "totalTxns", ":", "snapshot", ".", "TotalTxns", ",", "workSum", ":", "workSum", ",", "}", ")", "\n\n", "// Store the current best chain state into the database.", "return", "dbTx", ".", "Metadata", "(", ")", ".", "Put", "(", "chainStateKeyName", ",", "serializedData", ")", "\n", "}" ]
// dbPutBestState uses an existing database transaction to update the best chain // state with the given parameters.
[ "dbPutBestState", "uses", "an", "existing", "database", "transaction", "to", "update", "the", "best", "chain", "state", "with", "the", "given", "parameters", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L989-L1000
train
btcsuite/btcd
blockchain/chainio.go
createChainState
func (b *BlockChain) createChainState() error { // Create a new node from the genesis block and set it as the best node. genesisBlock := btcutil.NewBlock(b.chainParams.GenesisBlock) genesisBlock.SetHeight(0) header := &genesisBlock.MsgBlock().Header node := newBlockNode(header, nil) node.status = statusDataStored | statusValid b.bestChain.SetTip(node) // Add the new node to the index which is used for faster lookups. b.index.addNode(node) // Initialize the state related to the best block. Since it is the // genesis block, use its timestamp for the median time. numTxns := uint64(len(genesisBlock.MsgBlock().Transactions)) blockSize := uint64(genesisBlock.MsgBlock().SerializeSize()) blockWeight := uint64(GetBlockWeight(genesisBlock)) b.stateSnapshot = newBestState(node, blockSize, blockWeight, numTxns, numTxns, time.Unix(node.timestamp, 0)) // Create the initial the database chain state including creating the // necessary index buckets and inserting the genesis block. err := b.db.Update(func(dbTx database.Tx) error { meta := dbTx.Metadata() // Create the bucket that houses the block index data. _, err := meta.CreateBucket(blockIndexBucketName) if err != nil { return err } // Create the bucket that houses the chain block hash to height // index. _, err = meta.CreateBucket(hashIndexBucketName) if err != nil { return err } // Create the bucket that houses the chain block height to hash // index. _, err = meta.CreateBucket(heightIndexBucketName) if err != nil { return err } // Create the bucket that houses the spend journal data and // store its version. _, err = meta.CreateBucket(spendJournalBucketName) if err != nil { return err } err = dbPutVersion(dbTx, utxoSetVersionKeyName, latestUtxoSetBucketVersion) if err != nil { return err } // Create the bucket that houses the utxo set and store its // version. Note that the genesis block coinbase transaction is // intentionally not inserted here since it is not spendable by // consensus rules. _, err = meta.CreateBucket(utxoSetBucketName) if err != nil { return err } err = dbPutVersion(dbTx, spendJournalVersionKeyName, latestSpendJournalBucketVersion) if err != nil { return err } // Save the genesis block to the block index database. err = dbStoreBlockNode(dbTx, node) if err != nil { return err } // Add the genesis block hash to height and height to hash // mappings to the index. err = dbPutBlockIndex(dbTx, &node.hash, node.height) if err != nil { return err } // Store the current best chain state into the database. err = dbPutBestState(dbTx, b.stateSnapshot, node.workSum) if err != nil { return err } // Store the genesis block into the database. return dbStoreBlock(dbTx, genesisBlock) }) return err }
go
func (b *BlockChain) createChainState() error { // Create a new node from the genesis block and set it as the best node. genesisBlock := btcutil.NewBlock(b.chainParams.GenesisBlock) genesisBlock.SetHeight(0) header := &genesisBlock.MsgBlock().Header node := newBlockNode(header, nil) node.status = statusDataStored | statusValid b.bestChain.SetTip(node) // Add the new node to the index which is used for faster lookups. b.index.addNode(node) // Initialize the state related to the best block. Since it is the // genesis block, use its timestamp for the median time. numTxns := uint64(len(genesisBlock.MsgBlock().Transactions)) blockSize := uint64(genesisBlock.MsgBlock().SerializeSize()) blockWeight := uint64(GetBlockWeight(genesisBlock)) b.stateSnapshot = newBestState(node, blockSize, blockWeight, numTxns, numTxns, time.Unix(node.timestamp, 0)) // Create the initial the database chain state including creating the // necessary index buckets and inserting the genesis block. err := b.db.Update(func(dbTx database.Tx) error { meta := dbTx.Metadata() // Create the bucket that houses the block index data. _, err := meta.CreateBucket(blockIndexBucketName) if err != nil { return err } // Create the bucket that houses the chain block hash to height // index. _, err = meta.CreateBucket(hashIndexBucketName) if err != nil { return err } // Create the bucket that houses the chain block height to hash // index. _, err = meta.CreateBucket(heightIndexBucketName) if err != nil { return err } // Create the bucket that houses the spend journal data and // store its version. _, err = meta.CreateBucket(spendJournalBucketName) if err != nil { return err } err = dbPutVersion(dbTx, utxoSetVersionKeyName, latestUtxoSetBucketVersion) if err != nil { return err } // Create the bucket that houses the utxo set and store its // version. Note that the genesis block coinbase transaction is // intentionally not inserted here since it is not spendable by // consensus rules. _, err = meta.CreateBucket(utxoSetBucketName) if err != nil { return err } err = dbPutVersion(dbTx, spendJournalVersionKeyName, latestSpendJournalBucketVersion) if err != nil { return err } // Save the genesis block to the block index database. err = dbStoreBlockNode(dbTx, node) if err != nil { return err } // Add the genesis block hash to height and height to hash // mappings to the index. err = dbPutBlockIndex(dbTx, &node.hash, node.height) if err != nil { return err } // Store the current best chain state into the database. err = dbPutBestState(dbTx, b.stateSnapshot, node.workSum) if err != nil { return err } // Store the genesis block into the database. return dbStoreBlock(dbTx, genesisBlock) }) return err }
[ "func", "(", "b", "*", "BlockChain", ")", "createChainState", "(", ")", "error", "{", "// Create a new node from the genesis block and set it as the best node.", "genesisBlock", ":=", "btcutil", ".", "NewBlock", "(", "b", ".", "chainParams", ".", "GenesisBlock", ")", "\n", "genesisBlock", ".", "SetHeight", "(", "0", ")", "\n", "header", ":=", "&", "genesisBlock", ".", "MsgBlock", "(", ")", ".", "Header", "\n", "node", ":=", "newBlockNode", "(", "header", ",", "nil", ")", "\n", "node", ".", "status", "=", "statusDataStored", "|", "statusValid", "\n", "b", ".", "bestChain", ".", "SetTip", "(", "node", ")", "\n\n", "// Add the new node to the index which is used for faster lookups.", "b", ".", "index", ".", "addNode", "(", "node", ")", "\n\n", "// Initialize the state related to the best block. Since it is the", "// genesis block, use its timestamp for the median time.", "numTxns", ":=", "uint64", "(", "len", "(", "genesisBlock", ".", "MsgBlock", "(", ")", ".", "Transactions", ")", ")", "\n", "blockSize", ":=", "uint64", "(", "genesisBlock", ".", "MsgBlock", "(", ")", ".", "SerializeSize", "(", ")", ")", "\n", "blockWeight", ":=", "uint64", "(", "GetBlockWeight", "(", "genesisBlock", ")", ")", "\n", "b", ".", "stateSnapshot", "=", "newBestState", "(", "node", ",", "blockSize", ",", "blockWeight", ",", "numTxns", ",", "numTxns", ",", "time", ".", "Unix", "(", "node", ".", "timestamp", ",", "0", ")", ")", "\n\n", "// Create the initial the database chain state including creating the", "// necessary index buckets and inserting the genesis block.", "err", ":=", "b", ".", "db", ".", "Update", "(", "func", "(", "dbTx", "database", ".", "Tx", ")", "error", "{", "meta", ":=", "dbTx", ".", "Metadata", "(", ")", "\n\n", "// Create the bucket that houses the block index data.", "_", ",", "err", ":=", "meta", ".", "CreateBucket", "(", "blockIndexBucketName", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Create the bucket that houses the chain block hash to height", "// index.", "_", ",", "err", "=", "meta", ".", "CreateBucket", "(", "hashIndexBucketName", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Create the bucket that houses the chain block height to hash", "// index.", "_", ",", "err", "=", "meta", ".", "CreateBucket", "(", "heightIndexBucketName", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Create the bucket that houses the spend journal data and", "// store its version.", "_", ",", "err", "=", "meta", ".", "CreateBucket", "(", "spendJournalBucketName", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "err", "=", "dbPutVersion", "(", "dbTx", ",", "utxoSetVersionKeyName", ",", "latestUtxoSetBucketVersion", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Create the bucket that houses the utxo set and store its", "// version. Note that the genesis block coinbase transaction is", "// intentionally not inserted here since it is not spendable by", "// consensus rules.", "_", ",", "err", "=", "meta", ".", "CreateBucket", "(", "utxoSetBucketName", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "err", "=", "dbPutVersion", "(", "dbTx", ",", "spendJournalVersionKeyName", ",", "latestSpendJournalBucketVersion", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Save the genesis block to the block index database.", "err", "=", "dbStoreBlockNode", "(", "dbTx", ",", "node", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Add the genesis block hash to height and height to hash", "// mappings to the index.", "err", "=", "dbPutBlockIndex", "(", "dbTx", ",", "&", "node", ".", "hash", ",", "node", ".", "height", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Store the current best chain state into the database.", "err", "=", "dbPutBestState", "(", "dbTx", ",", "b", ".", "stateSnapshot", ",", "node", ".", "workSum", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Store the genesis block into the database.", "return", "dbStoreBlock", "(", "dbTx", ",", "genesisBlock", ")", "\n", "}", ")", "\n", "return", "err", "\n", "}" ]
// createChainState initializes both the database and the chain state to the // genesis block. This includes creating the necessary buckets and inserting // the genesis block, so it must only be called on an uninitialized database.
[ "createChainState", "initializes", "both", "the", "database", "and", "the", "chain", "state", "to", "the", "genesis", "block", ".", "This", "includes", "creating", "the", "necessary", "buckets", "and", "inserting", "the", "genesis", "block", "so", "it", "must", "only", "be", "called", "on", "an", "uninitialized", "database", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1005-L1099
train
btcsuite/btcd
blockchain/chainio.go
deserializeBlockRow
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) { buffer := bytes.NewReader(blockRow) var header wire.BlockHeader err := header.Deserialize(buffer) if err != nil { return nil, statusNone, err } statusByte, err := buffer.ReadByte() if err != nil { return nil, statusNone, err } return &header, blockStatus(statusByte), nil }
go
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) { buffer := bytes.NewReader(blockRow) var header wire.BlockHeader err := header.Deserialize(buffer) if err != nil { return nil, statusNone, err } statusByte, err := buffer.ReadByte() if err != nil { return nil, statusNone, err } return &header, blockStatus(statusByte), nil }
[ "func", "deserializeBlockRow", "(", "blockRow", "[", "]", "byte", ")", "(", "*", "wire", ".", "BlockHeader", ",", "blockStatus", ",", "error", ")", "{", "buffer", ":=", "bytes", ".", "NewReader", "(", "blockRow", ")", "\n\n", "var", "header", "wire", ".", "BlockHeader", "\n", "err", ":=", "header", ".", "Deserialize", "(", "buffer", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "statusNone", ",", "err", "\n", "}", "\n\n", "statusByte", ",", "err", ":=", "buffer", ".", "ReadByte", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "statusNone", ",", "err", "\n", "}", "\n\n", "return", "&", "header", ",", "blockStatus", "(", "statusByte", ")", ",", "nil", "\n", "}" ]
// deserializeBlockRow parses a value in the block index bucket into a block // header and block status bitfield.
[ "deserializeBlockRow", "parses", "a", "value", "in", "the", "block", "index", "bucket", "into", "a", "block", "header", "and", "block", "status", "bitfield", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1264-L1279
train
btcsuite/btcd
blockchain/chainio.go
dbFetchHeaderByHash
func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) { headerBytes, err := dbTx.FetchBlockHeader(hash) if err != nil { return nil, err } var header wire.BlockHeader err = header.Deserialize(bytes.NewReader(headerBytes)) if err != nil { return nil, err } return &header, nil }
go
func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) { headerBytes, err := dbTx.FetchBlockHeader(hash) if err != nil { return nil, err } var header wire.BlockHeader err = header.Deserialize(bytes.NewReader(headerBytes)) if err != nil { return nil, err } return &header, nil }
[ "func", "dbFetchHeaderByHash", "(", "dbTx", "database", ".", "Tx", ",", "hash", "*", "chainhash", ".", "Hash", ")", "(", "*", "wire", ".", "BlockHeader", ",", "error", ")", "{", "headerBytes", ",", "err", ":=", "dbTx", ".", "FetchBlockHeader", "(", "hash", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "var", "header", "wire", ".", "BlockHeader", "\n", "err", "=", "header", ".", "Deserialize", "(", "bytes", ".", "NewReader", "(", "headerBytes", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "return", "&", "header", ",", "nil", "\n", "}" ]
// dbFetchHeaderByHash uses an existing database transaction to retrieve the // block header for the provided hash.
[ "dbFetchHeaderByHash", "uses", "an", "existing", "database", "transaction", "to", "retrieve", "the", "block", "header", "for", "the", "provided", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1283-L1296
train
btcsuite/btcd
blockchain/chainio.go
dbFetchHeaderByHeight
func dbFetchHeaderByHeight(dbTx database.Tx, height int32) (*wire.BlockHeader, error) { hash, err := dbFetchHashByHeight(dbTx, height) if err != nil { return nil, err } return dbFetchHeaderByHash(dbTx, hash) }
go
func dbFetchHeaderByHeight(dbTx database.Tx, height int32) (*wire.BlockHeader, error) { hash, err := dbFetchHashByHeight(dbTx, height) if err != nil { return nil, err } return dbFetchHeaderByHash(dbTx, hash) }
[ "func", "dbFetchHeaderByHeight", "(", "dbTx", "database", ".", "Tx", ",", "height", "int32", ")", "(", "*", "wire", ".", "BlockHeader", ",", "error", ")", "{", "hash", ",", "err", ":=", "dbFetchHashByHeight", "(", "dbTx", ",", "height", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "return", "dbFetchHeaderByHash", "(", "dbTx", ",", "hash", ")", "\n", "}" ]
// dbFetchHeaderByHeight uses an existing database transaction to retrieve the // block header for the provided height.
[ "dbFetchHeaderByHeight", "uses", "an", "existing", "database", "transaction", "to", "retrieve", "the", "block", "header", "for", "the", "provided", "height", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1300-L1307
train
btcsuite/btcd
blockchain/chainio.go
dbFetchBlockByNode
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*btcutil.Block, error) { // Load the raw block bytes from the database. blockBytes, err := dbTx.FetchBlock(&node.hash) if err != nil { return nil, err } // Create the encapsulated block and set the height appropriately. block, err := btcutil.NewBlockFromBytes(blockBytes) if err != nil { return nil, err } block.SetHeight(node.height) return block, nil }
go
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*btcutil.Block, error) { // Load the raw block bytes from the database. blockBytes, err := dbTx.FetchBlock(&node.hash) if err != nil { return nil, err } // Create the encapsulated block and set the height appropriately. block, err := btcutil.NewBlockFromBytes(blockBytes) if err != nil { return nil, err } block.SetHeight(node.height) return block, nil }
[ "func", "dbFetchBlockByNode", "(", "dbTx", "database", ".", "Tx", ",", "node", "*", "blockNode", ")", "(", "*", "btcutil", ".", "Block", ",", "error", ")", "{", "// Load the raw block bytes from the database.", "blockBytes", ",", "err", ":=", "dbTx", ".", "FetchBlock", "(", "&", "node", ".", "hash", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "// Create the encapsulated block and set the height appropriately.", "block", ",", "err", ":=", "btcutil", ".", "NewBlockFromBytes", "(", "blockBytes", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "block", ".", "SetHeight", "(", "node", ".", "height", ")", "\n\n", "return", "block", ",", "nil", "\n", "}" ]
// dbFetchBlockByNode uses an existing database transaction to retrieve the // raw block for the provided node, deserialize it, and return a btcutil.Block // with the height set.
[ "dbFetchBlockByNode", "uses", "an", "existing", "database", "transaction", "to", "retrieve", "the", "raw", "block", "for", "the", "provided", "node", "deserialize", "it", "and", "return", "a", "btcutil", ".", "Block", "with", "the", "height", "set", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1312-L1327
train
btcsuite/btcd
blockchain/chainio.go
dbStoreBlockNode
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error { // Serialize block data to be stored. w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1)) header := node.Header() err := header.Serialize(w) if err != nil { return err } err = w.WriteByte(byte(node.status)) if err != nil { return err } value := w.Bytes() // Write block header data to block index bucket. blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName) key := blockIndexKey(&node.hash, uint32(node.height)) return blockIndexBucket.Put(key, value) }
go
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error { // Serialize block data to be stored. w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1)) header := node.Header() err := header.Serialize(w) if err != nil { return err } err = w.WriteByte(byte(node.status)) if err != nil { return err } value := w.Bytes() // Write block header data to block index bucket. blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName) key := blockIndexKey(&node.hash, uint32(node.height)) return blockIndexBucket.Put(key, value) }
[ "func", "dbStoreBlockNode", "(", "dbTx", "database", ".", "Tx", ",", "node", "*", "blockNode", ")", "error", "{", "// Serialize block data to be stored.", "w", ":=", "bytes", ".", "NewBuffer", "(", "make", "(", "[", "]", "byte", ",", "0", ",", "blockHdrSize", "+", "1", ")", ")", "\n", "header", ":=", "node", ".", "Header", "(", ")", "\n", "err", ":=", "header", ".", "Serialize", "(", "w", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "err", "=", "w", ".", "WriteByte", "(", "byte", "(", "node", ".", "status", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "value", ":=", "w", ".", "Bytes", "(", ")", "\n\n", "// Write block header data to block index bucket.", "blockIndexBucket", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "blockIndexBucketName", ")", "\n", "key", ":=", "blockIndexKey", "(", "&", "node", ".", "hash", ",", "uint32", "(", "node", ".", "height", ")", ")", "\n", "return", "blockIndexBucket", ".", "Put", "(", "key", ",", "value", ")", "\n", "}" ]
// dbStoreBlockNode stores the block header and validation status to the block // index bucket. This overwrites the current entry if there exists one.
[ "dbStoreBlockNode", "stores", "the", "block", "header", "and", "validation", "status", "to", "the", "block", "index", "bucket", ".", "This", "overwrites", "the", "current", "entry", "if", "there", "exists", "one", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1331-L1349
train
btcsuite/btcd
blockchain/chainio.go
dbStoreBlock
func dbStoreBlock(dbTx database.Tx, block *btcutil.Block) error { hasBlock, err := dbTx.HasBlock(block.Hash()) if err != nil { return err } if hasBlock { return nil } return dbTx.StoreBlock(block) }
go
func dbStoreBlock(dbTx database.Tx, block *btcutil.Block) error { hasBlock, err := dbTx.HasBlock(block.Hash()) if err != nil { return err } if hasBlock { return nil } return dbTx.StoreBlock(block) }
[ "func", "dbStoreBlock", "(", "dbTx", "database", ".", "Tx", ",", "block", "*", "btcutil", ".", "Block", ")", "error", "{", "hasBlock", ",", "err", ":=", "dbTx", ".", "HasBlock", "(", "block", ".", "Hash", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "hasBlock", "{", "return", "nil", "\n", "}", "\n", "return", "dbTx", ".", "StoreBlock", "(", "block", ")", "\n", "}" ]
// dbStoreBlock stores the provided block in the database if it is not already // there. The full block data is written to ffldb.
[ "dbStoreBlock", "stores", "the", "provided", "block", "in", "the", "database", "if", "it", "is", "not", "already", "there", ".", "The", "full", "block", "data", "is", "written", "to", "ffldb", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1353-L1362
train
btcsuite/btcd
blockchain/chainio.go
blockIndexKey
func blockIndexKey(blockHash *chainhash.Hash, blockHeight uint32) []byte { indexKey := make([]byte, chainhash.HashSize+4) binary.BigEndian.PutUint32(indexKey[0:4], blockHeight) copy(indexKey[4:chainhash.HashSize+4], blockHash[:]) return indexKey }
go
func blockIndexKey(blockHash *chainhash.Hash, blockHeight uint32) []byte { indexKey := make([]byte, chainhash.HashSize+4) binary.BigEndian.PutUint32(indexKey[0:4], blockHeight) copy(indexKey[4:chainhash.HashSize+4], blockHash[:]) return indexKey }
[ "func", "blockIndexKey", "(", "blockHash", "*", "chainhash", ".", "Hash", ",", "blockHeight", "uint32", ")", "[", "]", "byte", "{", "indexKey", ":=", "make", "(", "[", "]", "byte", ",", "chainhash", ".", "HashSize", "+", "4", ")", "\n", "binary", ".", "BigEndian", ".", "PutUint32", "(", "indexKey", "[", "0", ":", "4", "]", ",", "blockHeight", ")", "\n", "copy", "(", "indexKey", "[", "4", ":", "chainhash", ".", "HashSize", "+", "4", "]", ",", "blockHash", "[", ":", "]", ")", "\n", "return", "indexKey", "\n", "}" ]
// blockIndexKey generates the binary key for an entry in the block index // bucket. The key is composed of the block height encoded as a big-endian // 32-bit unsigned int followed by the 32 byte block hash.
[ "blockIndexKey", "generates", "the", "binary", "key", "for", "an", "entry", "in", "the", "block", "index", "bucket", ".", "The", "key", "is", "composed", "of", "the", "block", "height", "encoded", "as", "a", "big", "-", "endian", "32", "-", "bit", "unsigned", "int", "followed", "by", "the", "32", "byte", "block", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1367-L1372
train
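Storing the height big-endian in blockIndexKey means plain lexicographic key ordering matches numeric height ordering, so a bucket cursor walks the block index from the genesis block upward. A standalone sketch demonstrating that property; the zero hash is a placeholder and indexKey is a made-up helper that mimics the documented layout:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "sort"
)

// indexKey mimics the documented layout: 4-byte big-endian height, then hash.
func indexKey(height uint32, hash [32]byte) []byte {
    key := make([]byte, 4+32)
    binary.BigEndian.PutUint32(key[0:4], height)
    copy(key[4:], hash[:])
    return key
}

func main() {
    var h [32]byte
    keys := [][]byte{indexKey(70000, h), indexKey(2, h), indexKey(300, h)}
    // Byte-wise comparison of the keys yields numeric height order.
    sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })
    for _, k := range keys {
        fmt.Println(binary.BigEndian.Uint32(k[0:4])) // 2, 300, 70000
    }
}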
btcsuite/btcd
blockchain/chainio.go
BlockByHeight
func (b *BlockChain) BlockByHeight(blockHeight int32) (*btcutil.Block, error) { // Lookup the block height in the best chain. node := b.bestChain.NodeByHeight(blockHeight) if node == nil { str := fmt.Sprintf("no block at height %d exists", blockHeight) return nil, errNotInMainChain(str) } // Load the block from the database and return it. var block *btcutil.Block err := b.db.View(func(dbTx database.Tx) error { var err error block, err = dbFetchBlockByNode(dbTx, node) return err }) return block, err }
go
func (b *BlockChain) BlockByHeight(blockHeight int32) (*btcutil.Block, error) { // Lookup the block height in the best chain. node := b.bestChain.NodeByHeight(blockHeight) if node == nil { str := fmt.Sprintf("no block at height %d exists", blockHeight) return nil, errNotInMainChain(str) } // Load the block from the database and return it. var block *btcutil.Block err := b.db.View(func(dbTx database.Tx) error { var err error block, err = dbFetchBlockByNode(dbTx, node) return err }) return block, err }
[ "func", "(", "b", "*", "BlockChain", ")", "BlockByHeight", "(", "blockHeight", "int32", ")", "(", "*", "btcutil", ".", "Block", ",", "error", ")", "{", "// Lookup the block height in the best chain.", "node", ":=", "b", ".", "bestChain", ".", "NodeByHeight", "(", "blockHeight", ")", "\n", "if", "node", "==", "nil", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "blockHeight", ")", "\n", "return", "nil", ",", "errNotInMainChain", "(", "str", ")", "\n", "}", "\n\n", "// Load the block from the database and return it.", "var", "block", "*", "btcutil", ".", "Block", "\n", "err", ":=", "b", ".", "db", ".", "View", "(", "func", "(", "dbTx", "database", ".", "Tx", ")", "error", "{", "var", "err", "error", "\n", "block", ",", "err", "=", "dbFetchBlockByNode", "(", "dbTx", ",", "node", ")", "\n", "return", "err", "\n", "}", ")", "\n", "return", "block", ",", "err", "\n", "}" ]
// BlockByHeight returns the block at the given height in the main chain. // // This function is safe for concurrent access.
[ "BlockByHeight", "returns", "the", "block", "at", "the", "given", "height", "in", "the", "main", "chain", ".", "This", "function", "is", "safe", "for", "concurrent", "access", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1377-L1393
train
btcsuite/btcd
blockchain/chainio.go
BlockByHash
func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*btcutil.Block, error) { // Lookup the block hash in block index and ensure it is in the best // chain. node := b.index.LookupNode(hash) if node == nil || !b.bestChain.Contains(node) { str := fmt.Sprintf("block %s is not in the main chain", hash) return nil, errNotInMainChain(str) } // Load the block from the database and return it. var block *btcutil.Block err := b.db.View(func(dbTx database.Tx) error { var err error block, err = dbFetchBlockByNode(dbTx, node) return err }) return block, err }
go
func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*btcutil.Block, error) { // Lookup the block hash in block index and ensure it is in the best // chain. node := b.index.LookupNode(hash) if node == nil || !b.bestChain.Contains(node) { str := fmt.Sprintf("block %s is not in the main chain", hash) return nil, errNotInMainChain(str) } // Load the block from the database and return it. var block *btcutil.Block err := b.db.View(func(dbTx database.Tx) error { var err error block, err = dbFetchBlockByNode(dbTx, node) return err }) return block, err }
[ "func", "(", "b", "*", "BlockChain", ")", "BlockByHash", "(", "hash", "*", "chainhash", ".", "Hash", ")", "(", "*", "btcutil", ".", "Block", ",", "error", ")", "{", "// Lookup the block hash in block index and ensure it is in the best", "// chain.", "node", ":=", "b", ".", "index", ".", "LookupNode", "(", "hash", ")", "\n", "if", "node", "==", "nil", "||", "!", "b", ".", "bestChain", ".", "Contains", "(", "node", ")", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "hash", ")", "\n", "return", "nil", ",", "errNotInMainChain", "(", "str", ")", "\n", "}", "\n\n", "// Load the block from the database and return it.", "var", "block", "*", "btcutil", ".", "Block", "\n", "err", ":=", "b", ".", "db", ".", "View", "(", "func", "(", "dbTx", "database", ".", "Tx", ")", "error", "{", "var", "err", "error", "\n", "block", ",", "err", "=", "dbFetchBlockByNode", "(", "dbTx", ",", "node", ")", "\n", "return", "err", "\n", "}", ")", "\n", "return", "block", ",", "err", "\n", "}" ]
// BlockByHash returns the block from the main chain with the given hash with // the appropriate chain height set. // // This function is safe for concurrent access.
[ "BlockByHash", "returns", "the", "block", "from", "the", "main", "chain", "with", "the", "given", "hash", "with", "the", "appropriate", "chain", "height", "set", ".", "This", "function", "is", "safe", "for", "concurrent", "access", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/chainio.go#L1399-L1416
train
btcsuite/btcd
rpcclient/extensions.go
Receive
func (r FutureDebugLevelResult) Receive() (string, error) { res, err := receiveFuture(r) if err != nil { return "", err } // Unmarshal the result as a string. var result string err = json.Unmarshal(res, &result) if err != nil { return "", err } return result, nil }
go
func (r FutureDebugLevelResult) Receive() (string, error) { res, err := receiveFuture(r) if err != nil { return "", err } // Unmarshal the result as a string. var result string err = json.Unmarshal(res, &result) if err != nil { return "", err } return result, nil }
[ "func", "(", "r", "FutureDebugLevelResult", ")", "Receive", "(", ")", "(", "string", ",", "error", ")", "{", "res", ",", "err", ":=", "receiveFuture", "(", "r", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n\n", "// Unmarshal the result as a string.", "var", "result", "string", "\n", "err", "=", "json", ".", "Unmarshal", "(", "res", ",", "&", "result", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"", "\"", ",", "err", "\n", "}", "\n", "return", "result", ",", "nil", "\n", "}" ]
// Receive waits for the response promised by the future and returns the result // of setting the debug logging level to the passed level specification or the // list of the available subsystems for the special keyword 'show'.
[ "Receive", "waits", "for", "the", "response", "promised", "by", "the", "future", "and", "returns", "the", "result", "of", "setting", "the", "debug", "logging", "level", "to", "the", "passed", "level", "specification", "or", "the", "list", "of", "the", "available", "subsystems", "for", "the", "special", "keyword", "show", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/rpcclient/extensions.go#L28-L41
train
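The rpcclient futures in these records all follow the same Async/Receive split. Below is a minimal sketch of that pattern; it assumes the conventional DebugLevelAsync constructor exists alongside the FutureDebugLevelResult type shown above, per the package's Async naming convention.

```go
package example

import "github.com/btcsuite/btcd/rpcclient"

// listDebugSubsystems queues a debuglevel request without blocking and then
// waits on the returned future. Passing the special keyword "show" asks the
// server for its list of available logging subsystems, per the docstring above.
func listDebugSubsystems(c *rpcclient.Client) (string, error) {
	future := c.DebugLevelAsync("show")
	// Other work could be interleaved here before the response is needed.
	return future.Receive()
}
```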
btcsuite/btcd
rpcclient/extensions.go
Receive
func (r FutureGetCurrentNetResult) Receive() (wire.BitcoinNet, error) { res, err := receiveFuture(r) if err != nil { return 0, err } // Unmarshal result as an int64. var net int64 err = json.Unmarshal(res, &net) if err != nil { return 0, err } return wire.BitcoinNet(net), nil }
go
func (r FutureGetCurrentNetResult) Receive() (wire.BitcoinNet, error) { res, err := receiveFuture(r) if err != nil { return 0, err } // Unmarshal result as an int64. var net int64 err = json.Unmarshal(res, &net) if err != nil { return 0, err } return wire.BitcoinNet(net), nil }
[ "func", "(", "r", "FutureGetCurrentNetResult", ")", "Receive", "(", ")", "(", "wire", ".", "BitcoinNet", ",", "error", ")", "{", "res", ",", "err", ":=", "receiveFuture", "(", "r", ")", "\n", "if", "err", "!=", "nil", "{", "return", "0", ",", "err", "\n", "}", "\n\n", "// Unmarshal result as an int64.", "var", "net", "int64", "\n", "err", "=", "json", ".", "Unmarshal", "(", "res", ",", "&", "net", ")", "\n", "if", "err", "!=", "nil", "{", "return", "0", ",", "err", "\n", "}", "\n\n", "return", "wire", ".", "BitcoinNet", "(", "net", ")", ",", "nil", "\n", "}" ]
// Receive waits for the response promised by the future and returns the network // the server is running on.
[ "Receive", "waits", "for", "the", "response", "promised", "by", "the", "future", "and", "returns", "the", "network", "the", "server", "is", "running", "on", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/rpcclient/extensions.go#L203-L217
train
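As a follow-on sketch (again assuming the conventional Async accessor), the wire.BitcoinNet value produced by the future above can be compared against the known network magic constants.

```go
package example

import (
	"fmt"

	"github.com/btcsuite/btcd/rpcclient"
	"github.com/btcsuite/btcd/wire"
)

// requireMainnet checks that the remote btcd instance is running on mainnet
// by using the GetCurrentNet future documented above.
func requireMainnet(c *rpcclient.Client) error {
	net, err := c.GetCurrentNetAsync().Receive()
	if err != nil {
		return err
	}
	if net != wire.MainNet {
		return fmt.Errorf("connected to %v, expected %v", net, wire.MainNet)
	}
	return nil
}
```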
btcsuite/btcd
rpcclient/extensions.go
Receive
func (r FutureExportWatchingWalletResult) Receive() ([]byte, []byte, error) { res, err := receiveFuture(r) if err != nil { return nil, nil, err } // Unmarshal result as a JSON object. var obj map[string]interface{} err = json.Unmarshal(res, &obj) if err != nil { return nil, nil, err } // Check for the wallet and tx string fields in the object. base64Wallet, ok := obj["wallet"].(string) if !ok { return nil, nil, fmt.Errorf("unexpected response type for "+ "exportwatchingwallet 'wallet' field: %T\n", obj["wallet"]) } base64TxStore, ok := obj["tx"].(string) if !ok { return nil, nil, fmt.Errorf("unexpected response type for "+ "exportwatchingwallet 'tx' field: %T\n", obj["tx"]) } walletBytes, err := base64.StdEncoding.DecodeString(base64Wallet) if err != nil { return nil, nil, err } txStoreBytes, err := base64.StdEncoding.DecodeString(base64TxStore) if err != nil { return nil, nil, err } return walletBytes, txStoreBytes, nil }
go
func (r FutureExportWatchingWalletResult) Receive() ([]byte, []byte, error) { res, err := receiveFuture(r) if err != nil { return nil, nil, err } // Unmarshal result as a JSON object. var obj map[string]interface{} err = json.Unmarshal(res, &obj) if err != nil { return nil, nil, err } // Check for the wallet and tx string fields in the object. base64Wallet, ok := obj["wallet"].(string) if !ok { return nil, nil, fmt.Errorf("unexpected response type for "+ "exportwatchingwallet 'wallet' field: %T\n", obj["wallet"]) } base64TxStore, ok := obj["tx"].(string) if !ok { return nil, nil, fmt.Errorf("unexpected response type for "+ "exportwatchingwallet 'tx' field: %T\n", obj["tx"]) } walletBytes, err := base64.StdEncoding.DecodeString(base64Wallet) if err != nil { return nil, nil, err } txStoreBytes, err := base64.StdEncoding.DecodeString(base64TxStore) if err != nil { return nil, nil, err } return walletBytes, txStoreBytes, nil }
[ "func", "(", "r", "FutureExportWatchingWalletResult", ")", "Receive", "(", ")", "(", "[", "]", "byte", ",", "[", "]", "byte", ",", "error", ")", "{", "res", ",", "err", ":=", "receiveFuture", "(", "r", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "nil", ",", "err", "\n", "}", "\n\n", "// Unmarshal result as a JSON object.", "var", "obj", "map", "[", "string", "]", "interface", "{", "}", "\n", "err", "=", "json", ".", "Unmarshal", "(", "res", ",", "&", "obj", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "nil", ",", "err", "\n", "}", "\n\n", "// Check for the wallet and tx string fields in the object.", "base64Wallet", ",", "ok", ":=", "obj", "[", "\"", "\"", "]", ".", "(", "string", ")", "\n", "if", "!", "ok", "{", "return", "nil", ",", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", "+", "\"", "\\n", "\"", ",", "obj", "[", "\"", "\"", "]", ")", "\n", "}", "\n", "base64TxStore", ",", "ok", ":=", "obj", "[", "\"", "\"", "]", ".", "(", "string", ")", "\n", "if", "!", "ok", "{", "return", "nil", ",", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", "+", "\"", "\\n", "\"", ",", "obj", "[", "\"", "\"", "]", ")", "\n", "}", "\n\n", "walletBytes", ",", "err", ":=", "base64", ".", "StdEncoding", ".", "DecodeString", "(", "base64Wallet", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "nil", ",", "err", "\n", "}", "\n\n", "txStoreBytes", ",", "err", ":=", "base64", ".", "StdEncoding", ".", "DecodeString", "(", "base64TxStore", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "nil", ",", "err", "\n", "}", "\n\n", "return", "walletBytes", ",", "txStoreBytes", ",", "nil", "\n\n", "}" ]
// Receive waits for the response promised by the future and returns the // exported wallet.
[ "Receive", "waits", "for", "the", "response", "promised", "by", "the", "future", "and", "returns", "the", "exported", "wallet", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/rpcclient/extensions.go#L314-L353
train
btcsuite/btcd
rpcclient/extensions.go
Receive
func (r FutureSessionResult) Receive() (*btcjson.SessionResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } // Unmarshal result as a session result object. var session btcjson.SessionResult err = json.Unmarshal(res, &session) if err != nil { return nil, err } return &session, nil }
go
func (r FutureSessionResult) Receive() (*btcjson.SessionResult, error) { res, err := receiveFuture(r) if err != nil { return nil, err } // Unmarshal result as a session result object. var session btcjson.SessionResult err = json.Unmarshal(res, &session) if err != nil { return nil, err } return &session, nil }
[ "func", "(", "r", "FutureSessionResult", ")", "Receive", "(", ")", "(", "*", "btcjson", ".", "SessionResult", ",", "error", ")", "{", "res", ",", "err", ":=", "receiveFuture", "(", "r", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "// Unmarshal result as a session result object.", "var", "session", "btcjson", ".", "SessionResult", "\n", "err", "=", "json", ".", "Unmarshal", "(", "res", ",", "&", "session", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "return", "&", "session", ",", "nil", "\n", "}" ]
// Receive waits for the response promised by the future and returns the // session result.
[ "Receive", "waits", "for", "the", "response", "promised", "by", "the", "future", "and", "returns", "the", "session", "result", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/rpcclient/extensions.go#L383-L397
train
btcsuite/btcd
netsync/manager.go
resetHeaderState
func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) { sm.headersFirstMode = false sm.headerList.Init() sm.startHeader = nil // When there is a next checkpoint, add an entry for the latest known // block into the header pool. This allows the next downloaded header // to prove it links to the chain properly. if sm.nextCheckpoint != nil { node := headerNode{height: newestHeight, hash: newestHash} sm.headerList.PushBack(&node) } }
go
func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) { sm.headersFirstMode = false sm.headerList.Init() sm.startHeader = nil // When there is a next checkpoint, add an entry for the latest known // block into the header pool. This allows the next downloaded header // to prove it links to the chain properly. if sm.nextCheckpoint != nil { node := headerNode{height: newestHeight, hash: newestHash} sm.headerList.PushBack(&node) } }
[ "func", "(", "sm", "*", "SyncManager", ")", "resetHeaderState", "(", "newestHash", "*", "chainhash", ".", "Hash", ",", "newestHeight", "int32", ")", "{", "sm", ".", "headersFirstMode", "=", "false", "\n", "sm", ".", "headerList", ".", "Init", "(", ")", "\n", "sm", ".", "startHeader", "=", "nil", "\n\n", "// When there is a next checkpoint, add an entry for the latest known", "// block into the header pool. This allows the next downloaded header", "// to prove it links to the chain properly.", "if", "sm", ".", "nextCheckpoint", "!=", "nil", "{", "node", ":=", "headerNode", "{", "height", ":", "newestHeight", ",", "hash", ":", "newestHash", "}", "\n", "sm", ".", "headerList", ".", "PushBack", "(", "&", "node", ")", "\n", "}", "\n", "}" ]
// resetHeaderState sets the headers-first mode state to values appropriate for // syncing from a new peer.
[ "resetHeaderState", "sets", "the", "headers", "-", "first", "mode", "state", "to", "values", "appropriate", "for", "syncing", "from", "a", "new", "peer", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L187-L199
train
btcsuite/btcd
netsync/manager.go
findNextHeaderCheckpoint
func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint { checkpoints := sm.chain.Checkpoints() if len(checkpoints) == 0 { return nil } // There is no next checkpoint if the height is already after the final // checkpoint. finalCheckpoint := &checkpoints[len(checkpoints)-1] if height >= finalCheckpoint.Height { return nil } // Find the next checkpoint. nextCheckpoint := finalCheckpoint for i := len(checkpoints) - 2; i >= 0; i-- { if height >= checkpoints[i].Height { break } nextCheckpoint = &checkpoints[i] } return nextCheckpoint }
go
func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint { checkpoints := sm.chain.Checkpoints() if len(checkpoints) == 0 { return nil } // There is no next checkpoint if the height is already after the final // checkpoint. finalCheckpoint := &checkpoints[len(checkpoints)-1] if height >= finalCheckpoint.Height { return nil } // Find the next checkpoint. nextCheckpoint := finalCheckpoint for i := len(checkpoints) - 2; i >= 0; i-- { if height >= checkpoints[i].Height { break } nextCheckpoint = &checkpoints[i] } return nextCheckpoint }
[ "func", "(", "sm", "*", "SyncManager", ")", "findNextHeaderCheckpoint", "(", "height", "int32", ")", "*", "chaincfg", ".", "Checkpoint", "{", "checkpoints", ":=", "sm", ".", "chain", ".", "Checkpoints", "(", ")", "\n", "if", "len", "(", "checkpoints", ")", "==", "0", "{", "return", "nil", "\n", "}", "\n\n", "// There is no next checkpoint if the height is already after the final", "// checkpoint.", "finalCheckpoint", ":=", "&", "checkpoints", "[", "len", "(", "checkpoints", ")", "-", "1", "]", "\n", "if", "height", ">=", "finalCheckpoint", ".", "Height", "{", "return", "nil", "\n", "}", "\n\n", "// Find the next checkpoint.", "nextCheckpoint", ":=", "finalCheckpoint", "\n", "for", "i", ":=", "len", "(", "checkpoints", ")", "-", "2", ";", "i", ">=", "0", ";", "i", "--", "{", "if", "height", ">=", "checkpoints", "[", "i", "]", ".", "Height", "{", "break", "\n", "}", "\n", "nextCheckpoint", "=", "&", "checkpoints", "[", "i", "]", "\n", "}", "\n", "return", "nextCheckpoint", "\n", "}" ]
// findNextHeaderCheckpoint returns the next checkpoint after the passed height. // It returns nil when there is no next checkpoint, either because the height is // already later than the final checkpoint or for some other reason such as // disabled checkpoints.
[ "findNextHeaderCheckpoint", "returns", "the", "next", "checkpoint", "after", "the", "passed", "height", ".", "It", "returns", "nil", "when", "there", "is", "no", "next", "checkpoint", "either", "because", "the", "height", "is", "already", "later", "than", "the", "final", "checkpoint", "or", "for", "some", "other", "reason", "such", "as", "disabled", "checkpoints", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L205-L227
train
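The checkpoint scan above is self-contained logic, so it can be restated as a free function for clarity. This is an illustrative rewrite, not code from the repository.

```go
package example

import "github.com/btcsuite/btcd/chaincfg"

// nextCheckpointAfter mirrors the scan in findNextHeaderCheckpoint: walk
// backwards from the final checkpoint and keep the last one whose height is
// still above the given height.
func nextCheckpointAfter(checkpoints []chaincfg.Checkpoint, height int32) *chaincfg.Checkpoint {
	if len(checkpoints) == 0 {
		return nil
	}
	final := &checkpoints[len(checkpoints)-1]
	if height >= final.Height {
		return nil // already at or past the final checkpoint
	}
	next := final
	for i := len(checkpoints) - 2; i >= 0; i-- {
		if height >= checkpoints[i].Height {
			break
		}
		next = &checkpoints[i]
	}
	return next
}
```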
btcsuite/btcd
netsync/manager.go
handleStallSample
func (sm *SyncManager) handleStallSample() { if atomic.LoadInt32(&sm.shutdown) != 0 { return } // If we don't have an active sync peer, exit early. if sm.syncPeer == nil { return } // If the stall timeout has not elapsed, exit early. if time.Since(sm.lastProgressTime) <= maxStallDuration { return } // Check to see that the peer's sync state exists. state, exists := sm.peerStates[sm.syncPeer] if !exists { return } sm.clearRequestedState(state) disconnectSyncPeer := sm.shouldDCStalledSyncPeer() sm.updateSyncPeer(disconnectSyncPeer) }
go
func (sm *SyncManager) handleStallSample() { if atomic.LoadInt32(&sm.shutdown) != 0 { return } // If we don't have an active sync peer, exit early. if sm.syncPeer == nil { return } // If the stall timeout has not elapsed, exit early. if time.Since(sm.lastProgressTime) <= maxStallDuration { return } // Check to see that the peer's sync state exists. state, exists := sm.peerStates[sm.syncPeer] if !exists { return } sm.clearRequestedState(state) disconnectSyncPeer := sm.shouldDCStalledSyncPeer() sm.updateSyncPeer(disconnectSyncPeer) }
[ "func", "(", "sm", "*", "SyncManager", ")", "handleStallSample", "(", ")", "{", "if", "atomic", ".", "LoadInt32", "(", "&", "sm", ".", "shutdown", ")", "!=", "0", "{", "return", "\n", "}", "\n\n", "// If we don't have an active sync peer, exit early.", "if", "sm", ".", "syncPeer", "==", "nil", "{", "return", "\n", "}", "\n\n", "// If the stall timeout has not elapsed, exit early.", "if", "time", ".", "Since", "(", "sm", ".", "lastProgressTime", ")", "<=", "maxStallDuration", "{", "return", "\n", "}", "\n\n", "// Check to see that the peer's sync state exists.", "state", ",", "exists", ":=", "sm", ".", "peerStates", "[", "sm", ".", "syncPeer", "]", "\n", "if", "!", "exists", "{", "return", "\n", "}", "\n\n", "sm", ".", "clearRequestedState", "(", "state", ")", "\n\n", "disconnectSyncPeer", ":=", "sm", ".", "shouldDCStalledSyncPeer", "(", ")", "\n", "sm", ".", "updateSyncPeer", "(", "disconnectSyncPeer", ")", "\n", "}" ]
// handleStallSample will switch to a new sync peer if the current one has // stalled. This is detected by comparing the last progress timestamp with // the current time, and disconnecting the peer if we stalled before reaching // their highest advertised block.
[ "handleStallSample", "will", "switch", "to", "a", "new", "sync", "peer", "if", "the", "current", "one", "has", "stalled", ".", "This", "is", "detected", "by", "comparing", "the", "last", "progress", "timestamp", "with", "the", "current", "time", "and", "disconnecting", "the", "peer", "if", "we", "stalled", "before", "reaching", "their", "highest", "advertised", "block", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L423-L448
train
btcsuite/btcd
netsync/manager.go
shouldDCStalledSyncPeer
func (sm *SyncManager) shouldDCStalledSyncPeer() bool { lastBlock := sm.syncPeer.LastBlock() startHeight := sm.syncPeer.StartingHeight() var peerHeight int32 if lastBlock > startHeight { peerHeight = lastBlock } else { peerHeight = startHeight } // If we've stalled out yet the sync peer reports having more blocks for // us we will disconnect them. This allows us at tip to not disconnect // peers when we are equal or they temporarily lag behind us. best := sm.chain.BestSnapshot() return peerHeight > best.Height }
go
func (sm *SyncManager) shouldDCStalledSyncPeer() bool { lastBlock := sm.syncPeer.LastBlock() startHeight := sm.syncPeer.StartingHeight() var peerHeight int32 if lastBlock > startHeight { peerHeight = lastBlock } else { peerHeight = startHeight } // If we've stalled out yet the sync peer reports having more blocks for // us we will disconnect them. This allows us at tip to not disconnect // peers when we are equal or they temporarily lag behind us. best := sm.chain.BestSnapshot() return peerHeight > best.Height }
[ "func", "(", "sm", "*", "SyncManager", ")", "shouldDCStalledSyncPeer", "(", ")", "bool", "{", "lastBlock", ":=", "sm", ".", "syncPeer", ".", "LastBlock", "(", ")", "\n", "startHeight", ":=", "sm", ".", "syncPeer", ".", "StartingHeight", "(", ")", "\n\n", "var", "peerHeight", "int32", "\n", "if", "lastBlock", ">", "startHeight", "{", "peerHeight", "=", "lastBlock", "\n", "}", "else", "{", "peerHeight", "=", "startHeight", "\n", "}", "\n\n", "// If we've stalled out yet the sync peer reports having more blocks for", "// us we will disconnect them. This allows us at tip to not disconnect", "// peers when we are equal or they temporarily lag behind us.", "best", ":=", "sm", ".", "chain", ".", "BestSnapshot", "(", ")", "\n", "return", "peerHeight", ">", "best", ".", "Height", "\n", "}" ]
// shouldDCStalledSyncPeer determines whether or not we should disconnect a // stalled sync peer. If the peer has stalled and its reported height is greater // than our own best height, we will disconnect it. Otherwise, we will keep the // peer connected in case we are already at tip.
[ "shouldDCStalledSyncPeer", "determines", "whether", "or", "not", "we", "should", "disconnect", "a", "stalled", "sync", "peer", ".", "If", "the", "peer", "has", "stalled", "and", "its", "reported", "height", "is", "greater", "than", "our", "own", "best", "height", "we", "will", "disconnect", "it", ".", "Otherwise", "we", "will", "keep", "the", "peer", "connected", "in", "case", "we", "are", "already", "at", "tip", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L454-L470
train
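handleStallSample and shouldDCStalledSyncPeer together implement a two-part rule: declare a stall only after the stall timeout has elapsed with no progress, and drop the stalled peer only when it still advertises more blocks than our tip. A standalone sketch of that combined decision follows; the names are illustrative.

```go
package example

import "time"

// stalledPeerShouldDisconnect combines the two checks documented above: the
// sync is considered stalled once no progress has been made for longer than
// maxStall, and the stalled peer is only dropped when it still claims more
// blocks than our best height.
func stalledPeerShouldDisconnect(lastProgress time.Time, maxStall time.Duration,
	peerLastBlock, peerStartHeight, bestHeight int32) bool {

	if time.Since(lastProgress) <= maxStall {
		return false // not stalled yet
	}
	peerHeight := peerStartHeight
	if peerLastBlock > peerStartHeight {
		peerHeight = peerLastBlock
	}
	return peerHeight > bestHeight
}
```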
btcsuite/btcd
netsync/manager.go
clearRequestedState
func (sm *SyncManager) clearRequestedState(state *peerSyncState) { // Remove requested transactions from the global map so that they will // be fetched from elsewhere next time we get an inv. for txHash := range state.requestedTxns { delete(sm.requestedTxns, txHash) } // Remove requested blocks from the global map so that they will be // fetched from elsewhere next time we get an inv. // TODO: we could possibly here check which peers have these blocks // and request them now to speed things up a little. for blockHash := range state.requestedBlocks { delete(sm.requestedBlocks, blockHash) } }
go
func (sm *SyncManager) clearRequestedState(state *peerSyncState) { // Remove requested transactions from the global map so that they will // be fetched from elsewhere next time we get an inv. for txHash := range state.requestedTxns { delete(sm.requestedTxns, txHash) } // Remove requested blocks from the global map so that they will be // fetched from elsewhere next time we get an inv. // TODO: we could possibly here check which peers have these blocks // and request them now to speed things up a little. for blockHash := range state.requestedBlocks { delete(sm.requestedBlocks, blockHash) } }
[ "func", "(", "sm", "*", "SyncManager", ")", "clearRequestedState", "(", "state", "*", "peerSyncState", ")", "{", "// Remove requested transactions from the global map so that they will", "// be fetched from elsewhere next time we get an inv.", "for", "txHash", ":=", "range", "state", ".", "requestedTxns", "{", "delete", "(", "sm", ".", "requestedTxns", ",", "txHash", ")", "\n", "}", "\n\n", "// Remove requested blocks from the global map so that they will be", "// fetched from elsewhere next time we get an inv.", "// TODO: we could possibly here check which peers have these blocks", "// and request them now to speed things up a little.", "for", "blockHash", ":=", "range", "state", ".", "requestedBlocks", "{", "delete", "(", "sm", ".", "requestedBlocks", ",", "blockHash", ")", "\n", "}", "\n", "}" ]
// clearRequestedState wipes all expected transactions and blocks from the sync // manager's requested maps that were requested under a peer's sync state. This // allows them to be rerequested by a subsequent sync peer.
[ "clearRequestedState", "wipes", "all", "expected", "transactions", "and", "blocks", "from", "the", "sync", "manager", "s", "requested", "maps", "that", "were", "requested", "under", "a", "peer", "s", "sync", "state", ".", "This", "allows", "them", "to", "be", "rerequested", "by", "a", "subsequent", "sync", "peer", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L500-L514
train
btcsuite/btcd
netsync/manager.go
updateSyncPeer
func (sm *SyncManager) updateSyncPeer(dcSyncPeer bool) { log.Debugf("Updating sync peer, no progress for: %v", time.Since(sm.lastProgressTime)) // First, disconnect the current sync peer if requested. if dcSyncPeer { sm.syncPeer.Disconnect() } // Reset any header state before we choose our next active sync peer. if sm.headersFirstMode { best := sm.chain.BestSnapshot() sm.resetHeaderState(&best.Hash, best.Height) } sm.syncPeer = nil sm.startSync() }
go
func (sm *SyncManager) updateSyncPeer(dcSyncPeer bool) { log.Debugf("Updating sync peer, no progress for: %v", time.Since(sm.lastProgressTime)) // First, disconnect the current sync peer if requested. if dcSyncPeer { sm.syncPeer.Disconnect() } // Reset any header state before we choose our next active sync peer. if sm.headersFirstMode { best := sm.chain.BestSnapshot() sm.resetHeaderState(&best.Hash, best.Height) } sm.syncPeer = nil sm.startSync() }
[ "func", "(", "sm", "*", "SyncManager", ")", "updateSyncPeer", "(", "dcSyncPeer", "bool", ")", "{", "log", ".", "Debugf", "(", "\"", "\"", ",", "time", ".", "Since", "(", "sm", ".", "lastProgressTime", ")", ")", "\n\n", "// First, disconnect the current sync peer if requested.", "if", "dcSyncPeer", "{", "sm", ".", "syncPeer", ".", "Disconnect", "(", ")", "\n", "}", "\n\n", "// Reset any header state before we choose our next active sync peer.", "if", "sm", ".", "headersFirstMode", "{", "best", ":=", "sm", ".", "chain", ".", "BestSnapshot", "(", ")", "\n", "sm", ".", "resetHeaderState", "(", "&", "best", ".", "Hash", ",", "best", ".", "Height", ")", "\n", "}", "\n\n", "sm", ".", "syncPeer", "=", "nil", "\n", "sm", ".", "startSync", "(", ")", "\n", "}" ]
// updateSyncPeer chooses a new sync peer to replace the current one. If // dcSyncPeer is true, this method will also disconnect the current sync peer. // If we are in headers-first mode, any header state related to prefetching is // also reset in preparation for the next sync peer.
[ "updateSyncPeer", "chooses", "a", "new", "sync", "peer", "to", "replace", "the", "current", "one", ".", "If", "dcSyncPeer", "is", "true", "this", "method", "will", "also", "disconnect", "the", "current", "sync", "peer", ".", "If", "we", "are", "in", "headers", "-", "first", "mode", "any", "header", "state", "related", "to", "prefetching", "is", "also", "reset", "in", "preparation", "for", "the", "next", "sync", "peer", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L520-L537
train
btcsuite/btcd
netsync/manager.go
handleTxMsg
func (sm *SyncManager) handleTxMsg(tmsg *txMsg) { peer := tmsg.peer state, exists := sm.peerStates[peer] if !exists { log.Warnf("Received tx message from unknown peer %s", peer) return } // NOTE: BitcoinJ, and possibly other wallets, don't follow the spec of // sending an inventory message and allowing the remote peer to decide // whether or not they want to request the transaction via a getdata // message. Unfortunately, the reference implementation permits // unrequested data, so it has allowed wallets that don't follow the // spec to proliferate. While this is not ideal, there is no check here // to disconnect peers for sending unsolicited transactions to provide // interoperability. txHash := tmsg.tx.Hash() // Ignore transactions that we have already rejected. Do not // send a reject message here because if the transaction was already // rejected, the transaction was unsolicited. if _, exists = sm.rejectedTxns[*txHash]; exists { log.Debugf("Ignoring unsolicited previously rejected "+ "transaction %v from %s", txHash, peer) return } // Process the transaction to include validation, insertion in the // memory pool, orphan handling, etc. acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx, true, true, mempool.Tag(peer.ID())) // Remove transaction from request maps. Either the mempool/chain // already knows about it and as such we shouldn't have any more // instances of trying to fetch it, or we failed to insert and thus // we'll retry next time we get an inv. delete(state.requestedTxns, *txHash) delete(sm.requestedTxns, *txHash) if err != nil { // Do not request this transaction again until a new block // has been processed. sm.rejectedTxns[*txHash] = struct{}{} sm.limitMap(sm.rejectedTxns, maxRejectedTxns) // When the error is a rule error, it means the transaction was // simply rejected as opposed to something actually going wrong, // so log it as such. Otherwise, something really did go wrong, // so log it as an actual error. if _, ok := err.(mempool.RuleError); ok { log.Debugf("Rejected transaction %v from %s: %v", txHash, peer, err) } else { log.Errorf("Failed to process transaction %v: %v", txHash, err) } // Convert the error into an appropriate reject message and // send it. code, reason := mempool.ErrToRejectErr(err) peer.PushRejectMsg(wire.CmdTx, code, reason, txHash, false) return } sm.peerNotifier.AnnounceNewTransactions(acceptedTxs) }
go
func (sm *SyncManager) handleTxMsg(tmsg *txMsg) { peer := tmsg.peer state, exists := sm.peerStates[peer] if !exists { log.Warnf("Received tx message from unknown peer %s", peer) return } // NOTE: BitcoinJ, and possibly other wallets, don't follow the spec of // sending an inventory message and allowing the remote peer to decide // whether or not they want to request the transaction via a getdata // message. Unfortunately, the reference implementation permits // unrequested data, so it has allowed wallets that don't follow the // spec to proliferate. While this is not ideal, there is no check here // to disconnect peers for sending unsolicited transactions to provide // interoperability. txHash := tmsg.tx.Hash() // Ignore transactions that we have already rejected. Do not // send a reject message here because if the transaction was already // rejected, the transaction was unsolicited. if _, exists = sm.rejectedTxns[*txHash]; exists { log.Debugf("Ignoring unsolicited previously rejected "+ "transaction %v from %s", txHash, peer) return } // Process the transaction to include validation, insertion in the // memory pool, orphan handling, etc. acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx, true, true, mempool.Tag(peer.ID())) // Remove transaction from request maps. Either the mempool/chain // already knows about it and as such we shouldn't have any more // instances of trying to fetch it, or we failed to insert and thus // we'll retry next time we get an inv. delete(state.requestedTxns, *txHash) delete(sm.requestedTxns, *txHash) if err != nil { // Do not request this transaction again until a new block // has been processed. sm.rejectedTxns[*txHash] = struct{}{} sm.limitMap(sm.rejectedTxns, maxRejectedTxns) // When the error is a rule error, it means the transaction was // simply rejected as opposed to something actually going wrong, // so log it as such. Otherwise, something really did go wrong, // so log it as an actual error. if _, ok := err.(mempool.RuleError); ok { log.Debugf("Rejected transaction %v from %s: %v", txHash, peer, err) } else { log.Errorf("Failed to process transaction %v: %v", txHash, err) } // Convert the error into an appropriate reject message and // send it. code, reason := mempool.ErrToRejectErr(err) peer.PushRejectMsg(wire.CmdTx, code, reason, txHash, false) return } sm.peerNotifier.AnnounceNewTransactions(acceptedTxs) }
[ "func", "(", "sm", "*", "SyncManager", ")", "handleTxMsg", "(", "tmsg", "*", "txMsg", ")", "{", "peer", ":=", "tmsg", ".", "peer", "\n", "state", ",", "exists", ":=", "sm", ".", "peerStates", "[", "peer", "]", "\n", "if", "!", "exists", "{", "log", ".", "Warnf", "(", "\"", "\"", ",", "peer", ")", "\n", "return", "\n", "}", "\n\n", "// NOTE: BitcoinJ, and possibly other wallets, don't follow the spec of", "// sending an inventory message and allowing the remote peer to decide", "// whether or not they want to request the transaction via a getdata", "// message. Unfortunately, the reference implementation permits", "// unrequested data, so it has allowed wallets that don't follow the", "// spec to proliferate. While this is not ideal, there is no check here", "// to disconnect peers for sending unsolicited transactions to provide", "// interoperability.", "txHash", ":=", "tmsg", ".", "tx", ".", "Hash", "(", ")", "\n\n", "// Ignore transactions that we have already rejected. Do not", "// send a reject message here because if the transaction was already", "// rejected, the transaction was unsolicited.", "if", "_", ",", "exists", "=", "sm", ".", "rejectedTxns", "[", "*", "txHash", "]", ";", "exists", "{", "log", ".", "Debugf", "(", "\"", "\"", "+", "\"", "\"", ",", "txHash", ",", "peer", ")", "\n", "return", "\n", "}", "\n\n", "// Process the transaction to include validation, insertion in the", "// memory pool, orphan handling, etc.", "acceptedTxs", ",", "err", ":=", "sm", ".", "txMemPool", ".", "ProcessTransaction", "(", "tmsg", ".", "tx", ",", "true", ",", "true", ",", "mempool", ".", "Tag", "(", "peer", ".", "ID", "(", ")", ")", ")", "\n\n", "// Remove transaction from request maps. Either the mempool/chain", "// already knows about it and as such we shouldn't have any more", "// instances of trying to fetch it, or we failed to insert and thus", "// we'll retry next time we get an inv.", "delete", "(", "state", ".", "requestedTxns", ",", "*", "txHash", ")", "\n", "delete", "(", "sm", ".", "requestedTxns", ",", "*", "txHash", ")", "\n\n", "if", "err", "!=", "nil", "{", "// Do not request this transaction again until a new block", "// has been processed.", "sm", ".", "rejectedTxns", "[", "*", "txHash", "]", "=", "struct", "{", "}", "{", "}", "\n", "sm", ".", "limitMap", "(", "sm", ".", "rejectedTxns", ",", "maxRejectedTxns", ")", "\n\n", "// When the error is a rule error, it means the transaction was", "// simply rejected as opposed to something actually going wrong,", "// so log it as such. Otherwise, something really did go wrong,", "// so log it as an actual error.", "if", "_", ",", "ok", ":=", "err", ".", "(", "mempool", ".", "RuleError", ")", ";", "ok", "{", "log", ".", "Debugf", "(", "\"", "\"", ",", "txHash", ",", "peer", ",", "err", ")", "\n", "}", "else", "{", "log", ".", "Errorf", "(", "\"", "\"", ",", "txHash", ",", "err", ")", "\n", "}", "\n\n", "// Convert the error into an appropriate reject message and", "// send it.", "code", ",", "reason", ":=", "mempool", ".", "ErrToRejectErr", "(", "err", ")", "\n", "peer", ".", "PushRejectMsg", "(", "wire", ".", "CmdTx", ",", "code", ",", "reason", ",", "txHash", ",", "false", ")", "\n", "return", "\n", "}", "\n\n", "sm", ".", "peerNotifier", ".", "AnnounceNewTransactions", "(", "acceptedTxs", ")", "\n", "}" ]
// handleTxMsg handles transaction messages from all peers.
[ "handleTxMsg", "handles", "transaction", "messages", "from", "all", "peers", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L540-L605
train
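handleTxMsg's logging decision hinges on whether the mempool error is a policy rejection or an internal failure. A small sketch of the same type assertion, separated out purely for illustration:

```go
package example

import "github.com/btcsuite/btcd/mempool"

// logLevelForMempoolErr mirrors the error classification in handleTxMsg: a
// mempool.RuleError means the transaction was merely rejected by policy or
// consensus rules, while any other error indicates an internal failure.
func logLevelForMempoolErr(err error) string {
	if _, ok := err.(mempool.RuleError); ok {
		return "debug" // expected rejection, keep the log quiet
	}
	return "error" // unexpected processing failure
}
```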
btcsuite/btcd
netsync/manager.go
current
func (sm *SyncManager) current() bool { if !sm.chain.IsCurrent() { return false } // if blockChain thinks we are current and we have no syncPeer it // is probably right. if sm.syncPeer == nil { return true } // No matter what chain thinks, if we are below the block we are syncing // to we are not current. if sm.chain.BestSnapshot().Height < sm.syncPeer.LastBlock() { return false } return true }
go
func (sm *SyncManager) current() bool { if !sm.chain.IsCurrent() { return false } // if blockChain thinks we are current and we have no syncPeer it // is probably right. if sm.syncPeer == nil { return true } // No matter what chain thinks, if we are below the block we are syncing // to we are not current. if sm.chain.BestSnapshot().Height < sm.syncPeer.LastBlock() { return false } return true }
[ "func", "(", "sm", "*", "SyncManager", ")", "current", "(", ")", "bool", "{", "if", "!", "sm", ".", "chain", ".", "IsCurrent", "(", ")", "{", "return", "false", "\n", "}", "\n\n", "// if blockChain thinks we are current and we have no syncPeer it", "// is probably right.", "if", "sm", ".", "syncPeer", "==", "nil", "{", "return", "true", "\n", "}", "\n\n", "// No matter what chain thinks, if we are below the block we are syncing", "// to we are not current.", "if", "sm", ".", "chain", ".", "BestSnapshot", "(", ")", ".", "Height", "<", "sm", ".", "syncPeer", ".", "LastBlock", "(", ")", "{", "return", "false", "\n", "}", "\n", "return", "true", "\n", "}" ]
// current returns true if we believe we are synced with our peers, false if we // still have blocks to check
[ "current", "returns", "true", "if", "we", "believe", "we", "are", "synced", "with", "our", "peers", "false", "if", "we", "still", "have", "blocks", "to", "check" ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L609-L626
train
btcsuite/btcd
netsync/manager.go
fetchHeaderBlocks
func (sm *SyncManager) fetchHeaderBlocks() { // Nothing to do if there is no start header. if sm.startHeader == nil { log.Warnf("fetchHeaderBlocks called with no start header") return } // Build up a getdata request for the list of blocks the headers // describe. The size hint will be limited to wire.MaxInvPerMsg by // the function, so no need to double check it here. gdmsg := wire.NewMsgGetDataSizeHint(uint(sm.headerList.Len())) numRequested := 0 for e := sm.startHeader; e != nil; e = e.Next() { node, ok := e.Value.(*headerNode) if !ok { log.Warn("Header list node type is not a headerNode") continue } iv := wire.NewInvVect(wire.InvTypeBlock, node.hash) haveInv, err := sm.haveInventory(iv) if err != nil { log.Warnf("Unexpected failure when checking for "+ "existing inventory during header block "+ "fetch: %v", err) } if !haveInv { syncPeerState := sm.peerStates[sm.syncPeer] sm.requestedBlocks[*node.hash] = struct{}{} syncPeerState.requestedBlocks[*node.hash] = struct{}{} // If we're fetching from a witness enabled peer // post-fork, then ensure that we receive all the // witness data in the blocks. if sm.syncPeer.IsWitnessEnabled() { iv.Type = wire.InvTypeWitnessBlock } gdmsg.AddInvVect(iv) numRequested++ } sm.startHeader = e.Next() if numRequested >= wire.MaxInvPerMsg { break } } if len(gdmsg.InvList) > 0 { sm.syncPeer.QueueMessage(gdmsg, nil) } }
go
func (sm *SyncManager) fetchHeaderBlocks() { // Nothing to do if there is no start header. if sm.startHeader == nil { log.Warnf("fetchHeaderBlocks called with no start header") return } // Build up a getdata request for the list of blocks the headers // describe. The size hint will be limited to wire.MaxInvPerMsg by // the function, so no need to double check it here. gdmsg := wire.NewMsgGetDataSizeHint(uint(sm.headerList.Len())) numRequested := 0 for e := sm.startHeader; e != nil; e = e.Next() { node, ok := e.Value.(*headerNode) if !ok { log.Warn("Header list node type is not a headerNode") continue } iv := wire.NewInvVect(wire.InvTypeBlock, node.hash) haveInv, err := sm.haveInventory(iv) if err != nil { log.Warnf("Unexpected failure when checking for "+ "existing inventory during header block "+ "fetch: %v", err) } if !haveInv { syncPeerState := sm.peerStates[sm.syncPeer] sm.requestedBlocks[*node.hash] = struct{}{} syncPeerState.requestedBlocks[*node.hash] = struct{}{} // If we're fetching from a witness enabled peer // post-fork, then ensure that we receive all the // witness data in the blocks. if sm.syncPeer.IsWitnessEnabled() { iv.Type = wire.InvTypeWitnessBlock } gdmsg.AddInvVect(iv) numRequested++ } sm.startHeader = e.Next() if numRequested >= wire.MaxInvPerMsg { break } } if len(gdmsg.InvList) > 0 { sm.syncPeer.QueueMessage(gdmsg, nil) } }
[ "func", "(", "sm", "*", "SyncManager", ")", "fetchHeaderBlocks", "(", ")", "{", "// Nothing to do if there is no start header.", "if", "sm", ".", "startHeader", "==", "nil", "{", "log", ".", "Warnf", "(", "\"", "\"", ")", "\n", "return", "\n", "}", "\n\n", "// Build up a getdata request for the list of blocks the headers", "// describe. The size hint will be limited to wire.MaxInvPerMsg by", "// the function, so no need to double check it here.", "gdmsg", ":=", "wire", ".", "NewMsgGetDataSizeHint", "(", "uint", "(", "sm", ".", "headerList", ".", "Len", "(", ")", ")", ")", "\n", "numRequested", ":=", "0", "\n", "for", "e", ":=", "sm", ".", "startHeader", ";", "e", "!=", "nil", ";", "e", "=", "e", ".", "Next", "(", ")", "{", "node", ",", "ok", ":=", "e", ".", "Value", ".", "(", "*", "headerNode", ")", "\n", "if", "!", "ok", "{", "log", ".", "Warn", "(", "\"", "\"", ")", "\n", "continue", "\n", "}", "\n\n", "iv", ":=", "wire", ".", "NewInvVect", "(", "wire", ".", "InvTypeBlock", ",", "node", ".", "hash", ")", "\n", "haveInv", ",", "err", ":=", "sm", ".", "haveInventory", "(", "iv", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Warnf", "(", "\"", "\"", "+", "\"", "\"", "+", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "if", "!", "haveInv", "{", "syncPeerState", ":=", "sm", ".", "peerStates", "[", "sm", ".", "syncPeer", "]", "\n\n", "sm", ".", "requestedBlocks", "[", "*", "node", ".", "hash", "]", "=", "struct", "{", "}", "{", "}", "\n", "syncPeerState", ".", "requestedBlocks", "[", "*", "node", ".", "hash", "]", "=", "struct", "{", "}", "{", "}", "\n\n", "// If we're fetching from a witness enabled peer", "// post-fork, then ensure that we receive all the", "// witness data in the blocks.", "if", "sm", ".", "syncPeer", ".", "IsWitnessEnabled", "(", ")", "{", "iv", ".", "Type", "=", "wire", ".", "InvTypeWitnessBlock", "\n", "}", "\n\n", "gdmsg", ".", "AddInvVect", "(", "iv", ")", "\n", "numRequested", "++", "\n", "}", "\n", "sm", ".", "startHeader", "=", "e", ".", "Next", "(", ")", "\n", "if", "numRequested", ">=", "wire", ".", "MaxInvPerMsg", "{", "break", "\n", "}", "\n", "}", "\n", "if", "len", "(", "gdmsg", ".", "InvList", ")", ">", "0", "{", "sm", ".", "syncPeer", ".", "QueueMessage", "(", "gdmsg", ",", "nil", ")", "\n", "}", "\n", "}" ]
// fetchHeaderBlocks creates and sends a request to the syncPeer for the next // list of blocks to be downloaded based on the current list of headers.
[ "fetchHeaderBlocks", "creates", "and", "sends", "a", "request", "to", "the", "syncPeer", "for", "the", "next", "list", "of", "blocks", "to", "be", "downloaded", "based", "on", "the", "current", "list", "of", "headers", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L837-L887
train
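The batching rule in fetchHeaderBlocks caps each getdata message at wire.MaxInvPerMsg inventory vectors. Below is a simplified sketch of just that batching step, without the have-inventory and witness checks; it is not the repository's implementation.

```go
package example

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

// batchGetData accumulates block inventory vectors into a single getdata
// message, stopping at wire.MaxInvPerMsg entries as fetchHeaderBlocks does.
func batchGetData(hashes []*chainhash.Hash) *wire.MsgGetData {
	gdmsg := wire.NewMsgGetDataSizeHint(uint(len(hashes)))
	for i, hash := range hashes {
		if i >= wire.MaxInvPerMsg {
			break
		}
		gdmsg.AddInvVect(wire.NewInvVect(wire.InvTypeBlock, hash))
	}
	return gdmsg
}
```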
btcsuite/btcd
netsync/manager.go
handleBlockchainNotification
func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Notification) { switch notification.Type { // A block has been accepted into the block chain. Relay it to other // peers. case blockchain.NTBlockAccepted: // Don't relay if we are not current. Other peers that are // current should already know about it. if !sm.current() { return } block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain accepted notification is not a block.") break } // Generate the inventory vector and relay it. iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) // A block has been connected to the main block chain. case blockchain.NTBlockConnected: block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain connected notification is not a block.") break } // Remove all of the transactions (except the coinbase) in the // connected block from the transaction pool. Secondly, remove any // transactions which are now double spends as a result of these // new transactions. Finally, remove any transaction that is // no longer an orphan. Transactions which depend on a confirmed // transaction are NOT removed recursively because they are still // valid. for _, tx := range block.Transactions()[1:] { sm.txMemPool.RemoveTransaction(tx, false) sm.txMemPool.RemoveDoubleSpends(tx) sm.txMemPool.RemoveOrphan(tx) sm.peerNotifier.TransactionConfirmed(tx) acceptedTxs := sm.txMemPool.ProcessOrphans(tx) sm.peerNotifier.AnnounceNewTransactions(acceptedTxs) } // Register block with the fee estimator, if it exists. if sm.feeEstimator != nil { err := sm.feeEstimator.RegisterBlock(block) // If an error is somehow generated then the fee estimator // has entered an invalid state. Since it doesn't know how // to recover, create a new one. if err != nil { sm.feeEstimator = mempool.NewFeeEstimator( mempool.DefaultEstimateFeeMaxRollback, mempool.DefaultEstimateFeeMinRegisteredBlocks) } } // A block has been disconnected from the main block chain. case blockchain.NTBlockDisconnected: block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain disconnected notification is not a block.") break } // Reinsert all of the transactions (except the coinbase) into // the transaction pool. for _, tx := range block.Transactions()[1:] { _, _, err := sm.txMemPool.MaybeAcceptTransaction(tx, false, false) if err != nil { // Remove the transaction and all transactions // that depend on it if it wasn't accepted into // the transaction pool. sm.txMemPool.RemoveTransaction(tx, true) } } // Rollback previous block recorded by the fee estimator. if sm.feeEstimator != nil { sm.feeEstimator.Rollback(block.Hash()) } } }
go
func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Notification) { switch notification.Type { // A block has been accepted into the block chain. Relay it to other // peers. case blockchain.NTBlockAccepted: // Don't relay if we are not current. Other peers that are // current should already know about it. if !sm.current() { return } block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain accepted notification is not a block.") break } // Generate the inventory vector and relay it. iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash()) sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header) // A block has been connected to the main block chain. case blockchain.NTBlockConnected: block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain connected notification is not a block.") break } // Remove all of the transactions (except the coinbase) in the // connected block from the transaction pool. Secondly, remove any // transactions which are now double spends as a result of these // new transactions. Finally, remove any transaction that is // no longer an orphan. Transactions which depend on a confirmed // transaction are NOT removed recursively because they are still // valid. for _, tx := range block.Transactions()[1:] { sm.txMemPool.RemoveTransaction(tx, false) sm.txMemPool.RemoveDoubleSpends(tx) sm.txMemPool.RemoveOrphan(tx) sm.peerNotifier.TransactionConfirmed(tx) acceptedTxs := sm.txMemPool.ProcessOrphans(tx) sm.peerNotifier.AnnounceNewTransactions(acceptedTxs) } // Register block with the fee estimator, if it exists. if sm.feeEstimator != nil { err := sm.feeEstimator.RegisterBlock(block) // If an error is somehow generated then the fee estimator // has entered an invalid state. Since it doesn't know how // to recover, create a new one. if err != nil { sm.feeEstimator = mempool.NewFeeEstimator( mempool.DefaultEstimateFeeMaxRollback, mempool.DefaultEstimateFeeMinRegisteredBlocks) } } // A block has been disconnected from the main block chain. case blockchain.NTBlockDisconnected: block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain disconnected notification is not a block.") break } // Reinsert all of the transactions (except the coinbase) into // the transaction pool. for _, tx := range block.Transactions()[1:] { _, _, err := sm.txMemPool.MaybeAcceptTransaction(tx, false, false) if err != nil { // Remove the transaction and all transactions // that depend on it if it wasn't accepted into // the transaction pool. sm.txMemPool.RemoveTransaction(tx, true) } } // Rollback previous block recorded by the fee estimator. if sm.feeEstimator != nil { sm.feeEstimator.Rollback(block.Hash()) } } }
[ "func", "(", "sm", "*", "SyncManager", ")", "handleBlockchainNotification", "(", "notification", "*", "blockchain", ".", "Notification", ")", "{", "switch", "notification", ".", "Type", "{", "// A block has been accepted into the block chain. Relay it to other", "// peers.", "case", "blockchain", ".", "NTBlockAccepted", ":", "// Don't relay if we are not current. Other peers that are", "// current should already know about it.", "if", "!", "sm", ".", "current", "(", ")", "{", "return", "\n", "}", "\n\n", "block", ",", "ok", ":=", "notification", ".", "Data", ".", "(", "*", "btcutil", ".", "Block", ")", "\n", "if", "!", "ok", "{", "log", ".", "Warnf", "(", "\"", "\"", ")", "\n", "break", "\n", "}", "\n\n", "// Generate the inventory vector and relay it.", "iv", ":=", "wire", ".", "NewInvVect", "(", "wire", ".", "InvTypeBlock", ",", "block", ".", "Hash", "(", ")", ")", "\n", "sm", ".", "peerNotifier", ".", "RelayInventory", "(", "iv", ",", "block", ".", "MsgBlock", "(", ")", ".", "Header", ")", "\n\n", "// A block has been connected to the main block chain.", "case", "blockchain", ".", "NTBlockConnected", ":", "block", ",", "ok", ":=", "notification", ".", "Data", ".", "(", "*", "btcutil", ".", "Block", ")", "\n", "if", "!", "ok", "{", "log", ".", "Warnf", "(", "\"", "\"", ")", "\n", "break", "\n", "}", "\n\n", "// Remove all of the transactions (except the coinbase) in the", "// connected block from the transaction pool. Secondly, remove any", "// transactions which are now double spends as a result of these", "// new transactions. Finally, remove any transaction that is", "// no longer an orphan. Transactions which depend on a confirmed", "// transaction are NOT removed recursively because they are still", "// valid.", "for", "_", ",", "tx", ":=", "range", "block", ".", "Transactions", "(", ")", "[", "1", ":", "]", "{", "sm", ".", "txMemPool", ".", "RemoveTransaction", "(", "tx", ",", "false", ")", "\n", "sm", ".", "txMemPool", ".", "RemoveDoubleSpends", "(", "tx", ")", "\n", "sm", ".", "txMemPool", ".", "RemoveOrphan", "(", "tx", ")", "\n", "sm", ".", "peerNotifier", ".", "TransactionConfirmed", "(", "tx", ")", "\n", "acceptedTxs", ":=", "sm", ".", "txMemPool", ".", "ProcessOrphans", "(", "tx", ")", "\n", "sm", ".", "peerNotifier", ".", "AnnounceNewTransactions", "(", "acceptedTxs", ")", "\n", "}", "\n\n", "// Register block with the fee estimator, if it exists.", "if", "sm", ".", "feeEstimator", "!=", "nil", "{", "err", ":=", "sm", ".", "feeEstimator", ".", "RegisterBlock", "(", "block", ")", "\n\n", "// If an error is somehow generated then the fee estimator", "// has entered an invalid state. 
Since it doesn't know how", "// to recover, create a new one.", "if", "err", "!=", "nil", "{", "sm", ".", "feeEstimator", "=", "mempool", ".", "NewFeeEstimator", "(", "mempool", ".", "DefaultEstimateFeeMaxRollback", ",", "mempool", ".", "DefaultEstimateFeeMinRegisteredBlocks", ")", "\n", "}", "\n", "}", "\n\n", "// A block has been disconnected from the main block chain.", "case", "blockchain", ".", "NTBlockDisconnected", ":", "block", ",", "ok", ":=", "notification", ".", "Data", ".", "(", "*", "btcutil", ".", "Block", ")", "\n", "if", "!", "ok", "{", "log", ".", "Warnf", "(", "\"", "\"", ")", "\n", "break", "\n", "}", "\n\n", "// Reinsert all of the transactions (except the coinbase) into", "// the transaction pool.", "for", "_", ",", "tx", ":=", "range", "block", ".", "Transactions", "(", ")", "[", "1", ":", "]", "{", "_", ",", "_", ",", "err", ":=", "sm", ".", "txMemPool", ".", "MaybeAcceptTransaction", "(", "tx", ",", "false", ",", "false", ")", "\n", "if", "err", "!=", "nil", "{", "// Remove the transaction and all transactions", "// that depend on it if it wasn't accepted into", "// the transaction pool.", "sm", ".", "txMemPool", ".", "RemoveTransaction", "(", "tx", ",", "true", ")", "\n", "}", "\n", "}", "\n\n", "// Rollback previous block recorded by the fee estimator.", "if", "sm", ".", "feeEstimator", "!=", "nil", "{", "sm", ".", "feeEstimator", ".", "Rollback", "(", "block", ".", "Hash", "(", ")", ")", "\n", "}", "\n", "}", "\n", "}" ]
// handleBlockchainNotification handles notifications from blockchain. It does // things such as request orphan block parents and relay accepted blocks to // connected peers.
[ "handleBlockchainNotification", "handles", "notifications", "from", "blockchain", ".", "It", "does", "things", "such", "as", "request", "orphan", "block", "parents", "and", "relay", "accepted", "blocks", "to", "connected", "peers", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1350-L1435
train
btcsuite/btcd
netsync/manager.go
NewPeer
func (sm *SyncManager) NewPeer(peer *peerpkg.Peer) { // Ignore if we are shutting down. if atomic.LoadInt32(&sm.shutdown) != 0 { return } sm.msgChan <- &newPeerMsg{peer: peer} }
go
func (sm *SyncManager) NewPeer(peer *peerpkg.Peer) { // Ignore if we are shutting down. if atomic.LoadInt32(&sm.shutdown) != 0 { return } sm.msgChan <- &newPeerMsg{peer: peer} }
[ "func", "(", "sm", "*", "SyncManager", ")", "NewPeer", "(", "peer", "*", "peerpkg", ".", "Peer", ")", "{", "// Ignore if we are shutting down.", "if", "atomic", ".", "LoadInt32", "(", "&", "sm", ".", "shutdown", ")", "!=", "0", "{", "return", "\n", "}", "\n", "sm", ".", "msgChan", "<-", "&", "newPeerMsg", "{", "peer", ":", "peer", "}", "\n", "}" ]
// NewPeer informs the sync manager of a newly active peer.
[ "NewPeer", "informs", "the", "sync", "manager", "of", "a", "newly", "active", "peer", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1438-L1444
train
btcsuite/btcd
netsync/manager.go
QueueTx
func (sm *SyncManager) QueueTx(tx *btcutil.Tx, peer *peerpkg.Peer, done chan struct{}) { // Don't accept more transactions if we're shutting down. if atomic.LoadInt32(&sm.shutdown) != 0 { done <- struct{}{} return } sm.msgChan <- &txMsg{tx: tx, peer: peer, reply: done} }
go
func (sm *SyncManager) QueueTx(tx *btcutil.Tx, peer *peerpkg.Peer, done chan struct{}) { // Don't accept more transactions if we're shutting down. if atomic.LoadInt32(&sm.shutdown) != 0 { done <- struct{}{} return } sm.msgChan <- &txMsg{tx: tx, peer: peer, reply: done} }
[ "func", "(", "sm", "*", "SyncManager", ")", "QueueTx", "(", "tx", "*", "btcutil", ".", "Tx", ",", "peer", "*", "peerpkg", ".", "Peer", ",", "done", "chan", "struct", "{", "}", ")", "{", "// Don't accept more transactions if we're shutting down.", "if", "atomic", ".", "LoadInt32", "(", "&", "sm", ".", "shutdown", ")", "!=", "0", "{", "done", "<-", "struct", "{", "}", "{", "}", "\n", "return", "\n", "}", "\n\n", "sm", ".", "msgChan", "<-", "&", "txMsg", "{", "tx", ":", "tx", ",", "peer", ":", "peer", ",", "reply", ":", "done", "}", "\n", "}" ]
// QueueTx adds the passed transaction message and peer to the block handling // queue. Responds to the done channel argument after the tx message is // processed.
[ "QueueTx", "adds", "the", "passed", "transaction", "message", "and", "peer", "to", "the", "block", "handling", "queue", ".", "Responds", "to", "the", "done", "channel", "argument", "after", "the", "tx", "message", "is", "processed", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1449-L1457
train
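QueueTx (and QueueBlock in the next record) hands a message to the sync manager and reports completion on the caller-supplied done channel. A usage sketch follows; the helper name is illustrative and the import paths are assumed for this era of the repository.

```go
package example

import (
	"github.com/btcsuite/btcd/netsync"
	peerpkg "github.com/btcsuite/btcd/peer"
	"github.com/btcsuite/btcutil"
)

// queueTxAndWait shows the done-channel contract documented for QueueTx: the
// manager signals on done once the tx message has been fully processed (or
// immediately if it is shutting down), so the caller can block until then.
func queueTxAndWait(sm *netsync.SyncManager, tx *btcutil.Tx, p *peerpkg.Peer) {
	done := make(chan struct{}, 1) // buffered so the signal never blocks the manager
	sm.QueueTx(tx, p, done)
	<-done
}
```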
btcsuite/btcd
netsync/manager.go
QueueBlock
func (sm *SyncManager) QueueBlock(block *btcutil.Block, peer *peerpkg.Peer, done chan struct{}) { // Don't accept more blocks if we're shutting down. if atomic.LoadInt32(&sm.shutdown) != 0 { done <- struct{}{} return } sm.msgChan <- &blockMsg{block: block, peer: peer, reply: done} }
go
func (sm *SyncManager) QueueBlock(block *btcutil.Block, peer *peerpkg.Peer, done chan struct{}) { // Don't accept more blocks if we're shutting down. if atomic.LoadInt32(&sm.shutdown) != 0 { done <- struct{}{} return } sm.msgChan <- &blockMsg{block: block, peer: peer, reply: done} }
[ "func", "(", "sm", "*", "SyncManager", ")", "QueueBlock", "(", "block", "*", "btcutil", ".", "Block", ",", "peer", "*", "peerpkg", ".", "Peer", ",", "done", "chan", "struct", "{", "}", ")", "{", "// Don't accept more blocks if we're shutting down.", "if", "atomic", ".", "LoadInt32", "(", "&", "sm", ".", "shutdown", ")", "!=", "0", "{", "done", "<-", "struct", "{", "}", "{", "}", "\n", "return", "\n", "}", "\n\n", "sm", ".", "msgChan", "<-", "&", "blockMsg", "{", "block", ":", "block", ",", "peer", ":", "peer", ",", "reply", ":", "done", "}", "\n", "}" ]
// QueueBlock adds the passed block message and peer to the block handling // queue. Responds to the done channel argument after the block message is // processed.
[ "QueueBlock", "adds", "the", "passed", "block", "message", "and", "peer", "to", "the", "block", "handling", "queue", ".", "Responds", "to", "the", "done", "channel", "argument", "after", "the", "block", "message", "is", "processed", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1462-L1470
train
btcsuite/btcd
netsync/manager.go
Stop
func (sm *SyncManager) Stop() error { if atomic.AddInt32(&sm.shutdown, 1) != 1 { log.Warnf("Sync manager is already in the process of " + "shutting down") return nil } log.Infof("Sync manager shutting down") close(sm.quit) sm.wg.Wait() return nil }
go
func (sm *SyncManager) Stop() error { if atomic.AddInt32(&sm.shutdown, 1) != 1 { log.Warnf("Sync manager is already in the process of " + "shutting down") return nil } log.Infof("Sync manager shutting down") close(sm.quit) sm.wg.Wait() return nil }
[ "func", "(", "sm", "*", "SyncManager", ")", "Stop", "(", ")", "error", "{", "if", "atomic", ".", "AddInt32", "(", "&", "sm", ".", "shutdown", ",", "1", ")", "!=", "1", "{", "log", ".", "Warnf", "(", "\"", "\"", "+", "\"", "\"", ")", "\n", "return", "nil", "\n", "}", "\n\n", "log", ".", "Infof", "(", "\"", "\"", ")", "\n", "close", "(", "sm", ".", "quit", ")", "\n", "sm", ".", "wg", ".", "Wait", "(", ")", "\n", "return", "nil", "\n", "}" ]
// Stop gracefully shuts down the sync manager by stopping all asynchronous // handlers and waiting for them to finish.
[ "Stop", "gracefully", "shuts", "down", "the", "sync", "manager", "by", "stopping", "all", "asynchronous", "handlers", "and", "waiting", "for", "them", "to", "finish", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1519-L1530
train
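Stop pairs with the Start method referenced in the New constructor further down ("Use Start to begin processing"). The sketch below is a hypothetical lifecycle wrapper under that assumption; because Stop only warns and returns nil on repeated calls, it is safe to invoke from more than one shutdown path.

package example

import "github.com/btcsuite/btcd/netsync"

// runSyncManager is a hypothetical sketch of the start/stop lifecycle:
// Start launches the asynchronous handlers and Stop closes the quit
// channel and waits for them to finish.
func runSyncManager(sm *netsync.SyncManager) error {
	sm.Start()

	// ... serve until the rest of the node decides to shut down ...

	return sm.Stop()
}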
btcsuite/btcd
netsync/manager.go
SyncPeerID
func (sm *SyncManager) SyncPeerID() int32 { reply := make(chan int32) sm.msgChan <- getSyncPeerMsg{reply: reply} return <-reply }
go
func (sm *SyncManager) SyncPeerID() int32 { reply := make(chan int32) sm.msgChan <- getSyncPeerMsg{reply: reply} return <-reply }
[ "func", "(", "sm", "*", "SyncManager", ")", "SyncPeerID", "(", ")", "int32", "{", "reply", ":=", "make", "(", "chan", "int32", ")", "\n", "sm", ".", "msgChan", "<-", "getSyncPeerMsg", "{", "reply", ":", "reply", "}", "\n", "return", "<-", "reply", "\n", "}" ]
// SyncPeerID returns the ID of the current sync peer, or 0 if there is none.
[ "SyncPeerID", "returns", "the", "ID", "of", "the", "current", "sync", "peer", "or", "0", "if", "there", "is", "none", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1533-L1537
train
btcsuite/btcd
netsync/manager.go
ProcessBlock
func (sm *SyncManager) ProcessBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) { reply := make(chan processBlockResponse, 1) sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply} response := <-reply return response.isOrphan, response.err }
go
func (sm *SyncManager) ProcessBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) { reply := make(chan processBlockResponse, 1) sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply} response := <-reply return response.isOrphan, response.err }
[ "func", "(", "sm", "*", "SyncManager", ")", "ProcessBlock", "(", "block", "*", "btcutil", ".", "Block", ",", "flags", "blockchain", ".", "BehaviorFlags", ")", "(", "bool", ",", "error", ")", "{", "reply", ":=", "make", "(", "chan", "processBlockResponse", ",", "1", ")", "\n", "sm", ".", "msgChan", "<-", "processBlockMsg", "{", "block", ":", "block", ",", "flags", ":", "flags", ",", "reply", ":", "reply", "}", "\n", "response", ":=", "<-", "reply", "\n", "return", "response", ".", "isOrphan", ",", "response", ".", "err", "\n", "}" ]
// ProcessBlock makes use of ProcessBlock on an internal instance of a block // chain.
[ "ProcessBlock", "makes", "use", "of", "ProcessBlock", "on", "an", "internal", "instance", "of", "a", "block", "chain", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1541-L1546
train
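ProcessBlock is the synchronous entry point (for example for blocks submitted over RPC rather than received from a peer): it round-trips through the internal message channel and reports whether the block turned out to be an orphan. A hedged caller sketch follows; blockchain.BFNone is assumed as the "no special behaviour" flag value, and the helper name is hypothetical.

package example

import (
	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/netsync"
	"github.com/btcsuite/btcutil"
)

// submitBlock is a hypothetical sketch of driving ProcessBlock directly.
func submitBlock(sm *netsync.SyncManager, block *btcutil.Block) error {
	isOrphan, err := sm.ProcessBlock(block, blockchain.BFNone)
	if err != nil {
		return err
	}
	if isOrphan {
		// The block's parent is unknown; a caller would typically
		// fetch the missing ancestors before retrying.
	}
	return nil
}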
btcsuite/btcd
netsync/manager.go
IsCurrent
func (sm *SyncManager) IsCurrent() bool { reply := make(chan bool) sm.msgChan <- isCurrentMsg{reply: reply} return <-reply }
go
func (sm *SyncManager) IsCurrent() bool { reply := make(chan bool) sm.msgChan <- isCurrentMsg{reply: reply} return <-reply }
[ "func", "(", "sm", "*", "SyncManager", ")", "IsCurrent", "(", ")", "bool", "{", "reply", ":=", "make", "(", "chan", "bool", ")", "\n", "sm", ".", "msgChan", "<-", "isCurrentMsg", "{", "reply", ":", "reply", "}", "\n", "return", "<-", "reply", "\n", "}" ]
// IsCurrent returns whether or not the sync manager believes it is synced with // the connected peers.
[ "IsCurrent", "returns", "whether", "or", "not", "the", "sync", "manager", "believes", "it", "is", "synced", "with", "the", "connected", "peers", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1550-L1554
train
btcsuite/btcd
netsync/manager.go
Pause
func (sm *SyncManager) Pause() chan<- struct{} { c := make(chan struct{}) sm.msgChan <- pauseMsg{c} return c }
go
func (sm *SyncManager) Pause() chan<- struct{} { c := make(chan struct{}) sm.msgChan <- pauseMsg{c} return c }
[ "func", "(", "sm", "*", "SyncManager", ")", "Pause", "(", ")", "chan", "<-", "struct", "{", "}", "{", "c", ":=", "make", "(", "chan", "struct", "{", "}", ")", "\n", "sm", ".", "msgChan", "<-", "pauseMsg", "{", "c", "}", "\n", "return", "c", "\n", "}" ]
// Pause pauses the sync manager until the returned channel is closed. // // Note that while paused, all peer and block processing is halted. The // message sender should avoid pausing the sync manager for long durations.
[ "Pause", "pauses", "the", "sync", "manager", "until", "the", "returned", "channel", "is", "closed", ".", "Note", "that", "while", "paused", "all", "peer", "and", "block", "processing", "is", "halted", ".", "The", "message", "sender", "should", "avoid", "pausing", "the", "sync", "manager", "for", "long", "durations", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1560-L1564
train
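Pause returns a send-only channel and processing stays halted until that channel is closed, so the caller owns the resume signal. A hypothetical wrapper that keeps the paused window short could look like this:

package example

import "github.com/btcsuite/btcd/netsync"

// withSyncPaused is a hypothetical sketch of the Pause contract: the
// manager resumes only once the returned channel is closed, so fn should
// stay short.
func withSyncPaused(sm *netsync.SyncManager, fn func()) {
	resume := sm.Pause()
	defer close(resume) // closing the send-only channel resumes the manager

	fn()
}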
btcsuite/btcd
netsync/manager.go
New
func New(config *Config) (*SyncManager, error) { sm := SyncManager{ peerNotifier: config.PeerNotifier, chain: config.Chain, txMemPool: config.TxMemPool, chainParams: config.ChainParams, rejectedTxns: make(map[chainhash.Hash]struct{}), requestedTxns: make(map[chainhash.Hash]struct{}), requestedBlocks: make(map[chainhash.Hash]struct{}), peerStates: make(map[*peerpkg.Peer]*peerSyncState), progressLogger: newBlockProgressLogger("Processed", log), msgChan: make(chan interface{}, config.MaxPeers*3), headerList: list.New(), quit: make(chan struct{}), feeEstimator: config.FeeEstimator, } best := sm.chain.BestSnapshot() if !config.DisableCheckpoints { // Initialize the next checkpoint based on the current height. sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height) if sm.nextCheckpoint != nil { sm.resetHeaderState(&best.Hash, best.Height) } } else { log.Info("Checkpoints are disabled") } sm.chain.Subscribe(sm.handleBlockchainNotification) return &sm, nil }
go
func New(config *Config) (*SyncManager, error) { sm := SyncManager{ peerNotifier: config.PeerNotifier, chain: config.Chain, txMemPool: config.TxMemPool, chainParams: config.ChainParams, rejectedTxns: make(map[chainhash.Hash]struct{}), requestedTxns: make(map[chainhash.Hash]struct{}), requestedBlocks: make(map[chainhash.Hash]struct{}), peerStates: make(map[*peerpkg.Peer]*peerSyncState), progressLogger: newBlockProgressLogger("Processed", log), msgChan: make(chan interface{}, config.MaxPeers*3), headerList: list.New(), quit: make(chan struct{}), feeEstimator: config.FeeEstimator, } best := sm.chain.BestSnapshot() if !config.DisableCheckpoints { // Initialize the next checkpoint based on the current height. sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height) if sm.nextCheckpoint != nil { sm.resetHeaderState(&best.Hash, best.Height) } } else { log.Info("Checkpoints are disabled") } sm.chain.Subscribe(sm.handleBlockchainNotification) return &sm, nil }
[ "func", "New", "(", "config", "*", "Config", ")", "(", "*", "SyncManager", ",", "error", ")", "{", "sm", ":=", "SyncManager", "{", "peerNotifier", ":", "config", ".", "PeerNotifier", ",", "chain", ":", "config", ".", "Chain", ",", "txMemPool", ":", "config", ".", "TxMemPool", ",", "chainParams", ":", "config", ".", "ChainParams", ",", "rejectedTxns", ":", "make", "(", "map", "[", "chainhash", ".", "Hash", "]", "struct", "{", "}", ")", ",", "requestedTxns", ":", "make", "(", "map", "[", "chainhash", ".", "Hash", "]", "struct", "{", "}", ")", ",", "requestedBlocks", ":", "make", "(", "map", "[", "chainhash", ".", "Hash", "]", "struct", "{", "}", ")", ",", "peerStates", ":", "make", "(", "map", "[", "*", "peerpkg", ".", "Peer", "]", "*", "peerSyncState", ")", ",", "progressLogger", ":", "newBlockProgressLogger", "(", "\"", "\"", ",", "log", ")", ",", "msgChan", ":", "make", "(", "chan", "interface", "{", "}", ",", "config", ".", "MaxPeers", "*", "3", ")", ",", "headerList", ":", "list", ".", "New", "(", ")", ",", "quit", ":", "make", "(", "chan", "struct", "{", "}", ")", ",", "feeEstimator", ":", "config", ".", "FeeEstimator", ",", "}", "\n\n", "best", ":=", "sm", ".", "chain", ".", "BestSnapshot", "(", ")", "\n", "if", "!", "config", ".", "DisableCheckpoints", "{", "// Initialize the next checkpoint based on the current height.", "sm", ".", "nextCheckpoint", "=", "sm", ".", "findNextHeaderCheckpoint", "(", "best", ".", "Height", ")", "\n", "if", "sm", ".", "nextCheckpoint", "!=", "nil", "{", "sm", ".", "resetHeaderState", "(", "&", "best", ".", "Hash", ",", "best", ".", "Height", ")", "\n", "}", "\n", "}", "else", "{", "log", ".", "Info", "(", "\"", "\"", ")", "\n", "}", "\n\n", "sm", ".", "chain", ".", "Subscribe", "(", "sm", ".", "handleBlockchainNotification", ")", "\n\n", "return", "&", "sm", ",", "nil", "\n", "}" ]
// New constructs a new SyncManager. Use Start to begin processing asynchronous // block, tx, and inv updates.
[ "New", "constructs", "a", "new", "SyncManager", ".", "Use", "Start", "to", "begin", "processing", "asynchronous", "block", "tx", "and", "inv", "updates", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/netsync/manager.go#L1568-L1599
train
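New only reads the Config fields shown above, so wiring it up mostly means passing in components the node already owns. The sketch below is a hypothetical assembly under the assumption that Chain, TxMemPool and FeeEstimator carry the usual btcd types (*blockchain.BlockChain, *mempool.TxPool, *mempool.FeeEstimator) and that PeerNotifier is the netsync.PeerNotifier interface; the MaxPeers value is purely illustrative.

package example

import (
	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/mempool"
	"github.com/btcsuite/btcd/netsync"
)

// newSyncManager is a hypothetical sketch of wiring up netsync.New; the
// notifier, chain, mempool and fee estimator are assumed to be built
// elsewhere by the node.
func newSyncManager(notifier netsync.PeerNotifier, chain *blockchain.BlockChain,
	txPool *mempool.TxPool, feeEst *mempool.FeeEstimator) (*netsync.SyncManager, error) {

	return netsync.New(&netsync.Config{
		PeerNotifier: notifier,
		Chain:        chain,
		TxMemPool:    txPool,
		ChainParams:  &chaincfg.MainNetParams,
		MaxPeers:     125, // illustrative value only
		FeeEstimator: feeEst,
	})
}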
btcsuite/btcd
blockchain/upgrade.go
migrateBlockIndex
func migrateBlockIndex(db database.DB) error { // Hardcoded bucket names so updates to the global values do not affect // old upgrades. v1BucketName := []byte("ffldb-blockidx") v2BucketName := []byte("blockheaderidx") err := db.Update(func(dbTx database.Tx) error { v1BlockIdxBucket := dbTx.Metadata().Bucket(v1BucketName) if v1BlockIdxBucket == nil { return fmt.Errorf("Bucket %s does not exist", v1BucketName) } log.Info("Re-indexing block information in the database. This might take a while...") v2BlockIdxBucket, err := dbTx.Metadata().CreateBucketIfNotExists(v2BucketName) if err != nil { return err } // Get tip of the main chain. serializedData := dbTx.Metadata().Get(chainStateKeyName) state, err := deserializeBestChainState(serializedData) if err != nil { return err } tip := &state.hash // Scan the old block index bucket and construct a mapping of each block // to parent block and all child blocks. blocksMap, err := readBlockTree(v1BlockIdxBucket) if err != nil { return err } // Use the block graph to calculate the height of each block. err = determineBlockHeights(blocksMap) if err != nil { return err } // Find blocks on the main chain with the block graph and current tip. determineMainChainBlocks(blocksMap, tip) // Now that we have heights for all blocks, scan the old block index // bucket and insert all rows into the new one. return v1BlockIdxBucket.ForEach(func(hashBytes, blockRow []byte) error { endOffset := blockHdrOffset + blockHdrSize headerBytes := blockRow[blockHdrOffset:endOffset:endOffset] var hash chainhash.Hash copy(hash[:], hashBytes[0:chainhash.HashSize]) chainContext := blocksMap[hash] if chainContext.height == -1 { return fmt.Errorf("Unable to calculate chain height for "+ "stored block %s", hash) } // Mark blocks as valid if they are part of the main chain. status := statusDataStored if chainContext.mainChain { status |= statusValid } // Write header to v2 bucket value := make([]byte, blockHdrSize+1) copy(value[0:blockHdrSize], headerBytes) value[blockHdrSize] = byte(status) key := blockIndexKey(&hash, uint32(chainContext.height)) err := v2BlockIdxBucket.Put(key, value) if err != nil { return err } // Delete header from v1 bucket truncatedRow := blockRow[0:blockHdrOffset:blockHdrOffset] return v1BlockIdxBucket.Put(hashBytes, truncatedRow) }) }) if err != nil { return err } log.Infof("Block database migration complete") return nil }
go
func migrateBlockIndex(db database.DB) error { // Hardcoded bucket names so updates to the global values do not affect // old upgrades. v1BucketName := []byte("ffldb-blockidx") v2BucketName := []byte("blockheaderidx") err := db.Update(func(dbTx database.Tx) error { v1BlockIdxBucket := dbTx.Metadata().Bucket(v1BucketName) if v1BlockIdxBucket == nil { return fmt.Errorf("Bucket %s does not exist", v1BucketName) } log.Info("Re-indexing block information in the database. This might take a while...") v2BlockIdxBucket, err := dbTx.Metadata().CreateBucketIfNotExists(v2BucketName) if err != nil { return err } // Get tip of the main chain. serializedData := dbTx.Metadata().Get(chainStateKeyName) state, err := deserializeBestChainState(serializedData) if err != nil { return err } tip := &state.hash // Scan the old block index bucket and construct a mapping of each block // to parent block and all child blocks. blocksMap, err := readBlockTree(v1BlockIdxBucket) if err != nil { return err } // Use the block graph to calculate the height of each block. err = determineBlockHeights(blocksMap) if err != nil { return err } // Find blocks on the main chain with the block graph and current tip. determineMainChainBlocks(blocksMap, tip) // Now that we have heights for all blocks, scan the old block index // bucket and insert all rows into the new one. return v1BlockIdxBucket.ForEach(func(hashBytes, blockRow []byte) error { endOffset := blockHdrOffset + blockHdrSize headerBytes := blockRow[blockHdrOffset:endOffset:endOffset] var hash chainhash.Hash copy(hash[:], hashBytes[0:chainhash.HashSize]) chainContext := blocksMap[hash] if chainContext.height == -1 { return fmt.Errorf("Unable to calculate chain height for "+ "stored block %s", hash) } // Mark blocks as valid if they are part of the main chain. status := statusDataStored if chainContext.mainChain { status |= statusValid } // Write header to v2 bucket value := make([]byte, blockHdrSize+1) copy(value[0:blockHdrSize], headerBytes) value[blockHdrSize] = byte(status) key := blockIndexKey(&hash, uint32(chainContext.height)) err := v2BlockIdxBucket.Put(key, value) if err != nil { return err } // Delete header from v1 bucket truncatedRow := blockRow[0:blockHdrOffset:blockHdrOffset] return v1BlockIdxBucket.Put(hashBytes, truncatedRow) }) }) if err != nil { return err } log.Infof("Block database migration complete") return nil }
[ "func", "migrateBlockIndex", "(", "db", "database", ".", "DB", ")", "error", "{", "// Hardcoded bucket names so updates to the global values do not affect", "// old upgrades.", "v1BucketName", ":=", "[", "]", "byte", "(", "\"", "\"", ")", "\n", "v2BucketName", ":=", "[", "]", "byte", "(", "\"", "\"", ")", "\n\n", "err", ":=", "db", ".", "Update", "(", "func", "(", "dbTx", "database", ".", "Tx", ")", "error", "{", "v1BlockIdxBucket", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Bucket", "(", "v1BucketName", ")", "\n", "if", "v1BlockIdxBucket", "==", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "v1BucketName", ")", "\n", "}", "\n\n", "log", ".", "Info", "(", "\"", "\"", ")", "\n\n", "v2BlockIdxBucket", ",", "err", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "CreateBucketIfNotExists", "(", "v2BucketName", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Get tip of the main chain.", "serializedData", ":=", "dbTx", ".", "Metadata", "(", ")", ".", "Get", "(", "chainStateKeyName", ")", "\n", "state", ",", "err", ":=", "deserializeBestChainState", "(", "serializedData", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "tip", ":=", "&", "state", ".", "hash", "\n\n", "// Scan the old block index bucket and construct a mapping of each block", "// to parent block and all child blocks.", "blocksMap", ",", "err", ":=", "readBlockTree", "(", "v1BlockIdxBucket", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Use the block graph to calculate the height of each block.", "err", "=", "determineBlockHeights", "(", "blocksMap", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Find blocks on the main chain with the block graph and current tip.", "determineMainChainBlocks", "(", "blocksMap", ",", "tip", ")", "\n\n", "// Now that we have heights for all blocks, scan the old block index", "// bucket and insert all rows into the new one.", "return", "v1BlockIdxBucket", ".", "ForEach", "(", "func", "(", "hashBytes", ",", "blockRow", "[", "]", "byte", ")", "error", "{", "endOffset", ":=", "blockHdrOffset", "+", "blockHdrSize", "\n", "headerBytes", ":=", "blockRow", "[", "blockHdrOffset", ":", "endOffset", ":", "endOffset", "]", "\n\n", "var", "hash", "chainhash", ".", "Hash", "\n", "copy", "(", "hash", "[", ":", "]", ",", "hashBytes", "[", "0", ":", "chainhash", ".", "HashSize", "]", ")", "\n", "chainContext", ":=", "blocksMap", "[", "hash", "]", "\n\n", "if", "chainContext", ".", "height", "==", "-", "1", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", "+", "\"", "\"", ",", "hash", ")", "\n", "}", "\n\n", "// Mark blocks as valid if they are part of the main chain.", "status", ":=", "statusDataStored", "\n", "if", "chainContext", ".", "mainChain", "{", "status", "|=", "statusValid", "\n", "}", "\n\n", "// Write header to v2 bucket", "value", ":=", "make", "(", "[", "]", "byte", ",", "blockHdrSize", "+", "1", ")", "\n", "copy", "(", "value", "[", "0", ":", "blockHdrSize", "]", ",", "headerBytes", ")", "\n", "value", "[", "blockHdrSize", "]", "=", "byte", "(", "status", ")", "\n\n", "key", ":=", "blockIndexKey", "(", "&", "hash", ",", "uint32", "(", "chainContext", ".", "height", ")", ")", "\n", "err", ":=", "v2BlockIdxBucket", ".", "Put", "(", "key", ",", "value", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Delete header from v1 bucket", "truncatedRow", ":=", "blockRow", "[", "0", ":", "blockHdrOffset", ":", 
"blockHdrOffset", "]", "\n", "return", "v1BlockIdxBucket", ".", "Put", "(", "hashBytes", ",", "truncatedRow", ")", "\n", "}", ")", "\n", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "log", ".", "Infof", "(", "\"", "\"", ")", "\n", "return", "nil", "\n", "}" ]
// migrateBlockIndex migrates all block entries from the v1 block index bucket // to the v2 bucket. The v1 bucket stores all block entries keyed by block hash, // whereas the v2 bucket stores the exact same values, but keyed instead by // block height + hash.
[ "migrateBlockIndex", "migrates", "all", "block", "entries", "from", "the", "v1", "block", "index", "bucket", "to", "the", "v2", "bucket", ".", "The", "v1", "bucket", "stores", "all", "block", "entries", "keyed", "by", "block", "hash", "whereas", "the", "v2", "bucket", "stores", "the", "exact", "same", "values", "but", "keyed", "instead", "by", "block", "height", "+", "hash", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/upgrade.go#L59-L146
train
btcsuite/btcd
blockchain/upgrade.go
readBlockTree
func readBlockTree(v1BlockIdxBucket database.Bucket) (map[chainhash.Hash]*blockChainContext, error) { blocksMap := make(map[chainhash.Hash]*blockChainContext) err := v1BlockIdxBucket.ForEach(func(_, blockRow []byte) error { var header wire.BlockHeader endOffset := blockHdrOffset + blockHdrSize headerBytes := blockRow[blockHdrOffset:endOffset:endOffset] err := header.Deserialize(bytes.NewReader(headerBytes)) if err != nil { return err } blockHash := header.BlockHash() prevHash := header.PrevBlock if blocksMap[blockHash] == nil { blocksMap[blockHash] = &blockChainContext{height: -1} } if blocksMap[prevHash] == nil { blocksMap[prevHash] = &blockChainContext{height: -1} } blocksMap[blockHash].parent = &prevHash blocksMap[prevHash].children = append(blocksMap[prevHash].children, &blockHash) return nil }) return blocksMap, err }
go
func readBlockTree(v1BlockIdxBucket database.Bucket) (map[chainhash.Hash]*blockChainContext, error) { blocksMap := make(map[chainhash.Hash]*blockChainContext) err := v1BlockIdxBucket.ForEach(func(_, blockRow []byte) error { var header wire.BlockHeader endOffset := blockHdrOffset + blockHdrSize headerBytes := blockRow[blockHdrOffset:endOffset:endOffset] err := header.Deserialize(bytes.NewReader(headerBytes)) if err != nil { return err } blockHash := header.BlockHash() prevHash := header.PrevBlock if blocksMap[blockHash] == nil { blocksMap[blockHash] = &blockChainContext{height: -1} } if blocksMap[prevHash] == nil { blocksMap[prevHash] = &blockChainContext{height: -1} } blocksMap[blockHash].parent = &prevHash blocksMap[prevHash].children = append(blocksMap[prevHash].children, &blockHash) return nil }) return blocksMap, err }
[ "func", "readBlockTree", "(", "v1BlockIdxBucket", "database", ".", "Bucket", ")", "(", "map", "[", "chainhash", ".", "Hash", "]", "*", "blockChainContext", ",", "error", ")", "{", "blocksMap", ":=", "make", "(", "map", "[", "chainhash", ".", "Hash", "]", "*", "blockChainContext", ")", "\n", "err", ":=", "v1BlockIdxBucket", ".", "ForEach", "(", "func", "(", "_", ",", "blockRow", "[", "]", "byte", ")", "error", "{", "var", "header", "wire", ".", "BlockHeader", "\n", "endOffset", ":=", "blockHdrOffset", "+", "blockHdrSize", "\n", "headerBytes", ":=", "blockRow", "[", "blockHdrOffset", ":", "endOffset", ":", "endOffset", "]", "\n", "err", ":=", "header", ".", "Deserialize", "(", "bytes", ".", "NewReader", "(", "headerBytes", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "blockHash", ":=", "header", ".", "BlockHash", "(", ")", "\n", "prevHash", ":=", "header", ".", "PrevBlock", "\n\n", "if", "blocksMap", "[", "blockHash", "]", "==", "nil", "{", "blocksMap", "[", "blockHash", "]", "=", "&", "blockChainContext", "{", "height", ":", "-", "1", "}", "\n", "}", "\n", "if", "blocksMap", "[", "prevHash", "]", "==", "nil", "{", "blocksMap", "[", "prevHash", "]", "=", "&", "blockChainContext", "{", "height", ":", "-", "1", "}", "\n", "}", "\n\n", "blocksMap", "[", "blockHash", "]", ".", "parent", "=", "&", "prevHash", "\n", "blocksMap", "[", "prevHash", "]", ".", "children", "=", "append", "(", "blocksMap", "[", "prevHash", "]", ".", "children", ",", "&", "blockHash", ")", "\n", "return", "nil", "\n", "}", ")", "\n", "return", "blocksMap", ",", "err", "\n", "}" ]
// readBlockTree reads the old block index bucket and constructs a mapping of // each block to its parent block and all child blocks. This mapping represents // the full tree of blocks. This function does not populate the height or // mainChain fields of the returned blockChainContext values.
[ "readBlockTree", "reads", "the", "old", "block", "index", "bucket", "and", "constructs", "a", "mapping", "of", "each", "block", "to", "its", "parent", "block", "and", "all", "child", "blocks", ".", "This", "mapping", "represents", "the", "full", "tree", "of", "blocks", ".", "This", "function", "does", "not", "populate", "the", "height", "or", "mainChain", "fields", "of", "the", "returned", "blockChainContext", "values", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/upgrade.go#L152-L179
train
btcsuite/btcd
blockchain/upgrade.go
determineBlockHeights
func determineBlockHeights(blocksMap map[chainhash.Hash]*blockChainContext) error { queue := list.New() // The genesis block is included in blocksMap as a child of the zero hash // because that is the value of the PrevBlock field in the genesis header. preGenesisContext, exists := blocksMap[zeroHash] if !exists || len(preGenesisContext.children) == 0 { return fmt.Errorf("Unable to find genesis block") } for _, genesisHash := range preGenesisContext.children { blocksMap[*genesisHash].height = 0 queue.PushBack(genesisHash) } for e := queue.Front(); e != nil; e = queue.Front() { queue.Remove(e) hash := e.Value.(*chainhash.Hash) height := blocksMap[*hash].height // For each block with this one as a parent, assign it a height and // push to queue for future processing. for _, childHash := range blocksMap[*hash].children { blocksMap[*childHash].height = height + 1 queue.PushBack(childHash) } } return nil }
go
func determineBlockHeights(blocksMap map[chainhash.Hash]*blockChainContext) error { queue := list.New() // The genesis block is included in blocksMap as a child of the zero hash // because that is the value of the PrevBlock field in the genesis header. preGenesisContext, exists := blocksMap[zeroHash] if !exists || len(preGenesisContext.children) == 0 { return fmt.Errorf("Unable to find genesis block") } for _, genesisHash := range preGenesisContext.children { blocksMap[*genesisHash].height = 0 queue.PushBack(genesisHash) } for e := queue.Front(); e != nil; e = queue.Front() { queue.Remove(e) hash := e.Value.(*chainhash.Hash) height := blocksMap[*hash].height // For each block with this one as a parent, assign it a height and // push to queue for future processing. for _, childHash := range blocksMap[*hash].children { blocksMap[*childHash].height = height + 1 queue.PushBack(childHash) } } return nil }
[ "func", "determineBlockHeights", "(", "blocksMap", "map", "[", "chainhash", ".", "Hash", "]", "*", "blockChainContext", ")", "error", "{", "queue", ":=", "list", ".", "New", "(", ")", "\n\n", "// The genesis block is included in blocksMap as a child of the zero hash", "// because that is the value of the PrevBlock field in the genesis header.", "preGenesisContext", ",", "exists", ":=", "blocksMap", "[", "zeroHash", "]", "\n", "if", "!", "exists", "||", "len", "(", "preGenesisContext", ".", "children", ")", "==", "0", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ")", "\n", "}", "\n\n", "for", "_", ",", "genesisHash", ":=", "range", "preGenesisContext", ".", "children", "{", "blocksMap", "[", "*", "genesisHash", "]", ".", "height", "=", "0", "\n", "queue", ".", "PushBack", "(", "genesisHash", ")", "\n", "}", "\n\n", "for", "e", ":=", "queue", ".", "Front", "(", ")", ";", "e", "!=", "nil", ";", "e", "=", "queue", ".", "Front", "(", ")", "{", "queue", ".", "Remove", "(", "e", ")", "\n", "hash", ":=", "e", ".", "Value", ".", "(", "*", "chainhash", ".", "Hash", ")", "\n", "height", ":=", "blocksMap", "[", "*", "hash", "]", ".", "height", "\n\n", "// For each block with this one as a parent, assign it a height and", "// push to queue for future processing.", "for", "_", ",", "childHash", ":=", "range", "blocksMap", "[", "*", "hash", "]", ".", "children", "{", "blocksMap", "[", "*", "childHash", "]", ".", "height", "=", "height", "+", "1", "\n", "queue", ".", "PushBack", "(", "childHash", ")", "\n", "}", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// determineBlockHeights takes a map of block hashes to a slice of child hashes // and uses it to compute the height for each block. The function assigns a // height of 0 to the genesis hash and explores the tree of blocks // breadth-first, assigning a height to every block with a path back to the // genesis block. This function modifies the height field on the blocksMap // entries.
[ "determineBlockHeights", "takes", "a", "map", "of", "block", "hashes", "to", "a", "slice", "of", "child", "hashes", "and", "uses", "it", "to", "compute", "the", "height", "for", "each", "block", ".", "The", "function", "assigns", "a", "height", "of", "0", "to", "the", "genesis", "hash", "and", "explores", "the", "tree", "of", "blocks", "breadth", "-", "first", "assigning", "a", "height", "to", "every", "block", "with", "a", "path", "back", "to", "the", "genesis", "block", ".", "This", "function", "modifies", "the", "height", "field", "on", "the", "blocksMap", "entries", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/upgrade.go#L187-L216
train
btcsuite/btcd
blockchain/upgrade.go
determineMainChainBlocks
func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, tip *chainhash.Hash) { for nextHash := tip; *nextHash != zeroHash; nextHash = blocksMap[*nextHash].parent { blocksMap[*nextHash].mainChain = true } }
go
func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, tip *chainhash.Hash) { for nextHash := tip; *nextHash != zeroHash; nextHash = blocksMap[*nextHash].parent { blocksMap[*nextHash].mainChain = true } }
[ "func", "determineMainChainBlocks", "(", "blocksMap", "map", "[", "chainhash", ".", "Hash", "]", "*", "blockChainContext", ",", "tip", "*", "chainhash", ".", "Hash", ")", "{", "for", "nextHash", ":=", "tip", ";", "*", "nextHash", "!=", "zeroHash", ";", "nextHash", "=", "blocksMap", "[", "*", "nextHash", "]", ".", "parent", "{", "blocksMap", "[", "*", "nextHash", "]", ".", "mainChain", "=", "true", "\n", "}", "\n", "}" ]
// determineMainChainBlocks traverses the block graph down from the tip to // determine which block hashes that are part of the main chain. This function // modifies the mainChain field on the blocksMap entries.
[ "determineMainChainBlocks", "traverses", "the", "block", "graph", "down", "from", "the", "tip", "to", "determine", "which", "block", "hashes", "that", "are", "part", "of", "the", "main", "chain", ".", "This", "function", "modifies", "the", "mainChain", "field", "on", "the", "blocksMap", "entries", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/upgrade.go#L221-L225
train
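A small worked example ties the three migration helpers above together. Suppose the v1 index holds a genesis block G, the chain G -> A -> B, and a stale block B' whose parent is also A, with the recorded chain-state tip being B. readBlockTree yields the parent/children map (G appears as a child of the zero hash), determineBlockHeights assigns heights 0, 1, 2 and 2 to G, A, B and B' by breadth-first traversal, and determineMainChainBlocks walks back from the tip B through A to G, marking only those three as mainChain. B' is left with mainChain false, so migrateBlockIndex writes it without the statusValid flag.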
btcsuite/btcd
blockchain/upgrade.go
maybeUpgradeDbBuckets
func (b *BlockChain) maybeUpgradeDbBuckets(interrupt <-chan struct{}) error { // Load or create bucket versions as needed. var utxoSetVersion uint32 err := b.db.Update(func(dbTx database.Tx) error { // Load the utxo set version from the database or create it and // initialize it to version 1 if it doesn't exist. var err error utxoSetVersion, err = dbFetchOrCreateVersion(dbTx, utxoSetVersionKeyName, 1) return err }) if err != nil { return err } // Update the utxo set to v2 if needed. if utxoSetVersion < 2 { if err := upgradeUtxoSetToV2(b.db, interrupt); err != nil { return err } } return nil }
go
func (b *BlockChain) maybeUpgradeDbBuckets(interrupt <-chan struct{}) error { // Load or create bucket versions as needed. var utxoSetVersion uint32 err := b.db.Update(func(dbTx database.Tx) error { // Load the utxo set version from the database or create it and // initialize it to version 1 if it doesn't exist. var err error utxoSetVersion, err = dbFetchOrCreateVersion(dbTx, utxoSetVersionKeyName, 1) return err }) if err != nil { return err } // Update the utxo set to v2 if needed. if utxoSetVersion < 2 { if err := upgradeUtxoSetToV2(b.db, interrupt); err != nil { return err } } return nil }
[ "func", "(", "b", "*", "BlockChain", ")", "maybeUpgradeDbBuckets", "(", "interrupt", "<-", "chan", "struct", "{", "}", ")", "error", "{", "// Load or create bucket versions as needed.", "var", "utxoSetVersion", "uint32", "\n", "err", ":=", "b", ".", "db", ".", "Update", "(", "func", "(", "dbTx", "database", ".", "Tx", ")", "error", "{", "// Load the utxo set version from the database or create it and", "// initialize it to version 1 if it doesn't exist.", "var", "err", "error", "\n", "utxoSetVersion", ",", "err", "=", "dbFetchOrCreateVersion", "(", "dbTx", ",", "utxoSetVersionKeyName", ",", "1", ")", "\n", "return", "err", "\n", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "// Update the utxo set to v2 if needed.", "if", "utxoSetVersion", "<", "2", "{", "if", "err", ":=", "upgradeUtxoSetToV2", "(", "b", ".", "db", ",", "interrupt", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n\n", "return", "nil", "\n", "}" ]
// maybeUpgradeDbBuckets checks the database version of the buckets used by this // package and performs any needed upgrades to bring them to the latest version. // // All buckets used by this package are guaranteed to be the latest version if // this function returns without error.
[ "maybeUpgradeDbBuckets", "checks", "the", "database", "version", "of", "the", "buckets", "used", "by", "this", "package", "and", "performs", "any", "needed", "upgrades", "to", "bring", "them", "to", "the", "latest", "version", ".", "All", "buckets", "used", "by", "this", "package", "are", "guaranteed", "to", "be", "the", "latest", "version", "if", "this", "function", "returns", "without", "error", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/upgrade.go#L581-L604
train
btcsuite/btcd
txscript/scriptbuilder.go
AddOp
func (b *ScriptBuilder) AddOp(opcode byte) *ScriptBuilder { if b.err != nil { return b } // Pushes that would cause the script to exceed the largest allowed // script size would result in a non-canonical script. if len(b.script)+1 > MaxScriptSize { str := fmt.Sprintf("adding an opcode would exceed the maximum "+ "allowed canonical script length of %d", MaxScriptSize) b.err = ErrScriptNotCanonical(str) return b } b.script = append(b.script, opcode) return b }
go
func (b *ScriptBuilder) AddOp(opcode byte) *ScriptBuilder { if b.err != nil { return b } // Pushes that would cause the script to exceed the largest allowed // script size would result in a non-canonical script. if len(b.script)+1 > MaxScriptSize { str := fmt.Sprintf("adding an opcode would exceed the maximum "+ "allowed canonical script length of %d", MaxScriptSize) b.err = ErrScriptNotCanonical(str) return b } b.script = append(b.script, opcode) return b }
[ "func", "(", "b", "*", "ScriptBuilder", ")", "AddOp", "(", "opcode", "byte", ")", "*", "ScriptBuilder", "{", "if", "b", ".", "err", "!=", "nil", "{", "return", "b", "\n", "}", "\n\n", "// Pushes that would cause the script to exceed the largest allowed", "// script size would result in a non-canonical script.", "if", "len", "(", "b", ".", "script", ")", "+", "1", ">", "MaxScriptSize", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "MaxScriptSize", ")", "\n", "b", ".", "err", "=", "ErrScriptNotCanonical", "(", "str", ")", "\n", "return", "b", "\n", "}", "\n\n", "b", ".", "script", "=", "append", "(", "b", ".", "script", ",", "opcode", ")", "\n", "return", "b", "\n", "}" ]
// AddOp pushes the passed opcode to the end of the script. The script will not // be modified if pushing the opcode would cause the script to exceed the // maximum allowed script engine size.
[ "AddOp", "pushes", "the", "passed", "opcode", "to", "the", "end", "of", "the", "script", ".", "The", "script", "will", "not", "be", "modified", "if", "pushing", "the", "opcode", "would", "cause", "the", "script", "to", "exceed", "the", "maximum", "allowed", "script", "engine", "size", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/txscript/scriptbuilder.go#L58-L74
train
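AddOp is normally used as part of a chained ScriptBuilder expression in which errors are deferred until Script() is called. The sketch below builds a pay-to-pubkey-hash-style script with that pattern; the helper name is hypothetical and pubKeyHash is assumed to be a 20-byte HASH160 of a public key.

package example

import "github.com/btcsuite/btcd/txscript"

// buildP2PKHScript is a hedged sketch of the builder pattern: calls chain,
// and Script() reports the first deferred error, if any.
func buildP2PKHScript(pubKeyHash []byte) ([]byte, error) {
	return txscript.NewScriptBuilder().
		AddOp(txscript.OP_DUP).
		AddOp(txscript.OP_HASH160).
		AddData(pubKeyHash).
		AddOp(txscript.OP_EQUALVERIFY).
		AddOp(txscript.OP_CHECKSIG).
		Script()
}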
btcsuite/btcd
txscript/scriptbuilder.go
AddOps
func (b *ScriptBuilder) AddOps(opcodes []byte) *ScriptBuilder { if b.err != nil { return b } // Pushes that would cause the script to exceed the largest allowed // script size would result in a non-canonical script. if len(b.script)+len(opcodes) > MaxScriptSize { str := fmt.Sprintf("adding opcodes would exceed the maximum "+ "allowed canonical script length of %d", MaxScriptSize) b.err = ErrScriptNotCanonical(str) return b } b.script = append(b.script, opcodes...) return b }
go
func (b *ScriptBuilder) AddOps(opcodes []byte) *ScriptBuilder { if b.err != nil { return b } // Pushes that would cause the script to exceed the largest allowed // script size would result in a non-canonical script. if len(b.script)+len(opcodes) > MaxScriptSize { str := fmt.Sprintf("adding opcodes would exceed the maximum "+ "allowed canonical script length of %d", MaxScriptSize) b.err = ErrScriptNotCanonical(str) return b } b.script = append(b.script, opcodes...) return b }
[ "func", "(", "b", "*", "ScriptBuilder", ")", "AddOps", "(", "opcodes", "[", "]", "byte", ")", "*", "ScriptBuilder", "{", "if", "b", ".", "err", "!=", "nil", "{", "return", "b", "\n", "}", "\n\n", "// Pushes that would cause the script to exceed the largest allowed", "// script size would result in a non-canonical script.", "if", "len", "(", "b", ".", "script", ")", "+", "len", "(", "opcodes", ")", ">", "MaxScriptSize", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "MaxScriptSize", ")", "\n", "b", ".", "err", "=", "ErrScriptNotCanonical", "(", "str", ")", "\n", "return", "b", "\n", "}", "\n\n", "b", ".", "script", "=", "append", "(", "b", ".", "script", ",", "opcodes", "...", ")", "\n", "return", "b", "\n", "}" ]
// AddOps pushes the passed opcodes to the end of the script. The script will // not be modified if pushing the opcodes would cause the script to exceed the // maximum allowed script engine size.
[ "AddOps", "pushes", "the", "passed", "opcodes", "to", "the", "end", "of", "the", "script", ".", "The", "script", "will", "not", "be", "modified", "if", "pushing", "the", "opcodes", "would", "cause", "the", "script", "to", "exceed", "the", "maximum", "allowed", "script", "engine", "size", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/txscript/scriptbuilder.go#L79-L95
train
btcsuite/btcd
txscript/scriptbuilder.go
canonicalDataSize
func canonicalDataSize(data []byte) int { dataLen := len(data) // When the data consists of a single number that can be represented // by one of the "small integer" opcodes, that opcode will be instead // of a data push opcode followed by the number. if dataLen == 0 { return 1 } else if dataLen == 1 && data[0] <= 16 { return 1 } else if dataLen == 1 && data[0] == 0x81 { return 1 } if dataLen < OP_PUSHDATA1 { return 1 + dataLen } else if dataLen <= 0xff { return 2 + dataLen } else if dataLen <= 0xffff { return 3 + dataLen } return 5 + dataLen }
go
func canonicalDataSize(data []byte) int { dataLen := len(data) // When the data consists of a single number that can be represented // by one of the "small integer" opcodes, that opcode will be instead // of a data push opcode followed by the number. if dataLen == 0 { return 1 } else if dataLen == 1 && data[0] <= 16 { return 1 } else if dataLen == 1 && data[0] == 0x81 { return 1 } if dataLen < OP_PUSHDATA1 { return 1 + dataLen } else if dataLen <= 0xff { return 2 + dataLen } else if dataLen <= 0xffff { return 3 + dataLen } return 5 + dataLen }
[ "func", "canonicalDataSize", "(", "data", "[", "]", "byte", ")", "int", "{", "dataLen", ":=", "len", "(", "data", ")", "\n\n", "// When the data consists of a single number that can be represented", "// by one of the \"small integer\" opcodes, that opcode will be instead", "// of a data push opcode followed by the number.", "if", "dataLen", "==", "0", "{", "return", "1", "\n", "}", "else", "if", "dataLen", "==", "1", "&&", "data", "[", "0", "]", "<=", "16", "{", "return", "1", "\n", "}", "else", "if", "dataLen", "==", "1", "&&", "data", "[", "0", "]", "==", "0x81", "{", "return", "1", "\n", "}", "\n\n", "if", "dataLen", "<", "OP_PUSHDATA1", "{", "return", "1", "+", "dataLen", "\n", "}", "else", "if", "dataLen", "<=", "0xff", "{", "return", "2", "+", "dataLen", "\n", "}", "else", "if", "dataLen", "<=", "0xffff", "{", "return", "3", "+", "dataLen", "\n", "}", "\n\n", "return", "5", "+", "dataLen", "\n", "}" ]
// canonicalDataSize returns the number of bytes the canonical encoding of the // data will take.
[ "canonicalDataSize", "returns", "the", "number", "of", "bytes", "the", "canonical", "encoding", "of", "the", "data", "will", "take", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/txscript/scriptbuilder.go#L99-L122
train
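As a worked illustration of the branches in canonicalDataSize (it is unexported, so this is arithmetic rather than callable API): an empty push, a single byte in the range 0 through 16, or the single byte 0x81 costs 1 byte, because a small-integer opcode replaces the push; a 33-byte compressed public key costs 1 + 33 = 34 bytes via a direct push opcode, since 33 < OP_PUSHDATA1 (76); a 100-byte push costs 2 + 100 = 102 bytes (OP_PUSHDATA1 plus a one-byte length); a 600-byte push costs 3 + 600 = 603 bytes (OP_PUSHDATA2 plus a two-byte length); and anything longer than 0xffff bytes costs 5 + len (OP_PUSHDATA4 plus a four-byte length).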
btcsuite/btcd
txscript/scriptbuilder.go
AddFullData
func (b *ScriptBuilder) AddFullData(data []byte) *ScriptBuilder { if b.err != nil { return b } return b.addData(data) }
go
func (b *ScriptBuilder) AddFullData(data []byte) *ScriptBuilder { if b.err != nil { return b } return b.addData(data) }
[ "func", "(", "b", "*", "ScriptBuilder", ")", "AddFullData", "(", "data", "[", "]", "byte", ")", "*", "ScriptBuilder", "{", "if", "b", ".", "err", "!=", "nil", "{", "return", "b", "\n", "}", "\n\n", "return", "b", ".", "addData", "(", "data", ")", "\n", "}" ]
// AddFullData should not typically be used by ordinary users as it does not // include the checks which prevent data pushes larger than the maximum allowed // sizes which leads to scripts that can't be executed. This is provided for // testing purposes such as regression tests where sizes are intentionally made // larger than allowed. // // Use AddData instead.
[ "AddFullData", "should", "not", "typically", "be", "used", "by", "ordinary", "users", "as", "it", "does", "not", "include", "the", "checks", "which", "prevent", "data", "pushes", "larger", "than", "the", "maximum", "allowed", "sizes", "which", "leads", "to", "scripts", "that", "can", "t", "be", "executed", ".", "This", "is", "provided", "for", "testing", "purposes", "such", "as", "regression", "tests", "where", "sizes", "are", "intentionally", "made", "larger", "than", "allowed", ".", "Use", "AddData", "instead", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/txscript/scriptbuilder.go#L178-L184
train
btcsuite/btcd
txscript/scriptbuilder.go
AddInt64
func (b *ScriptBuilder) AddInt64(val int64) *ScriptBuilder { if b.err != nil { return b } // Pushes that would cause the script to exceed the largest allowed // script size would result in a non-canonical script. if len(b.script)+1 > MaxScriptSize { str := fmt.Sprintf("adding an integer would exceed the "+ "maximum allow canonical script length of %d", MaxScriptSize) b.err = ErrScriptNotCanonical(str) return b } // Fast path for small integers and OP_1NEGATE. if val == 0 { b.script = append(b.script, OP_0) return b } if val == -1 || (val >= 1 && val <= 16) { b.script = append(b.script, byte((OP_1-1)+val)) return b } return b.AddData(scriptNum(val).Bytes()) }
go
func (b *ScriptBuilder) AddInt64(val int64) *ScriptBuilder { if b.err != nil { return b } // Pushes that would cause the script to exceed the largest allowed // script size would result in a non-canonical script. if len(b.script)+1 > MaxScriptSize { str := fmt.Sprintf("adding an integer would exceed the "+ "maximum allow canonical script length of %d", MaxScriptSize) b.err = ErrScriptNotCanonical(str) return b } // Fast path for small integers and OP_1NEGATE. if val == 0 { b.script = append(b.script, OP_0) return b } if val == -1 || (val >= 1 && val <= 16) { b.script = append(b.script, byte((OP_1-1)+val)) return b } return b.AddData(scriptNum(val).Bytes()) }
[ "func", "(", "b", "*", "ScriptBuilder", ")", "AddInt64", "(", "val", "int64", ")", "*", "ScriptBuilder", "{", "if", "b", ".", "err", "!=", "nil", "{", "return", "b", "\n", "}", "\n\n", "// Pushes that would cause the script to exceed the largest allowed", "// script size would result in a non-canonical script.", "if", "len", "(", "b", ".", "script", ")", "+", "1", ">", "MaxScriptSize", "{", "str", ":=", "fmt", ".", "Sprintf", "(", "\"", "\"", "+", "\"", "\"", ",", "MaxScriptSize", ")", "\n", "b", ".", "err", "=", "ErrScriptNotCanonical", "(", "str", ")", "\n", "return", "b", "\n", "}", "\n\n", "// Fast path for small integers and OP_1NEGATE.", "if", "val", "==", "0", "{", "b", ".", "script", "=", "append", "(", "b", ".", "script", ",", "OP_0", ")", "\n", "return", "b", "\n", "}", "\n", "if", "val", "==", "-", "1", "||", "(", "val", ">=", "1", "&&", "val", "<=", "16", ")", "{", "b", ".", "script", "=", "append", "(", "b", ".", "script", ",", "byte", "(", "(", "OP_1", "-", "1", ")", "+", "val", ")", ")", "\n", "return", "b", "\n", "}", "\n\n", "return", "b", ".", "AddData", "(", "scriptNum", "(", "val", ")", ".", "Bytes", "(", ")", ")", "\n", "}" ]
// AddInt64 pushes the passed integer to the end of the script. The script will // not be modified if pushing the data would cause the script to exceed the // maximum allowed script engine size.
[ "AddInt64", "pushes", "the", "passed", "integer", "to", "the", "end", "of", "the", "script", ".", "The", "script", "will", "not", "be", "modified", "if", "pushing", "the", "data", "would", "cause", "the", "script", "to", "exceed", "the", "maximum", "allowed", "script", "engine", "size", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/txscript/scriptbuilder.go#L226-L252
train
btcsuite/btcd
txscript/scriptbuilder.go
Reset
func (b *ScriptBuilder) Reset() *ScriptBuilder { b.script = b.script[0:0] b.err = nil return b }
go
func (b *ScriptBuilder) Reset() *ScriptBuilder { b.script = b.script[0:0] b.err = nil return b }
[ "func", "(", "b", "*", "ScriptBuilder", ")", "Reset", "(", ")", "*", "ScriptBuilder", "{", "b", ".", "script", "=", "b", ".", "script", "[", "0", ":", "0", "]", "\n", "b", ".", "err", "=", "nil", "\n", "return", "b", "\n", "}" ]
// Reset resets the script so it has no content.
[ "Reset", "resets", "the", "script", "so", "it", "has", "no", "content", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/txscript/scriptbuilder.go#L255-L259
train
btcsuite/btcd
blockchain/blockindex.go
initBlockNode
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parent *blockNode) { *node = blockNode{ hash: blockHeader.BlockHash(), workSum: CalcWork(blockHeader.Bits), version: blockHeader.Version, bits: blockHeader.Bits, nonce: blockHeader.Nonce, timestamp: blockHeader.Timestamp.Unix(), merkleRoot: blockHeader.MerkleRoot, } if parent != nil { node.parent = parent node.height = parent.height + 1 node.workSum = node.workSum.Add(parent.workSum, node.workSum) } }
go
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parent *blockNode) { *node = blockNode{ hash: blockHeader.BlockHash(), workSum: CalcWork(blockHeader.Bits), version: blockHeader.Version, bits: blockHeader.Bits, nonce: blockHeader.Nonce, timestamp: blockHeader.Timestamp.Unix(), merkleRoot: blockHeader.MerkleRoot, } if parent != nil { node.parent = parent node.height = parent.height + 1 node.workSum = node.workSum.Add(parent.workSum, node.workSum) } }
[ "func", "initBlockNode", "(", "node", "*", "blockNode", ",", "blockHeader", "*", "wire", ".", "BlockHeader", ",", "parent", "*", "blockNode", ")", "{", "*", "node", "=", "blockNode", "{", "hash", ":", "blockHeader", ".", "BlockHash", "(", ")", ",", "workSum", ":", "CalcWork", "(", "blockHeader", ".", "Bits", ")", ",", "version", ":", "blockHeader", ".", "Version", ",", "bits", ":", "blockHeader", ".", "Bits", ",", "nonce", ":", "blockHeader", ".", "Nonce", ",", "timestamp", ":", "blockHeader", ".", "Timestamp", ".", "Unix", "(", ")", ",", "merkleRoot", ":", "blockHeader", ".", "MerkleRoot", ",", "}", "\n", "if", "parent", "!=", "nil", "{", "node", ".", "parent", "=", "parent", "\n", "node", ".", "height", "=", "parent", ".", "height", "+", "1", "\n", "node", ".", "workSum", "=", "node", ".", "workSum", ".", "Add", "(", "parent", ".", "workSum", ",", "node", ".", "workSum", ")", "\n", "}", "\n", "}" ]
// initBlockNode initializes a block node from the given header and parent node, // calculating the height and workSum from the respective fields on the parent. // This function is NOT safe for concurrent access. It must only be called when // initially creating a node.
[ "initBlockNode", "initializes", "a", "block", "node", "from", "the", "given", "header", "and", "parent", "node", "calculating", "the", "height", "and", "workSum", "from", "the", "respective", "fields", "on", "the", "parent", ".", "This", "function", "is", "NOT", "safe", "for", "concurrent", "access", ".", "It", "must", "only", "be", "called", "when", "initially", "creating", "a", "node", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/blockindex.go#L108-L123
train
btcsuite/btcd
blockchain/blockindex.go
newBlockNode
func newBlockNode(blockHeader *wire.BlockHeader, parent *blockNode) *blockNode { var node blockNode initBlockNode(&node, blockHeader, parent) return &node }
go
func newBlockNode(blockHeader *wire.BlockHeader, parent *blockNode) *blockNode { var node blockNode initBlockNode(&node, blockHeader, parent) return &node }
[ "func", "newBlockNode", "(", "blockHeader", "*", "wire", ".", "BlockHeader", ",", "parent", "*", "blockNode", ")", "*", "blockNode", "{", "var", "node", "blockNode", "\n", "initBlockNode", "(", "&", "node", ",", "blockHeader", ",", "parent", ")", "\n", "return", "&", "node", "\n", "}" ]
// newBlockNode returns a new block node for the given block header and parent // node, calculating the height and workSum from the respective fields on the // parent. This function is NOT safe for concurrent access.
[ "newBlockNode", "returns", "a", "new", "block", "node", "for", "the", "given", "block", "header", "and", "parent", "node", "calculating", "the", "height", "and", "workSum", "from", "the", "respective", "fields", "on", "the", "parent", ".", "This", "function", "is", "NOT", "safe", "for", "concurrent", "access", "." ]
96897255fd17525dd12426345d279533780bc4e1
https://github.com/btcsuite/btcd/blob/96897255fd17525dd12426345d279533780bc4e1/blockchain/blockindex.go#L128-L132
train