Dataset columns:
repo: stringlengths, 5 to 54
path: stringlengths, 4 to 155
func_name: stringlengths, 1 to 118
original_string: stringlengths, 52 to 85.5k
language: stringclasses, 1 value
code: stringlengths, 52 to 85.5k
code_tokens: sequence
docstring: stringlengths, 6 to 2.61k
docstring_tokens: sequence
sha: stringlengths, 40 to 40
url: stringlengths, 85 to 252
partition: stringclasses, 1 value
nats-io/nats-streaming-server
stores/filestore.go
FileDescriptorsLimit
func FileDescriptorsLimit(limit int64) FileStoreOption { return func(o *FileStoreOptions) error { if limit < 0 { return fmt.Errorf("file descriptor limit must be a positive number") } o.FileDescriptorsLimit = limit return nil } }
go
func FileDescriptorsLimit(limit int64) FileStoreOption { return func(o *FileStoreOptions) error { if limit < 0 { return fmt.Errorf("file descriptor limit must be a positive number") } o.FileDescriptorsLimit = limit return nil } }
[ "func", "FileDescriptorsLimit", "(", "limit", "int64", ")", "FileStoreOption", "{", "return", "func", "(", "o", "*", "FileStoreOptions", ")", "error", "{", "if", "limit", "<", "0", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ")", "\n", "}", "\n", "o", ".", "FileDescriptorsLimit", "=", "limit", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// FileDescriptorsLimit is a soft limit hinting at FileStore to try to // limit the number of concurrent opened files to that limit.
[ "FileDescriptorsLimit", "is", "a", "soft", "limit", "hinting", "at", "FileStore", "to", "try", "to", "limit", "the", "number", "of", "concurrent", "opened", "files", "to", "that", "limit", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L316-L324
train
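A minimal sketch of applying this option, assuming only what the code column above shows (FileStoreOption is a func(*FileStoreOptions) error and FileStoreOptions has an exported FileDescriptorsLimit field); the import path and the standalone-program framing are illustrative assumptions.

```go
package main

import (
	"fmt"

	"github.com/nats-io/nats-streaming-server/stores"
)

func main() {
	// An option can be applied directly to an options struct, the same way
	// a variadic constructor would apply it.
	opts := stores.FileStoreOptions{}
	if err := stores.FileDescriptorsLimit(100)(&opts); err != nil {
		fmt.Println("option rejected:", err)
		return
	}
	fmt.Println("soft FD limit:", opts.FileDescriptorsLimit) // 100

	// A negative limit is rejected by the validator shown above.
	if err := stores.FileDescriptorsLimit(-1)(&opts); err != nil {
		fmt.Println("expected error:", err)
	}
}
```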
nats-io/nats-streaming-server
stores/filestore.go
ParallelRecovery
func ParallelRecovery(count int) FileStoreOption { return func(o *FileStoreOptions) error { if count <= 0 { return fmt.Errorf("parallel recovery value must be at least 1") } o.ParallelRecovery = count return nil } }
go
func ParallelRecovery(count int) FileStoreOption { return func(o *FileStoreOptions) error { if count <= 0 { return fmt.Errorf("parallel recovery value must be at least 1") } o.ParallelRecovery = count return nil } }
[ "func", "ParallelRecovery", "(", "count", "int", ")", "FileStoreOption", "{", "return", "func", "(", "o", "*", "FileStoreOptions", ")", "error", "{", "if", "count", "<=", "0", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ")", "\n", "}", "\n", "o", ".", "ParallelRecovery", "=", "count", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// ParallelRecovery is a FileStore option that allows the parallel // recovery of channels. When running with SSDs, try to use a higher // value than the default number of 1. When running with HDDs, // performance may be better if it stays at 1.
[ "ParallelRecovery", "is", "a", "FileStore", "option", "that", "allows", "the", "parallel", "recovery", "of", "channels", ".", "When", "running", "with", "SSDs", "try", "to", "use", "a", "higher", "value", "than", "the", "default", "number", "of", "1", ".", "When", "running", "with", "HDDs", "performance", "may", "be", "better", "if", "it", "stays", "at", "1", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L330-L338
train
nats-io/nats-streaming-server
stores/filestore.go
AllOptions
func AllOptions(opts *FileStoreOptions) FileStoreOption { return func(o *FileStoreOptions) error { if err := BufferSize(opts.BufferSize)(o); err != nil { return err } if err := CompactInterval(opts.CompactInterval)(o); err != nil { return err } if err := CompactFragmentation(opts.CompactFragmentation)(o); err != nil { return err } if err := CompactMinFileSize(opts.CompactMinFileSize)(o); err != nil { return err } if err := CRCPolynomial(opts.CRCPolynomial)(o); err != nil { return err } if err := SliceConfig(opts.SliceMaxMsgs, opts.SliceMaxBytes, opts.SliceMaxAge, opts.SliceArchiveScript)(o); err != nil { return err } if err := FileDescriptorsLimit(opts.FileDescriptorsLimit)(o); err != nil { return err } if err := ParallelRecovery(opts.ParallelRecovery)(o); err != nil { return err } o.CompactEnabled = opts.CompactEnabled o.DoCRC = opts.DoCRC o.DoSync = opts.DoSync o.TruncateUnexpectedEOF = opts.TruncateUnexpectedEOF return nil } }
go
func AllOptions(opts *FileStoreOptions) FileStoreOption { return func(o *FileStoreOptions) error { if err := BufferSize(opts.BufferSize)(o); err != nil { return err } if err := CompactInterval(opts.CompactInterval)(o); err != nil { return err } if err := CompactFragmentation(opts.CompactFragmentation)(o); err != nil { return err } if err := CompactMinFileSize(opts.CompactMinFileSize)(o); err != nil { return err } if err := CRCPolynomial(opts.CRCPolynomial)(o); err != nil { return err } if err := SliceConfig(opts.SliceMaxMsgs, opts.SliceMaxBytes, opts.SliceMaxAge, opts.SliceArchiveScript)(o); err != nil { return err } if err := FileDescriptorsLimit(opts.FileDescriptorsLimit)(o); err != nil { return err } if err := ParallelRecovery(opts.ParallelRecovery)(o); err != nil { return err } o.CompactEnabled = opts.CompactEnabled o.DoCRC = opts.DoCRC o.DoSync = opts.DoSync o.TruncateUnexpectedEOF = opts.TruncateUnexpectedEOF return nil } }
[ "func", "AllOptions", "(", "opts", "*", "FileStoreOptions", ")", "FileStoreOption", "{", "return", "func", "(", "o", "*", "FileStoreOptions", ")", "error", "{", "if", "err", ":=", "BufferSize", "(", "opts", ".", "BufferSize", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "CompactInterval", "(", "opts", ".", "CompactInterval", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "CompactFragmentation", "(", "opts", ".", "CompactFragmentation", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "CompactMinFileSize", "(", "opts", ".", "CompactMinFileSize", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "CRCPolynomial", "(", "opts", ".", "CRCPolynomial", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "SliceConfig", "(", "opts", ".", "SliceMaxMsgs", ",", "opts", ".", "SliceMaxBytes", ",", "opts", ".", "SliceMaxAge", ",", "opts", ".", "SliceArchiveScript", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "FileDescriptorsLimit", "(", "opts", ".", "FileDescriptorsLimit", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "ParallelRecovery", "(", "opts", ".", "ParallelRecovery", ")", "(", "o", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "o", ".", "CompactEnabled", "=", "opts", ".", "CompactEnabled", "\n", "o", ".", "DoCRC", "=", "opts", ".", "DoCRC", "\n", "o", ".", "DoSync", "=", "opts", ".", "DoSync", "\n", "o", ".", "TruncateUnexpectedEOF", "=", "opts", ".", "TruncateUnexpectedEOF", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// AllOptions is a convenient option to pass all options from a FileStoreOptions // structure to the constructor.
[ "AllOptions", "is", "a", "convenient", "option", "to", "pass", "all", "options", "from", "a", "FileStoreOptions", "structure", "to", "the", "constructor", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L354-L386
train
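A sketch of AllOptions replaying a fully populated FileStoreOptions through the individual validators, as the docstring above describes; it also exercises ParallelRecovery from the previous row. That the package exposes a DefaultFileStoreOptions starting value is an assumption; any FileStoreOptions holding valid settings would serve the same purpose.

```go
package main

import (
	"fmt"

	"github.com/nats-io/nats-streaming-server/stores"
)

func main() {
	// Assumed starting point with valid default settings.
	custom := stores.DefaultFileStoreOptions
	custom.ParallelRecovery = 4 // values above 1 may help on SSDs
	custom.FileDescriptorsLimit = 200

	// AllOptions copies every field into the target, re-running each validator.
	target := stores.FileStoreOptions{}
	if err := stores.AllOptions(&custom)(&target); err != nil {
		fmt.Println("invalid options:", err)
		return
	}
	fmt.Println(target.ParallelRecovery, target.FileDescriptorsLimit) // 4 200
}
```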
nats-io/nats-streaming-server
stores/filestore.go
checkFileVersion
func checkFileVersion(r io.Reader) error { fv, err := util.ReadInt(r) if err != nil { return fmt.Errorf("unable to verify file version: %v", err) } if fv == 0 || fv > fileVersion { return fmt.Errorf("unsupported file version: %v (supports [1..%v])", fv, fileVersion) } return nil }
go
func checkFileVersion(r io.Reader) error { fv, err := util.ReadInt(r) if err != nil { return fmt.Errorf("unable to verify file version: %v", err) } if fv == 0 || fv > fileVersion { return fmt.Errorf("unsupported file version: %v (supports [1..%v])", fv, fileVersion) } return nil }
[ "func", "checkFileVersion", "(", "r", "io", ".", "Reader", ")", "error", "{", "fv", ",", "err", ":=", "util", ".", "ReadInt", "(", "r", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "if", "fv", "==", "0", "||", "fv", ">", "fileVersion", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "fv", ",", "fileVersion", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// check that the version of the file is understood by this interface
[ "check", "that", "the", "version", "of", "the", "file", "is", "understood", "by", "this", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L660-L669
train
nats-io/nats-streaming-server
stores/filestore.go
createNewWriter
func (w *bufferedWriter) createNewWriter(file *os.File) io.Writer { w.buf = bufio.NewWriterSize(file, w.bufSize) return w.buf }
go
func (w *bufferedWriter) createNewWriter(file *os.File) io.Writer { w.buf = bufio.NewWriterSize(file, w.bufSize) return w.buf }
[ "func", "(", "w", "*", "bufferedWriter", ")", "createNewWriter", "(", "file", "*", "os", ".", "File", ")", "io", ".", "Writer", "{", "w", ".", "buf", "=", "bufio", ".", "NewWriterSize", "(", "file", ",", "w", ".", "bufSize", ")", "\n", "return", "w", ".", "buf", "\n", "}" ]
// createNewWriter creates a new buffer writer for `file` with // the bufferedWriter's current buffer size.
[ "createNewWriter", "creates", "a", "new", "buffer", "writer", "for", "file", "with", "the", "bufferedWriter", "s", "current", "buffer", "size", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L781-L784
train
nats-io/nats-streaming-server
stores/filestore.go
tryShrinkBuffer
func (w *bufferedWriter) tryShrinkBuffer(file *os.File) (io.Writer, error) { // Nothing to do if we are already at the lowest // or file not set/opened. if w.bufSize == w.minShrinkSize || file == nil { return w.buf, nil } if !w.shrinkReq { percentFilled := w.buf.Buffered() * 100 / w.bufSize if percentFilled <= bufShrinkThreshold { w.shrinkReq = true } // Wait for next tick to see if we can shrink return w.buf, nil } if err := w.buf.Flush(); err != nil { return w.buf, err } // Reduce size, but ensure it does not go below the limit w.bufSize /= 2 if w.bufSize < w.minShrinkSize { w.bufSize = w.minShrinkSize } w.buf = bufio.NewWriterSize(file, w.bufSize) // Don't reset shrinkReq unless we are down to the limit if w.bufSize == w.minShrinkSize { w.shrinkReq = true } return w.buf, nil }
go
func (w *bufferedWriter) tryShrinkBuffer(file *os.File) (io.Writer, error) { // Nothing to do if we are already at the lowest // or file not set/opened. if w.bufSize == w.minShrinkSize || file == nil { return w.buf, nil } if !w.shrinkReq { percentFilled := w.buf.Buffered() * 100 / w.bufSize if percentFilled <= bufShrinkThreshold { w.shrinkReq = true } // Wait for next tick to see if we can shrink return w.buf, nil } if err := w.buf.Flush(); err != nil { return w.buf, err } // Reduce size, but ensure it does not go below the limit w.bufSize /= 2 if w.bufSize < w.minShrinkSize { w.bufSize = w.minShrinkSize } w.buf = bufio.NewWriterSize(file, w.bufSize) // Don't reset shrinkReq unless we are down to the limit if w.bufSize == w.minShrinkSize { w.shrinkReq = true } return w.buf, nil }
[ "func", "(", "w", "*", "bufferedWriter", ")", "tryShrinkBuffer", "(", "file", "*", "os", ".", "File", ")", "(", "io", ".", "Writer", ",", "error", ")", "{", "// Nothing to do if we are already at the lowest", "// or file not set/opened.", "if", "w", ".", "bufSize", "==", "w", ".", "minShrinkSize", "||", "file", "==", "nil", "{", "return", "w", ".", "buf", ",", "nil", "\n", "}", "\n\n", "if", "!", "w", ".", "shrinkReq", "{", "percentFilled", ":=", "w", ".", "buf", ".", "Buffered", "(", ")", "*", "100", "/", "w", ".", "bufSize", "\n", "if", "percentFilled", "<=", "bufShrinkThreshold", "{", "w", ".", "shrinkReq", "=", "true", "\n", "}", "\n", "// Wait for next tick to see if we can shrink", "return", "w", ".", "buf", ",", "nil", "\n", "}", "\n", "if", "err", ":=", "w", ".", "buf", ".", "Flush", "(", ")", ";", "err", "!=", "nil", "{", "return", "w", ".", "buf", ",", "err", "\n", "}", "\n", "// Reduce size, but ensure it does not go below the limit", "w", ".", "bufSize", "/=", "2", "\n", "if", "w", ".", "bufSize", "<", "w", ".", "minShrinkSize", "{", "w", ".", "bufSize", "=", "w", ".", "minShrinkSize", "\n", "}", "\n", "w", ".", "buf", "=", "bufio", ".", "NewWriterSize", "(", "file", ",", "w", ".", "bufSize", ")", "\n", "// Don't reset shrinkReq unless we are down to the limit", "if", "w", ".", "bufSize", "==", "w", ".", "minShrinkSize", "{", "w", ".", "shrinkReq", "=", "true", "\n", "}", "\n", "return", "w", ".", "buf", ",", "nil", "\n", "}" ]
// tryShrinkBuffer checks and possibly shrinks the buffer
[ "tryShrinkBuffer", "checks", "and", "possibly", "shrinks", "the", "buffer" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L811-L840
train
nats-io/nats-streaming-server
stores/filestore.go
checkShrinkRequest
func (w *bufferedWriter) checkShrinkRequest() { percentFilled := w.buf.Buffered() * 100 / w.bufSize // If above the threshold, cancel the request. if percentFilled > bufShrinkThreshold { w.shrinkReq = false } }
go
func (w *bufferedWriter) checkShrinkRequest() { percentFilled := w.buf.Buffered() * 100 / w.bufSize // If above the threshold, cancel the request. if percentFilled > bufShrinkThreshold { w.shrinkReq = false } }
[ "func", "(", "w", "*", "bufferedWriter", ")", "checkShrinkRequest", "(", ")", "{", "percentFilled", ":=", "w", ".", "buf", ".", "Buffered", "(", ")", "*", "100", "/", "w", ".", "bufSize", "\n", "// If above the threshold, cancel the request.", "if", "percentFilled", ">", "bufShrinkThreshold", "{", "w", ".", "shrinkReq", "=", "false", "\n", "}", "\n", "}" ]
// checkShrinkRequest checks how full the buffer is, and if is above a certain // threshold, cancels the shrink request
[ "checkShrinkRequest", "checks", "how", "full", "the", "buffer", "is", "and", "if", "is", "above", "a", "certain", "threshold", "cancels", "the", "shrink", "request" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L844-L850
train
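Taken together, the two rows above implement a deferred shrink: one flush interval merely flags the request when the buffer is mostly empty, and the next interval halves the buffer without going below the minimum. A standalone sketch of that sizing arithmetic follows; the threshold and size values are assumptions, since bufShrinkThreshold and minShrinkSize are defined elsewhere in filestore.go.

```go
package main

import "fmt"

func main() {
	// Assumed values; the real constants live elsewhere in filestore.go.
	const bufShrinkThreshold = 50 // percent
	minShrinkSize := 1024
	bufSize := 8192
	buffered := 1000

	percentFilled := buffered * 100 / bufSize
	shrinkReq := percentFilled <= bufShrinkThreshold // first tick: only flag it
	if shrinkReq {
		// Next tick: halve the buffer, but never below the floor.
		bufSize /= 2
		if bufSize < minShrinkSize {
			bufSize = minShrinkSize
		}
	}
	fmt.Println(percentFilled, shrinkReq, bufSize) // 12 true 4096
}
```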
nats-io/nats-streaming-server
stores/filestore.go
openFile
func (fm *filesManager) openFile(file *file) error { fm.Lock() if fm.isClosed { fm.Unlock() return fmt.Errorf("unable to open file %q, store is being closed", file.name) } curState := atomic.LoadInt32(&file.state) if curState == fileRemoved { fm.Unlock() return fmt.Errorf("unable to open file %q, it has been removed", file.name) } if curState != fileClosed || file.handle != nil { fm.Unlock() panic(fmt.Errorf("request to open file %q but invalid state: handle=%v - state=%v", file.name, file.handle, file.state)) } var err error if fm.limit > 0 && fm.openedFDs >= fm.limit { fm.closeUnusedFiles(file.id) } file.handle, err = openFileWithFlags(file.name, file.flags) if err == nil { atomic.StoreInt32(&file.state, fileInUse) fm.openedFDs++ } fm.Unlock() return err }
go
func (fm *filesManager) openFile(file *file) error { fm.Lock() if fm.isClosed { fm.Unlock() return fmt.Errorf("unable to open file %q, store is being closed", file.name) } curState := atomic.LoadInt32(&file.state) if curState == fileRemoved { fm.Unlock() return fmt.Errorf("unable to open file %q, it has been removed", file.name) } if curState != fileClosed || file.handle != nil { fm.Unlock() panic(fmt.Errorf("request to open file %q but invalid state: handle=%v - state=%v", file.name, file.handle, file.state)) } var err error if fm.limit > 0 && fm.openedFDs >= fm.limit { fm.closeUnusedFiles(file.id) } file.handle, err = openFileWithFlags(file.name, file.flags) if err == nil { atomic.StoreInt32(&file.state, fileInUse) fm.openedFDs++ } fm.Unlock() return err }
[ "func", "(", "fm", "*", "filesManager", ")", "openFile", "(", "file", "*", "file", ")", "error", "{", "fm", ".", "Lock", "(", ")", "\n", "if", "fm", ".", "isClosed", "{", "fm", ".", "Unlock", "(", ")", "\n", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "file", ".", "name", ")", "\n", "}", "\n", "curState", ":=", "atomic", ".", "LoadInt32", "(", "&", "file", ".", "state", ")", "\n", "if", "curState", "==", "fileRemoved", "{", "fm", ".", "Unlock", "(", ")", "\n", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "file", ".", "name", ")", "\n", "}", "\n", "if", "curState", "!=", "fileClosed", "||", "file", ".", "handle", "!=", "nil", "{", "fm", ".", "Unlock", "(", ")", "\n", "panic", "(", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "file", ".", "name", ",", "file", ".", "handle", ",", "file", ".", "state", ")", ")", "\n", "}", "\n", "var", "err", "error", "\n", "if", "fm", ".", "limit", ">", "0", "&&", "fm", ".", "openedFDs", ">=", "fm", ".", "limit", "{", "fm", ".", "closeUnusedFiles", "(", "file", ".", "id", ")", "\n", "}", "\n", "file", ".", "handle", ",", "err", "=", "openFileWithFlags", "(", "file", ".", "name", ",", "file", ".", "flags", ")", "\n", "if", "err", "==", "nil", "{", "atomic", ".", "StoreInt32", "(", "&", "file", ".", "state", ",", "fileInUse", ")", "\n", "fm", ".", "openedFDs", "++", "\n", "}", "\n", "fm", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// openFile opens the given file and sets its state to `fileInUse`. // If the file manager has been closed or the file removed, this call // returns an error. // Otherwise, if the file's state is not `fileClosed` this call will panic. // This call will possibly cause opened but unused files to be closed if the // number of open file requests is above the set limit.
[ "openFile", "opens", "the", "given", "file", "and", "sets", "its", "state", "to", "fileInUse", ".", "If", "the", "file", "manager", "has", "been", "closed", "or", "the", "file", "removed", "this", "call", "returns", "an", "error", ".", "Otherwise", "if", "the", "file", "s", "state", "is", "not", "fileClosed", "this", "call", "will", "panic", ".", "This", "call", "will", "possibly", "cause", "opened", "but", "unused", "files", "to", "be", "closed", "if", "the", "number", "of", "open", "file", "requests", "is", "above", "the", "set", "limit", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L926-L952
train
nats-io/nats-streaming-server
stores/filestore.go
closeLockedFile
func (fm *filesManager) closeLockedFile(file *file) error { if !atomic.CompareAndSwapInt32(&file.state, fileInUse, fileClosing) { panic(fmt.Errorf("file %q is requested to be closed but was not locked by caller", file.name)) } fm.Lock() err := fm.doClose(file) fm.Unlock() return err }
go
func (fm *filesManager) closeLockedFile(file *file) error { if !atomic.CompareAndSwapInt32(&file.state, fileInUse, fileClosing) { panic(fmt.Errorf("file %q is requested to be closed but was not locked by caller", file.name)) } fm.Lock() err := fm.doClose(file) fm.Unlock() return err }
[ "func", "(", "fm", "*", "filesManager", ")", "closeLockedFile", "(", "file", "*", "file", ")", "error", "{", "if", "!", "atomic", ".", "CompareAndSwapInt32", "(", "&", "file", ".", "state", ",", "fileInUse", ",", "fileClosing", ")", "{", "panic", "(", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "file", ".", "name", ")", ")", "\n", "}", "\n", "fm", ".", "Lock", "(", ")", "\n", "err", ":=", "fm", ".", "doClose", "(", "file", ")", "\n", "fm", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// closeLockedFile closes the handle of the given file, but only if the caller // has locked the file. Will panic otherwise. // If the file's beforeClose callback is not nil, this callback is invoked // before the file handle is closed.
[ "closeLockedFile", "closes", "the", "handle", "of", "the", "given", "file", "but", "only", "if", "the", "caller", "has", "locked", "the", "file", ".", "Will", "panic", "otherwise", ".", "If", "the", "file", "s", "beforeClose", "callback", "is", "not", "nil", "this", "callback", "is", "invoked", "before", "the", "file", "handle", "is", "closed", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L958-L966
train
nats-io/nats-streaming-server
stores/filestore.go
closeFileIfOpened
func (fm *filesManager) closeFileIfOpened(file *file) error { if !atomic.CompareAndSwapInt32(&file.state, fileOpened, fileClosing) { return nil } fm.Lock() err := fm.doClose(file) fm.Unlock() return err }
go
func (fm *filesManager) closeFileIfOpened(file *file) error { if !atomic.CompareAndSwapInt32(&file.state, fileOpened, fileClosing) { return nil } fm.Lock() err := fm.doClose(file) fm.Unlock() return err }
[ "func", "(", "fm", "*", "filesManager", ")", "closeFileIfOpened", "(", "file", "*", "file", ")", "error", "{", "if", "!", "atomic", ".", "CompareAndSwapInt32", "(", "&", "file", ".", "state", ",", "fileOpened", ",", "fileClosing", ")", "{", "return", "nil", "\n", "}", "\n", "fm", ".", "Lock", "(", ")", "\n", "err", ":=", "fm", ".", "doClose", "(", "file", ")", "\n", "fm", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// closeFileIfOpened closes the handle of the given file, but only if the // file is opened and not currently locked. Does not return any error or panic // if file is in any other state. // If the file's beforeClose callback is not nil, this callback is invoked // before the file handle is closed.
[ "closeFileIfOpened", "closes", "the", "handle", "of", "the", "given", "file", "but", "only", "if", "the", "file", "is", "opened", "and", "not", "currently", "locked", ".", "Does", "not", "return", "any", "error", "or", "panic", "if", "file", "is", "in", "any", "other", "state", ".", "If", "the", "file", "s", "beforeClose", "callback", "is", "not", "nil", "this", "callback", "is", "invoked", "before", "the", "file", "handle", "is", "closed", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L973-L981
train
nats-io/nats-streaming-server
stores/filestore.go
doClose
func (fm *filesManager) doClose(file *file) error { var err error if file.beforeClose != nil { err = file.beforeClose() } util.CloseFile(err, file.handle) // Regardless of error, we need to change the state to closed. file.handle = nil atomic.StoreInt32(&file.state, fileClosed) fm.openedFDs-- return err }
go
func (fm *filesManager) doClose(file *file) error { var err error if file.beforeClose != nil { err = file.beforeClose() } util.CloseFile(err, file.handle) // Regardless of error, we need to change the state to closed. file.handle = nil atomic.StoreInt32(&file.state, fileClosed) fm.openedFDs-- return err }
[ "func", "(", "fm", "*", "filesManager", ")", "doClose", "(", "file", "*", "file", ")", "error", "{", "var", "err", "error", "\n", "if", "file", ".", "beforeClose", "!=", "nil", "{", "err", "=", "file", ".", "beforeClose", "(", ")", "\n", "}", "\n", "util", ".", "CloseFile", "(", "err", ",", "file", ".", "handle", ")", "\n", "// Regardless of error, we need to change the state to closed.", "file", ".", "handle", "=", "nil", "\n", "atomic", ".", "StoreInt32", "(", "&", "file", ".", "state", ",", "fileClosed", ")", "\n", "fm", ".", "openedFDs", "--", "\n", "return", "err", "\n", "}" ]
// doClose closes the file handle, setting it to nil and switching state to `fileClosed`. // If a `beforeClose` callback was registered on file creation, it is invoked // before the file handler is actually closed. // Lock is required on entry.
[ "doClose", "closes", "the", "file", "handle", "setting", "it", "to", "nil", "and", "switching", "state", "to", "fileClosed", ".", "If", "a", "beforeClose", "callback", "was", "registered", "on", "file", "creation", "it", "is", "invoked", "before", "the", "file", "handler", "is", "actually", "closed", ".", "Lock", "is", "required", "on", "entry", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1006-L1017
train
nats-io/nats-streaming-server
stores/filestore.go
lockFile
func (fm *filesManager) lockFile(file *file) (bool, error) { if atomic.CompareAndSwapInt32(&file.state, fileOpened, fileInUse) { return true, nil } return false, fm.openFile(file) }
go
func (fm *filesManager) lockFile(file *file) (bool, error) { if atomic.CompareAndSwapInt32(&file.state, fileOpened, fileInUse) { return true, nil } return false, fm.openFile(file) }
[ "func", "(", "fm", "*", "filesManager", ")", "lockFile", "(", "file", "*", "file", ")", "(", "bool", ",", "error", ")", "{", "if", "atomic", ".", "CompareAndSwapInt32", "(", "&", "file", ".", "state", ",", "fileOpened", ",", "fileInUse", ")", "{", "return", "true", ",", "nil", "\n", "}", "\n", "return", "false", ",", "fm", ".", "openFile", "(", "file", ")", "\n", "}" ]
// lockFile locks the given file. // If the file was already opened, the boolean returned is true, // otherwise, the file is opened and the call returns false.
[ "lockFile", "locks", "the", "given", "file", ".", "If", "the", "file", "was", "already", "opened", "the", "boolean", "returned", "is", "true", "otherwise", "the", "file", "is", "opened", "and", "the", "call", "returns", "false", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1022-L1027
train
nats-io/nats-streaming-server
stores/filestore.go
unlockFile
func (fm *filesManager) unlockFile(file *file) { if !atomic.CompareAndSwapInt32(&file.state, fileInUse, fileOpened) { panic(fmt.Errorf("failed to switch state from fileInUse to fileOpened for file %q, state=%v", file.name, file.state)) } }
go
func (fm *filesManager) unlockFile(file *file) { if !atomic.CompareAndSwapInt32(&file.state, fileInUse, fileOpened) { panic(fmt.Errorf("failed to switch state from fileInUse to fileOpened for file %q, state=%v", file.name, file.state)) } }
[ "func", "(", "fm", "*", "filesManager", ")", "unlockFile", "(", "file", "*", "file", ")", "{", "if", "!", "atomic", ".", "CompareAndSwapInt32", "(", "&", "file", ".", "state", ",", "fileInUse", ",", "fileOpened", ")", "{", "panic", "(", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "file", ".", "name", ",", "file", ".", "state", ")", ")", "\n", "}", "\n", "}" ]
// unlockFile unlocks the file if currently locked, otherwise panic.
[ "unlockFile", "unlocks", "the", "file", "if", "currently", "locked", "otherwise", "panic", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1036-L1041
train
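The filesManager rows above (openFile, closeLockedFile, closeFileIfOpened, doClose, lockFile, unlockFile) describe a lock/use/unlock protocol around each file handle. Since these identifiers are unexported, the following is only an in-package sketch rather than a runnable program: the helper name writeThroughFile is hypothetical, and file.handle being an *os.File is inferred from the openFile row.

```go
// Hypothetical in-package helper illustrating the lock/use/unlock cycle.
func writeThroughFile(fm *filesManager, f *file, data []byte) error {
	// lockFile opens the file if it is currently closed and marks it fileInUse;
	// the boolean reports whether the handle was already open.
	wasOpened, err := fm.lockFile(f)
	if err != nil {
		return err
	}
	_ = wasOpened

	if _, err := f.handle.Write(data); err != nil {
		// closeLockedFile expects the caller to still hold the lock (fileInUse).
		fm.closeLockedFile(f)
		return err
	}

	// unlockFile must be called while the file is still fileInUse, or it panics.
	fm.unlockFile(f)
	return nil
}
```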
nats-io/nats-streaming-server
stores/filestore.go
trySwitchState
func (fm *filesManager) trySwitchState(file *file, newState int32) (bool, error) { wasOpened := false wasClosed := false for i := 0; i < 10000; i++ { if atomic.CompareAndSwapInt32(&file.state, fileOpened, newState) { wasOpened = true break } if atomic.CompareAndSwapInt32(&file.state, fileClosed, newState) { wasClosed = true break } if i%1000 == 1 { time.Sleep(time.Millisecond) } } if !wasOpened && !wasClosed { return false, fmt.Errorf("file %q is still probably locked", file.name) } return wasOpened, nil }
go
func (fm *filesManager) trySwitchState(file *file, newState int32) (bool, error) { wasOpened := false wasClosed := false for i := 0; i < 10000; i++ { if atomic.CompareAndSwapInt32(&file.state, fileOpened, newState) { wasOpened = true break } if atomic.CompareAndSwapInt32(&file.state, fileClosed, newState) { wasClosed = true break } if i%1000 == 1 { time.Sleep(time.Millisecond) } } if !wasOpened && !wasClosed { return false, fmt.Errorf("file %q is still probably locked", file.name) } return wasOpened, nil }
[ "func", "(", "fm", "*", "filesManager", ")", "trySwitchState", "(", "file", "*", "file", ",", "newState", "int32", ")", "(", "bool", ",", "error", ")", "{", "wasOpened", ":=", "false", "\n", "wasClosed", ":=", "false", "\n", "for", "i", ":=", "0", ";", "i", "<", "10000", ";", "i", "++", "{", "if", "atomic", ".", "CompareAndSwapInt32", "(", "&", "file", ".", "state", ",", "fileOpened", ",", "newState", ")", "{", "wasOpened", "=", "true", "\n", "break", "\n", "}", "\n", "if", "atomic", ".", "CompareAndSwapInt32", "(", "&", "file", ".", "state", ",", "fileClosed", ",", "newState", ")", "{", "wasClosed", "=", "true", "\n", "break", "\n", "}", "\n", "if", "i", "%", "1000", "==", "1", "{", "time", ".", "Sleep", "(", "time", ".", "Millisecond", ")", "\n", "}", "\n", "}", "\n", "if", "!", "wasOpened", "&&", "!", "wasClosed", "{", "return", "false", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "file", ".", "name", ")", "\n", "}", "\n", "return", "wasOpened", ",", "nil", "\n", "}" ]
// trySwitchState attempts to switch an initial state of `fileOpened` // or `fileClosed` to the given newState. If it can't it will return an // error, otherwise, returned a boolean to indicate if the initial state // was `fileOpened`.
[ "trySwitchState", "attempts", "to", "switch", "an", "initial", "state", "of", "fileOpened", "or", "fileClosed", "to", "the", "given", "newState", ".", "If", "it", "can", "t", "it", "will", "return", "an", "error", "otherwise", "returned", "a", "boolean", "to", "indicate", "if", "the", "initial", "state", "was", "fileOpened", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1047-L1067
train
nats-io/nats-streaming-server
stores/filestore.go
close
func (fm *filesManager) close() error { fm.Lock() if fm.isClosed { fm.Unlock() return nil } fm.isClosed = true files := make([]*file, 0, len(fm.files)) for _, file := range fm.files { files = append(files, file) } fm.files = nil fm.Unlock() var err error for _, file := range files { wasOpened, sserr := fm.trySwitchState(file, fmClosed) if sserr != nil { if err == nil { err = sserr } } else if wasOpened { fm.Lock() if cerr := fm.doClose(file); cerr != nil && err == nil { err = cerr } fm.Unlock() } } return err }
go
func (fm *filesManager) close() error { fm.Lock() if fm.isClosed { fm.Unlock() return nil } fm.isClosed = true files := make([]*file, 0, len(fm.files)) for _, file := range fm.files { files = append(files, file) } fm.files = nil fm.Unlock() var err error for _, file := range files { wasOpened, sserr := fm.trySwitchState(file, fmClosed) if sserr != nil { if err == nil { err = sserr } } else if wasOpened { fm.Lock() if cerr := fm.doClose(file); cerr != nil && err == nil { err = cerr } fm.Unlock() } } return err }
[ "func", "(", "fm", "*", "filesManager", ")", "close", "(", ")", "error", "{", "fm", ".", "Lock", "(", ")", "\n", "if", "fm", ".", "isClosed", "{", "fm", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}", "\n", "fm", ".", "isClosed", "=", "true", "\n\n", "files", ":=", "make", "(", "[", "]", "*", "file", ",", "0", ",", "len", "(", "fm", ".", "files", ")", ")", "\n", "for", "_", ",", "file", ":=", "range", "fm", ".", "files", "{", "files", "=", "append", "(", "files", ",", "file", ")", "\n", "}", "\n", "fm", ".", "files", "=", "nil", "\n", "fm", ".", "Unlock", "(", ")", "\n\n", "var", "err", "error", "\n", "for", "_", ",", "file", ":=", "range", "files", "{", "wasOpened", ",", "sserr", ":=", "fm", ".", "trySwitchState", "(", "file", ",", "fmClosed", ")", "\n", "if", "sserr", "!=", "nil", "{", "if", "err", "==", "nil", "{", "err", "=", "sserr", "\n", "}", "\n", "}", "else", "if", "wasOpened", "{", "fm", ".", "Lock", "(", ")", "\n", "if", "cerr", ":=", "fm", ".", "doClose", "(", "file", ")", ";", "cerr", "!=", "nil", "&&", "err", "==", "nil", "{", "err", "=", "cerr", "\n", "}", "\n", "fm", ".", "Unlock", "(", ")", "\n", "}", "\n", "}", "\n", "return", "err", "\n", "}" ]
// close the files manager, including all files currently opened. // Returns the first error encountered when closing the files.
[ "close", "the", "files", "manager", "including", "all", "files", "currently", "opened", ".", "Returns", "the", "first", "error", "encountered", "when", "closing", "the", "files", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1147-L1178
train
nats-io/nats-streaming-server
stores/filestore.go
Init
func (fs *FileStore) Init(info *spb.ServerInfo) error { fs.Lock() defer fs.Unlock() if fs.serverFile == nil { var err error // Open/Create the server file (note that this file must not be opened, // in APPEND mode to allow truncate to work). fs.serverFile, err = fs.fm.createFile(serverFileName, os.O_RDWR|os.O_CREATE, nil) if err != nil { return err } } else { if _, err := fs.fm.lockFile(fs.serverFile); err != nil { return err } } f := fs.serverFile.handle // defer is ok for this function... defer fs.fm.unlockFile(fs.serverFile) // Truncate the file (4 is the size of the fileVersion record) if err := f.Truncate(4); err != nil { return err } // Move offset to 4 (truncate does not do that) if _, err := f.Seek(4, io.SeekStart); err != nil { return err } // ServerInfo record is not typed. We also don't pass a reusable buffer. if _, _, err := writeRecord(f, nil, recNoType, info, info.Size(), fs.crcTable); err != nil { return err } return nil }
go
func (fs *FileStore) Init(info *spb.ServerInfo) error { fs.Lock() defer fs.Unlock() if fs.serverFile == nil { var err error // Open/Create the server file (note that this file must not be opened, // in APPEND mode to allow truncate to work). fs.serverFile, err = fs.fm.createFile(serverFileName, os.O_RDWR|os.O_CREATE, nil) if err != nil { return err } } else { if _, err := fs.fm.lockFile(fs.serverFile); err != nil { return err } } f := fs.serverFile.handle // defer is ok for this function... defer fs.fm.unlockFile(fs.serverFile) // Truncate the file (4 is the size of the fileVersion record) if err := f.Truncate(4); err != nil { return err } // Move offset to 4 (truncate does not do that) if _, err := f.Seek(4, io.SeekStart); err != nil { return err } // ServerInfo record is not typed. We also don't pass a reusable buffer. if _, _, err := writeRecord(f, nil, recNoType, info, info.Size(), fs.crcTable); err != nil { return err } return nil }
[ "func", "(", "fs", "*", "FileStore", ")", "Init", "(", "info", "*", "spb", ".", "ServerInfo", ")", "error", "{", "fs", ".", "Lock", "(", ")", "\n", "defer", "fs", ".", "Unlock", "(", ")", "\n\n", "if", "fs", ".", "serverFile", "==", "nil", "{", "var", "err", "error", "\n", "// Open/Create the server file (note that this file must not be opened,", "// in APPEND mode to allow truncate to work).", "fs", ".", "serverFile", ",", "err", "=", "fs", ".", "fm", ".", "createFile", "(", "serverFileName", ",", "os", ".", "O_RDWR", "|", "os", ".", "O_CREATE", ",", "nil", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "else", "{", "if", "_", ",", "err", ":=", "fs", ".", "fm", ".", "lockFile", "(", "fs", ".", "serverFile", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "f", ":=", "fs", ".", "serverFile", ".", "handle", "\n", "// defer is ok for this function...", "defer", "fs", ".", "fm", ".", "unlockFile", "(", "fs", ".", "serverFile", ")", "\n\n", "// Truncate the file (4 is the size of the fileVersion record)", "if", "err", ":=", "f", ".", "Truncate", "(", "4", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// Move offset to 4 (truncate does not do that)", "if", "_", ",", "err", ":=", "f", ".", "Seek", "(", "4", ",", "io", ".", "SeekStart", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// ServerInfo record is not typed. We also don't pass a reusable buffer.", "if", "_", ",", "_", ",", "err", ":=", "writeRecord", "(", "f", ",", "nil", ",", "recNoType", ",", "info", ",", "info", ".", "Size", "(", ")", ",", "fs", ".", "crcTable", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// Init is used to persist server's information after the first start
[ "Init", "is", "used", "to", "persist", "server", "s", "information", "after", "the", "first", "start" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1461-L1495
train
nats-io/nats-streaming-server
stores/filestore.go
recoverClients
func (fs *FileStore) recoverClients() ([]*Client, error) { var err error var recType recordType var recSize int _buf := [256]byte{} buf := _buf[:] offset := int64(4) // Create a buffered reader to speed-up recovery br := bufio.NewReaderSize(fs.clientsFile.handle, defaultBufSize) for { buf, recSize, recType, err = readRecord(br, buf, true, fs.crcTable, fs.opts.DoCRC) if err != nil { switch err { case io.EOF: err = nil case errNeedRewind: err = fs.fm.truncateFile(fs.clientsFile, offset) default: err = fs.handleUnexpectedEOF(err, fs.clientsFile, offset, true) } if err == nil { break } return nil, err } readBytes := int64(recSize + recordHeaderSize) offset += readBytes fs.cliFileSize += readBytes switch recType { case addClient: c := &Client{} if err := c.ClientInfo.Unmarshal(buf[:recSize]); err != nil { return nil, err } // Add to the map. Note that if one already exists, which should // not, just replace with this most recent one. fs.clients[c.ID] = c case delClient: c := spb.ClientDelete{} if err := c.Unmarshal(buf[:recSize]); err != nil { return nil, err } delete(fs.clients, c.ID) fs.cliDeleteRecs++ default: return nil, fmt.Errorf("invalid client record type: %v", recType) } } clients := make([]*Client, len(fs.clients)) i := 0 // Convert the map into an array for _, c := range fs.clients { clients[i] = c i++ } return clients, nil }
go
func (fs *FileStore) recoverClients() ([]*Client, error) { var err error var recType recordType var recSize int _buf := [256]byte{} buf := _buf[:] offset := int64(4) // Create a buffered reader to speed-up recovery br := bufio.NewReaderSize(fs.clientsFile.handle, defaultBufSize) for { buf, recSize, recType, err = readRecord(br, buf, true, fs.crcTable, fs.opts.DoCRC) if err != nil { switch err { case io.EOF: err = nil case errNeedRewind: err = fs.fm.truncateFile(fs.clientsFile, offset) default: err = fs.handleUnexpectedEOF(err, fs.clientsFile, offset, true) } if err == nil { break } return nil, err } readBytes := int64(recSize + recordHeaderSize) offset += readBytes fs.cliFileSize += readBytes switch recType { case addClient: c := &Client{} if err := c.ClientInfo.Unmarshal(buf[:recSize]); err != nil { return nil, err } // Add to the map. Note that if one already exists, which should // not, just replace with this most recent one. fs.clients[c.ID] = c case delClient: c := spb.ClientDelete{} if err := c.Unmarshal(buf[:recSize]); err != nil { return nil, err } delete(fs.clients, c.ID) fs.cliDeleteRecs++ default: return nil, fmt.Errorf("invalid client record type: %v", recType) } } clients := make([]*Client, len(fs.clients)) i := 0 // Convert the map into an array for _, c := range fs.clients { clients[i] = c i++ } return clients, nil }
[ "func", "(", "fs", "*", "FileStore", ")", "recoverClients", "(", ")", "(", "[", "]", "*", "Client", ",", "error", ")", "{", "var", "err", "error", "\n", "var", "recType", "recordType", "\n", "var", "recSize", "int", "\n\n", "_buf", ":=", "[", "256", "]", "byte", "{", "}", "\n", "buf", ":=", "_buf", "[", ":", "]", "\n", "offset", ":=", "int64", "(", "4", ")", "\n\n", "// Create a buffered reader to speed-up recovery", "br", ":=", "bufio", ".", "NewReaderSize", "(", "fs", ".", "clientsFile", ".", "handle", ",", "defaultBufSize", ")", "\n\n", "for", "{", "buf", ",", "recSize", ",", "recType", ",", "err", "=", "readRecord", "(", "br", ",", "buf", ",", "true", ",", "fs", ".", "crcTable", ",", "fs", ".", "opts", ".", "DoCRC", ")", "\n", "if", "err", "!=", "nil", "{", "switch", "err", "{", "case", "io", ".", "EOF", ":", "err", "=", "nil", "\n", "case", "errNeedRewind", ":", "err", "=", "fs", ".", "fm", ".", "truncateFile", "(", "fs", ".", "clientsFile", ",", "offset", ")", "\n", "default", ":", "err", "=", "fs", ".", "handleUnexpectedEOF", "(", "err", ",", "fs", ".", "clientsFile", ",", "offset", ",", "true", ")", "\n", "}", "\n", "if", "err", "==", "nil", "{", "break", "\n", "}", "\n", "return", "nil", ",", "err", "\n", "}", "\n", "readBytes", ":=", "int64", "(", "recSize", "+", "recordHeaderSize", ")", "\n", "offset", "+=", "readBytes", "\n", "fs", ".", "cliFileSize", "+=", "readBytes", "\n", "switch", "recType", "{", "case", "addClient", ":", "c", ":=", "&", "Client", "{", "}", "\n", "if", "err", ":=", "c", ".", "ClientInfo", ".", "Unmarshal", "(", "buf", "[", ":", "recSize", "]", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "// Add to the map. Note that if one already exists, which should", "// not, just replace with this most recent one.", "fs", ".", "clients", "[", "c", ".", "ID", "]", "=", "c", "\n", "case", "delClient", ":", "c", ":=", "spb", ".", "ClientDelete", "{", "}", "\n", "if", "err", ":=", "c", ".", "Unmarshal", "(", "buf", "[", ":", "recSize", "]", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "delete", "(", "fs", ".", "clients", ",", "c", ".", "ID", ")", "\n", "fs", ".", "cliDeleteRecs", "++", "\n", "default", ":", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "recType", ")", "\n", "}", "\n", "}", "\n", "clients", ":=", "make", "(", "[", "]", "*", "Client", ",", "len", "(", "fs", ".", "clients", ")", ")", "\n", "i", ":=", "0", "\n", "// Convert the map into an array", "for", "_", ",", "c", ":=", "range", "fs", ".", "clients", "{", "clients", "[", "i", "]", "=", "c", "\n", "i", "++", "\n", "}", "\n", "return", "clients", ",", "nil", "\n", "}" ]
// recoverClients reads the client files and returns an array of RecoveredClient
[ "recoverClients", "reads", "the", "client", "files", "and", "returns", "an", "array", "of", "RecoveredClient" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1498-L1557
train
nats-io/nats-streaming-server
stores/filestore.go
recoverServerInfo
func (fs *FileStore) recoverServerInfo() (*spb.ServerInfo, error) { info := &spb.ServerInfo{} buf, size, _, err := readRecord(fs.serverFile.handle, nil, false, fs.crcTable, fs.opts.DoCRC) if err != nil { if err == io.EOF { // We are done, no state recovered return nil, nil } fs.log.Errorf("Server file %q corrupted: %v", fs.serverFile.name, err) fs.log.Errorf("Follow instructions in documentation in order to recover from this") return nil, err } // Check that the size of the file is consistent with the size // of the record we are supposed to recover. Account for the // 12 bytes (4 + recordHeaderSize) corresponding to the fileVersion and // record header. fstat, err := fs.serverFile.handle.Stat() if err != nil { return nil, err } expectedSize := int64(size + 4 + recordHeaderSize) if fstat.Size() != expectedSize { return nil, fmt.Errorf("incorrect file size, expected %v bytes, got %v bytes", expectedSize, fstat.Size()) } // Reconstruct now if err := info.Unmarshal(buf[:size]); err != nil { return nil, err } return info, nil }
go
func (fs *FileStore) recoverServerInfo() (*spb.ServerInfo, error) { info := &spb.ServerInfo{} buf, size, _, err := readRecord(fs.serverFile.handle, nil, false, fs.crcTable, fs.opts.DoCRC) if err != nil { if err == io.EOF { // We are done, no state recovered return nil, nil } fs.log.Errorf("Server file %q corrupted: %v", fs.serverFile.name, err) fs.log.Errorf("Follow instructions in documentation in order to recover from this") return nil, err } // Check that the size of the file is consistent with the size // of the record we are supposed to recover. Account for the // 12 bytes (4 + recordHeaderSize) corresponding to the fileVersion and // record header. fstat, err := fs.serverFile.handle.Stat() if err != nil { return nil, err } expectedSize := int64(size + 4 + recordHeaderSize) if fstat.Size() != expectedSize { return nil, fmt.Errorf("incorrect file size, expected %v bytes, got %v bytes", expectedSize, fstat.Size()) } // Reconstruct now if err := info.Unmarshal(buf[:size]); err != nil { return nil, err } return info, nil }
[ "func", "(", "fs", "*", "FileStore", ")", "recoverServerInfo", "(", ")", "(", "*", "spb", ".", "ServerInfo", ",", "error", ")", "{", "info", ":=", "&", "spb", ".", "ServerInfo", "{", "}", "\n", "buf", ",", "size", ",", "_", ",", "err", ":=", "readRecord", "(", "fs", ".", "serverFile", ".", "handle", ",", "nil", ",", "false", ",", "fs", ".", "crcTable", ",", "fs", ".", "opts", ".", "DoCRC", ")", "\n", "if", "err", "!=", "nil", "{", "if", "err", "==", "io", ".", "EOF", "{", "// We are done, no state recovered", "return", "nil", ",", "nil", "\n", "}", "\n", "fs", ".", "log", ".", "Errorf", "(", "\"", "\"", ",", "fs", ".", "serverFile", ".", "name", ",", "err", ")", "\n", "fs", ".", "log", ".", "Errorf", "(", "\"", "\"", ")", "\n", "return", "nil", ",", "err", "\n", "}", "\n", "// Check that the size of the file is consistent with the size", "// of the record we are supposed to recover. Account for the", "// 12 bytes (4 + recordHeaderSize) corresponding to the fileVersion and", "// record header.", "fstat", ",", "err", ":=", "fs", ".", "serverFile", ".", "handle", ".", "Stat", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "expectedSize", ":=", "int64", "(", "size", "+", "4", "+", "recordHeaderSize", ")", "\n", "if", "fstat", ".", "Size", "(", ")", "!=", "expectedSize", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "expectedSize", ",", "fstat", ".", "Size", "(", ")", ")", "\n", "}", "\n", "// Reconstruct now", "if", "err", ":=", "info", ".", "Unmarshal", "(", "buf", "[", ":", "size", "]", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "return", "info", ",", "nil", "\n", "}" ]
// recoverServerInfo reads the server file and returns a ServerInfo structure
[ "recoverServerInfo", "reads", "the", "server", "file", "and", "returns", "a", "ServerInfo", "structure" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1560-L1590
train
nats-io/nats-streaming-server
stores/filestore.go
shouldCompactClientFile
func (fs *FileStore) shouldCompactClientFile() bool { // Global switch if !fs.opts.CompactEnabled { return false } // Check that if minimum file size is set, the client file // is at least at the minimum. if fs.opts.CompactMinFileSize > 0 && fs.cliFileSize < fs.opts.CompactMinFileSize { return false } // Check fragmentation frag := fs.cliDeleteRecs * 100 / (fs.cliDeleteRecs + len(fs.clients)) if frag < fs.opts.CompactFragmentation { return false } // Check that we don't do too often if time.Since(fs.cliCompactTS) < fs.compactItvl { return false } return true }
go
func (fs *FileStore) shouldCompactClientFile() bool { // Global switch if !fs.opts.CompactEnabled { return false } // Check that if minimum file size is set, the client file // is at least at the minimum. if fs.opts.CompactMinFileSize > 0 && fs.cliFileSize < fs.opts.CompactMinFileSize { return false } // Check fragmentation frag := fs.cliDeleteRecs * 100 / (fs.cliDeleteRecs + len(fs.clients)) if frag < fs.opts.CompactFragmentation { return false } // Check that we don't do too often if time.Since(fs.cliCompactTS) < fs.compactItvl { return false } return true }
[ "func", "(", "fs", "*", "FileStore", ")", "shouldCompactClientFile", "(", ")", "bool", "{", "// Global switch", "if", "!", "fs", ".", "opts", ".", "CompactEnabled", "{", "return", "false", "\n", "}", "\n", "// Check that if minimum file size is set, the client file", "// is at least at the minimum.", "if", "fs", ".", "opts", ".", "CompactMinFileSize", ">", "0", "&&", "fs", ".", "cliFileSize", "<", "fs", ".", "opts", ".", "CompactMinFileSize", "{", "return", "false", "\n", "}", "\n", "// Check fragmentation", "frag", ":=", "fs", ".", "cliDeleteRecs", "*", "100", "/", "(", "fs", ".", "cliDeleteRecs", "+", "len", "(", "fs", ".", "clients", ")", ")", "\n", "if", "frag", "<", "fs", ".", "opts", ".", "CompactFragmentation", "{", "return", "false", "\n", "}", "\n", "// Check that we don't do too often", "if", "time", ".", "Since", "(", "fs", ".", "cliCompactTS", ")", "<", "fs", ".", "compactItvl", "{", "return", "false", "\n", "}", "\n", "return", "true", "\n", "}" ]
// shouldCompactClientFile returns true if the client file should be compacted // Lock is held by caller
[ "shouldCompactClientFile", "returns", "true", "if", "the", "client", "file", "should", "be", "compacted", "Lock", "is", "held", "by", "caller" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1704-L1724
train
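A worked example of the fragmentation check in shouldCompactClientFile above; the record counts are illustrative, not taken from the dataset.

```go
package main

import "fmt"

func main() {
	// Same integer arithmetic as shouldCompactClientFile: the percentage of
	// delete records among all client records kept in the file.
	cliDeleteRecs := 30
	liveClients := 70
	frag := cliDeleteRecs * 100 / (cliDeleteRecs + liveClients)
	// 30: compaction proceeds only if CompactFragmentation <= 30 (and
	// CompactEnabled is set, the file size and compact interval checks pass).
	fmt.Println(frag)
}
```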
nats-io/nats-streaming-server
stores/filestore.go
compactClientFile
func (fs *FileStore) compactClientFile(orgFileName string) error { // Open a temporary file tmpFile, err := getTempFile(fs.fm.rootDir, clientsFileName) if err != nil { return err } defer func() { if tmpFile != nil { tmpFile.Close() os.Remove(tmpFile.Name()) } }() bw := bufio.NewWriterSize(tmpFile, defaultBufSize) fileSize := int64(0) size := 0 _buf := [256]byte{} buf := _buf[:] // Dump the content of active clients into the temporary file. for _, c := range fs.clients { buf, size, err = writeRecord(bw, buf, addClient, &c.ClientInfo, c.ClientInfo.Size(), fs.crcTable) if err != nil { return err } fileSize += int64(size) } // Flush the buffer on disk if err := bw.Flush(); err != nil { return err } // Start by closing the temporary file. if err := tmpFile.Close(); err != nil { return err } // Rename the tmp file to original file name if err := os.Rename(tmpFile.Name(), orgFileName); err != nil { return err } // Avoid unnecessary attempt to cleanup tmpFile = nil fs.cliDeleteRecs = 0 fs.cliFileSize = fileSize fs.cliCompactTS = time.Now() return nil }
go
func (fs *FileStore) compactClientFile(orgFileName string) error { // Open a temporary file tmpFile, err := getTempFile(fs.fm.rootDir, clientsFileName) if err != nil { return err } defer func() { if tmpFile != nil { tmpFile.Close() os.Remove(tmpFile.Name()) } }() bw := bufio.NewWriterSize(tmpFile, defaultBufSize) fileSize := int64(0) size := 0 _buf := [256]byte{} buf := _buf[:] // Dump the content of active clients into the temporary file. for _, c := range fs.clients { buf, size, err = writeRecord(bw, buf, addClient, &c.ClientInfo, c.ClientInfo.Size(), fs.crcTable) if err != nil { return err } fileSize += int64(size) } // Flush the buffer on disk if err := bw.Flush(); err != nil { return err } // Start by closing the temporary file. if err := tmpFile.Close(); err != nil { return err } // Rename the tmp file to original file name if err := os.Rename(tmpFile.Name(), orgFileName); err != nil { return err } // Avoid unnecessary attempt to cleanup tmpFile = nil fs.cliDeleteRecs = 0 fs.cliFileSize = fileSize fs.cliCompactTS = time.Now() return nil }
[ "func", "(", "fs", "*", "FileStore", ")", "compactClientFile", "(", "orgFileName", "string", ")", "error", "{", "// Open a temporary file", "tmpFile", ",", "err", ":=", "getTempFile", "(", "fs", ".", "fm", ".", "rootDir", ",", "clientsFileName", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "defer", "func", "(", ")", "{", "if", "tmpFile", "!=", "nil", "{", "tmpFile", ".", "Close", "(", ")", "\n", "os", ".", "Remove", "(", "tmpFile", ".", "Name", "(", ")", ")", "\n", "}", "\n", "}", "(", ")", "\n", "bw", ":=", "bufio", ".", "NewWriterSize", "(", "tmpFile", ",", "defaultBufSize", ")", "\n", "fileSize", ":=", "int64", "(", "0", ")", "\n", "size", ":=", "0", "\n", "_buf", ":=", "[", "256", "]", "byte", "{", "}", "\n", "buf", ":=", "_buf", "[", ":", "]", "\n", "// Dump the content of active clients into the temporary file.", "for", "_", ",", "c", ":=", "range", "fs", ".", "clients", "{", "buf", ",", "size", ",", "err", "=", "writeRecord", "(", "bw", ",", "buf", ",", "addClient", ",", "&", "c", ".", "ClientInfo", ",", "c", ".", "ClientInfo", ".", "Size", "(", ")", ",", "fs", ".", "crcTable", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "fileSize", "+=", "int64", "(", "size", ")", "\n", "}", "\n", "// Flush the buffer on disk", "if", "err", ":=", "bw", ".", "Flush", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// Start by closing the temporary file.", "if", "err", ":=", "tmpFile", ".", "Close", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// Rename the tmp file to original file name", "if", "err", ":=", "os", ".", "Rename", "(", "tmpFile", ".", "Name", "(", ")", ",", "orgFileName", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// Avoid unnecessary attempt to cleanup", "tmpFile", "=", "nil", "\n\n", "fs", ".", "cliDeleteRecs", "=", "0", "\n", "fs", ".", "cliFileSize", "=", "fileSize", "\n", "fs", ".", "cliCompactTS", "=", "time", ".", "Now", "(", ")", "\n", "return", "nil", "\n", "}" ]
// Rewrite the content of the clients map into a temporary file, // then swap back to active file. // Store lock held on entry
[ "Rewrite", "the", "content", "of", "the", "clients", "map", "into", "a", "temporary", "file", "then", "swap", "back", "to", "active", "file", ".", "Store", "lock", "held", "on", "entry" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1729-L1773
train
nats-io/nats-streaming-server
stores/filestore.go
Close
func (fs *FileStore) Close() error { fs.Lock() if fs.closed { fs.Unlock() return nil } fs.closed = true err := fs.genericStore.close() fm := fs.fm lockFile := fs.lockFile fs.Unlock() if fm != nil { if fmerr := fm.close(); fmerr != nil && err == nil { err = fmerr } } if lockFile != nil { err = util.CloseFile(err, lockFile) } return err }
go
func (fs *FileStore) Close() error { fs.Lock() if fs.closed { fs.Unlock() return nil } fs.closed = true err := fs.genericStore.close() fm := fs.fm lockFile := fs.lockFile fs.Unlock() if fm != nil { if fmerr := fm.close(); fmerr != nil && err == nil { err = fmerr } } if lockFile != nil { err = util.CloseFile(err, lockFile) } return err }
[ "func", "(", "fs", "*", "FileStore", ")", "Close", "(", ")", "error", "{", "fs", ".", "Lock", "(", ")", "\n", "if", "fs", ".", "closed", "{", "fs", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}", "\n", "fs", ".", "closed", "=", "true", "\n\n", "err", ":=", "fs", ".", "genericStore", ".", "close", "(", ")", "\n\n", "fm", ":=", "fs", ".", "fm", "\n", "lockFile", ":=", "fs", ".", "lockFile", "\n", "fs", ".", "Unlock", "(", ")", "\n\n", "if", "fm", "!=", "nil", "{", "if", "fmerr", ":=", "fm", ".", "close", "(", ")", ";", "fmerr", "!=", "nil", "&&", "err", "==", "nil", "{", "err", "=", "fmerr", "\n", "}", "\n", "}", "\n", "if", "lockFile", "!=", "nil", "{", "err", "=", "util", ".", "CloseFile", "(", "err", ",", "lockFile", ")", "\n", "}", "\n", "return", "err", "\n", "}" ]
// Close closes all stores.
[ "Close", "closes", "all", "stores", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L1788-L1811
train
nats-io/nats-streaming-server
stores/filestore.go
beforeDataFileCloseCb
func (ms *FileMsgStore) beforeDataFileCloseCb(fslice *fileSlice) beforeFileClose { return func() error { if fslice != ms.writeSlice { return nil } if ms.bw != nil && ms.bw.buf != nil && ms.bw.buf.Buffered() > 0 { if err := ms.bw.buf.Flush(); err != nil { return err } } if ms.fstore.opts.DoSync { if err := fslice.file.handle.Sync(); err != nil { return err } } ms.writer = nil return nil } }
go
func (ms *FileMsgStore) beforeDataFileCloseCb(fslice *fileSlice) beforeFileClose { return func() error { if fslice != ms.writeSlice { return nil } if ms.bw != nil && ms.bw.buf != nil && ms.bw.buf.Buffered() > 0 { if err := ms.bw.buf.Flush(); err != nil { return err } } if ms.fstore.opts.DoSync { if err := fslice.file.handle.Sync(); err != nil { return err } } ms.writer = nil return nil } }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "beforeDataFileCloseCb", "(", "fslice", "*", "fileSlice", ")", "beforeFileClose", "{", "return", "func", "(", ")", "error", "{", "if", "fslice", "!=", "ms", ".", "writeSlice", "{", "return", "nil", "\n", "}", "\n", "if", "ms", ".", "bw", "!=", "nil", "&&", "ms", ".", "bw", ".", "buf", "!=", "nil", "&&", "ms", ".", "bw", ".", "buf", ".", "Buffered", "(", ")", ">", "0", "{", "if", "err", ":=", "ms", ".", "bw", ".", "buf", ".", "Flush", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "if", "ms", ".", "fstore", ".", "opts", ".", "DoSync", "{", "if", "err", ":=", "fslice", ".", "file", ".", "handle", ".", "Sync", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "ms", ".", "writer", "=", "nil", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// beforeDataFileCloseCb returns a beforeFileClose callback to be used // by FileMsgStore's files when a data file for that slice is being closed. // This is invoked asynchronously and should not acquire the store's lock. // That being said, we have the guarantee that this will not be invoked // concurrently for a given file and that the store will not be using this file.
[ "beforeDataFileCloseCb", "returns", "a", "beforeFileClose", "callback", "to", "be", "used", "by", "FileMsgStore", "s", "files", "when", "a", "data", "file", "for", "that", "slice", "is", "being", "closed", ".", "This", "is", "invoked", "asynchronously", "and", "should", "not", "acquire", "the", "store", "s", "lock", ".", "That", "being", "said", "we", "have", "the", "guarantee", "that", "this", "will", "not", "be", "invoked", "concurrently", "for", "a", "given", "file", "and", "that", "the", "store", "will", "not", "be", "using", "this", "file", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2069-L2087
train
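The callback above is a closure that flushes any buffered bytes and optionally fsyncs before the file manager closes the file. A simplified standalone sketch of that shape (type and function names are hypothetical):

package main

import (
    "bufio"
    "fmt"
    "os"
)

type beforeFileClose func() error

// makeBeforeCloseCb returns a hook that flushes the buffered writer and,
// if requested, syncs the underlying file before it gets closed.
func makeBeforeCloseCb(bw *bufio.Writer, f *os.File, doSync bool) beforeFileClose {
    return func() error {
        if bw != nil && bw.Buffered() > 0 {
            if err := bw.Flush(); err != nil {
                return err
            }
        }
        if doSync {
            return f.Sync()
        }
        return nil
    }
}

func main() {
    f, _ := os.CreateTemp("", "data")
    defer os.Remove(f.Name())
    bw := bufio.NewWriter(f)
    bw.WriteString("pending record")
    cb := makeBeforeCloseCb(bw, f, true)
    fmt.Println(cb(), f.Close())
}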
nats-io/nats-streaming-server
stores/filestore.go
beforeIndexFileCloseCb
func (ms *FileMsgStore) beforeIndexFileCloseCb(fslice *fileSlice) beforeFileClose { return func() error { if fslice != ms.writeSlice { return nil } if len(ms.bufferedMsgs) > 0 { if err := ms.processBufferedMsgs(fslice); err != nil { return err } } if ms.fstore.opts.DoSync { if err := fslice.idxFile.handle.Sync(); err != nil { return err } } return nil } }
go
func (ms *FileMsgStore) beforeIndexFileCloseCb(fslice *fileSlice) beforeFileClose { return func() error { if fslice != ms.writeSlice { return nil } if len(ms.bufferedMsgs) > 0 { if err := ms.processBufferedMsgs(fslice); err != nil { return err } } if ms.fstore.opts.DoSync { if err := fslice.idxFile.handle.Sync(); err != nil { return err } } return nil } }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "beforeIndexFileCloseCb", "(", "fslice", "*", "fileSlice", ")", "beforeFileClose", "{", "return", "func", "(", ")", "error", "{", "if", "fslice", "!=", "ms", ".", "writeSlice", "{", "return", "nil", "\n", "}", "\n", "if", "len", "(", "ms", ".", "bufferedMsgs", ")", ">", "0", "{", "if", "err", ":=", "ms", ".", "processBufferedMsgs", "(", "fslice", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "if", "ms", ".", "fstore", ".", "opts", ".", "DoSync", "{", "if", "err", ":=", "fslice", ".", "idxFile", ".", "handle", ".", "Sync", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// beforeIndexFileCloseCb returns a beforeFileClose callback to be used // by FileMsgStore's files when an index file for that slice is being closed. // This is invoked asynchronously and should not acquire the store's lock. // That being said, we have the guarantee that this will not be invoked // concurrently for a given file and that the store will not be using this file.
[ "beforeIndexFileCloseCb", "returns", "a", "beforeFileClose", "callback", "to", "be", "used", "by", "FileMsgStore", "s", "files", "when", "an", "index", "file", "for", "that", "slice", "is", "being", "closed", ".", "This", "is", "invoked", "asynchronously", "and", "should", "not", "acquire", "the", "store", "s", "lock", ".", "That", "being", "said", "we", "have", "the", "guarantee", "that", "this", "will", "not", "be", "invoked", "concurrently", "for", "a", "given", "file", "and", "that", "the", "store", "will", "not", "be", "using", "this", "file", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2094-L2111
train
nats-io/nats-streaming-server
stores/filestore.go
setFile
func (ms *FileMsgStore) setFile(fslice *fileSlice, offset int64) error { var err error file := fslice.file.handle ms.writer = file if file != nil && ms.bw != nil { ms.writer = ms.bw.createNewWriter(file) } if offset == -1 { ms.wOffset, err = file.Seek(0, io.SeekEnd) } else { ms.wOffset = offset } return err }
go
func (ms *FileMsgStore) setFile(fslice *fileSlice, offset int64) error { var err error file := fslice.file.handle ms.writer = file if file != nil && ms.bw != nil { ms.writer = ms.bw.createNewWriter(file) } if offset == -1 { ms.wOffset, err = file.Seek(0, io.SeekEnd) } else { ms.wOffset = offset } return err }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "setFile", "(", "fslice", "*", "fileSlice", ",", "offset", "int64", ")", "error", "{", "var", "err", "error", "\n", "file", ":=", "fslice", ".", "file", ".", "handle", "\n", "ms", ".", "writer", "=", "file", "\n", "if", "file", "!=", "nil", "&&", "ms", ".", "bw", "!=", "nil", "{", "ms", ".", "writer", "=", "ms", ".", "bw", ".", "createNewWriter", "(", "file", ")", "\n", "}", "\n", "if", "offset", "==", "-", "1", "{", "ms", ".", "wOffset", ",", "err", "=", "file", ".", "Seek", "(", "0", ",", "io", ".", "SeekEnd", ")", "\n", "}", "else", "{", "ms", ".", "wOffset", "=", "offset", "\n", "}", "\n", "return", "err", "\n", "}" ]
// setFile sets the current data and index file. // The buffered writer is recreated.
[ "setFile", "sets", "the", "current", "data", "and", "index", "file", ".", "The", "buffered", "writer", "is", "recreated", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2115-L2128
train
nats-io/nats-streaming-server
stores/filestore.go
lockFiles
func (ms *FileMsgStore) lockFiles(fslice *fileSlice) error { return ms.doLockFiles(fslice, false) }
go
func (ms *FileMsgStore) lockFiles(fslice *fileSlice) error { return ms.doLockFiles(fslice, false) }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "lockFiles", "(", "fslice", "*", "fileSlice", ")", "error", "{", "return", "ms", ".", "doLockFiles", "(", "fslice", ",", "false", ")", "\n", "}" ]
// lockFiles locks the data and index files of the given file slice. // If files were closed they are opened in this call, and if so, // and if this slice is the write slice, the writer and offset are reset.
[ "lockFiles", "locks", "the", "data", "and", "index", "files", "of", "the", "given", "file", "slice", ".", "If", "files", "were", "closed", "they", "are", "opened", "in", "this", "call", "and", "if", "so", "and", "if", "this", "slice", "is", "the", "write", "slice", "the", "writer", "and", "offset", "are", "reset", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2174-L2176
train
nats-io/nats-streaming-server
stores/filestore.go
lockIndexFile
func (ms *FileMsgStore) lockIndexFile(fslice *fileSlice) error { return ms.doLockFiles(fslice, true) }
go
func (ms *FileMsgStore) lockIndexFile(fslice *fileSlice) error { return ms.doLockFiles(fslice, true) }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "lockIndexFile", "(", "fslice", "*", "fileSlice", ")", "error", "{", "return", "ms", ".", "doLockFiles", "(", "fslice", ",", "true", ")", "\n", "}" ]
// lockIndexFile locks the index file of the given file slice. // If the file was closed it is opened in this call.
[ "lockIndexFile", "locks", "the", "index", "file", "of", "the", "given", "file", "slice", ".", "If", "the", "file", "was", "closed", "it", "is", "opened", "in", "this", "call", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2180-L2182
train
nats-io/nats-streaming-server
stores/filestore.go
unlockIndexFile
func (ms *FileMsgStore) unlockIndexFile(fslice *fileSlice) { ms.fm.unlockFile(fslice.idxFile) }
go
func (ms *FileMsgStore) unlockIndexFile(fslice *fileSlice) { ms.fm.unlockFile(fslice.idxFile) }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "unlockIndexFile", "(", "fslice", "*", "fileSlice", ")", "{", "ms", ".", "fm", ".", "unlockFile", "(", "fslice", ".", "idxFile", ")", "\n", "}" ]
// unlockIndexFile unlocks the already locked index file of the given file slice.
[ "unlockIndexFile", "unlocks", "the", "already", "locked", "index", "file", "of", "the", "given", "file", "slice", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2185-L2187
train
nats-io/nats-streaming-server
stores/filestore.go
unlockFiles
func (ms *FileMsgStore) unlockFiles(fslice *fileSlice) { ms.fm.unlockFile(fslice.file) ms.fm.unlockFile(fslice.idxFile) }
go
func (ms *FileMsgStore) unlockFiles(fslice *fileSlice) { ms.fm.unlockFile(fslice.file) ms.fm.unlockFile(fslice.idxFile) }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "unlockFiles", "(", "fslice", "*", "fileSlice", ")", "{", "ms", ".", "fm", ".", "unlockFile", "(", "fslice", ".", "file", ")", "\n", "ms", ".", "fm", ".", "unlockFile", "(", "fslice", ".", "idxFile", ")", "\n", "}" ]
// unlockFiles unlocks both data and index files of the given file slice.
[ "unlockFiles", "unlocks", "both", "data", "and", "index", "files", "of", "the", "given", "file", "slice", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2190-L2193
train
nats-io/nats-streaming-server
stores/filestore.go
writeIndex
func (ms *FileMsgStore) writeIndex(w io.Writer, seq uint64, offset, timestamp int64, msgSize int) error { _buf := [msgIndexRecSize]byte{} buf := _buf[:] ms.addIndex(buf, seq, offset, timestamp, msgSize) _, err := w.Write(buf[:msgIndexRecSize]) return err }
go
func (ms *FileMsgStore) writeIndex(w io.Writer, seq uint64, offset, timestamp int64, msgSize int) error { _buf := [msgIndexRecSize]byte{} buf := _buf[:] ms.addIndex(buf, seq, offset, timestamp, msgSize) _, err := w.Write(buf[:msgIndexRecSize]) return err }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "writeIndex", "(", "w", "io", ".", "Writer", ",", "seq", "uint64", ",", "offset", ",", "timestamp", "int64", ",", "msgSize", "int", ")", "error", "{", "_buf", ":=", "[", "msgIndexRecSize", "]", "byte", "{", "}", "\n", "buf", ":=", "_buf", "[", ":", "]", "\n", "ms", ".", "addIndex", "(", "buf", ",", "seq", ",", "offset", ",", "timestamp", ",", "msgSize", ")", "\n", "_", ",", "err", ":=", "w", ".", "Write", "(", "buf", "[", ":", "msgIndexRecSize", "]", ")", "\n", "return", "err", "\n", "}" ]
// writeIndex writes a message index record to the writer `w`
[ "writeIndex", "writes", "a", "message", "index", "record", "to", "the", "writer", "w" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2480-L2486
train
nats-io/nats-streaming-server
stores/filestore.go
addIndex
func (ms *FileMsgStore) addIndex(buf []byte, seq uint64, offset, timestamp int64, msgSize int) { util.ByteOrder.PutUint64(buf, seq) util.ByteOrder.PutUint64(buf[8:], uint64(offset)) util.ByteOrder.PutUint64(buf[16:], uint64(timestamp)) util.ByteOrder.PutUint32(buf[24:], uint32(msgSize)) crc := crc32.Checksum(buf[:msgIndexRecSize-crcSize], ms.fstore.crcTable) util.ByteOrder.PutUint32(buf[msgIndexRecSize-crcSize:], crc) }
go
func (ms *FileMsgStore) addIndex(buf []byte, seq uint64, offset, timestamp int64, msgSize int) { util.ByteOrder.PutUint64(buf, seq) util.ByteOrder.PutUint64(buf[8:], uint64(offset)) util.ByteOrder.PutUint64(buf[16:], uint64(timestamp)) util.ByteOrder.PutUint32(buf[24:], uint32(msgSize)) crc := crc32.Checksum(buf[:msgIndexRecSize-crcSize], ms.fstore.crcTable) util.ByteOrder.PutUint32(buf[msgIndexRecSize-crcSize:], crc) }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "addIndex", "(", "buf", "[", "]", "byte", ",", "seq", "uint64", ",", "offset", ",", "timestamp", "int64", ",", "msgSize", "int", ")", "{", "util", ".", "ByteOrder", ".", "PutUint64", "(", "buf", ",", "seq", ")", "\n", "util", ".", "ByteOrder", ".", "PutUint64", "(", "buf", "[", "8", ":", "]", ",", "uint64", "(", "offset", ")", ")", "\n", "util", ".", "ByteOrder", ".", "PutUint64", "(", "buf", "[", "16", ":", "]", ",", "uint64", "(", "timestamp", ")", ")", "\n", "util", ".", "ByteOrder", ".", "PutUint32", "(", "buf", "[", "24", ":", "]", ",", "uint32", "(", "msgSize", ")", ")", "\n", "crc", ":=", "crc32", ".", "Checksum", "(", "buf", "[", ":", "msgIndexRecSize", "-", "crcSize", "]", ",", "ms", ".", "fstore", ".", "crcTable", ")", "\n", "util", ".", "ByteOrder", ".", "PutUint32", "(", "buf", "[", "msgIndexRecSize", "-", "crcSize", ":", "]", ",", "crc", ")", "\n", "}" ]
// addIndex adds a message index record in the given buffer
[ "addIndex", "adds", "a", "message", "index", "record", "in", "the", "given", "buffer" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2489-L2496
train
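The index record written above is a fixed 32-byte layout: sequence (8 bytes), data-file offset (8), timestamp (8), message size (4) and a trailing CRC-32 (4). Below is a standalone sketch of encoding such a record; the little-endian byte order and IEEE CRC polynomial are assumptions made for the example, since the store uses its own byte-order helper and a configurable CRC table.

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
)

const msgIndexRecSize = 32
const crcSize = 4

// encodeIndex lays out one index record and appends a CRC over the first 28 bytes.
func encodeIndex(buf []byte, seq uint64, offset, timestamp int64, msgSize uint32) {
    binary.LittleEndian.PutUint64(buf, seq)
    binary.LittleEndian.PutUint64(buf[8:], uint64(offset))
    binary.LittleEndian.PutUint64(buf[16:], uint64(timestamp))
    binary.LittleEndian.PutUint32(buf[24:], msgSize)
    crc := crc32.ChecksumIEEE(buf[:msgIndexRecSize-crcSize])
    binary.LittleEndian.PutUint32(buf[msgIndexRecSize-crcSize:], crc)
}

func main() {
    buf := make([]byte, msgIndexRecSize)
    encodeIndex(buf, 42, 1024, 1700000000, 512)
    fmt.Printf("% x\n", buf)
}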
nats-io/nats-streaming-server
stores/filestore.go
readIndex
func (ms *FileMsgStore) readIndex(r io.Reader) (uint64, *msgIndex, error) { _buf := [msgIndexRecSize]byte{} buf := _buf[:] if _, err := io.ReadFull(r, buf); err != nil { return 0, nil, err } mindex := &msgIndex{} seq := util.ByteOrder.Uint64(buf) mindex.offset = int64(util.ByteOrder.Uint64(buf[8:])) mindex.timestamp = int64(util.ByteOrder.Uint64(buf[16:])) mindex.msgSize = util.ByteOrder.Uint32(buf[24:]) // If all zeros, return that caller should rewind (for recovery) if seq == 0 && mindex.offset == 0 && mindex.timestamp == 0 && mindex.msgSize == 0 { storedCRC := util.ByteOrder.Uint32(buf[msgIndexRecSize-crcSize:]) if storedCRC == 0 { return 0, nil, errNeedRewind } } if ms.fstore.opts.DoCRC { storedCRC := util.ByteOrder.Uint32(buf[msgIndexRecSize-crcSize:]) crc := crc32.Checksum(buf[:msgIndexRecSize-crcSize], ms.fstore.crcTable) if storedCRC != crc { return 0, nil, fmt.Errorf("corrupted data, expected crc to be 0x%08x, got 0x%08x", storedCRC, crc) } } return seq, mindex, nil }
go
func (ms *FileMsgStore) readIndex(r io.Reader) (uint64, *msgIndex, error) { _buf := [msgIndexRecSize]byte{} buf := _buf[:] if _, err := io.ReadFull(r, buf); err != nil { return 0, nil, err } mindex := &msgIndex{} seq := util.ByteOrder.Uint64(buf) mindex.offset = int64(util.ByteOrder.Uint64(buf[8:])) mindex.timestamp = int64(util.ByteOrder.Uint64(buf[16:])) mindex.msgSize = util.ByteOrder.Uint32(buf[24:]) // If all zeros, return that caller should rewind (for recovery) if seq == 0 && mindex.offset == 0 && mindex.timestamp == 0 && mindex.msgSize == 0 { storedCRC := util.ByteOrder.Uint32(buf[msgIndexRecSize-crcSize:]) if storedCRC == 0 { return 0, nil, errNeedRewind } } if ms.fstore.opts.DoCRC { storedCRC := util.ByteOrder.Uint32(buf[msgIndexRecSize-crcSize:]) crc := crc32.Checksum(buf[:msgIndexRecSize-crcSize], ms.fstore.crcTable) if storedCRC != crc { return 0, nil, fmt.Errorf("corrupted data, expected crc to be 0x%08x, got 0x%08x", storedCRC, crc) } } return seq, mindex, nil }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "readIndex", "(", "r", "io", ".", "Reader", ")", "(", "uint64", ",", "*", "msgIndex", ",", "error", ")", "{", "_buf", ":=", "[", "msgIndexRecSize", "]", "byte", "{", "}", "\n", "buf", ":=", "_buf", "[", ":", "]", "\n", "if", "_", ",", "err", ":=", "io", ".", "ReadFull", "(", "r", ",", "buf", ")", ";", "err", "!=", "nil", "{", "return", "0", ",", "nil", ",", "err", "\n", "}", "\n", "mindex", ":=", "&", "msgIndex", "{", "}", "\n", "seq", ":=", "util", ".", "ByteOrder", ".", "Uint64", "(", "buf", ")", "\n", "mindex", ".", "offset", "=", "int64", "(", "util", ".", "ByteOrder", ".", "Uint64", "(", "buf", "[", "8", ":", "]", ")", ")", "\n", "mindex", ".", "timestamp", "=", "int64", "(", "util", ".", "ByteOrder", ".", "Uint64", "(", "buf", "[", "16", ":", "]", ")", ")", "\n", "mindex", ".", "msgSize", "=", "util", ".", "ByteOrder", ".", "Uint32", "(", "buf", "[", "24", ":", "]", ")", "\n", "// If all zeros, return that caller should rewind (for recovery)", "if", "seq", "==", "0", "&&", "mindex", ".", "offset", "==", "0", "&&", "mindex", ".", "timestamp", "==", "0", "&&", "mindex", ".", "msgSize", "==", "0", "{", "storedCRC", ":=", "util", ".", "ByteOrder", ".", "Uint32", "(", "buf", "[", "msgIndexRecSize", "-", "crcSize", ":", "]", ")", "\n", "if", "storedCRC", "==", "0", "{", "return", "0", ",", "nil", ",", "errNeedRewind", "\n", "}", "\n", "}", "\n", "if", "ms", ".", "fstore", ".", "opts", ".", "DoCRC", "{", "storedCRC", ":=", "util", ".", "ByteOrder", ".", "Uint32", "(", "buf", "[", "msgIndexRecSize", "-", "crcSize", ":", "]", ")", "\n", "crc", ":=", "crc32", ".", "Checksum", "(", "buf", "[", ":", "msgIndexRecSize", "-", "crcSize", "]", ",", "ms", ".", "fstore", ".", "crcTable", ")", "\n", "if", "storedCRC", "!=", "crc", "{", "return", "0", ",", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "storedCRC", ",", "crc", ")", "\n", "}", "\n", "}", "\n", "return", "seq", ",", "mindex", ",", "nil", "\n", "}" ]
// readIndex reads a message index record from the given reader // and returns an allocated msgIndex object.
[ "readIndex", "reads", "a", "message", "index", "record", "from", "the", "given", "reader", "and", "returns", "an", "allocated", "msgIndex", "object", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2500-L2526
train
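A companion sketch for the read side: decode the same 32-byte record, treat an all-zero record as the "rewind" signal used during recovery, and verify the CRC. The layout and byte order are the same illustrative assumptions as in the encoding sketch above.

package main

import (
    "encoding/binary"
    "errors"
    "fmt"
    "hash/crc32"
)

const recSize = 32
const crcLen = 4

var errNeedRewind = errors.New("unexpected zero record, rewind")

// decodeIndex parses one fixed-size index record and validates its checksum.
func decodeIndex(buf []byte) (seq uint64, offset, timestamp int64, msgSize uint32, err error) {
    seq = binary.LittleEndian.Uint64(buf)
    offset = int64(binary.LittleEndian.Uint64(buf[8:]))
    timestamp = int64(binary.LittleEndian.Uint64(buf[16:]))
    msgSize = binary.LittleEndian.Uint32(buf[24:])
    stored := binary.LittleEndian.Uint32(buf[recSize-crcLen:])
    // An all-zero record means the writer never got to fill this slot.
    if seq == 0 && offset == 0 && timestamp == 0 && msgSize == 0 && stored == 0 {
        return 0, 0, 0, 0, errNeedRewind
    }
    if crc := crc32.ChecksumIEEE(buf[:recSize-crcLen]); crc != stored {
        return 0, 0, 0, 0, fmt.Errorf("corrupted record: crc 0x%08x != 0x%08x", stored, crc)
    }
    return seq, offset, timestamp, msgSize, nil
}

func main() {
    _, _, _, _, err := decodeIndex(make([]byte, recSize)) // all zeros
    fmt.Println(err)                                      // rewind signal
}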
nats-io/nats-streaming-server
stores/filestore.go
processBufferedMsgs
func (ms *FileMsgStore) processBufferedMsgs(fslice *fileSlice) error { idxBufferSize := len(ms.bufferedMsgs) * msgIndexRecSize ms.tmpMsgBuf = util.EnsureBufBigEnough(ms.tmpMsgBuf, idxBufferSize) bufOffset := 0 for _, pseq := range ms.bufferedSeqs { bm := ms.bufferedMsgs[pseq] if bm != nil { mindex := bm.index // We add the index info for this flushed message ms.addIndex(ms.tmpMsgBuf[bufOffset:], pseq, mindex.offset, mindex.timestamp, int(mindex.msgSize)) bufOffset += msgIndexRecSize delete(ms.bufferedMsgs, pseq) } } if bufOffset > 0 { if _, err := fslice.idxFile.handle.Write(ms.tmpMsgBuf[:bufOffset]); err != nil { return err } } ms.bufferedSeqs = ms.bufferedSeqs[:0] return nil }
go
func (ms *FileMsgStore) processBufferedMsgs(fslice *fileSlice) error { idxBufferSize := len(ms.bufferedMsgs) * msgIndexRecSize ms.tmpMsgBuf = util.EnsureBufBigEnough(ms.tmpMsgBuf, idxBufferSize) bufOffset := 0 for _, pseq := range ms.bufferedSeqs { bm := ms.bufferedMsgs[pseq] if bm != nil { mindex := bm.index // We add the index info for this flushed message ms.addIndex(ms.tmpMsgBuf[bufOffset:], pseq, mindex.offset, mindex.timestamp, int(mindex.msgSize)) bufOffset += msgIndexRecSize delete(ms.bufferedMsgs, pseq) } } if bufOffset > 0 { if _, err := fslice.idxFile.handle.Write(ms.tmpMsgBuf[:bufOffset]); err != nil { return err } } ms.bufferedSeqs = ms.bufferedSeqs[:0] return nil }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "processBufferedMsgs", "(", "fslice", "*", "fileSlice", ")", "error", "{", "idxBufferSize", ":=", "len", "(", "ms", ".", "bufferedMsgs", ")", "*", "msgIndexRecSize", "\n", "ms", ".", "tmpMsgBuf", "=", "util", ".", "EnsureBufBigEnough", "(", "ms", ".", "tmpMsgBuf", ",", "idxBufferSize", ")", "\n", "bufOffset", ":=", "0", "\n", "for", "_", ",", "pseq", ":=", "range", "ms", ".", "bufferedSeqs", "{", "bm", ":=", "ms", ".", "bufferedMsgs", "[", "pseq", "]", "\n", "if", "bm", "!=", "nil", "{", "mindex", ":=", "bm", ".", "index", "\n", "// We add the index info for this flushed message", "ms", ".", "addIndex", "(", "ms", ".", "tmpMsgBuf", "[", "bufOffset", ":", "]", ",", "pseq", ",", "mindex", ".", "offset", ",", "mindex", ".", "timestamp", ",", "int", "(", "mindex", ".", "msgSize", ")", ")", "\n", "bufOffset", "+=", "msgIndexRecSize", "\n", "delete", "(", "ms", ".", "bufferedMsgs", ",", "pseq", ")", "\n", "}", "\n", "}", "\n", "if", "bufOffset", ">", "0", "{", "if", "_", ",", "err", ":=", "fslice", ".", "idxFile", ".", "handle", ".", "Write", "(", "ms", ".", "tmpMsgBuf", "[", ":", "bufOffset", "]", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "ms", ".", "bufferedSeqs", "=", "ms", ".", "bufferedSeqs", "[", ":", "0", "]", "\n", "return", "nil", "\n", "}" ]
// processBufferedMsgs adds message index records in the given buffer // for every pending buffered message.
[ "processBufferedMsgs", "adds", "message", "index", "records", "in", "the", "given", "buffer", "for", "every", "pending", "buffered", "message", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2767-L2789
train
nats-io/nats-streaming-server
stores/filestore.go
readMsgIndex
func (ms *FileMsgStore) readMsgIndex(slice *fileSlice, seq uint64) (*msgIndex, error) { // Compute the offset in the index file itself. idxFileOffset := 4 + (int64(seq-slice.firstSeq)+int64(slice.rmCount))*msgIndexRecSize // Then position the file pointer of the index file. if _, err := slice.idxFile.handle.Seek(idxFileOffset, io.SeekStart); err != nil { return nil, err } // Read the index record and ensure we have what we expect seqInIndexFile, msgIndex, err := ms.readIndex(slice.idxFile.handle) if err != nil { return nil, err } if seqInIndexFile != seq { return nil, fmt.Errorf("wrong sequence, wanted %v got %v", seq, seqInIndexFile) } return msgIndex, nil }
go
func (ms *FileMsgStore) readMsgIndex(slice *fileSlice, seq uint64) (*msgIndex, error) { // Compute the offset in the index file itself. idxFileOffset := 4 + (int64(seq-slice.firstSeq)+int64(slice.rmCount))*msgIndexRecSize // Then position the file pointer of the index file. if _, err := slice.idxFile.handle.Seek(idxFileOffset, io.SeekStart); err != nil { return nil, err } // Read the index record and ensure we have what we expect seqInIndexFile, msgIndex, err := ms.readIndex(slice.idxFile.handle) if err != nil { return nil, err } if seqInIndexFile != seq { return nil, fmt.Errorf("wrong sequence, wanted %v got %v", seq, seqInIndexFile) } return msgIndex, nil }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "readMsgIndex", "(", "slice", "*", "fileSlice", ",", "seq", "uint64", ")", "(", "*", "msgIndex", ",", "error", ")", "{", "// Compute the offset in the index file itself.", "idxFileOffset", ":=", "4", "+", "(", "int64", "(", "seq", "-", "slice", ".", "firstSeq", ")", "+", "int64", "(", "slice", ".", "rmCount", ")", ")", "*", "msgIndexRecSize", "\n", "// Then position the file pointer of the index file.", "if", "_", ",", "err", ":=", "slice", ".", "idxFile", ".", "handle", ".", "Seek", "(", "idxFileOffset", ",", "io", ".", "SeekStart", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "// Read the index record and ensure we have what we expect", "seqInIndexFile", ",", "msgIndex", ",", "err", ":=", "ms", ".", "readIndex", "(", "slice", ".", "idxFile", ".", "handle", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "if", "seqInIndexFile", "!=", "seq", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "seq", ",", "seqInIndexFile", ")", "\n", "}", "\n", "return", "msgIndex", ",", "nil", "\n", "}" ]
// readMsgIndex reads a message index record from disk and returns a msgIndex // object. Same as getMsgIndex, but without checking for the message in // ms.bufferedMsgs first.
[ "readMsgIndex", "reads", "a", "message", "index", "record", "from", "disk", "and", "returns", "a", "msgIndex", "object", ".", "Same", "as", "getMsgIndex", "but", "without", "checking", "for", "the", "message", "in", "ms", ".", "bufferedMsgs", "first", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2912-L2928
train
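The seek position computed above relies on the index file being a 4-byte header followed by fixed-size records, with removed messages still occupying their slots. A tiny standalone sketch of that arithmetic (the constants mirror the 32-byte record used in these examples):

package main

import "fmt"

// idxFileOffset returns the byte offset of the record for seq inside the index file.
func idxFileOffset(seq, firstSeq, rmCount uint64) int64 {
    const hdr, recSize = 4, 32
    return hdr + (int64(seq-firstSeq)+int64(rmCount))*recSize
}

func main() {
    // Message 105 in a slice whose first sequence is 100, with 2 removed messages.
    fmt.Println(idxFileOffset(105, 100, 2)) // 4 + 7*32 = 228
}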
nats-io/nats-streaming-server
stores/filestore.go
removeFirstMsg
func (ms *FileMsgStore) removeFirstMsg(mindex *msgIndex, lockFile bool) error { // Work with the first slice slice := ms.files[ms.firstFSlSeq] // Get the message index for the first valid message in this slice if mindex == nil { if lockFile || slice != ms.writeSlice { ms.lockIndexFile(slice) } var err error mindex, err = ms.getMsgIndex(slice, slice.firstSeq) if lockFile || slice != ms.writeSlice { ms.unlockIndexFile(slice) } if err != nil { return err } } // Size of the first message in this slice firstMsgSize := mindex.msgSize // For size, we count the size of serialized message + record header + // the corresponding index record size := uint64(firstMsgSize + msgRecordOverhead) // Keep track of number of "removed" messages in this slice slice.rmCount++ // Update total counts ms.totalCount-- ms.totalBytes -= size // Messages sequence is incremental with no gap on a given msgstore. ms.first++ // Invalidate ms.firstMsg, it will be looked-up on demand. ms.firstMsg = nil // Invalidate ms.lastMsg if it was the last message being removed. if ms.first > ms.last { ms.lastMsg = nil } // Is file slice is "empty" and not the last one if slice.msgsCount == slice.rmCount && len(ms.files) > 1 { ms.removeFirstSlice() } else { // This is the new first message in this slice. slice.firstSeq = ms.first } return nil }
go
func (ms *FileMsgStore) removeFirstMsg(mindex *msgIndex, lockFile bool) error { // Work with the first slice slice := ms.files[ms.firstFSlSeq] // Get the message index for the first valid message in this slice if mindex == nil { if lockFile || slice != ms.writeSlice { ms.lockIndexFile(slice) } var err error mindex, err = ms.getMsgIndex(slice, slice.firstSeq) if lockFile || slice != ms.writeSlice { ms.unlockIndexFile(slice) } if err != nil { return err } } // Size of the first message in this slice firstMsgSize := mindex.msgSize // For size, we count the size of serialized message + record header + // the corresponding index record size := uint64(firstMsgSize + msgRecordOverhead) // Keep track of number of "removed" messages in this slice slice.rmCount++ // Update total counts ms.totalCount-- ms.totalBytes -= size // Messages sequence is incremental with no gap on a given msgstore. ms.first++ // Invalidate ms.firstMsg, it will be looked-up on demand. ms.firstMsg = nil // Invalidate ms.lastMsg if it was the last message being removed. if ms.first > ms.last { ms.lastMsg = nil } // Is file slice is "empty" and not the last one if slice.msgsCount == slice.rmCount && len(ms.files) > 1 { ms.removeFirstSlice() } else { // This is the new first message in this slice. slice.firstSeq = ms.first } return nil }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "removeFirstMsg", "(", "mindex", "*", "msgIndex", ",", "lockFile", "bool", ")", "error", "{", "// Work with the first slice", "slice", ":=", "ms", ".", "files", "[", "ms", ".", "firstFSlSeq", "]", "\n", "// Get the message index for the first valid message in this slice", "if", "mindex", "==", "nil", "{", "if", "lockFile", "||", "slice", "!=", "ms", ".", "writeSlice", "{", "ms", ".", "lockIndexFile", "(", "slice", ")", "\n", "}", "\n", "var", "err", "error", "\n", "mindex", ",", "err", "=", "ms", ".", "getMsgIndex", "(", "slice", ",", "slice", ".", "firstSeq", ")", "\n", "if", "lockFile", "||", "slice", "!=", "ms", ".", "writeSlice", "{", "ms", ".", "unlockIndexFile", "(", "slice", ")", "\n", "}", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "// Size of the first message in this slice", "firstMsgSize", ":=", "mindex", ".", "msgSize", "\n", "// For size, we count the size of serialized message + record header +", "// the corresponding index record", "size", ":=", "uint64", "(", "firstMsgSize", "+", "msgRecordOverhead", ")", "\n", "// Keep track of number of \"removed\" messages in this slice", "slice", ".", "rmCount", "++", "\n", "// Update total counts", "ms", ".", "totalCount", "--", "\n", "ms", ".", "totalBytes", "-=", "size", "\n", "// Messages sequence is incremental with no gap on a given msgstore.", "ms", ".", "first", "++", "\n", "// Invalidate ms.firstMsg, it will be looked-up on demand.", "ms", ".", "firstMsg", "=", "nil", "\n", "// Invalidate ms.lastMsg if it was the last message being removed.", "if", "ms", ".", "first", ">", "ms", ".", "last", "{", "ms", ".", "lastMsg", "=", "nil", "\n", "}", "\n", "// Is file slice is \"empty\" and not the last one", "if", "slice", ".", "msgsCount", "==", "slice", ".", "rmCount", "&&", "len", "(", "ms", ".", "files", ")", ">", "1", "{", "ms", ".", "removeFirstSlice", "(", ")", "\n", "}", "else", "{", "// This is the new first message in this slice.", "slice", ".", "firstSeq", "=", "ms", ".", "first", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// removeFirstMsg "removes" the first message of the first slice. // If the slice is "empty" the file slice is removed.
[ "removeFirstMsg", "removes", "the", "first", "message", "of", "the", "first", "slice", ".", "If", "the", "slice", "is", "empty", "the", "file", "slice", "is", "removed", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2932-L2975
train
nats-io/nats-streaming-server
stores/filestore.go
removeFirstSlice
func (ms *FileMsgStore) removeFirstSlice() { sl := ms.files[ms.firstFSlSeq] // We may or may not have the first slice locked, so need to close // the file knowing that files can be in either state. ms.fm.closeLockedOrOpenedFile(sl.file) ms.fm.remove(sl.file) // Close index file too. ms.fm.closeLockedOrOpenedFile(sl.idxFile) ms.fm.remove(sl.idxFile) // Assume we will remove the files remove := true // If there is an archive script invoke it first script := ms.fstore.opts.SliceArchiveScript if script != "" { datBak := sl.file.name + bakSuffix idxBak := sl.idxFile.name + bakSuffix var err error if err = os.Rename(sl.file.name, datBak); err == nil { if err = os.Rename(sl.idxFile.name, idxBak); err != nil { // Remove first backup file os.Remove(datBak) } } if err == nil { // Files have been successfully renamed, so don't attempt // to remove the original files. remove = false // We run the script in a go routine to not block the server. ms.allDone.Add(1) go func(subj, dat, idx string) { defer ms.allDone.Done() cmd := exec.Command(script, subj, dat, idx) output, err := cmd.CombinedOutput() if err != nil { ms.log.Noticef("Error invoking archive script %q: %v (output=%v)", script, err, string(output)) } else { ms.log.Noticef("Output of archive script for %s (%s and %s): %v", subj, dat, idx, string(output)) } }(ms.subject, datBak, idxBak) } } // Remove files if remove { os.Remove(sl.file.name) os.Remove(sl.idxFile.name) } // Remove slice from map delete(ms.files, ms.firstFSlSeq) // Normally, file slices have an incremental sequence number with // no gap. However, we want to support the fact that an user could // copy back some old file slice to be recovered, and so there // may be a gap. So find out what is the new first file sequence. for ms.firstFSlSeq < ms.lastFSlSeq { ms.firstFSlSeq++ if _, ok := ms.files[ms.firstFSlSeq]; ok { break } } // This should not happen! if ms.firstFSlSeq > ms.lastFSlSeq { panic("Removed last slice!") } }
go
func (ms *FileMsgStore) removeFirstSlice() { sl := ms.files[ms.firstFSlSeq] // We may or may not have the first slice locked, so need to close // the file knowing that files can be in either state. ms.fm.closeLockedOrOpenedFile(sl.file) ms.fm.remove(sl.file) // Close index file too. ms.fm.closeLockedOrOpenedFile(sl.idxFile) ms.fm.remove(sl.idxFile) // Assume we will remove the files remove := true // If there is an archive script invoke it first script := ms.fstore.opts.SliceArchiveScript if script != "" { datBak := sl.file.name + bakSuffix idxBak := sl.idxFile.name + bakSuffix var err error if err = os.Rename(sl.file.name, datBak); err == nil { if err = os.Rename(sl.idxFile.name, idxBak); err != nil { // Remove first backup file os.Remove(datBak) } } if err == nil { // Files have been successfully renamed, so don't attempt // to remove the original files. remove = false // We run the script in a go routine to not block the server. ms.allDone.Add(1) go func(subj, dat, idx string) { defer ms.allDone.Done() cmd := exec.Command(script, subj, dat, idx) output, err := cmd.CombinedOutput() if err != nil { ms.log.Noticef("Error invoking archive script %q: %v (output=%v)", script, err, string(output)) } else { ms.log.Noticef("Output of archive script for %s (%s and %s): %v", subj, dat, idx, string(output)) } }(ms.subject, datBak, idxBak) } } // Remove files if remove { os.Remove(sl.file.name) os.Remove(sl.idxFile.name) } // Remove slice from map delete(ms.files, ms.firstFSlSeq) // Normally, file slices have an incremental sequence number with // no gap. However, we want to support the fact that an user could // copy back some old file slice to be recovered, and so there // may be a gap. So find out what is the new first file sequence. for ms.firstFSlSeq < ms.lastFSlSeq { ms.firstFSlSeq++ if _, ok := ms.files[ms.firstFSlSeq]; ok { break } } // This should not happen! if ms.firstFSlSeq > ms.lastFSlSeq { panic("Removed last slice!") } }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "removeFirstSlice", "(", ")", "{", "sl", ":=", "ms", ".", "files", "[", "ms", ".", "firstFSlSeq", "]", "\n", "// We may or may not have the first slice locked, so need to close", "// the file knowing that files can be in either state.", "ms", ".", "fm", ".", "closeLockedOrOpenedFile", "(", "sl", ".", "file", ")", "\n", "ms", ".", "fm", ".", "remove", "(", "sl", ".", "file", ")", "\n", "// Close index file too.", "ms", ".", "fm", ".", "closeLockedOrOpenedFile", "(", "sl", ".", "idxFile", ")", "\n", "ms", ".", "fm", ".", "remove", "(", "sl", ".", "idxFile", ")", "\n", "// Assume we will remove the files", "remove", ":=", "true", "\n", "// If there is an archive script invoke it first", "script", ":=", "ms", ".", "fstore", ".", "opts", ".", "SliceArchiveScript", "\n", "if", "script", "!=", "\"", "\"", "{", "datBak", ":=", "sl", ".", "file", ".", "name", "+", "bakSuffix", "\n", "idxBak", ":=", "sl", ".", "idxFile", ".", "name", "+", "bakSuffix", "\n\n", "var", "err", "error", "\n", "if", "err", "=", "os", ".", "Rename", "(", "sl", ".", "file", ".", "name", ",", "datBak", ")", ";", "err", "==", "nil", "{", "if", "err", "=", "os", ".", "Rename", "(", "sl", ".", "idxFile", ".", "name", ",", "idxBak", ")", ";", "err", "!=", "nil", "{", "// Remove first backup file", "os", ".", "Remove", "(", "datBak", ")", "\n", "}", "\n", "}", "\n", "if", "err", "==", "nil", "{", "// Files have been successfully renamed, so don't attempt", "// to remove the original files.", "remove", "=", "false", "\n\n", "// We run the script in a go routine to not block the server.", "ms", ".", "allDone", ".", "Add", "(", "1", ")", "\n", "go", "func", "(", "subj", ",", "dat", ",", "idx", "string", ")", "{", "defer", "ms", ".", "allDone", ".", "Done", "(", ")", "\n", "cmd", ":=", "exec", ".", "Command", "(", "script", ",", "subj", ",", "dat", ",", "idx", ")", "\n", "output", ",", "err", ":=", "cmd", ".", "CombinedOutput", "(", ")", "\n", "if", "err", "!=", "nil", "{", "ms", ".", "log", ".", "Noticef", "(", "\"", "\"", ",", "script", ",", "err", ",", "string", "(", "output", ")", ")", "\n", "}", "else", "{", "ms", ".", "log", ".", "Noticef", "(", "\"", "\"", ",", "subj", ",", "dat", ",", "idx", ",", "string", "(", "output", ")", ")", "\n", "}", "\n", "}", "(", "ms", ".", "subject", ",", "datBak", ",", "idxBak", ")", "\n", "}", "\n", "}", "\n", "// Remove files", "if", "remove", "{", "os", ".", "Remove", "(", "sl", ".", "file", ".", "name", ")", "\n", "os", ".", "Remove", "(", "sl", ".", "idxFile", ".", "name", ")", "\n", "}", "\n", "// Remove slice from map", "delete", "(", "ms", ".", "files", ",", "ms", ".", "firstFSlSeq", ")", "\n", "// Normally, file slices have an incremental sequence number with", "// no gap. However, we want to support the fact that an user could", "// copy back some old file slice to be recovered, and so there", "// may be a gap. So find out what is the new first file sequence.", "for", "ms", ".", "firstFSlSeq", "<", "ms", ".", "lastFSlSeq", "{", "ms", ".", "firstFSlSeq", "++", "\n", "if", "_", ",", "ok", ":=", "ms", ".", "files", "[", "ms", ".", "firstFSlSeq", "]", ";", "ok", "{", "break", "\n", "}", "\n", "}", "\n", "// This should not happen!", "if", "ms", ".", "firstFSlSeq", ">", "ms", ".", "lastFSlSeq", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "}" ]
// removeFirstSlice removes the first file slice. // Should not be called if first slice is also last!
[ "removeFirstSlice", "removes", "the", "first", "file", "slice", ".", "Should", "not", "be", "called", "if", "first", "slice", "is", "also", "last!" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L2979-L3043
train
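A rough standalone sketch of the archive-before-delete flow described above: rename both slice files with a backup suffix and hand them to a user script in a goroutine, falling back to plain removal when there is no script or a rename fails. The script path, arguments and file names here are hypothetical.

package main

import (
    "log"
    "os"
    "os/exec"
    "sync"
)

// archiveOrRemove either archives the two slice files via an external script or,
// failing that, simply removes them.
func archiveOrRemove(wg *sync.WaitGroup, script, datFile, idxFile string) {
    if script != "" {
        datBak, idxBak := datFile+".bak", idxFile+".bak"
        if err := os.Rename(datFile, datBak); err == nil {
            if err = os.Rename(idxFile, idxBak); err != nil {
                os.Remove(datBak) // undo the first rename's leftover
            } else {
                // Both files renamed: run the archive script without blocking the caller.
                wg.Add(1)
                go func() {
                    defer wg.Done()
                    out, err := exec.Command(script, datBak, idxBak).CombinedOutput()
                    if err != nil {
                        log.Printf("archive script failed: %v (output=%s)", err, out)
                    }
                }()
                return
            }
        }
    }
    os.Remove(datFile)
    os.Remove(idxFile)
}

func main() {
    var wg sync.WaitGroup
    archiveOrRemove(&wg, "", "msgs.1.dat", "msgs.1.idx") // no script: plain removal
    wg.Wait()
}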
nats-io/nats-streaming-server
stores/filestore.go
getFileSliceForSeq
func (ms *FileMsgStore) getFileSliceForSeq(seq uint64) *fileSlice { if len(ms.files) == 0 { return nil } // Start with write slice slice := ms.writeSlice if (slice.firstSeq <= seq) && (seq <= slice.lastSeq) { return slice } // We want to support possible gaps in file slice sequence, so // no dichotomy, but simple iteration of the map, which in Go is // random. for _, slice := range ms.files { if (slice.firstSeq <= seq) && (seq <= slice.lastSeq) { return slice } } return nil }
go
func (ms *FileMsgStore) getFileSliceForSeq(seq uint64) *fileSlice { if len(ms.files) == 0 { return nil } // Start with write slice slice := ms.writeSlice if (slice.firstSeq <= seq) && (seq <= slice.lastSeq) { return slice } // We want to support possible gaps in file slice sequence, so // no dichotomy, but simple iteration of the map, which in Go is // random. for _, slice := range ms.files { if (slice.firstSeq <= seq) && (seq <= slice.lastSeq) { return slice } } return nil }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "getFileSliceForSeq", "(", "seq", "uint64", ")", "*", "fileSlice", "{", "if", "len", "(", "ms", ".", "files", ")", "==", "0", "{", "return", "nil", "\n", "}", "\n", "// Start with write slice", "slice", ":=", "ms", ".", "writeSlice", "\n", "if", "(", "slice", ".", "firstSeq", "<=", "seq", ")", "&&", "(", "seq", "<=", "slice", ".", "lastSeq", ")", "{", "return", "slice", "\n", "}", "\n", "// We want to support possible gaps in file slice sequence, so", "// no dichotomy, but simple iteration of the map, which in Go is", "// random.", "for", "_", ",", "slice", ":=", "range", "ms", ".", "files", "{", "if", "(", "slice", ".", "firstSeq", "<=", "seq", ")", "&&", "(", "seq", "<=", "slice", ".", "lastSeq", ")", "{", "return", "slice", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// getFileSliceForSeq returns the file slice where the message of the // given sequence is stored, or nil if the message is not found in any // of the file slices.
[ "getFileSliceForSeq", "returns", "the", "file", "slice", "where", "the", "message", "of", "the", "given", "sequence", "is", "stored", "or", "nil", "if", "the", "message", "is", "not", "found", "in", "any", "of", "the", "file", "slices", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3048-L3066
train
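A small sketch of the lookup order this docstring describes: try the current write slice first, then scan the slice map, since slice numbering may contain gaps. Types and field names here are illustrative, not the store's own.

package main

import "fmt"

type slice struct{ firstSeq, lastSeq uint64 }

// findSlice returns the slice whose sequence range contains seq, or nil.
func findSlice(slices map[int]*slice, write *slice, seq uint64) *slice {
    if write != nil && write.firstSeq <= seq && seq <= write.lastSeq {
        return write
    }
    for _, s := range slices {
        if s.firstSeq <= seq && seq <= s.lastSeq {
            return s
        }
    }
    return nil
}

func main() {
    s1, s3 := &slice{1, 100}, &slice{201, 300} // note the gap in slice numbering
    slices := map[int]*slice{1: s1, 3: s3}
    fmt.Println(findSlice(slices, s3, 42) == s1) // true
}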
nats-io/nats-streaming-server
stores/filestore.go
backgroundTasks
func (ms *FileMsgStore) backgroundTasks() { defer ms.allDone.Done() ms.RLock() hasBuffer := ms.bw != nil maxAge := int64(ms.limits.MaxAge) nextExpiration := ms.expiration lastCacheCheck := ms.timeTick lastBufShrink := ms.timeTick ms.RUnlock() for { // Update time timeTick := time.Now().UnixNano() atomic.StoreInt64(&ms.timeTick, timeTick) // Close unused file slices if atomic.LoadInt64(&ms.checkSlices) == 1 { ms.Lock() opened := 0 for _, slice := range ms.files { // If no FD limit and this is the write slice, skip. if !ms.hasFDsLimit && slice == ms.writeSlice { continue } opened++ if slice.lastUsed > 0 && time.Duration(timeTick-slice.lastUsed) >= sliceCloseInterval { slice.lastUsed = 0 ms.fm.closeFileIfOpened(slice.file) ms.fm.closeFileIfOpened(slice.idxFile) opened-- } } if opened == 0 { // We can update this without atomic since we are under store lock // and this go routine is the only place where we check the value. ms.checkSlices = 0 } ms.Unlock() } // Shrink the buffer if applicable if hasBuffer && time.Duration(timeTick-lastBufShrink) >= bufShrinkInterval { ms.Lock() if ms.writeSlice != nil { file := ms.writeSlice.file if ms.fm.lockFileIfOpened(file) { ms.writer, _ = ms.bw.tryShrinkBuffer(file.handle) ms.fm.unlockFile(file) } } ms.Unlock() lastBufShrink = timeTick } // Check for expiration if maxAge > 0 && nextExpiration > 0 && timeTick >= nextExpiration { ms.Lock() // Expire messages nextExpiration = ms.expireMsgs(timeTick, maxAge) ms.Unlock() } // Check for message caching if timeTick >= lastCacheCheck+cacheTTL { tryEvict := atomic.LoadInt32(&ms.cache.tryEvict) if tryEvict == 1 { ms.Lock() // Possibly remove some/all cached messages ms.cache.evict(timeTick) ms.Unlock() } lastCacheCheck = timeTick } select { case <-ms.bkgTasksDone: return case <-ms.bkgTasksWake: // wake up from a possible sleep to run the loop ms.RLock() nextExpiration = ms.expiration ms.RUnlock() case <-time.After(bkgTasksSleepDuration): // go back to top of for loop. } } }
go
func (ms *FileMsgStore) backgroundTasks() { defer ms.allDone.Done() ms.RLock() hasBuffer := ms.bw != nil maxAge := int64(ms.limits.MaxAge) nextExpiration := ms.expiration lastCacheCheck := ms.timeTick lastBufShrink := ms.timeTick ms.RUnlock() for { // Update time timeTick := time.Now().UnixNano() atomic.StoreInt64(&ms.timeTick, timeTick) // Close unused file slices if atomic.LoadInt64(&ms.checkSlices) == 1 { ms.Lock() opened := 0 for _, slice := range ms.files { // If no FD limit and this is the write slice, skip. if !ms.hasFDsLimit && slice == ms.writeSlice { continue } opened++ if slice.lastUsed > 0 && time.Duration(timeTick-slice.lastUsed) >= sliceCloseInterval { slice.lastUsed = 0 ms.fm.closeFileIfOpened(slice.file) ms.fm.closeFileIfOpened(slice.idxFile) opened-- } } if opened == 0 { // We can update this without atomic since we are under store lock // and this go routine is the only place where we check the value. ms.checkSlices = 0 } ms.Unlock() } // Shrink the buffer if applicable if hasBuffer && time.Duration(timeTick-lastBufShrink) >= bufShrinkInterval { ms.Lock() if ms.writeSlice != nil { file := ms.writeSlice.file if ms.fm.lockFileIfOpened(file) { ms.writer, _ = ms.bw.tryShrinkBuffer(file.handle) ms.fm.unlockFile(file) } } ms.Unlock() lastBufShrink = timeTick } // Check for expiration if maxAge > 0 && nextExpiration > 0 && timeTick >= nextExpiration { ms.Lock() // Expire messages nextExpiration = ms.expireMsgs(timeTick, maxAge) ms.Unlock() } // Check for message caching if timeTick >= lastCacheCheck+cacheTTL { tryEvict := atomic.LoadInt32(&ms.cache.tryEvict) if tryEvict == 1 { ms.Lock() // Possibly remove some/all cached messages ms.cache.evict(timeTick) ms.Unlock() } lastCacheCheck = timeTick } select { case <-ms.bkgTasksDone: return case <-ms.bkgTasksWake: // wake up from a possible sleep to run the loop ms.RLock() nextExpiration = ms.expiration ms.RUnlock() case <-time.After(bkgTasksSleepDuration): // go back to top of for loop. } } }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "backgroundTasks", "(", ")", "{", "defer", "ms", ".", "allDone", ".", "Done", "(", ")", "\n\n", "ms", ".", "RLock", "(", ")", "\n", "hasBuffer", ":=", "ms", ".", "bw", "!=", "nil", "\n", "maxAge", ":=", "int64", "(", "ms", ".", "limits", ".", "MaxAge", ")", "\n", "nextExpiration", ":=", "ms", ".", "expiration", "\n", "lastCacheCheck", ":=", "ms", ".", "timeTick", "\n", "lastBufShrink", ":=", "ms", ".", "timeTick", "\n", "ms", ".", "RUnlock", "(", ")", "\n\n", "for", "{", "// Update time", "timeTick", ":=", "time", ".", "Now", "(", ")", ".", "UnixNano", "(", ")", "\n", "atomic", ".", "StoreInt64", "(", "&", "ms", ".", "timeTick", ",", "timeTick", ")", "\n\n", "// Close unused file slices", "if", "atomic", ".", "LoadInt64", "(", "&", "ms", ".", "checkSlices", ")", "==", "1", "{", "ms", ".", "Lock", "(", ")", "\n", "opened", ":=", "0", "\n", "for", "_", ",", "slice", ":=", "range", "ms", ".", "files", "{", "// If no FD limit and this is the write slice, skip.", "if", "!", "ms", ".", "hasFDsLimit", "&&", "slice", "==", "ms", ".", "writeSlice", "{", "continue", "\n", "}", "\n", "opened", "++", "\n", "if", "slice", ".", "lastUsed", ">", "0", "&&", "time", ".", "Duration", "(", "timeTick", "-", "slice", ".", "lastUsed", ")", ">=", "sliceCloseInterval", "{", "slice", ".", "lastUsed", "=", "0", "\n", "ms", ".", "fm", ".", "closeFileIfOpened", "(", "slice", ".", "file", ")", "\n", "ms", ".", "fm", ".", "closeFileIfOpened", "(", "slice", ".", "idxFile", ")", "\n", "opened", "--", "\n", "}", "\n", "}", "\n", "if", "opened", "==", "0", "{", "// We can update this without atomic since we are under store lock", "// and this go routine is the only place where we check the value.", "ms", ".", "checkSlices", "=", "0", "\n", "}", "\n", "ms", ".", "Unlock", "(", ")", "\n", "}", "\n\n", "// Shrink the buffer if applicable", "if", "hasBuffer", "&&", "time", ".", "Duration", "(", "timeTick", "-", "lastBufShrink", ")", ">=", "bufShrinkInterval", "{", "ms", ".", "Lock", "(", ")", "\n", "if", "ms", ".", "writeSlice", "!=", "nil", "{", "file", ":=", "ms", ".", "writeSlice", ".", "file", "\n", "if", "ms", ".", "fm", ".", "lockFileIfOpened", "(", "file", ")", "{", "ms", ".", "writer", ",", "_", "=", "ms", ".", "bw", ".", "tryShrinkBuffer", "(", "file", ".", "handle", ")", "\n", "ms", ".", "fm", ".", "unlockFile", "(", "file", ")", "\n", "}", "\n", "}", "\n", "ms", ".", "Unlock", "(", ")", "\n", "lastBufShrink", "=", "timeTick", "\n", "}", "\n\n", "// Check for expiration", "if", "maxAge", ">", "0", "&&", "nextExpiration", ">", "0", "&&", "timeTick", ">=", "nextExpiration", "{", "ms", ".", "Lock", "(", ")", "\n", "// Expire messages", "nextExpiration", "=", "ms", ".", "expireMsgs", "(", "timeTick", ",", "maxAge", ")", "\n", "ms", ".", "Unlock", "(", ")", "\n", "}", "\n\n", "// Check for message caching", "if", "timeTick", ">=", "lastCacheCheck", "+", "cacheTTL", "{", "tryEvict", ":=", "atomic", ".", "LoadInt32", "(", "&", "ms", ".", "cache", ".", "tryEvict", ")", "\n", "if", "tryEvict", "==", "1", "{", "ms", ".", "Lock", "(", ")", "\n", "// Possibly remove some/all cached messages", "ms", ".", "cache", ".", "evict", "(", "timeTick", ")", "\n", "ms", ".", "Unlock", "(", ")", "\n", "}", "\n", "lastCacheCheck", "=", "timeTick", "\n", "}", "\n\n", "select", "{", "case", "<-", "ms", ".", "bkgTasksDone", ":", "return", "\n", "case", "<-", "ms", ".", "bkgTasksWake", ":", "// wake up from a possible sleep to run the loop", "ms", ".", "RLock", "(", ")", "\n", "nextExpiration", "=", 
"ms", ".", "expiration", "\n", "ms", ".", "RUnlock", "(", ")", "\n", "case", "<-", "time", ".", "After", "(", "bkgTasksSleepDuration", ")", ":", "// go back to top of for loop.", "}", "\n", "}", "\n", "}" ]
// backgroundTasks performs some background tasks related to this // message store.
[ "backgroundTasks", "performs", "some", "background", "tasks", "related", "to", "this", "message", "store", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3070-L3157
train
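Stripped to its skeleton, the loop above is "do one pass of housekeeping, then wait on a timer, a wake channel, or a done channel". A standalone sketch of that loop shape (interval, channel and function names are illustrative):

package main

import (
    "fmt"
    "time"
)

// backgroundLoop runs task periodically until done is closed; a send on wake
// triggers an immediate extra pass instead of waiting for the timer.
func backgroundLoop(done, wake <-chan struct{}, interval time.Duration, task func(now int64)) {
    for {
        task(time.Now().UnixNano())
        select {
        case <-done:
            return
        case <-wake:
            // Woken early: loop immediately and re-run the task.
        case <-time.After(interval):
            // Normal periodic pass.
        }
    }
}

func main() {
    done := make(chan struct{})
    wake := make(chan struct{}, 1)
    go backgroundLoop(done, wake, 10*time.Millisecond, func(now int64) { fmt.Println("tick", now) })
    time.Sleep(35 * time.Millisecond)
    close(done)
    time.Sleep(5 * time.Millisecond)
}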
nats-io/nats-streaming-server
stores/filestore.go
lookup
func (ms *FileMsgStore) lookup(seq uint64) (*pb.MsgProto, error) { // Reject message for sequence outside valid range if seq < ms.first || seq > ms.last { return nil, nil } // Check first if it's in the cache. msg := ms.cache.get(seq) if msg == nil && ms.bufferedMsgs != nil { // Possibly in bufferedMsgs bm := ms.bufferedMsgs[seq] if bm != nil { msg = bm.msg ms.cache.add(seq, msg, false) } } // If not, we need to read it from disk... if msg == nil { fslice := ms.getFileSliceForSeq(seq) if fslice == nil { return nil, nil } err := ms.lockFiles(fslice) if err != nil { return nil, err } msgIndex, err := ms.readMsgIndex(fslice, seq) if msgIndex != nil { file := fslice.file.handle // Position file to message's offset. 0 means from start. _, err = file.Seek(msgIndex.offset, io.SeekStart) if err == nil { ms.tmpMsgBuf, _, _, err = readRecord(file, ms.tmpMsgBuf, false, ms.fstore.crcTable, ms.fstore.opts.DoCRC) } } ms.unlockFiles(fslice) if err != nil || msgIndex == nil { return nil, err } // Recover this message msg = &pb.MsgProto{} err = msg.Unmarshal(ms.tmpMsgBuf[:msgIndex.msgSize]) if err != nil { return nil, err } ms.cache.add(seq, msg, false) } return msg, nil }
go
func (ms *FileMsgStore) lookup(seq uint64) (*pb.MsgProto, error) { // Reject message for sequence outside valid range if seq < ms.first || seq > ms.last { return nil, nil } // Check first if it's in the cache. msg := ms.cache.get(seq) if msg == nil && ms.bufferedMsgs != nil { // Possibly in bufferedMsgs bm := ms.bufferedMsgs[seq] if bm != nil { msg = bm.msg ms.cache.add(seq, msg, false) } } // If not, we need to read it from disk... if msg == nil { fslice := ms.getFileSliceForSeq(seq) if fslice == nil { return nil, nil } err := ms.lockFiles(fslice) if err != nil { return nil, err } msgIndex, err := ms.readMsgIndex(fslice, seq) if msgIndex != nil { file := fslice.file.handle // Position file to message's offset. 0 means from start. _, err = file.Seek(msgIndex.offset, io.SeekStart) if err == nil { ms.tmpMsgBuf, _, _, err = readRecord(file, ms.tmpMsgBuf, false, ms.fstore.crcTable, ms.fstore.opts.DoCRC) } } ms.unlockFiles(fslice) if err != nil || msgIndex == nil { return nil, err } // Recover this message msg = &pb.MsgProto{} err = msg.Unmarshal(ms.tmpMsgBuf[:msgIndex.msgSize]) if err != nil { return nil, err } ms.cache.add(seq, msg, false) } return msg, nil }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "lookup", "(", "seq", "uint64", ")", "(", "*", "pb", ".", "MsgProto", ",", "error", ")", "{", "// Reject message for sequence outside valid range", "if", "seq", "<", "ms", ".", "first", "||", "seq", ">", "ms", ".", "last", "{", "return", "nil", ",", "nil", "\n", "}", "\n", "// Check first if it's in the cache.", "msg", ":=", "ms", ".", "cache", ".", "get", "(", "seq", ")", "\n", "if", "msg", "==", "nil", "&&", "ms", ".", "bufferedMsgs", "!=", "nil", "{", "// Possibly in bufferedMsgs", "bm", ":=", "ms", ".", "bufferedMsgs", "[", "seq", "]", "\n", "if", "bm", "!=", "nil", "{", "msg", "=", "bm", ".", "msg", "\n", "ms", ".", "cache", ".", "add", "(", "seq", ",", "msg", ",", "false", ")", "\n", "}", "\n", "}", "\n", "// If not, we need to read it from disk...", "if", "msg", "==", "nil", "{", "fslice", ":=", "ms", ".", "getFileSliceForSeq", "(", "seq", ")", "\n", "if", "fslice", "==", "nil", "{", "return", "nil", ",", "nil", "\n", "}", "\n", "err", ":=", "ms", ".", "lockFiles", "(", "fslice", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "msgIndex", ",", "err", ":=", "ms", ".", "readMsgIndex", "(", "fslice", ",", "seq", ")", "\n", "if", "msgIndex", "!=", "nil", "{", "file", ":=", "fslice", ".", "file", ".", "handle", "\n", "// Position file to message's offset. 0 means from start.", "_", ",", "err", "=", "file", ".", "Seek", "(", "msgIndex", ".", "offset", ",", "io", ".", "SeekStart", ")", "\n", "if", "err", "==", "nil", "{", "ms", ".", "tmpMsgBuf", ",", "_", ",", "_", ",", "err", "=", "readRecord", "(", "file", ",", "ms", ".", "tmpMsgBuf", ",", "false", ",", "ms", ".", "fstore", ".", "crcTable", ",", "ms", ".", "fstore", ".", "opts", ".", "DoCRC", ")", "\n", "}", "\n", "}", "\n", "ms", ".", "unlockFiles", "(", "fslice", ")", "\n", "if", "err", "!=", "nil", "||", "msgIndex", "==", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "// Recover this message", "msg", "=", "&", "pb", ".", "MsgProto", "{", "}", "\n", "err", "=", "msg", ".", "Unmarshal", "(", "ms", ".", "tmpMsgBuf", "[", ":", "msgIndex", ".", "msgSize", "]", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "ms", ".", "cache", ".", "add", "(", "seq", ",", "msg", ",", "false", ")", "\n", "}", "\n", "return", "msg", ",", "nil", "\n", "}" ]
// lookup returns the message for the given sequence number, possibly // reading the message from disk. // Store write lock is assumed to be held on entry
[ "lookup", "returns", "the", "message", "for", "the", "given", "sequence", "number", "possibly", "reading", "the", "message", "from", "disk", ".", "Store", "write", "lock", "is", "assumed", "to", "be", "held", "on", "entry" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3162-L3209
train
nats-io/nats-streaming-server
stores/filestore.go
initCache
func (ms *FileMsgStore) initCache() { ms.cache = &msgsCache{ seqMaps: make(map[uint64]*cachedMsg), } }
go
func (ms *FileMsgStore) initCache() { ms.cache = &msgsCache{ seqMaps: make(map[uint64]*cachedMsg), } }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "initCache", "(", ")", "{", "ms", ".", "cache", "=", "&", "msgsCache", "{", "seqMaps", ":", "make", "(", "map", "[", "uint64", "]", "*", "cachedMsg", ")", ",", "}", "\n", "}" ]
// initCache initializes the message cache
[ "initCache", "initializes", "the", "message", "cache" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3326-L3330
train
nats-io/nats-streaming-server
stores/filestore.go
add
func (c *msgsCache) add(seq uint64, msg *pb.MsgProto, isNew bool) { exp := cacheTTL if isNew { exp += msg.Timestamp } else { exp += time.Now().UnixNano() } cMsg := &cachedMsg{ expiration: exp, msg: msg, } if c.tail == nil { c.head = cMsg } else { c.tail.next = cMsg // Ensure last expiration is at least >= previous one. if cMsg.expiration < c.tail.expiration { cMsg.expiration = c.tail.expiration } } cMsg.prev = c.tail c.tail = cMsg c.seqMaps[seq] = cMsg if len(c.seqMaps) == 1 { atomic.StoreInt32(&c.tryEvict, 1) } }
go
func (c *msgsCache) add(seq uint64, msg *pb.MsgProto, isNew bool) { exp := cacheTTL if isNew { exp += msg.Timestamp } else { exp += time.Now().UnixNano() } cMsg := &cachedMsg{ expiration: exp, msg: msg, } if c.tail == nil { c.head = cMsg } else { c.tail.next = cMsg // Ensure last expiration is at least >= previous one. if cMsg.expiration < c.tail.expiration { cMsg.expiration = c.tail.expiration } } cMsg.prev = c.tail c.tail = cMsg c.seqMaps[seq] = cMsg if len(c.seqMaps) == 1 { atomic.StoreInt32(&c.tryEvict, 1) } }
[ "func", "(", "c", "*", "msgsCache", ")", "add", "(", "seq", "uint64", ",", "msg", "*", "pb", ".", "MsgProto", ",", "isNew", "bool", ")", "{", "exp", ":=", "cacheTTL", "\n", "if", "isNew", "{", "exp", "+=", "msg", ".", "Timestamp", "\n", "}", "else", "{", "exp", "+=", "time", ".", "Now", "(", ")", ".", "UnixNano", "(", ")", "\n", "}", "\n", "cMsg", ":=", "&", "cachedMsg", "{", "expiration", ":", "exp", ",", "msg", ":", "msg", ",", "}", "\n", "if", "c", ".", "tail", "==", "nil", "{", "c", ".", "head", "=", "cMsg", "\n", "}", "else", "{", "c", ".", "tail", ".", "next", "=", "cMsg", "\n", "// Ensure last expiration is at least >= previous one.", "if", "cMsg", ".", "expiration", "<", "c", ".", "tail", ".", "expiration", "{", "cMsg", ".", "expiration", "=", "c", ".", "tail", ".", "expiration", "\n", "}", "\n", "}", "\n", "cMsg", ".", "prev", "=", "c", ".", "tail", "\n", "c", ".", "tail", "=", "cMsg", "\n", "c", ".", "seqMaps", "[", "seq", "]", "=", "cMsg", "\n", "if", "len", "(", "c", ".", "seqMaps", ")", "==", "1", "{", "atomic", ".", "StoreInt32", "(", "&", "c", ".", "tryEvict", ",", "1", ")", "\n", "}", "\n", "}" ]
// add adds a message to the cache. // Store write lock is assumed held on entry
[ "add", "adds", "a", "message", "to", "the", "cache", ".", "Store", "write", "lock", "is", "assumed", "held", "on", "entry" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3334-L3360
train
nats-io/nats-streaming-server
stores/filestore.go
get
func (c *msgsCache) get(seq uint64) *pb.MsgProto { cMsg := c.seqMaps[seq] if cMsg == nil { return nil } // Bump the expiration cMsg.expiration = time.Now().UnixNano() + cacheTTL // If not already at the tail of the list, move it there if cMsg != c.tail { if cMsg.prev != nil { cMsg.prev.next = cMsg.next } if cMsg.next != nil { cMsg.next.prev = cMsg.prev } if cMsg == c.head { c.head = cMsg.next } cMsg.prev = c.tail c.tail.next = cMsg cMsg.next = nil // Ensure last expiration is at least >= previous one. if cMsg.expiration < c.tail.expiration { cMsg.expiration = c.tail.expiration } c.tail = cMsg } return cMsg.msg }
go
func (c *msgsCache) get(seq uint64) *pb.MsgProto { cMsg := c.seqMaps[seq] if cMsg == nil { return nil } // Bump the expiration cMsg.expiration = time.Now().UnixNano() + cacheTTL // If not already at the tail of the list, move it there if cMsg != c.tail { if cMsg.prev != nil { cMsg.prev.next = cMsg.next } if cMsg.next != nil { cMsg.next.prev = cMsg.prev } if cMsg == c.head { c.head = cMsg.next } cMsg.prev = c.tail c.tail.next = cMsg cMsg.next = nil // Ensure last expiration is at least >= previous one. if cMsg.expiration < c.tail.expiration { cMsg.expiration = c.tail.expiration } c.tail = cMsg } return cMsg.msg }
[ "func", "(", "c", "*", "msgsCache", ")", "get", "(", "seq", "uint64", ")", "*", "pb", ".", "MsgProto", "{", "cMsg", ":=", "c", ".", "seqMaps", "[", "seq", "]", "\n", "if", "cMsg", "==", "nil", "{", "return", "nil", "\n", "}", "\n", "// Bump the expiration", "cMsg", ".", "expiration", "=", "time", ".", "Now", "(", ")", ".", "UnixNano", "(", ")", "+", "cacheTTL", "\n", "// If not already at the tail of the list, move it there", "if", "cMsg", "!=", "c", ".", "tail", "{", "if", "cMsg", ".", "prev", "!=", "nil", "{", "cMsg", ".", "prev", ".", "next", "=", "cMsg", ".", "next", "\n", "}", "\n", "if", "cMsg", ".", "next", "!=", "nil", "{", "cMsg", ".", "next", ".", "prev", "=", "cMsg", ".", "prev", "\n", "}", "\n", "if", "cMsg", "==", "c", ".", "head", "{", "c", ".", "head", "=", "cMsg", ".", "next", "\n", "}", "\n", "cMsg", ".", "prev", "=", "c", ".", "tail", "\n", "c", ".", "tail", ".", "next", "=", "cMsg", "\n", "cMsg", ".", "next", "=", "nil", "\n", "// Ensure last expiration is at least >= previous one.", "if", "cMsg", ".", "expiration", "<", "c", ".", "tail", ".", "expiration", "{", "cMsg", ".", "expiration", "=", "c", ".", "tail", ".", "expiration", "\n", "}", "\n", "c", ".", "tail", "=", "cMsg", "\n", "}", "\n", "return", "cMsg", ".", "msg", "\n", "}" ]
// get returns a message if available in the cache. // Store write lock is assumed held on entry
[ "get", "returns", "a", "message", "if", "available", "in", "the", "cache", ".", "Store", "write", "lock", "is", "assumed", "held", "on", "entry" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3364-L3392
train
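A minimal usage sketch of the cache records above, for orientation. msgsCache and cachedMsg are unexported, so this only compiles inside the stores package; the message values are invented and the pb/time packages are assumed to be imported as in filestore.go.

func exampleCacheUsage() {
	c := &msgsCache{seqMaps: make(map[uint64]*cachedMsg)}
	now := time.Now().UnixNano()
	// add appends at the tail and keeps tail expirations non-decreasing.
	c.add(1, &pb.MsgProto{Sequence: 1, Timestamp: now}, true)
	c.add(2, &pb.MsgProto{Sequence: 2, Timestamp: now}, true)
	// get bumps the entry's expiration and moves it to the tail (LRU-style).
	if m := c.get(1); m != nil {
		_ = m.Sequence
	}
}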
nats-io/nats-streaming-server
stores/filestore.go
evict
func (c *msgsCache) evict(now int64) { if c.head == nil { return } if now >= c.tail.expiration { // Bulk remove c.seqMaps = make(map[uint64]*cachedMsg) c.head, c.tail, c.tryEvict = nil, nil, 0 return } cMsg := c.head for cMsg != nil && cMsg.expiration <= now { delete(c.seqMaps, cMsg.msg.Sequence) cMsg = cMsg.next } if cMsg != c.head { // There should be at least one left, otherwise, they // would all have been bulk removed at top of this function. cMsg.prev = nil c.head = cMsg } }
go
func (c *msgsCache) evict(now int64) { if c.head == nil { return } if now >= c.tail.expiration { // Bulk remove c.seqMaps = make(map[uint64]*cachedMsg) c.head, c.tail, c.tryEvict = nil, nil, 0 return } cMsg := c.head for cMsg != nil && cMsg.expiration <= now { delete(c.seqMaps, cMsg.msg.Sequence) cMsg = cMsg.next } if cMsg != c.head { // There should be at least one left, otherwise, they // would all have been bulk removed at top of this function. cMsg.prev = nil c.head = cMsg } }
[ "func", "(", "c", "*", "msgsCache", ")", "evict", "(", "now", "int64", ")", "{", "if", "c", ".", "head", "==", "nil", "{", "return", "\n", "}", "\n", "if", "now", ">=", "c", ".", "tail", ".", "expiration", "{", "// Bulk remove", "c", ".", "seqMaps", "=", "make", "(", "map", "[", "uint64", "]", "*", "cachedMsg", ")", "\n", "c", ".", "head", ",", "c", ".", "tail", ",", "c", ".", "tryEvict", "=", "nil", ",", "nil", ",", "0", "\n", "return", "\n", "}", "\n", "cMsg", ":=", "c", ".", "head", "\n", "for", "cMsg", "!=", "nil", "&&", "cMsg", ".", "expiration", "<=", "now", "{", "delete", "(", "c", ".", "seqMaps", ",", "cMsg", ".", "msg", ".", "Sequence", ")", "\n", "cMsg", "=", "cMsg", ".", "next", "\n", "}", "\n", "if", "cMsg", "!=", "c", ".", "head", "{", "// There should be at least one left, otherwise, they", "// would all have been bulk removed at top of this function.", "cMsg", ".", "prev", "=", "nil", "\n", "c", ".", "head", "=", "cMsg", "\n", "}", "\n", "}" ]
// evict walks the cache from the head, evicting entries whose expiration has passed. // Store write lock is assumed held on entry
[ "evict", "walks", "the", "cache", "from", "the", "head", "evicting", "entries", "whose", "expiration", "has", "passed", ".", "Store", "write", "lock", "is", "assumed", "held", "on", "entry" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3396-L3417
train
nats-io/nats-streaming-server
stores/filestore.go
empty
func (c *msgsCache) empty() { atomic.StoreInt32(&c.tryEvict, 0) c.head, c.tail = nil, nil c.seqMaps = make(map[uint64]*cachedMsg) }
go
func (c *msgsCache) empty() { atomic.StoreInt32(&c.tryEvict, 0) c.head, c.tail = nil, nil c.seqMaps = make(map[uint64]*cachedMsg) }
[ "func", "(", "c", "*", "msgsCache", ")", "empty", "(", ")", "{", "atomic", ".", "StoreInt32", "(", "&", "c", ".", "tryEvict", ",", "0", ")", "\n", "c", ".", "head", ",", "c", ".", "tail", "=", "nil", ",", "nil", "\n", "c", ".", "seqMaps", "=", "make", "(", "map", "[", "uint64", "]", "*", "cachedMsg", ")", "\n", "}" ]
// empty empties the cache
[ "empty", "empties", "the", "cache" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3420-L3424
train
nats-io/nats-streaming-server
stores/filestore.go
Flush
func (ms *FileMsgStore) Flush() error { ms.Lock() var err error if ms.writeSlice != nil { err = ms.lockFiles(ms.writeSlice) if err == nil { err = ms.flush(ms.writeSlice) ms.unlockFiles(ms.writeSlice) } } ms.Unlock() return err }
go
func (ms *FileMsgStore) Flush() error { ms.Lock() var err error if ms.writeSlice != nil { err = ms.lockFiles(ms.writeSlice) if err == nil { err = ms.flush(ms.writeSlice) ms.unlockFiles(ms.writeSlice) } } ms.Unlock() return err }
[ "func", "(", "ms", "*", "FileMsgStore", ")", "Flush", "(", ")", "error", "{", "ms", ".", "Lock", "(", ")", "\n", "var", "err", "error", "\n", "if", "ms", ".", "writeSlice", "!=", "nil", "{", "err", "=", "ms", ".", "lockFiles", "(", "ms", ".", "writeSlice", ")", "\n", "if", "err", "==", "nil", "{", "err", "=", "ms", ".", "flush", "(", "ms", ".", "writeSlice", ")", "\n", "ms", ".", "unlockFiles", "(", "ms", ".", "writeSlice", ")", "\n", "}", "\n", "}", "\n", "ms", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// Flush flushes outstanding data into the store.
[ "Flush", "flushes", "outstanding", "data", "into", "the", "store", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3495-L3507
train
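A hedged usage sketch of Flush from the caller's side. The FileStore construction and the CreateChannel/Store calls are not part of this excerpt, so treat the fs variable and those signatures as assumptions about the surrounding stores API.

// fs is an already-opened *FileStore (assumed); pb and log are imported.
c, err := fs.CreateChannel("orders")
if err != nil {
	log.Fatal(err)
}
if _, err := c.Msgs.Store(&pb.MsgProto{Subject: "orders", Data: []byte("hello")}); err != nil {
	log.Fatal(err)
}
// Flush pushes whatever is still buffered for the current write slice to disk
// (and syncs the files when DoSync is enabled).
if err := c.Msgs.Flush(); err != nil {
	log.Fatal(err)
}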
nats-io/nats-streaming-server
stores/filestore.go
CreateSub
func (ss *FileSubStore) CreateSub(sub *spb.SubState) error { // Check if we can create the subscription (check limits and update // subscription count) ss.Lock() defer ss.Unlock() if err := ss.createSub(sub); err != nil { return err } if err := ss.writeRecord(nil, subRecNew, sub); err != nil { delete(ss.subs, sub.ID) return err } // We need to get a copy of the passed sub, we can't hold a reference // to it. csub := *sub s := &subscription{sub: &csub, seqnos: make(map[uint64]struct{})} ss.subs[sub.ID] = s return nil }
go
func (ss *FileSubStore) CreateSub(sub *spb.SubState) error { // Check if we can create the subscription (check limits and update // subscription count) ss.Lock() defer ss.Unlock() if err := ss.createSub(sub); err != nil { return err } if err := ss.writeRecord(nil, subRecNew, sub); err != nil { delete(ss.subs, sub.ID) return err } // We need to get a copy of the passed sub, we can't hold a reference // to it. csub := *sub s := &subscription{sub: &csub, seqnos: make(map[uint64]struct{})} ss.subs[sub.ID] = s return nil }
[ "func", "(", "ss", "*", "FileSubStore", ")", "CreateSub", "(", "sub", "*", "spb", ".", "SubState", ")", "error", "{", "// Check if we can create the subscription (check limits and update", "// subscription count)", "ss", ".", "Lock", "(", ")", "\n", "defer", "ss", ".", "Unlock", "(", ")", "\n", "if", "err", ":=", "ss", ".", "createSub", "(", "sub", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "ss", ".", "writeRecord", "(", "nil", ",", "subRecNew", ",", "sub", ")", ";", "err", "!=", "nil", "{", "delete", "(", "ss", ".", "subs", ",", "sub", ".", "ID", ")", "\n", "return", "err", "\n", "}", "\n", "// We need to get a copy of the passed sub, we can't hold a reference", "// to it.", "csub", ":=", "*", "sub", "\n", "s", ":=", "&", "subscription", "{", "sub", ":", "&", "csub", ",", "seqnos", ":", "make", "(", "map", "[", "uint64", "]", "struct", "{", "}", ")", "}", "\n", "ss", ".", "subs", "[", "sub", ".", "ID", "]", "=", "s", "\n", "return", "nil", "\n", "}" ]
// CreateSub records a new subscription represented by SubState. On success, // the SubState is assigned an id that is used by the other methods.
[ "CreateSub", "records", "a", "new", "subscription", "represented", "by", "SubState", ".", "On", "success", "the", "SubState", "is", "assigned", "an", "id", "that", "is", "used", "by", "the", "other", "methods", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3787-L3805
train
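A sketch of creating a subscription record. The spb.SubState field values are invented, and c.Subs is the channel's subscription store from the previous sketch (both assumed).

sub := &spb.SubState{
	ClientID:      "client-1",
	AckInbox:      "_INBOX.ack.client-1",
	MaxInFlight:   1024,
	AckWaitInSecs: 30,
}
if err := c.Subs.CreateSub(sub); err != nil {
	log.Fatal(err)
}
// CreateSub assigned sub.ID; the per-message calls further down key off that ID.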
nats-io/nats-streaming-server
stores/filestore.go
UpdateSub
func (ss *FileSubStore) UpdateSub(sub *spb.SubState) error { ss.Lock() defer ss.Unlock() if err := ss.writeRecord(nil, subRecUpdate, sub); err != nil { return err } // We need to get a copy of the passed sub, we can't hold a reference // to it. csub := *sub si := ss.subs[sub.ID] if si != nil { s := si.(*subscription) s.sub = &csub } else { s := &subscription{sub: &csub, seqnos: make(map[uint64]struct{})} ss.subs[sub.ID] = s } return nil }
go
func (ss *FileSubStore) UpdateSub(sub *spb.SubState) error { ss.Lock() defer ss.Unlock() if err := ss.writeRecord(nil, subRecUpdate, sub); err != nil { return err } // We need to get a copy of the passed sub, we can't hold a reference // to it. csub := *sub si := ss.subs[sub.ID] if si != nil { s := si.(*subscription) s.sub = &csub } else { s := &subscription{sub: &csub, seqnos: make(map[uint64]struct{})} ss.subs[sub.ID] = s } return nil }
[ "func", "(", "ss", "*", "FileSubStore", ")", "UpdateSub", "(", "sub", "*", "spb", ".", "SubState", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "defer", "ss", ".", "Unlock", "(", ")", "\n", "if", "err", ":=", "ss", ".", "writeRecord", "(", "nil", ",", "subRecUpdate", ",", "sub", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// We need to get a copy of the passed sub, we can't hold a reference", "// to it.", "csub", ":=", "*", "sub", "\n", "si", ":=", "ss", ".", "subs", "[", "sub", ".", "ID", "]", "\n", "if", "si", "!=", "nil", "{", "s", ":=", "si", ".", "(", "*", "subscription", ")", "\n", "s", ".", "sub", "=", "&", "csub", "\n", "}", "else", "{", "s", ":=", "&", "subscription", "{", "sub", ":", "&", "csub", ",", "seqnos", ":", "make", "(", "map", "[", "uint64", "]", "struct", "{", "}", ")", "}", "\n", "ss", ".", "subs", "[", "sub", ".", "ID", "]", "=", "s", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// UpdateSub updates a given subscription represented by SubState.
[ "UpdateSub", "updates", "a", "given", "subscription", "represented", "by", "SubState", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3808-L3826
train
nats-io/nats-streaming-server
stores/filestore.go
shouldCompact
func (ss *FileSubStore) shouldCompact() bool { // Gobal switch if !ss.opts.CompactEnabled { return false } // Check that if minimum file size is set, the client file // is at least at the minimum. if ss.opts.CompactMinFileSize > 0 && ss.fileSize < ss.opts.CompactMinFileSize { return false } // Check fragmentation frag := 0 if ss.numRecs == 0 { frag = 100 } else { frag = ss.delRecs * 100 / ss.numRecs } if frag < ss.opts.CompactFragmentation { return false } // Check that we don't compact too often if time.Since(ss.compactTS) < ss.compactItvl { return false } return true }
go
func (ss *FileSubStore) shouldCompact() bool { // Gobal switch if !ss.opts.CompactEnabled { return false } // Check that if minimum file size is set, the client file // is at least at the minimum. if ss.opts.CompactMinFileSize > 0 && ss.fileSize < ss.opts.CompactMinFileSize { return false } // Check fragmentation frag := 0 if ss.numRecs == 0 { frag = 100 } else { frag = ss.delRecs * 100 / ss.numRecs } if frag < ss.opts.CompactFragmentation { return false } // Check that we don't compact too often if time.Since(ss.compactTS) < ss.compactItvl { return false } return true }
[ "func", "(", "ss", "*", "FileSubStore", ")", "shouldCompact", "(", ")", "bool", "{", "// Gobal switch", "if", "!", "ss", ".", "opts", ".", "CompactEnabled", "{", "return", "false", "\n", "}", "\n", "// Check that if minimum file size is set, the client file", "// is at least at the minimum.", "if", "ss", ".", "opts", ".", "CompactMinFileSize", ">", "0", "&&", "ss", ".", "fileSize", "<", "ss", ".", "opts", ".", "CompactMinFileSize", "{", "return", "false", "\n", "}", "\n", "// Check fragmentation", "frag", ":=", "0", "\n", "if", "ss", ".", "numRecs", "==", "0", "{", "frag", "=", "100", "\n", "}", "else", "{", "frag", "=", "ss", ".", "delRecs", "*", "100", "/", "ss", ".", "numRecs", "\n", "}", "\n", "if", "frag", "<", "ss", ".", "opts", ".", "CompactFragmentation", "{", "return", "false", "\n", "}", "\n", "// Check that we don't compact too often", "if", "time", ".", "Since", "(", "ss", ".", "compactTS", ")", "<", "ss", ".", "compactItvl", "{", "return", "false", "\n", "}", "\n", "return", "true", "\n", "}" ]
// shouldCompact returns a boolean indicating if we should compact // Lock is held by caller
[ "shouldCompact", "returns", "a", "boolean", "indicating", "if", "we", "should", "compact", "Lock", "is", "held", "by", "caller" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3853-L3878
train
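To make the fragmentation check concrete (the numbers are made up; 50 is the usual default for CompactFragmentation in this file, but verify against DefaultFileStoreOptions):

numRecs, delRecs := 200, 120
frag := delRecs * 100 / numRecs // 60
// With CompactFragmentation at its usual default of 50, 60 >= 50, so compaction
// is eligible, provided the minimum file size and compact interval checks pass too.
_ = frag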
nats-io/nats-streaming-server
stores/filestore.go
AddSeqPending
func (ss *FileSubStore) AddSeqPending(subid, seqno uint64) error { ss.Lock() ss.updateSub.ID, ss.updateSub.Seqno = subid, seqno if err := ss.writeRecord(nil, subRecMsg, &ss.updateSub); err != nil { ss.Unlock() return err } si := ss.subs[subid] if si != nil { s := si.(*subscription) if seqno > s.sub.LastSent { s.sub.LastSent = seqno } s.seqnos[seqno] = struct{}{} } ss.Unlock() return nil }
go
func (ss *FileSubStore) AddSeqPending(subid, seqno uint64) error { ss.Lock() ss.updateSub.ID, ss.updateSub.Seqno = subid, seqno if err := ss.writeRecord(nil, subRecMsg, &ss.updateSub); err != nil { ss.Unlock() return err } si := ss.subs[subid] if si != nil { s := si.(*subscription) if seqno > s.sub.LastSent { s.sub.LastSent = seqno } s.seqnos[seqno] = struct{}{} } ss.Unlock() return nil }
[ "func", "(", "ss", "*", "FileSubStore", ")", "AddSeqPending", "(", "subid", ",", "seqno", "uint64", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "ss", ".", "updateSub", ".", "ID", ",", "ss", ".", "updateSub", ".", "Seqno", "=", "subid", ",", "seqno", "\n", "if", "err", ":=", "ss", ".", "writeRecord", "(", "nil", ",", "subRecMsg", ",", "&", "ss", ".", "updateSub", ")", ";", "err", "!=", "nil", "{", "ss", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}", "\n", "si", ":=", "ss", ".", "subs", "[", "subid", "]", "\n", "if", "si", "!=", "nil", "{", "s", ":=", "si", ".", "(", "*", "subscription", ")", "\n", "if", "seqno", ">", "s", ".", "sub", ".", "LastSent", "{", "s", ".", "sub", ".", "LastSent", "=", "seqno", "\n", "}", "\n", "s", ".", "seqnos", "[", "seqno", "]", "=", "struct", "{", "}", "{", "}", "\n", "}", "\n", "ss", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}" ]
// AddSeqPending adds the given message seqno to the given subscription.
[ "AddSeqPending", "adds", "the", "given", "message", "seqno", "to", "the", "given", "subscription", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3881-L3898
train
nats-io/nats-streaming-server
stores/filestore.go
AckSeqPending
func (ss *FileSubStore) AckSeqPending(subid, seqno uint64) error { ss.Lock() ss.updateSub.ID, ss.updateSub.Seqno = subid, seqno if err := ss.writeRecord(nil, subRecAck, &ss.updateSub); err != nil { ss.Unlock() return err } si := ss.subs[subid] if si != nil { s := si.(*subscription) delete(s.seqnos, seqno) // Test if we should compact if ss.shouldCompact() { ss.fm.closeFileIfOpened(ss.file) ss.compact(ss.file.name) } } ss.Unlock() return nil }
go
func (ss *FileSubStore) AckSeqPending(subid, seqno uint64) error { ss.Lock() ss.updateSub.ID, ss.updateSub.Seqno = subid, seqno if err := ss.writeRecord(nil, subRecAck, &ss.updateSub); err != nil { ss.Unlock() return err } si := ss.subs[subid] if si != nil { s := si.(*subscription) delete(s.seqnos, seqno) // Test if we should compact if ss.shouldCompact() { ss.fm.closeFileIfOpened(ss.file) ss.compact(ss.file.name) } } ss.Unlock() return nil }
[ "func", "(", "ss", "*", "FileSubStore", ")", "AckSeqPending", "(", "subid", ",", "seqno", "uint64", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "ss", ".", "updateSub", ".", "ID", ",", "ss", ".", "updateSub", ".", "Seqno", "=", "subid", ",", "seqno", "\n", "if", "err", ":=", "ss", ".", "writeRecord", "(", "nil", ",", "subRecAck", ",", "&", "ss", ".", "updateSub", ")", ";", "err", "!=", "nil", "{", "ss", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}", "\n", "si", ":=", "ss", ".", "subs", "[", "subid", "]", "\n", "if", "si", "!=", "nil", "{", "s", ":=", "si", ".", "(", "*", "subscription", ")", "\n", "delete", "(", "s", ".", "seqnos", ",", "seqno", ")", "\n", "// Test if we should compact", "if", "ss", ".", "shouldCompact", "(", ")", "{", "ss", ".", "fm", ".", "closeFileIfOpened", "(", "ss", ".", "file", ")", "\n", "ss", ".", "compact", "(", "ss", ".", "file", ".", "name", ")", "\n", "}", "\n", "}", "\n", "ss", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}" ]
// AckSeqPending records that the given message seqno has been acknowledged // by the given subscription.
[ "AckSeqPending", "records", "that", "the", "given", "message", "seqno", "has", "been", "acknowledged", "by", "the", "given", "subscription", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3902-L3921
train
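Continuing the subscription sketch above, recording a delivery and its acknowledgment uses only the two methods just shown:

// Sequence 42 was delivered to the subscription...
if err := c.Subs.AddSeqPending(sub.ID, 42); err != nil {
	log.Fatal(err)
}
// ...and later acknowledged, which turns its pending record into free space
// and may trigger a compaction of the subscriptions file.
if err := c.Subs.AckSeqPending(sub.ID, 42); err != nil {
	log.Fatal(err)
}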
nats-io/nats-streaming-server
stores/filestore.go
compact
func (ss *FileSubStore) compact(orgFileName string) error { tmpFile, err := getTempFile(ss.fm.rootDir, "subs") if err != nil { return err } tmpBW := bufio.NewWriterSize(tmpFile, defaultBufSize) // Save values in case of failed compaction savedNumRecs := ss.numRecs savedDelRecs := ss.delRecs savedFileSize := ss.fileSize // Cleanup in case of error during compact defer func() { if tmpFile != nil { tmpFile.Close() os.Remove(tmpFile.Name()) // Since we failed compaction, restore values ss.numRecs = savedNumRecs ss.delRecs = savedDelRecs ss.fileSize = savedFileSize } }() // Reset to 0 since writeRecord() is updating the values. ss.numRecs = 0 ss.delRecs = 0 ss.fileSize = 0 for _, subi := range ss.subs { sub := subi.(*subscription) err = ss.writeRecord(tmpBW, subRecNew, sub.sub) if err != nil { return err } ss.updateSub.ID = sub.sub.ID for seqno := range sub.seqnos { ss.updateSub.Seqno = seqno err = ss.writeRecord(tmpBW, subRecMsg, &ss.updateSub) if err != nil { return err } } } // Flush and sync the temporary file err = tmpBW.Flush() if err != nil { return err } err = tmpFile.Sync() if err != nil { return err } // Start by closing the temporary file. if err := tmpFile.Close(); err != nil { return err } // Rename the tmp file to original file name if err := os.Rename(tmpFile.Name(), orgFileName); err != nil { return err } // Prevent cleanup on success tmpFile = nil // Update the timestamp of this last successful compact ss.compactTS = time.Now() return nil }
go
func (ss *FileSubStore) compact(orgFileName string) error { tmpFile, err := getTempFile(ss.fm.rootDir, "subs") if err != nil { return err } tmpBW := bufio.NewWriterSize(tmpFile, defaultBufSize) // Save values in case of failed compaction savedNumRecs := ss.numRecs savedDelRecs := ss.delRecs savedFileSize := ss.fileSize // Cleanup in case of error during compact defer func() { if tmpFile != nil { tmpFile.Close() os.Remove(tmpFile.Name()) // Since we failed compaction, restore values ss.numRecs = savedNumRecs ss.delRecs = savedDelRecs ss.fileSize = savedFileSize } }() // Reset to 0 since writeRecord() is updating the values. ss.numRecs = 0 ss.delRecs = 0 ss.fileSize = 0 for _, subi := range ss.subs { sub := subi.(*subscription) err = ss.writeRecord(tmpBW, subRecNew, sub.sub) if err != nil { return err } ss.updateSub.ID = sub.sub.ID for seqno := range sub.seqnos { ss.updateSub.Seqno = seqno err = ss.writeRecord(tmpBW, subRecMsg, &ss.updateSub) if err != nil { return err } } } // Flush and sync the temporary file err = tmpBW.Flush() if err != nil { return err } err = tmpFile.Sync() if err != nil { return err } // Start by closing the temporary file. if err := tmpFile.Close(); err != nil { return err } // Rename the tmp file to original file name if err := os.Rename(tmpFile.Name(), orgFileName); err != nil { return err } // Prevent cleanup on success tmpFile = nil // Update the timestamp of this last successful compact ss.compactTS = time.Now() return nil }
[ "func", "(", "ss", "*", "FileSubStore", ")", "compact", "(", "orgFileName", "string", ")", "error", "{", "tmpFile", ",", "err", ":=", "getTempFile", "(", "ss", ".", "fm", ".", "rootDir", ",", "\"", "\"", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "tmpBW", ":=", "bufio", ".", "NewWriterSize", "(", "tmpFile", ",", "defaultBufSize", ")", "\n", "// Save values in case of failed compaction", "savedNumRecs", ":=", "ss", ".", "numRecs", "\n", "savedDelRecs", ":=", "ss", ".", "delRecs", "\n", "savedFileSize", ":=", "ss", ".", "fileSize", "\n", "// Cleanup in case of error during compact", "defer", "func", "(", ")", "{", "if", "tmpFile", "!=", "nil", "{", "tmpFile", ".", "Close", "(", ")", "\n", "os", ".", "Remove", "(", "tmpFile", ".", "Name", "(", ")", ")", "\n", "// Since we failed compaction, restore values", "ss", ".", "numRecs", "=", "savedNumRecs", "\n", "ss", ".", "delRecs", "=", "savedDelRecs", "\n", "ss", ".", "fileSize", "=", "savedFileSize", "\n", "}", "\n", "}", "(", ")", "\n", "// Reset to 0 since writeRecord() is updating the values.", "ss", ".", "numRecs", "=", "0", "\n", "ss", ".", "delRecs", "=", "0", "\n", "ss", ".", "fileSize", "=", "0", "\n", "for", "_", ",", "subi", ":=", "range", "ss", ".", "subs", "{", "sub", ":=", "subi", ".", "(", "*", "subscription", ")", "\n", "err", "=", "ss", ".", "writeRecord", "(", "tmpBW", ",", "subRecNew", ",", "sub", ".", "sub", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "ss", ".", "updateSub", ".", "ID", "=", "sub", ".", "sub", ".", "ID", "\n", "for", "seqno", ":=", "range", "sub", ".", "seqnos", "{", "ss", ".", "updateSub", ".", "Seqno", "=", "seqno", "\n", "err", "=", "ss", ".", "writeRecord", "(", "tmpBW", ",", "subRecMsg", ",", "&", "ss", ".", "updateSub", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "}", "\n", "// Flush and sync the temporary file", "err", "=", "tmpBW", ".", "Flush", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "err", "=", "tmpFile", ".", "Sync", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// Start by closing the temporary file.", "if", "err", ":=", "tmpFile", ".", "Close", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// Rename the tmp file to original file name", "if", "err", ":=", "os", ".", "Rename", "(", "tmpFile", ".", "Name", "(", ")", ",", "orgFileName", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// Prevent cleanup on success", "tmpFile", "=", "nil", "\n", "// Update the timestamp of this last successful compact", "ss", ".", "compactTS", "=", "time", ".", "Now", "(", ")", "\n", "return", "nil", "\n", "}" ]
// compact rewrites all subscriptions to a temporary file, reducing the size // by dropping deleted subscriptions and message sequences that have been // acknowledged. On success, the subscriptions file is replaced by this // temporary file. // Lock is held by caller
[ "compact", "rewrites", "all", "subscriptions", "to", "a", "temporary", "file", "reducing", "the", "size", "by", "dropping", "deleted", "subscriptions", "and", "message", "sequences", "that", "have", "been", "acknowledged", ".", "On", "success", "the", "subscriptions", "file", "is", "replaced", "by", "this", "temporary", "file", ".", "Lock", "is", "held", "by", "caller" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3928-L3990
train
nats-io/nats-streaming-server
stores/filestore.go
writeRecord
func (ss *FileSubStore) writeRecord(w io.Writer, recType recordType, rec record) error { var err error totalSize := 0 recSize := rec.Size() var bwBuf *bufio.Writer needsUnlock := false if w == nil { if err := ss.lockFile(); err != nil { return err } needsUnlock = true if ss.bw != nil { bwBuf = ss.bw.buf // If we are using the buffer writer on this call, and the buffer is // not already at the max size... if bwBuf != nil && ss.bw.bufSize != ss.opts.BufferSize { // Check if record fits required := recSize + recordHeaderSize if required > bwBuf.Available() { ss.writer, err = ss.bw.expand(ss.file.handle, required) if err != nil { ss.fm.unlockFile(ss.file) return err } bwBuf = ss.bw.buf } } } w = ss.writer } ss.tmpSubBuf, totalSize, err = writeRecord(w, ss.tmpSubBuf, recType, rec, recSize, ss.crcTable) if err != nil { if needsUnlock { ss.fm.unlockFile(ss.file) } return err } if bwBuf != nil && ss.bw.shrinkReq { ss.bw.checkShrinkRequest() } // Indicate that we wrote something to the buffer/file ss.activity = true switch recType { case subRecNew: ss.numRecs++ case subRecMsg: ss.numRecs++ case subRecAck: // An ack makes the message record free space ss.delRecs++ case subRecUpdate: ss.numRecs++ // An update makes the old record free space ss.delRecs++ case subRecDel: ss.delRecs++ default: panic(fmt.Errorf("record type %v unknown", recType)) } ss.fileSize += int64(totalSize) if needsUnlock { ss.fm.unlockFile(ss.file) } return nil }
go
func (ss *FileSubStore) writeRecord(w io.Writer, recType recordType, rec record) error { var err error totalSize := 0 recSize := rec.Size() var bwBuf *bufio.Writer needsUnlock := false if w == nil { if err := ss.lockFile(); err != nil { return err } needsUnlock = true if ss.bw != nil { bwBuf = ss.bw.buf // If we are using the buffer writer on this call, and the buffer is // not already at the max size... if bwBuf != nil && ss.bw.bufSize != ss.opts.BufferSize { // Check if record fits required := recSize + recordHeaderSize if required > bwBuf.Available() { ss.writer, err = ss.bw.expand(ss.file.handle, required) if err != nil { ss.fm.unlockFile(ss.file) return err } bwBuf = ss.bw.buf } } } w = ss.writer } ss.tmpSubBuf, totalSize, err = writeRecord(w, ss.tmpSubBuf, recType, rec, recSize, ss.crcTable) if err != nil { if needsUnlock { ss.fm.unlockFile(ss.file) } return err } if bwBuf != nil && ss.bw.shrinkReq { ss.bw.checkShrinkRequest() } // Indicate that we wrote something to the buffer/file ss.activity = true switch recType { case subRecNew: ss.numRecs++ case subRecMsg: ss.numRecs++ case subRecAck: // An ack makes the message record free space ss.delRecs++ case subRecUpdate: ss.numRecs++ // An update makes the old record free space ss.delRecs++ case subRecDel: ss.delRecs++ default: panic(fmt.Errorf("record type %v unknown", recType)) } ss.fileSize += int64(totalSize) if needsUnlock { ss.fm.unlockFile(ss.file) } return nil }
[ "func", "(", "ss", "*", "FileSubStore", ")", "writeRecord", "(", "w", "io", ".", "Writer", ",", "recType", "recordType", ",", "rec", "record", ")", "error", "{", "var", "err", "error", "\n", "totalSize", ":=", "0", "\n", "recSize", ":=", "rec", ".", "Size", "(", ")", "\n\n", "var", "bwBuf", "*", "bufio", ".", "Writer", "\n", "needsUnlock", ":=", "false", "\n\n", "if", "w", "==", "nil", "{", "if", "err", ":=", "ss", ".", "lockFile", "(", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "needsUnlock", "=", "true", "\n", "if", "ss", ".", "bw", "!=", "nil", "{", "bwBuf", "=", "ss", ".", "bw", ".", "buf", "\n", "// If we are using the buffer writer on this call, and the buffer is", "// not already at the max size...", "if", "bwBuf", "!=", "nil", "&&", "ss", ".", "bw", ".", "bufSize", "!=", "ss", ".", "opts", ".", "BufferSize", "{", "// Check if record fits", "required", ":=", "recSize", "+", "recordHeaderSize", "\n", "if", "required", ">", "bwBuf", ".", "Available", "(", ")", "{", "ss", ".", "writer", ",", "err", "=", "ss", ".", "bw", ".", "expand", "(", "ss", ".", "file", ".", "handle", ",", "required", ")", "\n", "if", "err", "!=", "nil", "{", "ss", ".", "fm", ".", "unlockFile", "(", "ss", ".", "file", ")", "\n", "return", "err", "\n", "}", "\n", "bwBuf", "=", "ss", ".", "bw", ".", "buf", "\n", "}", "\n", "}", "\n", "}", "\n", "w", "=", "ss", ".", "writer", "\n", "}", "\n", "ss", ".", "tmpSubBuf", ",", "totalSize", ",", "err", "=", "writeRecord", "(", "w", ",", "ss", ".", "tmpSubBuf", ",", "recType", ",", "rec", ",", "recSize", ",", "ss", ".", "crcTable", ")", "\n", "if", "err", "!=", "nil", "{", "if", "needsUnlock", "{", "ss", ".", "fm", ".", "unlockFile", "(", "ss", ".", "file", ")", "\n", "}", "\n", "return", "err", "\n", "}", "\n", "if", "bwBuf", "!=", "nil", "&&", "ss", ".", "bw", ".", "shrinkReq", "{", "ss", ".", "bw", ".", "checkShrinkRequest", "(", ")", "\n", "}", "\n", "// Indicate that we wrote something to the buffer/file", "ss", ".", "activity", "=", "true", "\n", "switch", "recType", "{", "case", "subRecNew", ":", "ss", ".", "numRecs", "++", "\n", "case", "subRecMsg", ":", "ss", ".", "numRecs", "++", "\n", "case", "subRecAck", ":", "// An ack makes the message record free space", "ss", ".", "delRecs", "++", "\n", "case", "subRecUpdate", ":", "ss", ".", "numRecs", "++", "\n", "// An update makes the old record free space", "ss", ".", "delRecs", "++", "\n", "case", "subRecDel", ":", "ss", ".", "delRecs", "++", "\n", "default", ":", "panic", "(", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "recType", ")", ")", "\n", "}", "\n", "ss", ".", "fileSize", "+=", "int64", "(", "totalSize", ")", "\n", "if", "needsUnlock", "{", "ss", ".", "fm", ".", "unlockFile", "(", "ss", ".", "file", ")", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// writes a record in the subscriptions file. // store's lock is held on entry.
[ "writes", "a", "record", "in", "the", "subscriptions", "file", ".", "store", "s", "lock", "is", "held", "on", "entry", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L3994-L4060
train
nats-io/nats-streaming-server
stores/filestore.go
Flush
func (ss *FileSubStore) Flush() error { ss.Lock() err := ss.lockFile() if err == nil { err = ss.flush() ss.fm.unlockFile(ss.file) } ss.Unlock() return err }
go
func (ss *FileSubStore) Flush() error { ss.Lock() err := ss.lockFile() if err == nil { err = ss.flush() ss.fm.unlockFile(ss.file) } ss.Unlock() return err }
[ "func", "(", "ss", "*", "FileSubStore", ")", "Flush", "(", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "err", ":=", "ss", ".", "lockFile", "(", ")", "\n", "if", "err", "==", "nil", "{", "err", "=", "ss", ".", "flush", "(", ")", "\n", "ss", ".", "fm", ".", "unlockFile", "(", "ss", ".", "file", ")", "\n", "}", "\n", "ss", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// Flush persists buffered operations to disk.
[ "Flush", "persists", "buffered", "operations", "to", "disk", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L4081-L4090
train
nats-io/nats-streaming-server
stores/filestore.go
Close
func (ss *FileSubStore) Close() error { ss.Lock() if ss.closed { ss.Unlock() return nil } ss.closed = true if ss.shrinkTimer != nil { if ss.shrinkTimer.Stop() { // If we can stop, timer callback won't fire, // so we need to decrement the wait group. ss.allDone.Done() } } ss.Unlock() // Wait on timers/callbacks ss.allDone.Wait() ss.Lock() var err error if ss.fm.remove(ss.file) { if ss.file.handle != nil { err = ss.flush() err = util.CloseFile(err, ss.file.handle) } } ss.Unlock() return err }
go
func (ss *FileSubStore) Close() error { ss.Lock() if ss.closed { ss.Unlock() return nil } ss.closed = true if ss.shrinkTimer != nil { if ss.shrinkTimer.Stop() { // If we can stop, timer callback won't fire, // so we need to decrement the wait group. ss.allDone.Done() } } ss.Unlock() // Wait on timers/callbacks ss.allDone.Wait() ss.Lock() var err error if ss.fm.remove(ss.file) { if ss.file.handle != nil { err = ss.flush() err = util.CloseFile(err, ss.file.handle) } } ss.Unlock() return err }
[ "func", "(", "ss", "*", "FileSubStore", ")", "Close", "(", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "if", "ss", ".", "closed", "{", "ss", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}", "\n\n", "ss", ".", "closed", "=", "true", "\n\n", "if", "ss", ".", "shrinkTimer", "!=", "nil", "{", "if", "ss", ".", "shrinkTimer", ".", "Stop", "(", ")", "{", "// If we can stop, timer callback won't fire,", "// so we need to decrement the wait group.", "ss", ".", "allDone", ".", "Done", "(", ")", "\n", "}", "\n", "}", "\n", "ss", ".", "Unlock", "(", ")", "\n\n", "// Wait on timers/callbacks", "ss", ".", "allDone", ".", "Wait", "(", ")", "\n\n", "ss", ".", "Lock", "(", ")", "\n", "var", "err", "error", "\n", "if", "ss", ".", "fm", ".", "remove", "(", "ss", ".", "file", ")", "{", "if", "ss", ".", "file", ".", "handle", "!=", "nil", "{", "err", "=", "ss", ".", "flush", "(", ")", "\n", "err", "=", "util", ".", "CloseFile", "(", "err", ",", "ss", ".", "file", ".", "handle", ")", "\n", "}", "\n", "}", "\n", "ss", ".", "Unlock", "(", ")", "\n\n", "return", "err", "\n", "}" ]
// Close closes this store
[ "Close", "closes", "this", "store" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L4093-L4125
train
nats-io/nats-streaming-server
server/raft_transport.go
Dial
func (n *natsStreamLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) { if !n.conn.IsConnected() { return nil, errors.New("raft-nats: dial failed, not connected") } // QUESTION: The Raft NetTransport does connection pooling, which is useful // for TCP sockets. The NATS transport simulates a socket using a // subscription at each endpoint, but everything goes over the same NATS // socket. This means there is little advantage to pooling here currently. // Should we actually Dial a new NATS connection here and rely on pooling? connect := &connectRequestProto{ ID: n.localAddr.String(), Inbox: fmt.Sprintf(natsRequestInbox, n.localAddr.String(), nats.NewInbox()), } data, err := json.Marshal(connect) if err != nil { panic(err) } peerConn := n.newNATSConn(string(address)) // Setup inbox. sub, err := n.conn.Subscribe(connect.Inbox, peerConn.msgHandler) if err != nil { return nil, err } sub.SetPendingLimits(-1, -1) if err := n.conn.FlushTimeout(n.timeout); err != nil { sub.Unsubscribe() return nil, err } // Make connect request to peer. msg, err := n.conn.Request(fmt.Sprintf(natsConnectInbox, address), data, timeout) if err != nil { sub.Unsubscribe() return nil, err } var resp connectResponseProto if err := json.Unmarshal(msg.Data, &resp); err != nil { sub.Unsubscribe() return nil, err } peerConn.sub = sub peerConn.outbox = resp.Inbox n.mu.Lock() n.conns[peerConn] = struct{}{} n.mu.Unlock() return peerConn, nil }
go
func (n *natsStreamLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) { if !n.conn.IsConnected() { return nil, errors.New("raft-nats: dial failed, not connected") } // QUESTION: The Raft NetTransport does connection pooling, which is useful // for TCP sockets. The NATS transport simulates a socket using a // subscription at each endpoint, but everything goes over the same NATS // socket. This means there is little advantage to pooling here currently. // Should we actually Dial a new NATS connection here and rely on pooling? connect := &connectRequestProto{ ID: n.localAddr.String(), Inbox: fmt.Sprintf(natsRequestInbox, n.localAddr.String(), nats.NewInbox()), } data, err := json.Marshal(connect) if err != nil { panic(err) } peerConn := n.newNATSConn(string(address)) // Setup inbox. sub, err := n.conn.Subscribe(connect.Inbox, peerConn.msgHandler) if err != nil { return nil, err } sub.SetPendingLimits(-1, -1) if err := n.conn.FlushTimeout(n.timeout); err != nil { sub.Unsubscribe() return nil, err } // Make connect request to peer. msg, err := n.conn.Request(fmt.Sprintf(natsConnectInbox, address), data, timeout) if err != nil { sub.Unsubscribe() return nil, err } var resp connectResponseProto if err := json.Unmarshal(msg.Data, &resp); err != nil { sub.Unsubscribe() return nil, err } peerConn.sub = sub peerConn.outbox = resp.Inbox n.mu.Lock() n.conns[peerConn] = struct{}{} n.mu.Unlock() return peerConn, nil }
[ "func", "(", "n", "*", "natsStreamLayer", ")", "Dial", "(", "address", "raft", ".", "ServerAddress", ",", "timeout", "time", ".", "Duration", ")", "(", "net", ".", "Conn", ",", "error", ")", "{", "if", "!", "n", ".", "conn", ".", "IsConnected", "(", ")", "{", "return", "nil", ",", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n\n", "// QUESTION: The Raft NetTransport does connection pooling, which is useful", "// for TCP sockets. The NATS transport simulates a socket using a", "// subscription at each endpoint, but everything goes over the same NATS", "// socket. This means there is little advantage to pooling here currently.", "// Should we actually Dial a new NATS connection here and rely on pooling?", "connect", ":=", "&", "connectRequestProto", "{", "ID", ":", "n", ".", "localAddr", ".", "String", "(", ")", ",", "Inbox", ":", "fmt", ".", "Sprintf", "(", "natsRequestInbox", ",", "n", ".", "localAddr", ".", "String", "(", ")", ",", "nats", ".", "NewInbox", "(", ")", ")", ",", "}", "\n", "data", ",", "err", ":=", "json", ".", "Marshal", "(", "connect", ")", "\n", "if", "err", "!=", "nil", "{", "panic", "(", "err", ")", "\n", "}", "\n\n", "peerConn", ":=", "n", ".", "newNATSConn", "(", "string", "(", "address", ")", ")", "\n\n", "// Setup inbox.", "sub", ",", "err", ":=", "n", ".", "conn", ".", "Subscribe", "(", "connect", ".", "Inbox", ",", "peerConn", ".", "msgHandler", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "sub", ".", "SetPendingLimits", "(", "-", "1", ",", "-", "1", ")", "\n", "if", "err", ":=", "n", ".", "conn", ".", "FlushTimeout", "(", "n", ".", "timeout", ")", ";", "err", "!=", "nil", "{", "sub", ".", "Unsubscribe", "(", ")", "\n", "return", "nil", ",", "err", "\n", "}", "\n\n", "// Make connect request to peer.", "msg", ",", "err", ":=", "n", ".", "conn", ".", "Request", "(", "fmt", ".", "Sprintf", "(", "natsConnectInbox", ",", "address", ")", ",", "data", ",", "timeout", ")", "\n", "if", "err", "!=", "nil", "{", "sub", ".", "Unsubscribe", "(", ")", "\n", "return", "nil", ",", "err", "\n", "}", "\n", "var", "resp", "connectResponseProto", "\n", "if", "err", ":=", "json", ".", "Unmarshal", "(", "msg", ".", "Data", ",", "&", "resp", ")", ";", "err", "!=", "nil", "{", "sub", ".", "Unsubscribe", "(", ")", "\n", "return", "nil", ",", "err", "\n", "}", "\n\n", "peerConn", ".", "sub", "=", "sub", "\n", "peerConn", ".", "outbox", "=", "resp", ".", "Inbox", "\n", "n", ".", "mu", ".", "Lock", "(", ")", "\n", "n", ".", "conns", "[", "peerConn", "]", "=", "struct", "{", "}", "{", "}", "\n", "n", ".", "mu", ".", "Unlock", "(", ")", "\n", "return", "peerConn", ",", "nil", "\n", "}" ]
// Dial creates a new net.Conn with the remote address. This is implemented by // performing a handshake over NATS which establishes unique inboxes at each // endpoint for streaming data.
[ "Dial", "creates", "a", "new", "net", ".", "Conn", "with", "the", "remote", "address", ".", "This", "is", "implemented", "by", "performing", "a", "handshake", "over", "NATS", "which", "establishes", "unique", "inboxes", "at", "each", "endpoint", "for", "streaming", "data", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L225-L276
train
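A sketch of how the layer's Dial is exercised. natsStreamLayer is unexported and is normally constructed by the transport helpers further down, so the n value here is assumed.

conn, err := n.Dial(raft.ServerAddress("node-b"), 2*time.Second)
if err != nil {
	log.Fatal(err)
}
defer conn.Close()
// conn behaves like a net.Conn whose bytes travel over the per-peer NATS inboxes
// negotiated in the handshake above.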
nats-io/nats-streaming-server
server/raft_transport.go
newNATSTransport
func newNATSTransport(id string, conn *nats.Conn, timeout time.Duration, logOutput io.Writer) (*raft.NetworkTransport, error) { if logOutput == nil { logOutput = os.Stderr } return newNATSTransportWithLogger(id, conn, timeout, log.New(logOutput, "", log.LstdFlags)) }
go
func newNATSTransport(id string, conn *nats.Conn, timeout time.Duration, logOutput io.Writer) (*raft.NetworkTransport, error) { if logOutput == nil { logOutput = os.Stderr } return newNATSTransportWithLogger(id, conn, timeout, log.New(logOutput, "", log.LstdFlags)) }
[ "func", "newNATSTransport", "(", "id", "string", ",", "conn", "*", "nats", ".", "Conn", ",", "timeout", "time", ".", "Duration", ",", "logOutput", "io", ".", "Writer", ")", "(", "*", "raft", ".", "NetworkTransport", ",", "error", ")", "{", "if", "logOutput", "==", "nil", "{", "logOutput", "=", "os", ".", "Stderr", "\n", "}", "\n", "return", "newNATSTransportWithLogger", "(", "id", ",", "conn", ",", "timeout", ",", "log", ".", "New", "(", "logOutput", ",", "\"", "\"", ",", "log", ".", "LstdFlags", ")", ")", "\n", "}" ]
// newNATSTransport creates a new raft.NetworkTransport implemented with NATS // as the transport layer.
[ "newNATSTransport", "creates", "a", "new", "raft", ".", "NetworkTransport", "implemented", "with", "NATS", "as", "the", "transport", "layer", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L350-L355
train
nats-io/nats-streaming-server
server/raft_transport.go
newNATSTransportWithLogger
func newNATSTransportWithLogger(id string, conn *nats.Conn, timeout time.Duration, logger *log.Logger) (*raft.NetworkTransport, error) { return createNATSTransport(id, conn, logger, timeout, func(stream raft.StreamLayer) *raft.NetworkTransport { return raft.NewNetworkTransportWithLogger(stream, 3, timeout, logger) }) }
go
func newNATSTransportWithLogger(id string, conn *nats.Conn, timeout time.Duration, logger *log.Logger) (*raft.NetworkTransport, error) { return createNATSTransport(id, conn, logger, timeout, func(stream raft.StreamLayer) *raft.NetworkTransport { return raft.NewNetworkTransportWithLogger(stream, 3, timeout, logger) }) }
[ "func", "newNATSTransportWithLogger", "(", "id", "string", ",", "conn", "*", "nats", ".", "Conn", ",", "timeout", "time", ".", "Duration", ",", "logger", "*", "log", ".", "Logger", ")", "(", "*", "raft", ".", "NetworkTransport", ",", "error", ")", "{", "return", "createNATSTransport", "(", "id", ",", "conn", ",", "logger", ",", "timeout", ",", "func", "(", "stream", "raft", ".", "StreamLayer", ")", "*", "raft", ".", "NetworkTransport", "{", "return", "raft", ".", "NewNetworkTransportWithLogger", "(", "stream", ",", "3", ",", "timeout", ",", "logger", ")", "\n", "}", ")", "\n", "}" ]
// newNATSTransportWithLogger creates a new raft.NetworkTransport implemented // with NATS as the transport layer using the provided Logger.
[ "newNATSTransportWithLogger", "creates", "a", "new", "raft", ".", "NetworkTransport", "implemented", "with", "NATS", "as", "the", "transport", "layer", "using", "the", "provided", "Logger", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L359-L363
train
nats-io/nats-streaming-server
server/raft_transport.go
newNATSTransportWithConfig
func newNATSTransportWithConfig(id string, conn *nats.Conn, config *raft.NetworkTransportConfig) (*raft.NetworkTransport, error) { if config.Timeout == 0 { config.Timeout = 2 * time.Second } return createNATSTransport(id, conn, config.Logger, config.Timeout, func(stream raft.StreamLayer) *raft.NetworkTransport { config.Stream = stream return raft.NewNetworkTransportWithConfig(config) }) }
go
func newNATSTransportWithConfig(id string, conn *nats.Conn, config *raft.NetworkTransportConfig) (*raft.NetworkTransport, error) { if config.Timeout == 0 { config.Timeout = 2 * time.Second } return createNATSTransport(id, conn, config.Logger, config.Timeout, func(stream raft.StreamLayer) *raft.NetworkTransport { config.Stream = stream return raft.NewNetworkTransportWithConfig(config) }) }
[ "func", "newNATSTransportWithConfig", "(", "id", "string", ",", "conn", "*", "nats", ".", "Conn", ",", "config", "*", "raft", ".", "NetworkTransportConfig", ")", "(", "*", "raft", ".", "NetworkTransport", ",", "error", ")", "{", "if", "config", ".", "Timeout", "==", "0", "{", "config", ".", "Timeout", "=", "2", "*", "time", ".", "Second", "\n", "}", "\n", "return", "createNATSTransport", "(", "id", ",", "conn", ",", "config", ".", "Logger", ",", "config", ".", "Timeout", ",", "func", "(", "stream", "raft", ".", "StreamLayer", ")", "*", "raft", ".", "NetworkTransport", "{", "config", ".", "Stream", "=", "stream", "\n", "return", "raft", ".", "NewNetworkTransportWithConfig", "(", "config", ")", "\n", "}", ")", "\n", "}" ]
// newNATSTransportWithConfig returns a raft.NetworkTransport implemented // with NATS as the transport layer, using the given config struct.
[ "newNATSTransportWithConfig", "returns", "a", "raft", ".", "NetworkTransport", "implemented", "with", "NATS", "as", "the", "transport", "layer", "using", "the", "given", "config", "struct", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_transport.go#L367-L375
train
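Putting the three constructors together, a minimal in-package sketch of creating the transport; the NATS URL and error handling are illustrative only.

nc, err := nats.Connect(nats.DefaultURL)
if err != nil {
	log.Fatal(err)
}
trans, err := newNATSTransport("node-a", nc, 2*time.Second, os.Stderr)
if err != nil {
	log.Fatal(err)
}
defer trans.Close()
// trans is a *raft.NetworkTransport and can be handed to raft.NewRaft.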
nats-io/nats-streaming-server
stores/memstore.go
expireMsgs
func (ms *MemoryMsgStore) expireMsgs() { ms.Lock() defer ms.Unlock() if ms.closed { ms.wg.Done() return } now := time.Now().UnixNano() maxAge := int64(ms.limits.MaxAge) for { m, ok := ms.msgs[ms.first] if !ok { if ms.first < ms.last { ms.first++ continue } ms.ageTimer = nil ms.wg.Done() return } elapsed := now - m.Timestamp if elapsed >= maxAge { ms.removeFirstMsg() } else { if elapsed < 0 { ms.ageTimer.Reset(time.Duration(m.Timestamp - now + maxAge)) } else { ms.ageTimer.Reset(time.Duration(maxAge - elapsed)) } return } } }
go
func (ms *MemoryMsgStore) expireMsgs() { ms.Lock() defer ms.Unlock() if ms.closed { ms.wg.Done() return } now := time.Now().UnixNano() maxAge := int64(ms.limits.MaxAge) for { m, ok := ms.msgs[ms.first] if !ok { if ms.first < ms.last { ms.first++ continue } ms.ageTimer = nil ms.wg.Done() return } elapsed := now - m.Timestamp if elapsed >= maxAge { ms.removeFirstMsg() } else { if elapsed < 0 { ms.ageTimer.Reset(time.Duration(m.Timestamp - now + maxAge)) } else { ms.ageTimer.Reset(time.Duration(maxAge - elapsed)) } return } } }
[ "func", "(", "ms", "*", "MemoryMsgStore", ")", "expireMsgs", "(", ")", "{", "ms", ".", "Lock", "(", ")", "\n", "defer", "ms", ".", "Unlock", "(", ")", "\n", "if", "ms", ".", "closed", "{", "ms", ".", "wg", ".", "Done", "(", ")", "\n", "return", "\n", "}", "\n\n", "now", ":=", "time", ".", "Now", "(", ")", ".", "UnixNano", "(", ")", "\n", "maxAge", ":=", "int64", "(", "ms", ".", "limits", ".", "MaxAge", ")", "\n", "for", "{", "m", ",", "ok", ":=", "ms", ".", "msgs", "[", "ms", ".", "first", "]", "\n", "if", "!", "ok", "{", "if", "ms", ".", "first", "<", "ms", ".", "last", "{", "ms", ".", "first", "++", "\n", "continue", "\n", "}", "\n", "ms", ".", "ageTimer", "=", "nil", "\n", "ms", ".", "wg", ".", "Done", "(", ")", "\n", "return", "\n", "}", "\n", "elapsed", ":=", "now", "-", "m", ".", "Timestamp", "\n", "if", "elapsed", ">=", "maxAge", "{", "ms", ".", "removeFirstMsg", "(", ")", "\n", "}", "else", "{", "if", "elapsed", "<", "0", "{", "ms", ".", "ageTimer", ".", "Reset", "(", "time", ".", "Duration", "(", "m", ".", "Timestamp", "-", "now", "+", "maxAge", ")", ")", "\n", "}", "else", "{", "ms", ".", "ageTimer", ".", "Reset", "(", "time", ".", "Duration", "(", "maxAge", "-", "elapsed", ")", ")", "\n", "}", "\n", "return", "\n", "}", "\n", "}", "\n", "}" ]
// expireMsgs ensures that messages don't stay in the log longer than the // limit's MaxAge.
[ "expireMsgs", "ensures", "that", "messages", "don", "t", "stay", "in", "the", "log", "longer", "than", "the", "limit", "s", "MaxAge", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L189-L222
train
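The timer re-arm arithmetic from expireMsgs, with made-up numbers:

maxAge := int64(10 * time.Second)
elapsed := int64(3 * time.Second) // the oldest message is 3s old
rearm := time.Duration(maxAge - elapsed)
// rearm is 7s: the timer fires again when that message reaches MaxAge.
// If elapsed were negative (timestamp in the future), the reset would instead
// use m.Timestamp - now + maxAge, as in the code above.
_ = rearm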
nats-io/nats-streaming-server
stores/memstore.go
removeFirstMsg
func (ms *MemoryMsgStore) removeFirstMsg() { firstMsg := ms.msgs[ms.first] ms.totalBytes -= uint64(firstMsg.Size()) ms.totalCount-- delete(ms.msgs, ms.first) ms.first++ }
go
func (ms *MemoryMsgStore) removeFirstMsg() { firstMsg := ms.msgs[ms.first] ms.totalBytes -= uint64(firstMsg.Size()) ms.totalCount-- delete(ms.msgs, ms.first) ms.first++ }
[ "func", "(", "ms", "*", "MemoryMsgStore", ")", "removeFirstMsg", "(", ")", "{", "firstMsg", ":=", "ms", ".", "msgs", "[", "ms", ".", "first", "]", "\n", "ms", ".", "totalBytes", "-=", "uint64", "(", "firstMsg", ".", "Size", "(", ")", ")", "\n", "ms", ".", "totalCount", "--", "\n", "delete", "(", "ms", ".", "msgs", ",", "ms", ".", "first", ")", "\n", "ms", ".", "first", "++", "\n", "}" ]
// removeFirstMsg removes the first message and updates totals.
[ "removeFirstMsg", "removes", "the", "first", "message", "and", "updates", "totals", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/memstore.go#L225-L231
train
nats-io/nats-streaming-server
stores/sqlstore.go
SQLNoCaching
func SQLNoCaching(noCaching bool) SQLStoreOption { return func(o *SQLStoreOptions) error { o.NoCaching = noCaching return nil } }
go
func SQLNoCaching(noCaching bool) SQLStoreOption { return func(o *SQLStoreOptions) error { o.NoCaching = noCaching return nil } }
[ "func", "SQLNoCaching", "(", "noCaching", "bool", ")", "SQLStoreOption", "{", "return", "func", "(", "o", "*", "SQLStoreOptions", ")", "error", "{", "o", ".", "NoCaching", "=", "noCaching", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// SQLNoCaching sets the NoCaching option
[ "SQLNoCaching", "sets", "the", "NoCaching", "option" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L231-L236
train
nats-io/nats-streaming-server
stores/sqlstore.go
SQLMaxOpenConns
func SQLMaxOpenConns(max int) SQLStoreOption { return func(o *SQLStoreOptions) error { o.MaxOpenConns = max return nil } }
go
func SQLMaxOpenConns(max int) SQLStoreOption { return func(o *SQLStoreOptions) error { o.MaxOpenConns = max return nil } }
[ "func", "SQLMaxOpenConns", "(", "max", "int", ")", "SQLStoreOption", "{", "return", "func", "(", "o", "*", "SQLStoreOptions", ")", "error", "{", "o", ".", "MaxOpenConns", "=", "max", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// SQLMaxOpenConns sets the MaxOpenConns option
[ "SQLMaxOpenConns", "sets", "the", "MaxOpenConns", "option" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L239-L244
train
nats-io/nats-streaming-server
stores/sqlstore.go
SQLAllOptions
func SQLAllOptions(opts *SQLStoreOptions) SQLStoreOption { return func(o *SQLStoreOptions) error { o.NoCaching = opts.NoCaching o.MaxOpenConns = opts.MaxOpenConns return nil } }
go
func SQLAllOptions(opts *SQLStoreOptions) SQLStoreOption { return func(o *SQLStoreOptions) error { o.NoCaching = opts.NoCaching o.MaxOpenConns = opts.MaxOpenConns return nil } }
[ "func", "SQLAllOptions", "(", "opts", "*", "SQLStoreOptions", ")", "SQLStoreOption", "{", "return", "func", "(", "o", "*", "SQLStoreOptions", ")", "error", "{", "o", ".", "NoCaching", "=", "opts", ".", "NoCaching", "\n", "o", ".", "MaxOpenConns", "=", "opts", ".", "MaxOpenConns", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// SQLAllOptions is a convenient option to pass all options from a SQLStoreOptions // structure to the constructor.
[ "SQLAllOptions", "is", "a", "convenient", "option", "to", "pass", "all", "options", "from", "a", "SQLStoreOptions", "structure", "to", "the", "constructor", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L248-L254
train
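A hedged sketch of passing these options at store creation. NewSQLStore and its argument list are not shown in this excerpt, and logger, dsn and limits are placeholders, so treat the whole call as an assumption about the rest of sqlstore.go.

s, err := stores.NewSQLStore(logger, "mysql", dsn, limits,
	stores.SQLNoCaching(true),
	stores.SQLMaxOpenConns(25),
)
if err != nil {
	log.Fatal(err)
}
defer s.Close()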
nats-io/nats-streaming-server
stores/sqlstore.go
sqlStmtError
func sqlStmtError(code int, err error) error { return fmt.Errorf("sql: error executing %q: %v", sqlStmts[code], err) }
go
func sqlStmtError(code int, err error) error { return fmt.Errorf("sql: error executing %q: %v", sqlStmts[code], err) }
[ "func", "sqlStmtError", "(", "code", "int", ",", "err", "error", ")", "error", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "sqlStmts", "[", "code", "]", ",", "err", ")", "\n", "}" ]
// sqlStmtError returns an error including the text of the offending SQL statement.
[ "sqlStmtError", "returns", "an", "error", "including", "the", "text", "of", "the", "offending", "SQL", "statement", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L353-L355
train
nats-io/nats-streaming-server
stores/sqlstore.go
updateDBLock
func (s *SQLStore) updateDBLock() { defer s.wg.Done() var ( ticker = time.NewTicker(sqlLockUpdateInterval) hasLock = true err error failed int ) for { select { case <-ticker.C: hasLock, _, _, err = s.acquireDBLock(false) if !hasLock || err != nil { // If there is no error but we did not get the lock, // something is really wrong, abort right away. stopNow := !hasLock && err == nil if err != nil { failed++ s.log.Errorf("Unable to update store lock (failed=%v err=%v)", failed, err) } if stopNow || failed == sqlLockLostCount { if sqlNoPanic { s.log.Fatalf("Aborting") return } panic("lost store lock, aborting") } } else { failed = 0 } case <-s.doneCh: ticker.Stop() return } } }
go
func (s *SQLStore) updateDBLock() { defer s.wg.Done() var ( ticker = time.NewTicker(sqlLockUpdateInterval) hasLock = true err error failed int ) for { select { case <-ticker.C: hasLock, _, _, err = s.acquireDBLock(false) if !hasLock || err != nil { // If there is no error but we did not get the lock, // something is really wrong, abort right away. stopNow := !hasLock && err == nil if err != nil { failed++ s.log.Errorf("Unable to update store lock (failed=%v err=%v)", failed, err) } if stopNow || failed == sqlLockLostCount { if sqlNoPanic { s.log.Fatalf("Aborting") return } panic("lost store lock, aborting") } } else { failed = 0 } case <-s.doneCh: ticker.Stop() return } } }
[ "func", "(", "s", "*", "SQLStore", ")", "updateDBLock", "(", ")", "{", "defer", "s", ".", "wg", ".", "Done", "(", ")", "\n\n", "var", "(", "ticker", "=", "time", ".", "NewTicker", "(", "sqlLockUpdateInterval", ")", "\n", "hasLock", "=", "true", "\n", "err", "error", "\n", "failed", "int", "\n", ")", "\n", "for", "{", "select", "{", "case", "<-", "ticker", ".", "C", ":", "hasLock", ",", "_", ",", "_", ",", "err", "=", "s", ".", "acquireDBLock", "(", "false", ")", "\n", "if", "!", "hasLock", "||", "err", "!=", "nil", "{", "// If there is no error but we did not get the lock,", "// something is really wrong, abort right away.", "stopNow", ":=", "!", "hasLock", "&&", "err", "==", "nil", "\n", "if", "err", "!=", "nil", "{", "failed", "++", "\n", "s", ".", "log", ".", "Errorf", "(", "\"", "\"", ",", "failed", ",", "err", ")", "\n", "}", "\n", "if", "stopNow", "||", "failed", "==", "sqlLockLostCount", "{", "if", "sqlNoPanic", "{", "s", ".", "log", ".", "Fatalf", "(", "\"", "\"", ")", "\n", "return", "\n", "}", "\n", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "}", "else", "{", "failed", "=", "0", "\n", "}", "\n", "case", "<-", "s", ".", "doneCh", ":", "ticker", ".", "Stop", "(", ")", "\n", "return", "\n", "}", "\n", "}", "\n", "}" ]
// This go-routine updates the DB store lock at regular intervals.
[ "This", "go", "-", "routine", "updates", "the", "DB", "store", "lock", "at", "regular", "intervals", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L478-L514
train
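The lock-refresh goroutine above is a ticker-plus-done-channel keepalive loop with a bounded count of consecutive failures. The reduced, runnable sketch below mirrors that shape with hypothetical names (keepAlive, renew); it illustrates the pattern, not the server's actual code path.

package main

import (
	"fmt"
	"time"
)

// keepAlive periodically calls renew until doneCh is closed, giving up
// after maxFailures consecutive errors (mirrors the lock-refresh loop above).
func keepAlive(renew func() error, interval time.Duration, maxFailures int, doneCh chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	failed := 0
	for {
		select {
		case <-ticker.C:
			if err := renew(); err != nil {
				failed++
				fmt.Printf("renew failed (%d/%d): %v\n", failed, maxFailures, err)
				if failed == maxFailures {
					fmt.Println("giving up")
					return
				}
			} else {
				failed = 0
			}
		case <-doneCh:
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	// renew always succeeds here, so nothing is printed; failures would be logged above.
	go keepAlive(func() error { return nil }, 10*time.Millisecond, 3, done)
	time.Sleep(50 * time.Millisecond)
	close(done)
}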
nats-io/nats-streaming-server
stores/sqlstore.go
acquireDBLock
func (s *SQLStore) acquireDBLock(steal bool) (bool, string, uint64, error) { s.dbLock.Lock() defer s.dbLock.Unlock() var ( lockID string tick uint64 hasLock bool ) tx, err := s.dbLock.db.Begin() if err != nil { return false, "", 0, err } defer func() { if tx != nil { tx.Rollback() } }() r := tx.QueryRow(sqlStmts[sqlDBLockSelect]) err = r.Scan(&lockID, &tick) if err != nil && err != sql.ErrNoRows { return false, "", 0, sqlStmtError(sqlDBLockSelect, err) } if err == sql.ErrNoRows || steal || lockID == "" || lockID == s.dbLock.id { // If we are stealing, reset tick to 0 (so it will become 1 in update statement) if steal { tick = 0 } stmt := sqlStmts[sqlDBLockUpdate] if err == sql.ErrNoRows { stmt = sqlStmts[sqlDBLockInsert] } if _, err := tx.Exec(stmt, s.dbLock.id, tick+1); err != nil { return false, "", 0, sqlStmtError(sqlDBLockUpdate, err) } hasLock = true } if err := tx.Commit(); err != nil { return false, "", 0, err } tx = nil return hasLock, lockID, tick, nil }
go
func (s *SQLStore) acquireDBLock(steal bool) (bool, string, uint64, error) { s.dbLock.Lock() defer s.dbLock.Unlock() var ( lockID string tick uint64 hasLock bool ) tx, err := s.dbLock.db.Begin() if err != nil { return false, "", 0, err } defer func() { if tx != nil { tx.Rollback() } }() r := tx.QueryRow(sqlStmts[sqlDBLockSelect]) err = r.Scan(&lockID, &tick) if err != nil && err != sql.ErrNoRows { return false, "", 0, sqlStmtError(sqlDBLockSelect, err) } if err == sql.ErrNoRows || steal || lockID == "" || lockID == s.dbLock.id { // If we are stealing, reset tick to 0 (so it will become 1 in update statement) if steal { tick = 0 } stmt := sqlStmts[sqlDBLockUpdate] if err == sql.ErrNoRows { stmt = sqlStmts[sqlDBLockInsert] } if _, err := tx.Exec(stmt, s.dbLock.id, tick+1); err != nil { return false, "", 0, sqlStmtError(sqlDBLockUpdate, err) } hasLock = true } if err := tx.Commit(); err != nil { return false, "", 0, err } tx = nil return hasLock, lockID, tick, nil }
[ "func", "(", "s", "*", "SQLStore", ")", "acquireDBLock", "(", "steal", "bool", ")", "(", "bool", ",", "string", ",", "uint64", ",", "error", ")", "{", "s", ".", "dbLock", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "dbLock", ".", "Unlock", "(", ")", "\n", "var", "(", "lockID", "string", "\n", "tick", "uint64", "\n", "hasLock", "bool", "\n", ")", "\n", "tx", ",", "err", ":=", "s", ".", "dbLock", ".", "db", ".", "Begin", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "false", ",", "\"", "\"", ",", "0", ",", "err", "\n", "}", "\n", "defer", "func", "(", ")", "{", "if", "tx", "!=", "nil", "{", "tx", ".", "Rollback", "(", ")", "\n", "}", "\n", "}", "(", ")", "\n", "r", ":=", "tx", ".", "QueryRow", "(", "sqlStmts", "[", "sqlDBLockSelect", "]", ")", "\n", "err", "=", "r", ".", "Scan", "(", "&", "lockID", ",", "&", "tick", ")", "\n", "if", "err", "!=", "nil", "&&", "err", "!=", "sql", ".", "ErrNoRows", "{", "return", "false", ",", "\"", "\"", ",", "0", ",", "sqlStmtError", "(", "sqlDBLockSelect", ",", "err", ")", "\n", "}", "\n", "if", "err", "==", "sql", ".", "ErrNoRows", "||", "steal", "||", "lockID", "==", "\"", "\"", "||", "lockID", "==", "s", ".", "dbLock", ".", "id", "{", "// If we are stealing, reset tick to 0 (so it will become 1 in update statement)", "if", "steal", "{", "tick", "=", "0", "\n", "}", "\n", "stmt", ":=", "sqlStmts", "[", "sqlDBLockUpdate", "]", "\n", "if", "err", "==", "sql", ".", "ErrNoRows", "{", "stmt", "=", "sqlStmts", "[", "sqlDBLockInsert", "]", "\n", "}", "\n", "if", "_", ",", "err", ":=", "tx", ".", "Exec", "(", "stmt", ",", "s", ".", "dbLock", ".", "id", ",", "tick", "+", "1", ")", ";", "err", "!=", "nil", "{", "return", "false", ",", "\"", "\"", ",", "0", ",", "sqlStmtError", "(", "sqlDBLockUpdate", ",", "err", ")", "\n", "}", "\n", "hasLock", "=", "true", "\n", "}", "\n", "if", "err", ":=", "tx", ".", "Commit", "(", ")", ";", "err", "!=", "nil", "{", "return", "false", ",", "\"", "\"", ",", "0", ",", "err", "\n", "}", "\n", "tx", "=", "nil", "\n", "return", "hasLock", ",", "lockID", ",", "tick", ",", "nil", "\n", "}" ]
// Returns if lock is acquired, the owner and tick value of the lock record.
[ "Returns", "if", "lock", "is", "acquired", "the", "owner", "and", "tick", "value", "of", "the", "lock", "record", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L517-L558
train
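One detail worth noting in acquireDBLock is the defer-rollback-unless-committed idiom: the deferred function rolls back only while tx is non-nil, and tx is set to nil right after a successful Commit. Below is a compact sketch of that idiom against database/sql; the helper name withTx and the package name are hypothetical, and a real program would also need a SQL driver imported.

package store

import "database/sql"

// withTx runs fn inside a transaction, rolling back unless Commit succeeds.
// This mirrors the tx handling in acquireDBLock above.
func withTx(db *sql.DB, fn func(*sql.Tx) error) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer func() {
		if tx != nil {
			tx.Rollback() // skipped after a successful Commit because tx is set to nil
		}
	}()
	if err := fn(tx); err != nil {
		return err
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	tx = nil
	return nil
}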
nats-io/nats-streaming-server
stores/sqlstore.go
releaseDBLockIfOwner
func (s *SQLStore) releaseDBLockIfOwner() { s.dbLock.Lock() defer s.dbLock.Unlock() if s.dbLock.isOwner { s.dbLock.db.Exec(sqlStmts[sqlDBLockUpdate], "", 0) } }
go
func (s *SQLStore) releaseDBLockIfOwner() { s.dbLock.Lock() defer s.dbLock.Unlock() if s.dbLock.isOwner { s.dbLock.db.Exec(sqlStmts[sqlDBLockUpdate], "", 0) } }
[ "func", "(", "s", "*", "SQLStore", ")", "releaseDBLockIfOwner", "(", ")", "{", "s", ".", "dbLock", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "dbLock", ".", "Unlock", "(", ")", "\n", "if", "s", ".", "dbLock", ".", "isOwner", "{", "s", ".", "dbLock", ".", "db", ".", "Exec", "(", "sqlStmts", "[", "sqlDBLockUpdate", "]", ",", "\"", "\"", ",", "0", ")", "\n", "}", "\n", "}" ]
// Release the store lock if this store was the owner of the lock
[ "Release", "the", "store", "lock", "if", "this", "store", "was", "the", "owner", "of", "the", "lock" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L561-L567
train
nats-io/nats-streaming-server
stores/sqlstore.go
scheduleSubStoreFlush
func (s *SQLStore) scheduleSubStoreFlush(ss *SQLSubStore) { needSignal := false f := s.ssFlusher f.Lock() f.stores[ss] = struct{}{} if !f.signaled { f.signaled = true needSignal = true } f.Unlock() if needSignal { select { case f.signalCh <- struct{}{}: default: } } }
go
func (s *SQLStore) scheduleSubStoreFlush(ss *SQLSubStore) { needSignal := false f := s.ssFlusher f.Lock() f.stores[ss] = struct{}{} if !f.signaled { f.signaled = true needSignal = true } f.Unlock() if needSignal { select { case f.signalCh <- struct{}{}: default: } } }
[ "func", "(", "s", "*", "SQLStore", ")", "scheduleSubStoreFlush", "(", "ss", "*", "SQLSubStore", ")", "{", "needSignal", ":=", "false", "\n", "f", ":=", "s", ".", "ssFlusher", "\n", "f", ".", "Lock", "(", ")", "\n", "f", ".", "stores", "[", "ss", "]", "=", "struct", "{", "}", "{", "}", "\n", "if", "!", "f", ".", "signaled", "{", "f", ".", "signaled", "=", "true", "\n", "needSignal", "=", "true", "\n", "}", "\n", "f", ".", "Unlock", "(", ")", "\n", "if", "needSignal", "{", "select", "{", "case", "f", ".", "signalCh", "<-", "struct", "{", "}", "{", "}", ":", "default", ":", "}", "\n", "}", "\n", "}" ]
// Add this store to the list of SubStore needing flushing // and signal the go-routine responsible for flushing if // need be.
[ "Add", "this", "store", "to", "the", "list", "of", "SubStore", "needing", "flushing", "and", "signal", "the", "go", "-", "routine", "responsible", "for", "flushing", "if", "need", "be", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L617-L633
train
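The non-blocking send on signalCh (a select with a default case) coalesces any number of wake-up requests into at most one pending signal for the flusher goroutine. A self-contained illustration of that behavior:

package main

import "fmt"

// signal performs a non-blocking send: if a wake-up is already pending,
// the new one is dropped, which is fine because one signal wakes the flusher.
func signal(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	ch := make(chan struct{}, 1)
	for i := 0; i < 5; i++ {
		signal(ch) // only the first send actually lands in the buffer
	}
	fmt.Println("pending signals:", len(ch)) // pending signals: 1
}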
nats-io/nats-streaming-server
stores/sqlstore.go
newSQLMsgStore
func (s *SQLStore) newSQLMsgStore(channel string, channelID int64, limits *MsgStoreLimits) *SQLMsgStore { msgStore := &SQLMsgStore{ sqlStore: s, channelID: channelID, } msgStore.init(channel, s.log, limits) if !s.opts.NoCaching { msgStore.writeCache = &sqlMsgsCache{msgs: make(map[uint64]*sqlCachedMsg)} } return msgStore }
go
func (s *SQLStore) newSQLMsgStore(channel string, channelID int64, limits *MsgStoreLimits) *SQLMsgStore { msgStore := &SQLMsgStore{ sqlStore: s, channelID: channelID, } msgStore.init(channel, s.log, limits) if !s.opts.NoCaching { msgStore.writeCache = &sqlMsgsCache{msgs: make(map[uint64]*sqlCachedMsg)} } return msgStore }
[ "func", "(", "s", "*", "SQLStore", ")", "newSQLMsgStore", "(", "channel", "string", ",", "channelID", "int64", ",", "limits", "*", "MsgStoreLimits", ")", "*", "SQLMsgStore", "{", "msgStore", ":=", "&", "SQLMsgStore", "{", "sqlStore", ":", "s", ",", "channelID", ":", "channelID", ",", "}", "\n", "msgStore", ".", "init", "(", "channel", ",", "s", ".", "log", ",", "limits", ")", "\n", "if", "!", "s", ".", "opts", ".", "NoCaching", "{", "msgStore", ".", "writeCache", "=", "&", "sqlMsgsCache", "{", "msgs", ":", "make", "(", "map", "[", "uint64", "]", "*", "sqlCachedMsg", ")", "}", "\n", "}", "\n", "return", "msgStore", "\n", "}" ]
// creates an instance of a SQLMsgStore
[ "creates", "an", "instance", "of", "a", "SQLMsgStore" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L636-L646
train
nats-io/nats-streaming-server
stores/sqlstore.go
newSQLSubStore
func (s *SQLStore) newSQLSubStore(channelID int64, limits *SubStoreLimits) *SQLSubStore { subStore := &SQLSubStore{ sqlStore: s, channelID: channelID, maxSubID: &s.maxSubID, limits: *limits, } subStore.log = s.log if s.opts.NoCaching { subStore.subLastSent = make(map[uint64]uint64) } else { subStore.cache = &sqlSubAcksPendingCache{ subs: make(map[uint64]*sqlSubAcksPending), } } return subStore }
go
func (s *SQLStore) newSQLSubStore(channelID int64, limits *SubStoreLimits) *SQLSubStore { subStore := &SQLSubStore{ sqlStore: s, channelID: channelID, maxSubID: &s.maxSubID, limits: *limits, } subStore.log = s.log if s.opts.NoCaching { subStore.subLastSent = make(map[uint64]uint64) } else { subStore.cache = &sqlSubAcksPendingCache{ subs: make(map[uint64]*sqlSubAcksPending), } } return subStore }
[ "func", "(", "s", "*", "SQLStore", ")", "newSQLSubStore", "(", "channelID", "int64", ",", "limits", "*", "SubStoreLimits", ")", "*", "SQLSubStore", "{", "subStore", ":=", "&", "SQLSubStore", "{", "sqlStore", ":", "s", ",", "channelID", ":", "channelID", ",", "maxSubID", ":", "&", "s", ".", "maxSubID", ",", "limits", ":", "*", "limits", ",", "}", "\n", "subStore", ".", "log", "=", "s", ".", "log", "\n", "if", "s", ".", "opts", ".", "NoCaching", "{", "subStore", ".", "subLastSent", "=", "make", "(", "map", "[", "uint64", "]", "uint64", ")", "\n", "}", "else", "{", "subStore", ".", "cache", "=", "&", "sqlSubAcksPendingCache", "{", "subs", ":", "make", "(", "map", "[", "uint64", "]", "*", "sqlSubAcksPending", ")", ",", "}", "\n", "}", "\n", "return", "subStore", "\n", "}" ]
// creates an instance of SQLSubStore
[ "creates", "an", "instance", "of", "SQLSubStore" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L649-L665
train
nats-io/nats-streaming-server
stores/sqlstore.go
initSQLStmtsTable
func initSQLStmtsTable(driver string) { // The sqlStmts table is initialized with MySQL statements. // Update the statements for the selected driver. switch driver { case driverPostgres: // Replace ? with $1, $2, etc... for i, stmt := range sqlStmts { n := 0 for strings.IndexByte(stmt, '?') != -1 { n++ param := "$" + strconv.Itoa(n) stmt = strings.Replace(stmt, "?", param, 1) } sqlStmts[i] = stmt } // Replace `row` with row for i, stmt := range sqlStmts { stmt := strings.Replace(stmt, "`row`", "row", -1) sqlStmts[i] = stmt } // OVER (PARTITION ...) is not supported in older MySQL servers. // So the default SQL statement is specific to MySQL and uses variables. // For Postgres, replace with this statement: sqlStmts[sqlRecoverGetSeqFloorForMaxBytes] = "SELECT COALESCE(MIN(seq), 0) FROM (SELECT seq, SUM(size) OVER (PARTITION BY id ORDER BY seq DESC) AS total FROM Messages WHERE id=$1)t WHERE t.total<=$2" } }
go
func initSQLStmtsTable(driver string) { // The sqlStmts table is initialized with MySQL statements. // Update the statements for the selected driver. switch driver { case driverPostgres: // Replace ? with $1, $2, etc... for i, stmt := range sqlStmts { n := 0 for strings.IndexByte(stmt, '?') != -1 { n++ param := "$" + strconv.Itoa(n) stmt = strings.Replace(stmt, "?", param, 1) } sqlStmts[i] = stmt } // Replace `row` with row for i, stmt := range sqlStmts { stmt := strings.Replace(stmt, "`row`", "row", -1) sqlStmts[i] = stmt } // OVER (PARTITION ...) is not supported in older MySQL servers. // So the default SQL statement is specific to MySQL and uses variables. // For Postgres, replace with this statement: sqlStmts[sqlRecoverGetSeqFloorForMaxBytes] = "SELECT COALESCE(MIN(seq), 0) FROM (SELECT seq, SUM(size) OVER (PARTITION BY id ORDER BY seq DESC) AS total FROM Messages WHERE id=$1)t WHERE t.total<=$2" } }
[ "func", "initSQLStmtsTable", "(", "driver", "string", ")", "{", "// The sqlStmts table is initialized with MySQL statements.", "// Update the statements for the selected driver.", "switch", "driver", "{", "case", "driverPostgres", ":", "// Replace ? with $1, $2, etc...", "for", "i", ",", "stmt", ":=", "range", "sqlStmts", "{", "n", ":=", "0", "\n", "for", "strings", ".", "IndexByte", "(", "stmt", ",", "'?'", ")", "!=", "-", "1", "{", "n", "++", "\n", "param", ":=", "\"", "\"", "+", "strconv", ".", "Itoa", "(", "n", ")", "\n", "stmt", "=", "strings", ".", "Replace", "(", "stmt", ",", "\"", "\"", ",", "param", ",", "1", ")", "\n", "}", "\n", "sqlStmts", "[", "i", "]", "=", "stmt", "\n", "}", "\n", "// Replace `row` with row", "for", "i", ",", "stmt", ":=", "range", "sqlStmts", "{", "stmt", ":=", "strings", ".", "Replace", "(", "stmt", ",", "\"", "\"", ",", "\"", "\"", ",", "-", "1", ")", "\n", "sqlStmts", "[", "i", "]", "=", "stmt", "\n", "}", "\n", "// OVER (PARTITION ...) is not supported in older MySQL servers.", "// So the default SQL statement is specific to MySQL and uses variables.", "// For Postgres, replace with this statement:", "sqlStmts", "[", "sqlRecoverGetSeqFloorForMaxBytes", "]", "=", "\"", "\"", "\n", "}", "\n", "}" ]
// initialize the global sqlStmts table to driver's one.
[ "initialize", "the", "global", "sqlStmts", "table", "to", "driver", "s", "one", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L680-L705
train
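The Postgres branch rewrites MySQL-style ? placeholders into positional $1, $2, ... parameters, one occurrence at a time. That rewrite loop is easy to exercise in isolation; the helper name below (toPostgresParams) is made up for the example.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// toPostgresParams replaces each ? with $1, $2, ... in order,
// the same way the loop above rewrites the sqlStmts table.
func toPostgresParams(stmt string) string {
	n := 0
	for strings.IndexByte(stmt, '?') != -1 {
		n++
		stmt = strings.Replace(stmt, "?", "$"+strconv.Itoa(n), 1)
	}
	return stmt
}

func main() {
	fmt.Println(toPostgresParams("SELECT seq FROM Messages WHERE id=? AND timestamp>=?"))
	// SELECT seq FROM Messages WHERE id=$1 AND timestamp>=$2
}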
nats-io/nats-streaming-server
stores/sqlstore.go
Init
func (s *SQLStore) Init(info *spb.ServerInfo) error { s.Lock() defer s.Unlock() count := 0 r := s.db.QueryRow(sqlStmts[sqlHasServerInfoRow]) if err := r.Scan(&count); err != nil && err != sql.ErrNoRows { return sqlStmtError(sqlHasServerInfoRow, err) } infoBytes, _ := info.Marshal() if count == 0 { if _, err := s.db.Exec(sqlStmts[sqlAddServerInfo], info.ClusterID, infoBytes, sqlVersion); err != nil { return sqlStmtError(sqlAddServerInfo, err) } } else { if _, err := s.db.Exec(sqlStmts[sqlUpdateServerInfo], info.ClusterID, infoBytes, sqlVersion); err != nil { return sqlStmtError(sqlUpdateServerInfo, err) } } return nil }
go
func (s *SQLStore) Init(info *spb.ServerInfo) error { s.Lock() defer s.Unlock() count := 0 r := s.db.QueryRow(sqlStmts[sqlHasServerInfoRow]) if err := r.Scan(&count); err != nil && err != sql.ErrNoRows { return sqlStmtError(sqlHasServerInfoRow, err) } infoBytes, _ := info.Marshal() if count == 0 { if _, err := s.db.Exec(sqlStmts[sqlAddServerInfo], info.ClusterID, infoBytes, sqlVersion); err != nil { return sqlStmtError(sqlAddServerInfo, err) } } else { if _, err := s.db.Exec(sqlStmts[sqlUpdateServerInfo], info.ClusterID, infoBytes, sqlVersion); err != nil { return sqlStmtError(sqlUpdateServerInfo, err) } } return nil }
[ "func", "(", "s", "*", "SQLStore", ")", "Init", "(", "info", "*", "spb", ".", "ServerInfo", ")", "error", "{", "s", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "Unlock", "(", ")", "\n", "count", ":=", "0", "\n", "r", ":=", "s", ".", "db", ".", "QueryRow", "(", "sqlStmts", "[", "sqlHasServerInfoRow", "]", ")", "\n", "if", "err", ":=", "r", ".", "Scan", "(", "&", "count", ")", ";", "err", "!=", "nil", "&&", "err", "!=", "sql", ".", "ErrNoRows", "{", "return", "sqlStmtError", "(", "sqlHasServerInfoRow", ",", "err", ")", "\n", "}", "\n", "infoBytes", ",", "_", ":=", "info", ".", "Marshal", "(", ")", "\n", "if", "count", "==", "0", "{", "if", "_", ",", "err", ":=", "s", ".", "db", ".", "Exec", "(", "sqlStmts", "[", "sqlAddServerInfo", "]", ",", "info", ".", "ClusterID", ",", "infoBytes", ",", "sqlVersion", ")", ";", "err", "!=", "nil", "{", "return", "sqlStmtError", "(", "sqlAddServerInfo", ",", "err", ")", "\n", "}", "\n", "}", "else", "{", "if", "_", ",", "err", ":=", "s", ".", "db", ".", "Exec", "(", "sqlStmts", "[", "sqlUpdateServerInfo", "]", ",", "info", ".", "ClusterID", ",", "infoBytes", ",", "sqlVersion", ")", ";", "err", "!=", "nil", "{", "return", "sqlStmtError", "(", "sqlUpdateServerInfo", ",", "err", ")", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// Init implements the Store interface
[ "Init", "implements", "the", "Store", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L708-L727
train
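Init is essentially a manual upsert: count the existing ServerInfo rows, then INSERT if none exist, otherwise UPDATE. The sketch below reduces that control flow to a plain database/sql helper; the table, columns, and MySQL-style placeholders are illustrative only, not the package's real statements.

package store

import "database/sql"

// upsertServerInfo mimics Init's flow: insert the row if absent, otherwise update it.
func upsertServerInfo(db *sql.DB, clusterID string, proto []byte) error {
	count := 0
	err := db.QueryRow("SELECT COUNT(id) FROM ServerInfo").Scan(&count)
	if err != nil && err != sql.ErrNoRows {
		return err
	}
	if count == 0 {
		_, err = db.Exec("INSERT INTO ServerInfo (id, proto) VALUES (?, ?)", clusterID, proto)
	} else {
		_, err = db.Exec("UPDATE ServerInfo SET id=?, proto=?", clusterID, proto)
	}
	return err
}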
nats-io/nats-streaming-server
stores/sqlstore.go
Close
func (s *SQLStore) Close() error { s.Lock() if s.closed { s.Unlock() return nil } s.closed = true // This will cause MsgStore's and SubStore's to be closed. err := s.close() db := s.db wg := &s.wg // Signal background go-routines to quit if s.doneCh != nil { close(s.doneCh) } s.Unlock() // Wait for go routine(s) to finish wg.Wait() s.Lock() for _, ps := range s.preparedStmts { if lerr := ps.Close(); lerr != nil && err == nil { err = lerr } } if db != nil { if s.dbLock != nil { s.releaseDBLockIfOwner() } if lerr := db.Close(); lerr != nil && err == nil { err = lerr } } s.Unlock() return err }
go
func (s *SQLStore) Close() error { s.Lock() if s.closed { s.Unlock() return nil } s.closed = true // This will cause MsgStore's and SubStore's to be closed. err := s.close() db := s.db wg := &s.wg // Signal background go-routines to quit if s.doneCh != nil { close(s.doneCh) } s.Unlock() // Wait for go routine(s) to finish wg.Wait() s.Lock() for _, ps := range s.preparedStmts { if lerr := ps.Close(); lerr != nil && err == nil { err = lerr } } if db != nil { if s.dbLock != nil { s.releaseDBLockIfOwner() } if lerr := db.Close(); lerr != nil && err == nil { err = lerr } } s.Unlock() return err }
[ "func", "(", "s", "*", "SQLStore", ")", "Close", "(", ")", "error", "{", "s", ".", "Lock", "(", ")", "\n", "if", "s", ".", "closed", "{", "s", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}", "\n", "s", ".", "closed", "=", "true", "\n", "// This will cause MsgStore's and SubStore's to be closed.", "err", ":=", "s", ".", "close", "(", ")", "\n", "db", ":=", "s", ".", "db", "\n", "wg", ":=", "&", "s", ".", "wg", "\n", "// Signal background go-routines to quit", "if", "s", ".", "doneCh", "!=", "nil", "{", "close", "(", "s", ".", "doneCh", ")", "\n", "}", "\n", "s", ".", "Unlock", "(", ")", "\n\n", "// Wait for go routine(s) to finish", "wg", ".", "Wait", "(", ")", "\n\n", "s", ".", "Lock", "(", ")", "\n", "for", "_", ",", "ps", ":=", "range", "s", ".", "preparedStmts", "{", "if", "lerr", ":=", "ps", ".", "Close", "(", ")", ";", "lerr", "!=", "nil", "&&", "err", "==", "nil", "{", "err", "=", "lerr", "\n", "}", "\n", "}", "\n", "if", "db", "!=", "nil", "{", "if", "s", ".", "dbLock", "!=", "nil", "{", "s", ".", "releaseDBLockIfOwner", "(", ")", "\n", "}", "\n", "if", "lerr", ":=", "db", ".", "Close", "(", ")", ";", "lerr", "!=", "nil", "&&", "err", "==", "nil", "{", "err", "=", "lerr", "\n", "}", "\n", "}", "\n", "s", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// Close implements the Store interface
[ "Close", "implements", "the", "Store", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1272-L1308
train
nats-io/nats-streaming-server
stores/sqlstore.go
GetSequenceFromTimestamp
func (ms *SQLMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error) { ms.Lock() defer ms.Unlock() // No message ever stored if ms.first == 0 { return 0, nil } // All messages have expired if ms.first > ms.last { return ms.last + 1, nil } r := ms.sqlStore.preparedStmts[sqlGetSequenceFromTimestamp].QueryRow(ms.channelID, timestamp) seq := uint64(0) err := r.Scan(&seq) if err == sql.ErrNoRows { return ms.last + 1, nil } if err != nil { return 0, sqlStmtError(sqlGetSequenceFromTimestamp, err) } return seq, nil }
go
func (ms *SQLMsgStore) GetSequenceFromTimestamp(timestamp int64) (uint64, error) { ms.Lock() defer ms.Unlock() // No message ever stored if ms.first == 0 { return 0, nil } // All messages have expired if ms.first > ms.last { return ms.last + 1, nil } r := ms.sqlStore.preparedStmts[sqlGetSequenceFromTimestamp].QueryRow(ms.channelID, timestamp) seq := uint64(0) err := r.Scan(&seq) if err == sql.ErrNoRows { return ms.last + 1, nil } if err != nil { return 0, sqlStmtError(sqlGetSequenceFromTimestamp, err) } return seq, nil }
[ "func", "(", "ms", "*", "SQLMsgStore", ")", "GetSequenceFromTimestamp", "(", "timestamp", "int64", ")", "(", "uint64", ",", "error", ")", "{", "ms", ".", "Lock", "(", ")", "\n", "defer", "ms", ".", "Unlock", "(", ")", "\n", "// No message ever stored", "if", "ms", ".", "first", "==", "0", "{", "return", "0", ",", "nil", "\n", "}", "\n", "// All messages have expired", "if", "ms", ".", "first", ">", "ms", ".", "last", "{", "return", "ms", ".", "last", "+", "1", ",", "nil", "\n", "}", "\n", "r", ":=", "ms", ".", "sqlStore", ".", "preparedStmts", "[", "sqlGetSequenceFromTimestamp", "]", ".", "QueryRow", "(", "ms", ".", "channelID", ",", "timestamp", ")", "\n", "seq", ":=", "uint64", "(", "0", ")", "\n", "err", ":=", "r", ".", "Scan", "(", "&", "seq", ")", "\n", "if", "err", "==", "sql", ".", "ErrNoRows", "{", "return", "ms", ".", "last", "+", "1", ",", "nil", "\n", "}", "\n", "if", "err", "!=", "nil", "{", "return", "0", ",", "sqlStmtError", "(", "sqlGetSequenceFromTimestamp", ",", "err", ")", "\n", "}", "\n", "return", "seq", ",", "nil", "\n", "}" ]
// GetSequenceFromTimestamp implements the MsgStore interface
[ "GetSequenceFromTimestamp", "implements", "the", "MsgStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1481-L1502
train
nats-io/nats-streaming-server
stores/sqlstore.go
LastMsg
func (ms *SQLMsgStore) LastMsg() (*pb.MsgProto, error) { ms.Lock() msg, err := ms.lookup(ms.last) ms.Unlock() return msg, err }
go
func (ms *SQLMsgStore) LastMsg() (*pb.MsgProto, error) { ms.Lock() msg, err := ms.lookup(ms.last) ms.Unlock() return msg, err }
[ "func", "(", "ms", "*", "SQLMsgStore", ")", "LastMsg", "(", ")", "(", "*", "pb", ".", "MsgProto", ",", "error", ")", "{", "ms", ".", "Lock", "(", ")", "\n", "msg", ",", "err", ":=", "ms", ".", "lookup", "(", "ms", ".", "last", ")", "\n", "ms", ".", "Unlock", "(", ")", "\n", "return", "msg", ",", "err", "\n", "}" ]
// LastMsg implements the MsgStore interface
[ "LastMsg", "implements", "the", "MsgStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1513-L1518
train
nats-io/nats-streaming-server
stores/sqlstore.go
expireMsgs
func (ms *SQLMsgStore) expireMsgs() { ms.Lock() defer ms.Unlock() if ms.closed { ms.wg.Done() return } var ( count int maxSeq uint64 totalSize uint64 timestamp int64 ) processErr := func(errCode int, err error) { ms.log.Errorf("Unable to perform expiration for channel %q: %v", ms.subject, sqlStmtError(errCode, err)) ms.expireTimer.Reset(sqlExpirationIntervalOnError) } for { expiredTimestamp := time.Now().UnixNano() - int64(ms.limits.MaxAge) r := ms.sqlStore.preparedStmts[sqlGetExpiredMessages].QueryRow(ms.channelID, expiredTimestamp) if err := r.Scan(&count, &maxSeq, &totalSize); err != nil { processErr(sqlGetExpiredMessages, err) return } // It could be that messages that should have expired have been // removed due to count/size limit. We still need to adjust the // expiration timer based on the first message that need to expire. if count > 0 { if maxSeq == ms.last { if _, err := ms.sqlStore.preparedStmts[sqlUpdateChannelMaxSeq].Exec(maxSeq, ms.channelID); err != nil { processErr(sqlUpdateChannelMaxSeq, err) return } } if _, err := ms.sqlStore.preparedStmts[sqlDeletedMsgsWithSeqLowerThan].Exec(ms.channelID, maxSeq); err != nil { processErr(sqlDeletedMsgsWithSeqLowerThan, err) return } ms.first = maxSeq + 1 ms.totalCount -= count ms.totalBytes -= totalSize } // Reset since we are in a loop timestamp = 0 // If there is any message left in the channel, find out what the expiration // timer needs to be set to. if ms.totalCount > 0 { r = ms.sqlStore.preparedStmts[sqlGetFirstMsgTimestamp].QueryRow(ms.channelID, ms.first) if err := r.Scan(&timestamp); err != nil { processErr(sqlGetFirstMsgTimestamp, err) return } } // No message left or no message to expire. The timer will be recreated when // a new message is added to the channel. if timestamp == 0 { ms.wg.Done() ms.expireTimer = nil return } elapsed := time.Duration(time.Now().UnixNano() - timestamp) if elapsed < ms.limits.MaxAge { ms.expireTimer.Reset(ms.limits.MaxAge - elapsed) // Done with the for loop return } } }
go
func (ms *SQLMsgStore) expireMsgs() { ms.Lock() defer ms.Unlock() if ms.closed { ms.wg.Done() return } var ( count int maxSeq uint64 totalSize uint64 timestamp int64 ) processErr := func(errCode int, err error) { ms.log.Errorf("Unable to perform expiration for channel %q: %v", ms.subject, sqlStmtError(errCode, err)) ms.expireTimer.Reset(sqlExpirationIntervalOnError) } for { expiredTimestamp := time.Now().UnixNano() - int64(ms.limits.MaxAge) r := ms.sqlStore.preparedStmts[sqlGetExpiredMessages].QueryRow(ms.channelID, expiredTimestamp) if err := r.Scan(&count, &maxSeq, &totalSize); err != nil { processErr(sqlGetExpiredMessages, err) return } // It could be that messages that should have expired have been // removed due to count/size limit. We still need to adjust the // expiration timer based on the first message that need to expire. if count > 0 { if maxSeq == ms.last { if _, err := ms.sqlStore.preparedStmts[sqlUpdateChannelMaxSeq].Exec(maxSeq, ms.channelID); err != nil { processErr(sqlUpdateChannelMaxSeq, err) return } } if _, err := ms.sqlStore.preparedStmts[sqlDeletedMsgsWithSeqLowerThan].Exec(ms.channelID, maxSeq); err != nil { processErr(sqlDeletedMsgsWithSeqLowerThan, err) return } ms.first = maxSeq + 1 ms.totalCount -= count ms.totalBytes -= totalSize } // Reset since we are in a loop timestamp = 0 // If there is any message left in the channel, find out what the expiration // timer needs to be set to. if ms.totalCount > 0 { r = ms.sqlStore.preparedStmts[sqlGetFirstMsgTimestamp].QueryRow(ms.channelID, ms.first) if err := r.Scan(&timestamp); err != nil { processErr(sqlGetFirstMsgTimestamp, err) return } } // No message left or no message to expire. The timer will be recreated when // a new message is added to the channel. if timestamp == 0 { ms.wg.Done() ms.expireTimer = nil return } elapsed := time.Duration(time.Now().UnixNano() - timestamp) if elapsed < ms.limits.MaxAge { ms.expireTimer.Reset(ms.limits.MaxAge - elapsed) // Done with the for loop return } } }
[ "func", "(", "ms", "*", "SQLMsgStore", ")", "expireMsgs", "(", ")", "{", "ms", ".", "Lock", "(", ")", "\n", "defer", "ms", ".", "Unlock", "(", ")", "\n\n", "if", "ms", ".", "closed", "{", "ms", ".", "wg", ".", "Done", "(", ")", "\n", "return", "\n", "}", "\n\n", "var", "(", "count", "int", "\n", "maxSeq", "uint64", "\n", "totalSize", "uint64", "\n", "timestamp", "int64", "\n", ")", "\n", "processErr", ":=", "func", "(", "errCode", "int", ",", "err", "error", ")", "{", "ms", ".", "log", ".", "Errorf", "(", "\"", "\"", ",", "ms", ".", "subject", ",", "sqlStmtError", "(", "errCode", ",", "err", ")", ")", "\n", "ms", ".", "expireTimer", ".", "Reset", "(", "sqlExpirationIntervalOnError", ")", "\n", "}", "\n", "for", "{", "expiredTimestamp", ":=", "time", ".", "Now", "(", ")", ".", "UnixNano", "(", ")", "-", "int64", "(", "ms", ".", "limits", ".", "MaxAge", ")", "\n", "r", ":=", "ms", ".", "sqlStore", ".", "preparedStmts", "[", "sqlGetExpiredMessages", "]", ".", "QueryRow", "(", "ms", ".", "channelID", ",", "expiredTimestamp", ")", "\n", "if", "err", ":=", "r", ".", "Scan", "(", "&", "count", ",", "&", "maxSeq", ",", "&", "totalSize", ")", ";", "err", "!=", "nil", "{", "processErr", "(", "sqlGetExpiredMessages", ",", "err", ")", "\n", "return", "\n", "}", "\n", "// It could be that messages that should have expired have been", "// removed due to count/size limit. We still need to adjust the", "// expiration timer based on the first message that need to expire.", "if", "count", ">", "0", "{", "if", "maxSeq", "==", "ms", ".", "last", "{", "if", "_", ",", "err", ":=", "ms", ".", "sqlStore", ".", "preparedStmts", "[", "sqlUpdateChannelMaxSeq", "]", ".", "Exec", "(", "maxSeq", ",", "ms", ".", "channelID", ")", ";", "err", "!=", "nil", "{", "processErr", "(", "sqlUpdateChannelMaxSeq", ",", "err", ")", "\n", "return", "\n", "}", "\n", "}", "\n", "if", "_", ",", "err", ":=", "ms", ".", "sqlStore", ".", "preparedStmts", "[", "sqlDeletedMsgsWithSeqLowerThan", "]", ".", "Exec", "(", "ms", ".", "channelID", ",", "maxSeq", ")", ";", "err", "!=", "nil", "{", "processErr", "(", "sqlDeletedMsgsWithSeqLowerThan", ",", "err", ")", "\n", "return", "\n", "}", "\n", "ms", ".", "first", "=", "maxSeq", "+", "1", "\n", "ms", ".", "totalCount", "-=", "count", "\n", "ms", ".", "totalBytes", "-=", "totalSize", "\n", "}", "\n", "// Reset since we are in a loop", "timestamp", "=", "0", "\n", "// If there is any message left in the channel, find out what the expiration", "// timer needs to be set to.", "if", "ms", ".", "totalCount", ">", "0", "{", "r", "=", "ms", ".", "sqlStore", ".", "preparedStmts", "[", "sqlGetFirstMsgTimestamp", "]", ".", "QueryRow", "(", "ms", ".", "channelID", ",", "ms", ".", "first", ")", "\n", "if", "err", ":=", "r", ".", "Scan", "(", "&", "timestamp", ")", ";", "err", "!=", "nil", "{", "processErr", "(", "sqlGetFirstMsgTimestamp", ",", "err", ")", "\n", "return", "\n", "}", "\n", "}", "\n", "// No message left or no message to expire. 
The timer will be recreated when", "// a new message is added to the channel.", "if", "timestamp", "==", "0", "{", "ms", ".", "wg", ".", "Done", "(", ")", "\n", "ms", ".", "expireTimer", "=", "nil", "\n", "return", "\n", "}", "\n", "elapsed", ":=", "time", ".", "Duration", "(", "time", ".", "Now", "(", ")", ".", "UnixNano", "(", ")", "-", "timestamp", ")", "\n", "if", "elapsed", "<", "ms", ".", "limits", ".", "MaxAge", "{", "ms", ".", "expireTimer", ".", "Reset", "(", "ms", ".", "limits", ".", "MaxAge", "-", "elapsed", ")", "\n", "// Done with the for loop", "return", "\n", "}", "\n", "}", "\n", "}" ]
// expireMsgsLocked removes all messages that have expired in this channel. // Store lock is assumed held on entry
[ "expireMsgsLocked", "removes", "all", "messages", "that", "have", "expired", "in", "this", "channel", ".", "Store", "lock", "is", "assumed", "held", "on", "entry" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1522-L1591
train
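At the end of each loop iteration, expireMsgs computes how long to sleep before the oldest remaining message reaches MaxAge: the elapsed age of that message versus the limit. A standalone version of just that arithmetic follows; nextExpiration is a hypothetical helper name.

package main

import (
	"fmt"
	"time"
)

// nextExpiration returns how long to wait before the message stored at
// firstTimestamp (UnixNano) reaches maxAge; zero means it has already expired.
func nextExpiration(firstTimestamp int64, maxAge time.Duration) time.Duration {
	elapsed := time.Duration(time.Now().UnixNano() - firstTimestamp)
	if elapsed >= maxAge {
		return 0
	}
	return maxAge - elapsed
}

func main() {
	ts := time.Now().Add(-30 * time.Second).UnixNano()
	fmt.Println(nextExpiration(ts, time.Minute).Round(time.Second)) // ~30s
}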
nats-io/nats-streaming-server
stores/sqlstore.go
Flush
func (ms *SQLMsgStore) Flush() error { ms.Lock() err := ms.flush() ms.Unlock() return err }
go
func (ms *SQLMsgStore) Flush() error { ms.Lock() err := ms.flush() ms.Unlock() return err }
[ "func", "(", "ms", "*", "SQLMsgStore", ")", "Flush", "(", ")", "error", "{", "ms", ".", "Lock", "(", ")", "\n", "err", ":=", "ms", ".", "flush", "(", ")", "\n", "ms", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// Flush implements the MsgStore interface
[ "Flush", "implements", "the", "MsgStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1674-L1679
train
nats-io/nats-streaming-server
stores/sqlstore.go
UpdateSub
func (ss *SQLSubStore) UpdateSub(sub *spb.SubState) error { ss.Lock() defer ss.Unlock() subBytes, _ := sub.Marshal() r, err := ss.sqlStore.preparedStmts[sqlUpdateSub].Exec(subBytes, ss.channelID, sub.ID) if err != nil { return sqlStmtError(sqlUpdateSub, err) } // FileSubStoe supports updating a subscription for which there was no CreateSub. // Not sure if this is necessary, since I think server would never do that. // Stay consistent. c, err := r.RowsAffected() if err != nil { return err } if c == 0 { if _, err := ss.sqlStore.preparedStmts[sqlCreateSub].Exec(ss.channelID, sub.ID, subBytes); err != nil { return sqlStmtError(sqlCreateSub, err) } } return nil }
go
func (ss *SQLSubStore) UpdateSub(sub *spb.SubState) error { ss.Lock() defer ss.Unlock() subBytes, _ := sub.Marshal() r, err := ss.sqlStore.preparedStmts[sqlUpdateSub].Exec(subBytes, ss.channelID, sub.ID) if err != nil { return sqlStmtError(sqlUpdateSub, err) } // FileSubStoe supports updating a subscription for which there was no CreateSub. // Not sure if this is necessary, since I think server would never do that. // Stay consistent. c, err := r.RowsAffected() if err != nil { return err } if c == 0 { if _, err := ss.sqlStore.preparedStmts[sqlCreateSub].Exec(ss.channelID, sub.ID, subBytes); err != nil { return sqlStmtError(sqlCreateSub, err) } } return nil }
[ "func", "(", "ss", "*", "SQLSubStore", ")", "UpdateSub", "(", "sub", "*", "spb", ".", "SubState", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "defer", "ss", ".", "Unlock", "(", ")", "\n", "subBytes", ",", "_", ":=", "sub", ".", "Marshal", "(", ")", "\n", "r", ",", "err", ":=", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlUpdateSub", "]", ".", "Exec", "(", "subBytes", ",", "ss", ".", "channelID", ",", "sub", ".", "ID", ")", "\n", "if", "err", "!=", "nil", "{", "return", "sqlStmtError", "(", "sqlUpdateSub", ",", "err", ")", "\n", "}", "\n", "// FileSubStoe supports updating a subscription for which there was no CreateSub.", "// Not sure if this is necessary, since I think server would never do that.", "// Stay consistent.", "c", ",", "err", ":=", "r", ".", "RowsAffected", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "c", "==", "0", "{", "if", "_", ",", "err", ":=", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlCreateSub", "]", ".", "Exec", "(", "ss", ".", "channelID", ",", "sub", ".", "ID", ",", "subBytes", ")", ";", "err", "!=", "nil", "{", "return", "sqlStmtError", "(", "sqlCreateSub", ",", "err", ")", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// UpdateSub implements the SubStore interface
[ "UpdateSub", "implements", "the", "SubStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1737-L1758
train
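The update-then-insert fallback in UpdateSub relies on RowsAffected reporting zero when no subscription row matched. Below is a reduced sketch of that pattern with database/sql; the statements and names are placeholders, and note that some drivers also report zero affected rows when an UPDATE writes values identical to the existing ones.

package store

import "database/sql"

// updateOrCreate updates a row and falls back to an insert when nothing matched,
// mirroring UpdateSub above.
func updateOrCreate(db *sql.DB, id uint64, payload []byte) error {
	res, err := db.Exec("UPDATE Subscriptions SET proto=? WHERE subid=?", payload, id)
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if n == 0 {
		_, err = db.Exec("INSERT INTO Subscriptions (subid, proto) VALUES (?, ?)", id, payload)
	}
	return err
}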
nats-io/nats-streaming-server
stores/sqlstore.go
DeleteSub
func (ss *SQLSubStore) DeleteSub(subid uint64) error { ss.Lock() defer ss.Unlock() if subid == atomic.LoadUint64(ss.maxSubID) { if _, err := ss.sqlStore.preparedStmts[sqlMarkSubscriptionAsDeleted].Exec(ss.channelID, subid); err != nil { return sqlStmtError(sqlMarkSubscriptionAsDeleted, err) } ss.hasMarkedAsDel = true } else { if _, err := ss.sqlStore.preparedStmts[sqlDeleteSubscription].Exec(ss.channelID, subid); err != nil { return sqlStmtError(sqlDeleteSubscription, err) } } if ss.cache != nil { delete(ss.cache.subs, subid) } else { delete(ss.subLastSent, subid) } // Ignore error on this since subscription would not be recovered // if above executed ok. ss.sqlStore.preparedStmts[sqlDeleteSubPendingMessages].Exec(subid) return nil }
go
func (ss *SQLSubStore) DeleteSub(subid uint64) error { ss.Lock() defer ss.Unlock() if subid == atomic.LoadUint64(ss.maxSubID) { if _, err := ss.sqlStore.preparedStmts[sqlMarkSubscriptionAsDeleted].Exec(ss.channelID, subid); err != nil { return sqlStmtError(sqlMarkSubscriptionAsDeleted, err) } ss.hasMarkedAsDel = true } else { if _, err := ss.sqlStore.preparedStmts[sqlDeleteSubscription].Exec(ss.channelID, subid); err != nil { return sqlStmtError(sqlDeleteSubscription, err) } } if ss.cache != nil { delete(ss.cache.subs, subid) } else { delete(ss.subLastSent, subid) } // Ignore error on this since subscription would not be recovered // if above executed ok. ss.sqlStore.preparedStmts[sqlDeleteSubPendingMessages].Exec(subid) return nil }
[ "func", "(", "ss", "*", "SQLSubStore", ")", "DeleteSub", "(", "subid", "uint64", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "defer", "ss", ".", "Unlock", "(", ")", "\n", "if", "subid", "==", "atomic", ".", "LoadUint64", "(", "ss", ".", "maxSubID", ")", "{", "if", "_", ",", "err", ":=", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlMarkSubscriptionAsDeleted", "]", ".", "Exec", "(", "ss", ".", "channelID", ",", "subid", ")", ";", "err", "!=", "nil", "{", "return", "sqlStmtError", "(", "sqlMarkSubscriptionAsDeleted", ",", "err", ")", "\n", "}", "\n", "ss", ".", "hasMarkedAsDel", "=", "true", "\n", "}", "else", "{", "if", "_", ",", "err", ":=", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlDeleteSubscription", "]", ".", "Exec", "(", "ss", ".", "channelID", ",", "subid", ")", ";", "err", "!=", "nil", "{", "return", "sqlStmtError", "(", "sqlDeleteSubscription", ",", "err", ")", "\n", "}", "\n", "}", "\n", "if", "ss", ".", "cache", "!=", "nil", "{", "delete", "(", "ss", ".", "cache", ".", "subs", ",", "subid", ")", "\n", "}", "else", "{", "delete", "(", "ss", ".", "subLastSent", ",", "subid", ")", "\n", "}", "\n", "// Ignore error on this since subscription would not be recovered", "// if above executed ok.", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlDeleteSubPendingMessages", "]", ".", "Exec", "(", "subid", ")", "\n", "return", "nil", "\n", "}" ]
// DeleteSub implements the SubStore interface
[ "DeleteSub", "implements", "the", "SubStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1761-L1783
train
nats-io/nats-streaming-server
stores/sqlstore.go
getOrCreateAcksPending
func (ss *SQLSubStore) getOrCreateAcksPending(subid, seqno uint64) *sqlSubAcksPending { if !ss.cache.needsFlush { ss.cache.needsFlush = true ss.sqlStore.scheduleSubStoreFlush(ss) } ap := ss.cache.subs[subid] if ap == nil { ap = &sqlSubAcksPending{ msgToRow: make(map[uint64]*sqlSubsPendingRow), ackToRow: make(map[uint64]*sqlSubsPendingRow), msgs: make(map[uint64]struct{}), acks: make(map[uint64]struct{}), } ss.cache.subs[subid] = ap } if seqno > ap.lastSent { ap.lastSent = seqno } return ap }
go
func (ss *SQLSubStore) getOrCreateAcksPending(subid, seqno uint64) *sqlSubAcksPending { if !ss.cache.needsFlush { ss.cache.needsFlush = true ss.sqlStore.scheduleSubStoreFlush(ss) } ap := ss.cache.subs[subid] if ap == nil { ap = &sqlSubAcksPending{ msgToRow: make(map[uint64]*sqlSubsPendingRow), ackToRow: make(map[uint64]*sqlSubsPendingRow), msgs: make(map[uint64]struct{}), acks: make(map[uint64]struct{}), } ss.cache.subs[subid] = ap } if seqno > ap.lastSent { ap.lastSent = seqno } return ap }
[ "func", "(", "ss", "*", "SQLSubStore", ")", "getOrCreateAcksPending", "(", "subid", ",", "seqno", "uint64", ")", "*", "sqlSubAcksPending", "{", "if", "!", "ss", ".", "cache", ".", "needsFlush", "{", "ss", ".", "cache", ".", "needsFlush", "=", "true", "\n", "ss", ".", "sqlStore", ".", "scheduleSubStoreFlush", "(", "ss", ")", "\n", "}", "\n", "ap", ":=", "ss", ".", "cache", ".", "subs", "[", "subid", "]", "\n", "if", "ap", "==", "nil", "{", "ap", "=", "&", "sqlSubAcksPending", "{", "msgToRow", ":", "make", "(", "map", "[", "uint64", "]", "*", "sqlSubsPendingRow", ")", ",", "ackToRow", ":", "make", "(", "map", "[", "uint64", "]", "*", "sqlSubsPendingRow", ")", ",", "msgs", ":", "make", "(", "map", "[", "uint64", "]", "struct", "{", "}", ")", ",", "acks", ":", "make", "(", "map", "[", "uint64", "]", "struct", "{", "}", ")", ",", "}", "\n", "ss", ".", "cache", ".", "subs", "[", "subid", "]", "=", "ap", "\n", "}", "\n", "if", "seqno", ">", "ap", ".", "lastSent", "{", "ap", ".", "lastSent", "=", "seqno", "\n", "}", "\n", "return", "ap", "\n", "}" ]
// This returns the structure responsible to keep track of // pending messages and acks for a given subscription ID.
[ "This", "returns", "the", "structure", "responsible", "to", "keep", "track", "of", "pending", "messages", "and", "acks", "for", "a", "given", "subscription", "ID", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1787-L1806
train
nats-io/nats-streaming-server
stores/sqlstore.go
addSeq
func (ss *SQLSubStore) addSeq(subid, seqno uint64) bool { ap := ss.getOrCreateAcksPending(subid, seqno) ap.msgs[seqno] = struct{}{} return len(ap.msgs) >= sqlMaxPendingAcks }
go
func (ss *SQLSubStore) addSeq(subid, seqno uint64) bool { ap := ss.getOrCreateAcksPending(subid, seqno) ap.msgs[seqno] = struct{}{} return len(ap.msgs) >= sqlMaxPendingAcks }
[ "func", "(", "ss", "*", "SQLSubStore", ")", "addSeq", "(", "subid", ",", "seqno", "uint64", ")", "bool", "{", "ap", ":=", "ss", ".", "getOrCreateAcksPending", "(", "subid", ",", "seqno", ")", "\n", "ap", ".", "msgs", "[", "seqno", "]", "=", "struct", "{", "}", "{", "}", "\n", "return", "len", "(", "ap", ".", "msgs", ")", ">=", "sqlMaxPendingAcks", "\n", "}" ]
// Adds the given sequence to the list of pending messages. // Returns true if the number of pending messages has // reached a certain threshold, indicating that the // store should be flushed.
[ "Adds", "the", "given", "sequence", "to", "the", "list", "of", "pending", "messages", ".", "Returns", "true", "if", "the", "number", "of", "pending", "messages", "has", "reached", "a", "certain", "threshold", "indicating", "that", "the", "store", "should", "be", "flushed", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1812-L1816
train
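addSeq only records the sequence in a per-subscription set and reports whether the pending count crossed the flush threshold; the caller decides whether to flush. A tiny standalone version of that contract (maxPending stands in for sqlMaxPendingAcks):

package main

import "fmt"

const maxPending = 3 // stand-in for sqlMaxPendingAcks

type pending struct{ msgs map[uint64]struct{} }

// add records seqno and reports whether a flush is now warranted.
func (p *pending) add(seqno uint64) bool {
	p.msgs[seqno] = struct{}{}
	return len(p.msgs) >= maxPending
}

func main() {
	p := &pending{msgs: make(map[uint64]struct{})}
	for seq := uint64(1); seq <= 4; seq++ {
		fmt.Println(seq, p.add(seq)) // 1 false, 2 false, 3 true, 4 true
	}
}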
nats-io/nats-streaming-server
stores/sqlstore.go
ackSeq
func (ss *SQLSubStore) ackSeq(subid, seqno uint64) (bool, error) { ap := ss.getOrCreateAcksPending(subid, seqno) // If still in cache and not persisted into a row, // then simply remove from map and do not persist the ack. if _, exists := ap.msgs[seqno]; exists { delete(ap.msgs, seqno) } else if row := ap.msgToRow[seqno]; row != nil { ap.acks[seqno] = struct{}{} // This is an ack for a pending msg that was persisted // in a row. Update the row's msgRef count. delete(ap.msgToRow, seqno) row.msgsRefs-- // If all pending messages in that row have been ack'ed if row.msgsRefs == 0 { // and if all acks on that row are no longer needed // (or there was none) if row.acksRefs == 0 { // then this row can be deleted. if err := ss.deleteSubPendingRow(subid, row.ID); err != nil { return false, err } // If there is no error, we don't even need // to persist this ack. delete(ap.acks, seqno) } // Since there is no pending message left in this // row, let's find all the corresponding acks' rows // for these sequences and update their acksRefs for seq := range row.msgs { delete(row.msgs, seq) ackRow := ap.ackToRow[seq] if ackRow != nil { // We found the row for the ack of this sequence, // remove from map and update reference count. // delete(ap.ackToRow, seq) ackRow.acksRefs-- // If all acks for that row are no longer needed and // that row has also no pending messages, then ok to // delete. if ackRow.acksRefs == 0 && ackRow.msgsRefs == 0 { if err := ss.deleteSubPendingRow(subid, ackRow.ID); err != nil { return false, err } } } else { // That means the ack is in current cache so we won't // need to persist it. delete(ap.acks, seq) } } sqlSeqMapPool.Put(row.msgs) row.msgs = nil } } return len(ap.acks) >= sqlMaxPendingAcks, nil }
go
func (ss *SQLSubStore) ackSeq(subid, seqno uint64) (bool, error) { ap := ss.getOrCreateAcksPending(subid, seqno) // If still in cache and not persisted into a row, // then simply remove from map and do not persist the ack. if _, exists := ap.msgs[seqno]; exists { delete(ap.msgs, seqno) } else if row := ap.msgToRow[seqno]; row != nil { ap.acks[seqno] = struct{}{} // This is an ack for a pending msg that was persisted // in a row. Update the row's msgRef count. delete(ap.msgToRow, seqno) row.msgsRefs-- // If all pending messages in that row have been ack'ed if row.msgsRefs == 0 { // and if all acks on that row are no longer needed // (or there was none) if row.acksRefs == 0 { // then this row can be deleted. if err := ss.deleteSubPendingRow(subid, row.ID); err != nil { return false, err } // If there is no error, we don't even need // to persist this ack. delete(ap.acks, seqno) } // Since there is no pending message left in this // row, let's find all the corresponding acks' rows // for these sequences and update their acksRefs for seq := range row.msgs { delete(row.msgs, seq) ackRow := ap.ackToRow[seq] if ackRow != nil { // We found the row for the ack of this sequence, // remove from map and update reference count. // delete(ap.ackToRow, seq) ackRow.acksRefs-- // If all acks for that row are no longer needed and // that row has also no pending messages, then ok to // delete. if ackRow.acksRefs == 0 && ackRow.msgsRefs == 0 { if err := ss.deleteSubPendingRow(subid, ackRow.ID); err != nil { return false, err } } } else { // That means the ack is in current cache so we won't // need to persist it. delete(ap.acks, seq) } } sqlSeqMapPool.Put(row.msgs) row.msgs = nil } } return len(ap.acks) >= sqlMaxPendingAcks, nil }
[ "func", "(", "ss", "*", "SQLSubStore", ")", "ackSeq", "(", "subid", ",", "seqno", "uint64", ")", "(", "bool", ",", "error", ")", "{", "ap", ":=", "ss", ".", "getOrCreateAcksPending", "(", "subid", ",", "seqno", ")", "\n", "// If still in cache and not persisted into a row,", "// then simply remove from map and do not persist the ack.", "if", "_", ",", "exists", ":=", "ap", ".", "msgs", "[", "seqno", "]", ";", "exists", "{", "delete", "(", "ap", ".", "msgs", ",", "seqno", ")", "\n", "}", "else", "if", "row", ":=", "ap", ".", "msgToRow", "[", "seqno", "]", ";", "row", "!=", "nil", "{", "ap", ".", "acks", "[", "seqno", "]", "=", "struct", "{", "}", "{", "}", "\n", "// This is an ack for a pending msg that was persisted", "// in a row. Update the row's msgRef count.", "delete", "(", "ap", ".", "msgToRow", ",", "seqno", ")", "\n", "row", ".", "msgsRefs", "--", "\n", "// If all pending messages in that row have been ack'ed", "if", "row", ".", "msgsRefs", "==", "0", "{", "// and if all acks on that row are no longer needed", "// (or there was none)", "if", "row", ".", "acksRefs", "==", "0", "{", "// then this row can be deleted.", "if", "err", ":=", "ss", ".", "deleteSubPendingRow", "(", "subid", ",", "row", ".", "ID", ")", ";", "err", "!=", "nil", "{", "return", "false", ",", "err", "\n", "}", "\n", "// If there is no error, we don't even need", "// to persist this ack.", "delete", "(", "ap", ".", "acks", ",", "seqno", ")", "\n", "}", "\n", "// Since there is no pending message left in this", "// row, let's find all the corresponding acks' rows", "// for these sequences and update their acksRefs", "for", "seq", ":=", "range", "row", ".", "msgs", "{", "delete", "(", "row", ".", "msgs", ",", "seq", ")", "\n", "ackRow", ":=", "ap", ".", "ackToRow", "[", "seq", "]", "\n", "if", "ackRow", "!=", "nil", "{", "// We found the row for the ack of this sequence,", "// remove from map and update reference count.", "// delete(ap.ackToRow, seq)", "ackRow", ".", "acksRefs", "--", "\n", "// If all acks for that row are no longer needed and", "// that row has also no pending messages, then ok to", "// delete.", "if", "ackRow", ".", "acksRefs", "==", "0", "&&", "ackRow", ".", "msgsRefs", "==", "0", "{", "if", "err", ":=", "ss", ".", "deleteSubPendingRow", "(", "subid", ",", "ackRow", ".", "ID", ")", ";", "err", "!=", "nil", "{", "return", "false", ",", "err", "\n", "}", "\n", "}", "\n", "}", "else", "{", "// That means the ack is in current cache so we won't", "// need to persist it.", "delete", "(", "ap", ".", "acks", ",", "seq", ")", "\n", "}", "\n", "}", "\n", "sqlSeqMapPool", ".", "Put", "(", "row", ".", "msgs", ")", "\n", "row", ".", "msgs", "=", "nil", "\n", "}", "\n", "}", "\n", "return", "len", "(", "ap", ".", "acks", ")", ">=", "sqlMaxPendingAcks", ",", "nil", "\n", "}" ]
// Adds the given sequence to the list of acks and possibly // delete rows that have all their pending messages acknowledged. // Returns true if the number of acks has reached a certain threshold, // indicating that the store should be flushed.
[ "Adds", "the", "given", "sequence", "to", "the", "list", "of", "acks", "and", "possibly", "delete", "rows", "that", "have", "all", "their", "pending", "messages", "acknowledged", ".", "Returns", "true", "if", "the", "number", "of", "acks", "has", "reached", "a", "certain", "threshold", "indicating", "that", "the", "store", "should", "be", "flushed", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1822-L1877
train
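The bookkeeping in ackSeq reduces to reference counting: a persisted row becomes deletable once both its pending-message references and its ack references reach zero. A minimal illustration of that rule with hypothetical types:

package main

import "fmt"

type row struct {
	id       uint64
	msgsRefs int
	acksRefs int
}

// releaseMsg decrements the pending-message count and reports
// whether the row is now eligible for deletion (mirrors ackSeq's test).
func releaseMsg(r *row) bool {
	r.msgsRefs--
	return r.msgsRefs == 0 && r.acksRefs == 0
}

func main() {
	r := &row{id: 1, msgsRefs: 2, acksRefs: 0}
	fmt.Println(releaseMsg(r)) // false: one pending message still references the row
	fmt.Println(releaseMsg(r)) // true: safe to delete the row
}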
nats-io/nats-streaming-server
stores/sqlstore.go
AddSeqPending
func (ss *SQLSubStore) AddSeqPending(subid, seqno uint64) error { var err error ss.Lock() if !ss.closed { if ss.cache != nil { if isFull := ss.addSeq(subid, seqno); isFull { err = ss.flush() } } else { ls := ss.subLastSent[subid] if seqno > ls { ss.subLastSent[subid] = seqno } ss.curRow++ _, err = ss.sqlStore.preparedStmts[sqlSubAddPending].Exec(subid, ss.curRow, seqno) if err != nil { err = sqlStmtError(sqlSubAddPending, err) } } } ss.Unlock() return err }
go
func (ss *SQLSubStore) AddSeqPending(subid, seqno uint64) error { var err error ss.Lock() if !ss.closed { if ss.cache != nil { if isFull := ss.addSeq(subid, seqno); isFull { err = ss.flush() } } else { ls := ss.subLastSent[subid] if seqno > ls { ss.subLastSent[subid] = seqno } ss.curRow++ _, err = ss.sqlStore.preparedStmts[sqlSubAddPending].Exec(subid, ss.curRow, seqno) if err != nil { err = sqlStmtError(sqlSubAddPending, err) } } } ss.Unlock() return err }
[ "func", "(", "ss", "*", "SQLSubStore", ")", "AddSeqPending", "(", "subid", ",", "seqno", "uint64", ")", "error", "{", "var", "err", "error", "\n", "ss", ".", "Lock", "(", ")", "\n", "if", "!", "ss", ".", "closed", "{", "if", "ss", ".", "cache", "!=", "nil", "{", "if", "isFull", ":=", "ss", ".", "addSeq", "(", "subid", ",", "seqno", ")", ";", "isFull", "{", "err", "=", "ss", ".", "flush", "(", ")", "\n", "}", "\n", "}", "else", "{", "ls", ":=", "ss", ".", "subLastSent", "[", "subid", "]", "\n", "if", "seqno", ">", "ls", "{", "ss", ".", "subLastSent", "[", "subid", "]", "=", "seqno", "\n", "}", "\n", "ss", ".", "curRow", "++", "\n", "_", ",", "err", "=", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlSubAddPending", "]", ".", "Exec", "(", "subid", ",", "ss", ".", "curRow", ",", "seqno", ")", "\n", "if", "err", "!=", "nil", "{", "err", "=", "sqlStmtError", "(", "sqlSubAddPending", ",", "err", ")", "\n", "}", "\n", "}", "\n", "}", "\n", "ss", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// AddSeqPending implements the SubStore interface
[ "AddSeqPending", "implements", "the", "SubStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1880-L1902
train
nats-io/nats-streaming-server
stores/sqlstore.go
AckSeqPending
func (ss *SQLSubStore) AckSeqPending(subid, seqno uint64) error { var err error ss.Lock() if !ss.closed { if ss.cache != nil { var isFull bool isFull, err = ss.ackSeq(subid, seqno) if err == nil && isFull { err = ss.flush() } } else { updateLastSent := false ls := ss.subLastSent[subid] if seqno >= ls { if seqno > ls { ss.subLastSent[subid] = seqno } updateLastSent = true } if updateLastSent { if _, err := ss.sqlStore.preparedStmts[sqlSubUpdateLastSent].Exec(seqno, ss.channelID, subid); err != nil { ss.Unlock() return sqlStmtError(sqlSubUpdateLastSent, err) } } _, err = ss.sqlStore.preparedStmts[sqlSubDeletePending].Exec(subid, seqno) if err != nil { err = sqlStmtError(sqlSubDeletePending, err) } } } ss.Unlock() return err }
go
func (ss *SQLSubStore) AckSeqPending(subid, seqno uint64) error { var err error ss.Lock() if !ss.closed { if ss.cache != nil { var isFull bool isFull, err = ss.ackSeq(subid, seqno) if err == nil && isFull { err = ss.flush() } } else { updateLastSent := false ls := ss.subLastSent[subid] if seqno >= ls { if seqno > ls { ss.subLastSent[subid] = seqno } updateLastSent = true } if updateLastSent { if _, err := ss.sqlStore.preparedStmts[sqlSubUpdateLastSent].Exec(seqno, ss.channelID, subid); err != nil { ss.Unlock() return sqlStmtError(sqlSubUpdateLastSent, err) } } _, err = ss.sqlStore.preparedStmts[sqlSubDeletePending].Exec(subid, seqno) if err != nil { err = sqlStmtError(sqlSubDeletePending, err) } } } ss.Unlock() return err }
[ "func", "(", "ss", "*", "SQLSubStore", ")", "AckSeqPending", "(", "subid", ",", "seqno", "uint64", ")", "error", "{", "var", "err", "error", "\n", "ss", ".", "Lock", "(", ")", "\n", "if", "!", "ss", ".", "closed", "{", "if", "ss", ".", "cache", "!=", "nil", "{", "var", "isFull", "bool", "\n", "isFull", ",", "err", "=", "ss", ".", "ackSeq", "(", "subid", ",", "seqno", ")", "\n", "if", "err", "==", "nil", "&&", "isFull", "{", "err", "=", "ss", ".", "flush", "(", ")", "\n", "}", "\n", "}", "else", "{", "updateLastSent", ":=", "false", "\n", "ls", ":=", "ss", ".", "subLastSent", "[", "subid", "]", "\n", "if", "seqno", ">=", "ls", "{", "if", "seqno", ">", "ls", "{", "ss", ".", "subLastSent", "[", "subid", "]", "=", "seqno", "\n", "}", "\n", "updateLastSent", "=", "true", "\n", "}", "\n", "if", "updateLastSent", "{", "if", "_", ",", "err", ":=", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlSubUpdateLastSent", "]", ".", "Exec", "(", "seqno", ",", "ss", ".", "channelID", ",", "subid", ")", ";", "err", "!=", "nil", "{", "ss", ".", "Unlock", "(", ")", "\n", "return", "sqlStmtError", "(", "sqlSubUpdateLastSent", ",", "err", ")", "\n", "}", "\n", "}", "\n", "_", ",", "err", "=", "ss", ".", "sqlStore", ".", "preparedStmts", "[", "sqlSubDeletePending", "]", ".", "Exec", "(", "subid", ",", "seqno", ")", "\n", "if", "err", "!=", "nil", "{", "err", "=", "sqlStmtError", "(", "sqlSubDeletePending", ",", "err", ")", "\n", "}", "\n", "}", "\n", "}", "\n", "ss", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// AckSeqPending implements the SubStore interface
[ "AckSeqPending", "implements", "the", "SubStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L1905-L1938
train
nats-io/nats-streaming-server
stores/sqlstore.go
Flush
func (ss *SQLSubStore) Flush() error {
	ss.Lock()
	err := ss.flush()
	ss.Unlock()
	return err
}
go
func (ss *SQLSubStore) Flush() error {
	ss.Lock()
	err := ss.flush()
	ss.Unlock()
	return err
}
[ "func", "(", "ss", "*", "SQLSubStore", ")", "Flush", "(", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "err", ":=", "ss", ".", "flush", "(", ")", "\n", "ss", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// Flush implements the SubStore interface
[ "Flush", "implements", "the", "SubStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L2015-L2020
train
nats-io/nats-streaming-server
stores/sqlstore.go
Close
func (ss *SQLSubStore) Close() error {
	ss.Lock()
	if ss.closed {
		ss.Unlock()
		return nil
	}
	// Flush before switching the state to closed.
	err := ss.flush()
	ss.closed = true
	ss.Unlock()
	return err
}
go
func (ss *SQLSubStore) Close() error {
	ss.Lock()
	if ss.closed {
		ss.Unlock()
		return nil
	}
	// Flush before switching the state to closed.
	err := ss.flush()
	ss.closed = true
	ss.Unlock()
	return err
}
[ "func", "(", "ss", "*", "SQLSubStore", ")", "Close", "(", ")", "error", "{", "ss", ".", "Lock", "(", ")", "\n", "if", "ss", ".", "closed", "{", "ss", ".", "Unlock", "(", ")", "\n", "return", "nil", "\n", "}", "\n", "// Flush before switching the state to closed.", "err", ":=", "ss", ".", "flush", "(", ")", "\n", "ss", ".", "closed", "=", "true", "\n", "ss", ".", "Unlock", "(", ")", "\n", "return", "err", "\n", "}" ]
// Close implements the SubStore interface
[ "Close", "implements", "the", "SubStore", "interface" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/sqlstore.go#L2136-L2147
train
nats-io/nats-streaming-server
util/channels.go
SendChannelsList
func SendChannelsList(channels []string, sendInbox, replyInbox string, nc *nats.Conn, serverID string) error {
	// Since the NATS message payload is limited, we need to repeat
	// requests if all channels can't fit in a request.
	maxPayload := int(nc.MaxPayload())
	// Reuse this request object to send the (possibly many) protocol message(s).
	header := &spb.CtrlMsg{
		ServerID: serverID,
		MsgType:  spb.CtrlMsg_Partitioning,
	}
	// The Data field (a byte array) will require 1+len(array)+(encoded size of array).
	// To be conservative, let's just use a 8 bytes integer
	headerSize := header.Size() + 1 + 8
	var (
		bytes []byte // Reused buffer in which the request is to marshal info
		n     int    // Size of the serialized request in the above buffer
		count int    // Number of channels added to the request
	)
	for start := 0; start != len(channels); start += count {
		bytes, n, count = encodeChannelsRequest(header, channels, bytes, headerSize, maxPayload, start)
		if count == 0 {
			return errors.New("message payload too small to send channels list")
		}
		if err := nc.PublishRequest(sendInbox, replyInbox, bytes[:n]); err != nil {
			return err
		}
	}
	return nc.Flush()
}
go
func SendChannelsList(channels []string, sendInbox, replyInbox string, nc *nats.Conn, serverID string) error {
	// Since the NATS message payload is limited, we need to repeat
	// requests if all channels can't fit in a request.
	maxPayload := int(nc.MaxPayload())
	// Reuse this request object to send the (possibly many) protocol message(s).
	header := &spb.CtrlMsg{
		ServerID: serverID,
		MsgType:  spb.CtrlMsg_Partitioning,
	}
	// The Data field (a byte array) will require 1+len(array)+(encoded size of array).
	// To be conservative, let's just use a 8 bytes integer
	headerSize := header.Size() + 1 + 8
	var (
		bytes []byte // Reused buffer in which the request is to marshal info
		n     int    // Size of the serialized request in the above buffer
		count int    // Number of channels added to the request
	)
	for start := 0; start != len(channels); start += count {
		bytes, n, count = encodeChannelsRequest(header, channels, bytes, headerSize, maxPayload, start)
		if count == 0 {
			return errors.New("message payload too small to send channels list")
		}
		if err := nc.PublishRequest(sendInbox, replyInbox, bytes[:n]); err != nil {
			return err
		}
	}
	return nc.Flush()
}
[ "func", "SendChannelsList", "(", "channels", "[", "]", "string", ",", "sendInbox", ",", "replyInbox", "string", ",", "nc", "*", "nats", ".", "Conn", ",", "serverID", "string", ")", "error", "{", "// Since the NATS message payload is limited, we need to repeat", "// requests if all channels can't fit in a request.", "maxPayload", ":=", "int", "(", "nc", ".", "MaxPayload", "(", ")", ")", "\n", "// Reuse this request object to send the (possibly many) protocol message(s).", "header", ":=", "&", "spb", ".", "CtrlMsg", "{", "ServerID", ":", "serverID", ",", "MsgType", ":", "spb", ".", "CtrlMsg_Partitioning", ",", "}", "\n", "// The Data field (a byte array) will require 1+len(array)+(encoded size of array).", "// To be conservative, let's just use a 8 bytes integer", "headerSize", ":=", "header", ".", "Size", "(", ")", "+", "1", "+", "8", "\n", "var", "(", "bytes", "[", "]", "byte", "// Reused buffer in which the request is to marshal info", "\n", "n", "int", "// Size of the serialized request in the above buffer", "\n", "count", "int", "// Number of channels added to the request", "\n", ")", "\n", "for", "start", ":=", "0", ";", "start", "!=", "len", "(", "channels", ")", ";", "start", "+=", "count", "{", "bytes", ",", "n", ",", "count", "=", "encodeChannelsRequest", "(", "header", ",", "channels", ",", "bytes", ",", "headerSize", ",", "maxPayload", ",", "start", ")", "\n", "if", "count", "==", "0", "{", "return", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n", "if", "err", ":=", "nc", ".", "PublishRequest", "(", "sendInbox", ",", "replyInbox", ",", "bytes", "[", ":", "n", "]", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "}", "\n", "return", "nc", ".", "Flush", "(", ")", "\n", "}" ]
// SendChannelsList sends the list of channels to the given subject, possibly
// splitting the list in several requests if it cannot fit in a single message.
[ "SendsChannelsList", "sends", "the", "list", "of", "channels", "to", "the", "given", "subject", "possibly", "splitting", "the", "list", "in", "several", "requests", "if", "it", "cannot", "fit", "in", "a", "single", "message", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/channels.go#L30-L57
train
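SendChannelsList repeatedly packs as many channel names as fit under the connection's maximum payload and sends one request per chunk. The helper it relies on, encodeChannelsRequest, is not shown in this record, so the sketch below only illustrates the chunking loop with a simplified, assumed packer that joins names under a byte budget; the protobuf header and NATS publish are deliberately left out.

package main

import "fmt"

// packChannels greedily adds names to a chunk until one more would exceed
// maxBytes (counting a 1-byte separator). It returns the encoded chunk and
// how many names were consumed, mimicking the (bytes, n, count) contract
// of the server's encoder.
func packChannels(channels []string, start, maxBytes int) ([]byte, int) {
	buf := make([]byte, 0, maxBytes)
	count := 0
	for i := start; i < len(channels); i++ {
		need := len(channels[i])
		if count > 0 {
			need++ // separator
		}
		if len(buf)+need > maxBytes {
			break
		}
		if count > 0 {
			buf = append(buf, ',')
		}
		buf = append(buf, channels[i]...)
		count++
	}
	return buf, count
}

func main() {
	channels := []string{"foo", "bar", "baz", "queue.a", "queue.b"}
	const maxPayload = 12
	for start := 0; start != len(channels); {
		chunk, count := packChannels(channels, start, maxPayload)
		if count == 0 {
			fmt.Println("payload too small for a single channel name")
			return
		}
		fmt.Printf("request: %q (%d names)\n", chunk, count)
		start += count
	}
}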
nats-io/nats-streaming-server
util/channels.go
DecodeChannels
func DecodeChannels(data []byte) ([]string, error) {
	channels := []string{}
	pos := 0
	for pos < len(data) {
		if pos+2 > len(data) {
			return nil, fmt.Errorf("unable to decode size, pos=%v len=%v", pos, len(data))
		}
		cl := int(ByteOrder.Uint16(data[pos:]))
		pos += encodedChannelLen
		end := pos + cl
		if end > len(data) {
			return nil, fmt.Errorf("unable to decode channel, pos=%v len=%v max=%v (string=%v)", pos, cl, len(data), string(data[pos:]))
		}
		c := string(data[pos:end])
		channels = append(channels, c)
		pos = end
	}
	return channels, nil
}
go
func DecodeChannels(data []byte) ([]string, error) {
	channels := []string{}
	pos := 0
	for pos < len(data) {
		if pos+2 > len(data) {
			return nil, fmt.Errorf("unable to decode size, pos=%v len=%v", pos, len(data))
		}
		cl := int(ByteOrder.Uint16(data[pos:]))
		pos += encodedChannelLen
		end := pos + cl
		if end > len(data) {
			return nil, fmt.Errorf("unable to decode channel, pos=%v len=%v max=%v (string=%v)", pos, cl, len(data), string(data[pos:]))
		}
		c := string(data[pos:end])
		channels = append(channels, c)
		pos = end
	}
	return channels, nil
}
[ "func", "DecodeChannels", "(", "data", "[", "]", "byte", ")", "(", "[", "]", "string", ",", "error", ")", "{", "channels", ":=", "[", "]", "string", "{", "}", "\n", "pos", ":=", "0", "\n", "for", "pos", "<", "len", "(", "data", ")", "{", "if", "pos", "+", "2", ">", "len", "(", "data", ")", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "pos", ",", "len", "(", "data", ")", ")", "\n", "}", "\n", "cl", ":=", "int", "(", "ByteOrder", ".", "Uint16", "(", "data", "[", "pos", ":", "]", ")", ")", "\n", "pos", "+=", "encodedChannelLen", "\n", "end", ":=", "pos", "+", "cl", "\n", "if", "end", ">", "len", "(", "data", ")", "{", "return", "nil", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "pos", ",", "cl", ",", "len", "(", "data", ")", ",", "string", "(", "data", "[", "pos", ":", "]", ")", ")", "\n", "}", "\n", "c", ":=", "string", "(", "data", "[", "pos", ":", "end", "]", ")", "\n", "channels", "=", "append", "(", "channels", ",", "c", ")", "\n", "pos", "=", "end", "\n", "}", "\n", "return", "channels", ",", "nil", "\n", "}" ]
// DecodeChannels decodes from the given byte array the list of channel names
// and returns them as an array of strings.
[ "DecodeChannels", "decodes", "from", "the", "given", "byte", "array", "the", "list", "of", "channel", "names", "and", "return", "them", "as", "an", "array", "of", "strings", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/channels.go#L61-L80
train
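DecodeChannels walks a buffer of length-prefixed names, each preceded by a 2-byte size (encodedChannelLen). The round-trip sketch below reconstructs that wire format as a self-contained program; it assumes little-endian for the size prefix, since the package's actual ByteOrder is not shown in this record, and the encode half is an illustrative mirror rather than the server's own encoder.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeChannels writes each name as a 2-byte little-endian length
// followed by the raw bytes of the name.
func encodeChannels(channels []string) []byte {
	var buf []byte
	for _, c := range channels {
		var size [2]byte
		binary.LittleEndian.PutUint16(size[:], uint16(len(c)))
		buf = append(buf, size[:]...)
		buf = append(buf, c...)
	}
	return buf
}

// decodeChannels mirrors encodeChannels and performs the same bounds
// checks as the server's DecodeChannels.
func decodeChannels(data []byte) ([]string, error) {
	var channels []string
	pos := 0
	for pos < len(data) {
		if pos+2 > len(data) {
			return nil, fmt.Errorf("unable to decode size, pos=%v len=%v", pos, len(data))
		}
		cl := int(binary.LittleEndian.Uint16(data[pos:]))
		pos += 2
		end := pos + cl
		if end > len(data) {
			return nil, fmt.Errorf("truncated channel name at pos=%v", pos)
		}
		channels = append(channels, string(data[pos:end]))
		pos = end
	}
	return channels, nil
}

func main() {
	in := []string{"foo", "bar.baz"}
	out, err := decodeChannels(encodeChannels(in))
	fmt.Println(out, err) // [foo bar.baz] <nil>
}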
nats-io/nats-streaming-server
server/ft.go
ftStart
func (s *StanServer) ftStart() (retErr error) {
	s.log.Noticef("Starting in standby mode")
	// For tests purposes
	if ftPauseBeforeFirstAttempt {
		<-ftPauseCh
	}
	print, _ := util.NewBackoffTimeCheck(time.Second, 2, time.Minute)
	for {
		select {
		case <-s.ftQuit:
			// we are done
			return nil
		case <-s.ftHBCh:
			// go back to the beginning of the for loop
			continue
		case <-time.After(s.ftHBMissedInterval):
			// try to lock the store
		}
		locked, err := s.ftGetStoreLock()
		if err != nil {
			// Log the error, but go back and wait for the next interval and
			// try again. It is possible that the error resolves (for instance
			// the connection to the database is restored - for SQL stores).
			s.log.Errorf("ft: error attempting to get the store lock: %v", err)
			continue
		} else if locked {
			break
		}
		// Here, we did not get the lock, print and go back to standby.
		// Use some backoff for the printing to not fill up the log
		if print.Ok() {
			s.log.Noticef("ft: unable to get store lock at this time, going back to standby")
		}
	}
	// Capture the time this server activated. It will be used in case several
	// servers claim to be active. Not bulletproof since there could be clock
	// differences, etc... but when more than one server has acquired the store
	// lock it means we are already in trouble, so just trying to minimize the
	// possible store corruption...
	activationTime := time.Now()
	s.log.Noticef("Server is active")
	s.startGoRoutine(func() {
		s.ftSendHBLoop(activationTime)
	})
	// Start the recovery process, etc..
	return s.start(FTActive)
}
go
func (s *StanServer) ftStart() (retErr error) {
	s.log.Noticef("Starting in standby mode")
	// For tests purposes
	if ftPauseBeforeFirstAttempt {
		<-ftPauseCh
	}
	print, _ := util.NewBackoffTimeCheck(time.Second, 2, time.Minute)
	for {
		select {
		case <-s.ftQuit:
			// we are done
			return nil
		case <-s.ftHBCh:
			// go back to the beginning of the for loop
			continue
		case <-time.After(s.ftHBMissedInterval):
			// try to lock the store
		}
		locked, err := s.ftGetStoreLock()
		if err != nil {
			// Log the error, but go back and wait for the next interval and
			// try again. It is possible that the error resolves (for instance
			// the connection to the database is restored - for SQL stores).
			s.log.Errorf("ft: error attempting to get the store lock: %v", err)
			continue
		} else if locked {
			break
		}
		// Here, we did not get the lock, print and go back to standby.
		// Use some backoff for the printing to not fill up the log
		if print.Ok() {
			s.log.Noticef("ft: unable to get store lock at this time, going back to standby")
		}
	}
	// Capture the time this server activated. It will be used in case several
	// servers claim to be active. Not bulletproof since there could be clock
	// differences, etc... but when more than one server has acquired the store
	// lock it means we are already in trouble, so just trying to minimize the
	// possible store corruption...
	activationTime := time.Now()
	s.log.Noticef("Server is active")
	s.startGoRoutine(func() {
		s.ftSendHBLoop(activationTime)
	})
	// Start the recovery process, etc..
	return s.start(FTActive)
}
[ "func", "(", "s", "*", "StanServer", ")", "ftStart", "(", ")", "(", "retErr", "error", ")", "{", "s", ".", "log", ".", "Noticef", "(", "\"", "\"", ")", "\n", "// For tests purposes", "if", "ftPauseBeforeFirstAttempt", "{", "<-", "ftPauseCh", "\n", "}", "\n", "print", ",", "_", ":=", "util", ".", "NewBackoffTimeCheck", "(", "time", ".", "Second", ",", "2", ",", "time", ".", "Minute", ")", "\n", "for", "{", "select", "{", "case", "<-", "s", ".", "ftQuit", ":", "// we are done", "return", "nil", "\n", "case", "<-", "s", ".", "ftHBCh", ":", "// go back to the beginning of the for loop", "continue", "\n", "case", "<-", "time", ".", "After", "(", "s", ".", "ftHBMissedInterval", ")", ":", "// try to lock the store", "}", "\n", "locked", ",", "err", ":=", "s", ".", "ftGetStoreLock", "(", ")", "\n", "if", "err", "!=", "nil", "{", "// Log the error, but go back and wait for the next interval and", "// try again. It is possible that the error resolves (for instance", "// the connection to the database is restored - for SQL stores).", "s", ".", "log", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "continue", "\n", "}", "else", "if", "locked", "{", "break", "\n", "}", "\n", "// Here, we did not get the lock, print and go back to standby.", "// Use some backoff for the printing to not fill up the log", "if", "print", ".", "Ok", "(", ")", "{", "s", ".", "log", ".", "Noticef", "(", "\"", "\"", ")", "\n", "}", "\n", "}", "\n", "// Capture the time this server activated. It will be used in case several", "// servers claim to be active. Not bulletproof since there could be clock", "// differences, etc... but when more than one server has acquired the store", "// lock it means we are already in trouble, so just trying to minimize the", "// possible store corruption...", "activationTime", ":=", "time", ".", "Now", "(", ")", "\n", "s", ".", "log", ".", "Noticef", "(", "\"", "\"", ")", "\n", "s", ".", "startGoRoutine", "(", "func", "(", ")", "{", "s", ".", "ftSendHBLoop", "(", "activationTime", ")", "\n", "}", ")", "\n", "// Start the recovery process, etc..", "return", "s", ".", "start", "(", "FTActive", ")", "\n", "}" ]
// ftStart will return only when this server has become active
// and was able to get the store's exclusive lock.
// This is running in a separate go-routine so if server state
// changes, take care of using the server's lock.
[ "ftStart", "will", "return", "only", "when", "this", "server", "has", "become", "active", "and", "was", "able", "to", "get", "the", "store", "s", "exclusive", "lock", ".", "This", "is", "running", "in", "a", "separate", "go", "-", "routine", "so", "if", "server", "state", "changes", "take", "care", "of", "using", "the", "server", "s", "lock", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L56-L102
train
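The standby loop above waits for a shutdown signal, an FT heartbeat (which keeps the server in standby), or the missed-heartbeat timeout, and only then attempts the store lock. The following is a stripped-down, standalone sketch of that control flow; runStandby and its stubbed tryLock callback are illustrative stand-ins, not server APIs, and logging/backoff are omitted.

package main

import (
	"fmt"
	"time"
)

// runStandby loops until tryLock succeeds, returning true when the caller
// should become active and false when quit fires first. A heartbeat simply
// postpones the next lock attempt, mirroring ftStart's structure.
func runStandby(quit, heartbeat <-chan struct{}, missedHBInterval time.Duration,
	tryLock func() (bool, error)) bool {
	for {
		select {
		case <-quit:
			return false
		case <-heartbeat:
			// Active server is alive: stay in standby.
			continue
		case <-time.After(missedHBInterval):
			// Missed heartbeats: try to become active.
		}
		locked, err := tryLock()
		if err != nil {
			// Transient error (e.g. database unreachable): retry later.
			continue
		}
		if locked {
			return true
		}
	}
}

func main() {
	quit := make(chan struct{})
	hb := make(chan struct{})
	attempts := 0
	active := runStandby(quit, hb, 10*time.Millisecond, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // succeed on the third attempt
	})
	fmt.Println("active:", active, "attempts:", attempts)
}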
nats-io/nats-streaming-server
server/ft.go
ftGetStoreLock
func (s *StanServer) ftGetStoreLock() (bool, error) {
	// Normally, the store would be set early and is immutable, but some
	// FT tests do set a mock store after the server is created, so use
	// locking here to avoid race reports.
	s.mu.Lock()
	store := s.store
	s.mu.Unlock()
	if ok, err := store.GetExclusiveLock(); !ok || err != nil {
		// We got an error not related to locking (could be not supported,
		// permissions error, file not reachable, etc..)
		if err != nil {
			return false, fmt.Errorf("ft: fatal error getting the store lock: %v", err)
		}
		// If ok is false, it means that we did not get the lock.
		return false, nil
	}
	return true, nil
}
go
func (s *StanServer) ftGetStoreLock() (bool, error) {
	// Normally, the store would be set early and is immutable, but some
	// FT tests do set a mock store after the server is created, so use
	// locking here to avoid race reports.
	s.mu.Lock()
	store := s.store
	s.mu.Unlock()
	if ok, err := store.GetExclusiveLock(); !ok || err != nil {
		// We got an error not related to locking (could be not supported,
		// permissions error, file not reachable, etc..)
		if err != nil {
			return false, fmt.Errorf("ft: fatal error getting the store lock: %v", err)
		}
		// If ok is false, it means that we did not get the lock.
		return false, nil
	}
	return true, nil
}
[ "func", "(", "s", "*", "StanServer", ")", "ftGetStoreLock", "(", ")", "(", "bool", ",", "error", ")", "{", "// Normally, the store would be set early and is immutable, but some", "// FT tests do set a mock store after the server is created, so use", "// locking here to avoid race reports.", "s", ".", "mu", ".", "Lock", "(", ")", "\n", "store", ":=", "s", ".", "store", "\n", "s", ".", "mu", ".", "Unlock", "(", ")", "\n", "if", "ok", ",", "err", ":=", "store", ".", "GetExclusiveLock", "(", ")", ";", "!", "ok", "||", "err", "!=", "nil", "{", "// We got an error not related to locking (could be not supported,", "// permissions error, file not reachable, etc..)", "if", "err", "!=", "nil", "{", "return", "false", ",", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "// If ok is false, it means that we did not get the lock.", "return", "false", ",", "nil", "\n", "}", "\n", "return", "true", ",", "nil", "\n", "}" ]
// ftGetStoreLock returns true if the server was able to get the
// exclusive store lock, false otherwise, or if there was a fatal error doing so.
[ "ftGetStoreLock", "returns", "true", "if", "the", "server", "was", "able", "to", "get", "the", "exclusive", "store", "lock", "false", "othewise", "or", "if", "there", "was", "a", "fatal", "error", "doing", "so", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L106-L123
train
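The lock helper distinguishes three outcomes: lock acquired, lock held elsewhere (no error), and a fatal error. A tiny sketch of that contract follows; the Locker interface and fakeStore type are hypothetical stand-ins for the store's GetExclusiveLock, introduced only to make the example self-contained.

package main

import (
	"errors"
	"fmt"
)

// Locker is a stand-in for the store's GetExclusiveLock method.
type Locker interface {
	GetExclusiveLock() (bool, error)
}

// getStoreLock returns (true, nil) when the lock was acquired,
// (false, nil) when another instance holds it, and (false, err)
// for anything fatal, matching ftGetStoreLock's behavior.
func getStoreLock(l Locker) (bool, error) {
	ok, err := l.GetExclusiveLock()
	if err != nil {
		return false, fmt.Errorf("ft: fatal error getting the store lock: %v", err)
	}
	return ok, nil
}

type fakeStore struct {
	held bool
	err  error
}

func (f *fakeStore) GetExclusiveLock() (bool, error) { return !f.held && f.err == nil, f.err }

func main() {
	fmt.Println(getStoreLock(&fakeStore{}))                                  // true <nil>
	fmt.Println(getStoreLock(&fakeStore{held: true}))                        // false <nil>
	fmt.Println(getStoreLock(&fakeStore{err: errors.New("not supported")}))  // false ft: fatal error ...
}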
nats-io/nats-streaming-server
server/ft.go
ftSendHBLoop
func (s *StanServer) ftSendHBLoop(activationTime time.Time) {
	// Release the wait group on exit
	defer s.wg.Done()

	timeAsBytes, _ := activationTime.MarshalBinary()
	ftHB := &spb.CtrlMsg{
		MsgType:  spb.CtrlMsg_FTHeartbeat,
		ServerID: s.serverID,
		Data:     timeAsBytes,
	}
	ftHBBytes, _ := ftHB.Marshal()
	print, _ := util.NewBackoffTimeCheck(time.Second, 2, time.Minute)
	for {
		if err := s.ftnc.Publish(s.ftSubject, ftHBBytes); err != nil {
			if print.Ok() {
				s.log.Errorf("Unable to send FT heartbeat: %v", err)
			}
		}
	startSelect:
		select {
		case m := <-s.ftHBCh:
			hb := spb.CtrlMsg{}
			if err := hb.Unmarshal(m.Data); err != nil {
				goto startSelect
			}
			// Ignore our own message
			if hb.MsgType != spb.CtrlMsg_FTHeartbeat || hb.ServerID == s.serverID {
				goto startSelect
			}
			// Another server claims to be active
			peerActivationTime := time.Time{}
			if err := peerActivationTime.UnmarshalBinary(hb.Data); err != nil {
				s.log.Errorf("Error decoding activation time: %v", err)
			} else {
				// Step down if the peer's activation time is earlier than ours.
				err := fmt.Errorf("ft: serverID %q claims to be active", hb.ServerID)
				if peerActivationTime.Before(activationTime) {
					err = fmt.Errorf("%s, aborting", err)
					if ftNoPanic {
						s.setLastError(err)
						return
					}
					panic(err)
				} else {
					s.log.Errorf(err.Error())
				}
			}
		case <-time.After(s.ftHBInterval):
			// We'll send the ping at the top of the for loop
		case <-s.ftQuit:
			return
		}
	}
}
go
func (s *StanServer) ftSendHBLoop(activationTime time.Time) {
	// Release the wait group on exit
	defer s.wg.Done()

	timeAsBytes, _ := activationTime.MarshalBinary()
	ftHB := &spb.CtrlMsg{
		MsgType:  spb.CtrlMsg_FTHeartbeat,
		ServerID: s.serverID,
		Data:     timeAsBytes,
	}
	ftHBBytes, _ := ftHB.Marshal()
	print, _ := util.NewBackoffTimeCheck(time.Second, 2, time.Minute)
	for {
		if err := s.ftnc.Publish(s.ftSubject, ftHBBytes); err != nil {
			if print.Ok() {
				s.log.Errorf("Unable to send FT heartbeat: %v", err)
			}
		}
	startSelect:
		select {
		case m := <-s.ftHBCh:
			hb := spb.CtrlMsg{}
			if err := hb.Unmarshal(m.Data); err != nil {
				goto startSelect
			}
			// Ignore our own message
			if hb.MsgType != spb.CtrlMsg_FTHeartbeat || hb.ServerID == s.serverID {
				goto startSelect
			}
			// Another server claims to be active
			peerActivationTime := time.Time{}
			if err := peerActivationTime.UnmarshalBinary(hb.Data); err != nil {
				s.log.Errorf("Error decoding activation time: %v", err)
			} else {
				// Step down if the peer's activation time is earlier than ours.
				err := fmt.Errorf("ft: serverID %q claims to be active", hb.ServerID)
				if peerActivationTime.Before(activationTime) {
					err = fmt.Errorf("%s, aborting", err)
					if ftNoPanic {
						s.setLastError(err)
						return
					}
					panic(err)
				} else {
					s.log.Errorf(err.Error())
				}
			}
		case <-time.After(s.ftHBInterval):
			// We'll send the ping at the top of the for loop
		case <-s.ftQuit:
			return
		}
	}
}
[ "func", "(", "s", "*", "StanServer", ")", "ftSendHBLoop", "(", "activationTime", "time", ".", "Time", ")", "{", "// Release the wait group on exit", "defer", "s", ".", "wg", ".", "Done", "(", ")", "\n\n", "timeAsBytes", ",", "_", ":=", "activationTime", ".", "MarshalBinary", "(", ")", "\n", "ftHB", ":=", "&", "spb", ".", "CtrlMsg", "{", "MsgType", ":", "spb", ".", "CtrlMsg_FTHeartbeat", ",", "ServerID", ":", "s", ".", "serverID", ",", "Data", ":", "timeAsBytes", ",", "}", "\n", "ftHBBytes", ",", "_", ":=", "ftHB", ".", "Marshal", "(", ")", "\n", "print", ",", "_", ":=", "util", ".", "NewBackoffTimeCheck", "(", "time", ".", "Second", ",", "2", ",", "time", ".", "Minute", ")", "\n", "for", "{", "if", "err", ":=", "s", ".", "ftnc", ".", "Publish", "(", "s", ".", "ftSubject", ",", "ftHBBytes", ")", ";", "err", "!=", "nil", "{", "if", "print", ".", "Ok", "(", ")", "{", "s", ".", "log", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "}", "\n", "startSelect", ":", "select", "{", "case", "m", ":=", "<-", "s", ".", "ftHBCh", ":", "hb", ":=", "spb", ".", "CtrlMsg", "{", "}", "\n", "if", "err", ":=", "hb", ".", "Unmarshal", "(", "m", ".", "Data", ")", ";", "err", "!=", "nil", "{", "goto", "startSelect", "\n", "}", "\n", "// Ignore our own message", "if", "hb", ".", "MsgType", "!=", "spb", ".", "CtrlMsg_FTHeartbeat", "||", "hb", ".", "ServerID", "==", "s", ".", "serverID", "{", "goto", "startSelect", "\n", "}", "\n", "// Another server claims to be active", "peerActivationTime", ":=", "time", ".", "Time", "{", "}", "\n", "if", "err", ":=", "peerActivationTime", ".", "UnmarshalBinary", "(", "hb", ".", "Data", ")", ";", "err", "!=", "nil", "{", "s", ".", "log", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "else", "{", "// Step down if the peer's activation time is earlier than ours.", "err", ":=", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "hb", ".", "ServerID", ")", "\n", "if", "peerActivationTime", ".", "Before", "(", "activationTime", ")", "{", "err", "=", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "if", "ftNoPanic", "{", "s", ".", "setLastError", "(", "err", ")", "\n", "return", "\n", "}", "\n", "panic", "(", "err", ")", "\n", "}", "else", "{", "s", ".", "log", ".", "Errorf", "(", "err", ".", "Error", "(", ")", ")", "\n", "}", "\n", "}", "\n", "case", "<-", "time", ".", "After", "(", "s", ".", "ftHBInterval", ")", ":", "// We'll send the ping at the top of the for loop", "case", "<-", "s", ".", "ftQuit", ":", "return", "\n", "}", "\n", "}", "\n", "}" ]
// ftSendHBLoop is used by an active server to send HB to the FT subject.
// Standby servers receiving those HBs do not attempt to lock the store.
// When they miss HBs, they will.
[ "ftSendHBLoop", "is", "used", "by", "an", "active", "server", "to", "send", "HB", "to", "the", "FT", "subject", ".", "Standby", "servers", "receiving", "those", "HBs", "do", "not", "attempt", "to", "lock", "the", "store", ".", "When", "they", "miss", "HBs", "they", "will", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L128-L181
train
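When two servers both claim to be active, the loop compares activation times carried in the heartbeat payload via time's binary encoding, and the server that activated later steps down. The sketch below isolates just that tie-break as a runnable program, without any NATS or protobuf plumbing; shouldStepDown is an illustrative helper, not a server function.

package main

import (
	"fmt"
	"time"
)

// shouldStepDown decodes the peer's activation time from its binary
// encoding and reports whether the local server (activated at local)
// should abort, i.e. whether the peer activated first.
func shouldStepDown(local time.Time, peerEncoded []byte) (bool, error) {
	var peer time.Time
	if err := peer.UnmarshalBinary(peerEncoded); err != nil {
		return false, fmt.Errorf("error decoding activation time: %v", err)
	}
	return peer.Before(local), nil
}

func main() {
	older := time.Now()
	newer := older.Add(time.Second)

	olderBytes, _ := older.MarshalBinary()
	newerBytes, _ := newer.MarshalBinary()

	// The newer server sees a heartbeat from the older one: it must step down.
	stepDown, err := shouldStepDown(newer, olderBytes)
	fmt.Println(stepDown, err) // true <nil>

	// The older server sees the newer one's heartbeat: it stays active.
	stepDown, err = shouldStepDown(older, newerBytes)
	fmt.Println(stepDown, err) // false <nil>
}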
nats-io/nats-streaming-server
server/ft.go
ftSetup
func (s *StanServer) ftSetup() error {
	// Check that store type is ok. So far only support for FileStore
	if s.opts.StoreType != stores.TypeFile && s.opts.StoreType != stores.TypeSQL {
		return fmt.Errorf("ft: only %v or %v stores supported in FT mode", stores.TypeFile, stores.TypeSQL)
	}
	// So far, those are not exposed to users, just used in tests.
	// Still make sure that the missed HB interval is > than the HB
	// interval.
	if ftHBMissedInterval < time.Duration(float64(ftHBInterval)*1.1) {
		return fmt.Errorf("ft: the missed heartbeat interval needs to be"+
			" at least 10%% of the heartbeat interval (hb=%v missed hb=%v",
			ftHBInterval, ftHBMissedInterval)
	}
	// Set the HB and MissedHB intervals, using a bit of randomness
	rand.Seed(time.Now().UnixNano())
	s.ftHBInterval = ftGetRandomInterval(ftHBInterval)
	s.ftHBMissedInterval = ftGetRandomInterval(ftHBMissedInterval)
	// Subscribe to FT subject
	s.ftSubject = fmt.Sprintf("%s.%s.%s", ftHBPrefix, s.opts.ID, s.opts.FTGroupName)
	s.ftHBCh = make(chan *nats.Msg)
	sub, err := s.ftnc.Subscribe(s.ftSubject, func(m *nats.Msg) {
		// Dropping incoming FT HBs is not crucial, we will then check for
		// store lock.
		select {
		case s.ftHBCh <- m:
		default:
		}
	})
	if err != nil {
		return fmt.Errorf("ft: unable to subscribe on ft subject: %v", err)
	}
	// We don't want to cause possible slow consumer error
	sub.SetPendingLimits(-1, -1)
	// Create channel to notify FT go routine to quit.
	s.ftQuit = make(chan struct{}, 1)
	// Set the state as standby initially
	s.state = FTStandby
	return nil
}
go
func (s *StanServer) ftSetup() error {
	// Check that store type is ok. So far only support for FileStore
	if s.opts.StoreType != stores.TypeFile && s.opts.StoreType != stores.TypeSQL {
		return fmt.Errorf("ft: only %v or %v stores supported in FT mode", stores.TypeFile, stores.TypeSQL)
	}
	// So far, those are not exposed to users, just used in tests.
	// Still make sure that the missed HB interval is > than the HB
	// interval.
	if ftHBMissedInterval < time.Duration(float64(ftHBInterval)*1.1) {
		return fmt.Errorf("ft: the missed heartbeat interval needs to be"+
			" at least 10%% of the heartbeat interval (hb=%v missed hb=%v",
			ftHBInterval, ftHBMissedInterval)
	}
	// Set the HB and MissedHB intervals, using a bit of randomness
	rand.Seed(time.Now().UnixNano())
	s.ftHBInterval = ftGetRandomInterval(ftHBInterval)
	s.ftHBMissedInterval = ftGetRandomInterval(ftHBMissedInterval)
	// Subscribe to FT subject
	s.ftSubject = fmt.Sprintf("%s.%s.%s", ftHBPrefix, s.opts.ID, s.opts.FTGroupName)
	s.ftHBCh = make(chan *nats.Msg)
	sub, err := s.ftnc.Subscribe(s.ftSubject, func(m *nats.Msg) {
		// Dropping incoming FT HBs is not crucial, we will then check for
		// store lock.
		select {
		case s.ftHBCh <- m:
		default:
		}
	})
	if err != nil {
		return fmt.Errorf("ft: unable to subscribe on ft subject: %v", err)
	}
	// We don't want to cause possible slow consumer error
	sub.SetPendingLimits(-1, -1)
	// Create channel to notify FT go routine to quit.
	s.ftQuit = make(chan struct{}, 1)
	// Set the state as standby initially
	s.state = FTStandby
	return nil
}
[ "func", "(", "s", "*", "StanServer", ")", "ftSetup", "(", ")", "error", "{", "// Check that store type is ok. So far only support for FileStore", "if", "s", ".", "opts", ".", "StoreType", "!=", "stores", ".", "TypeFile", "&&", "s", ".", "opts", ".", "StoreType", "!=", "stores", ".", "TypeSQL", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "stores", ".", "TypeFile", ",", "stores", ".", "TypeSQL", ")", "\n", "}", "\n", "// So far, those are not exposed to users, just used in tests.", "// Still make sure that the missed HB interval is > than the HB", "// interval.", "if", "ftHBMissedInterval", "<", "time", ".", "Duration", "(", "float64", "(", "ftHBInterval", ")", "*", "1.1", ")", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", "+", "\"", "\"", ",", "ftHBInterval", ",", "ftHBMissedInterval", ")", "\n", "}", "\n", "// Set the HB and MissedHB intervals, using a bit of randomness", "rand", ".", "Seed", "(", "time", ".", "Now", "(", ")", ".", "UnixNano", "(", ")", ")", "\n", "s", ".", "ftHBInterval", "=", "ftGetRandomInterval", "(", "ftHBInterval", ")", "\n", "s", ".", "ftHBMissedInterval", "=", "ftGetRandomInterval", "(", "ftHBMissedInterval", ")", "\n", "// Subscribe to FT subject", "s", ".", "ftSubject", "=", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "ftHBPrefix", ",", "s", ".", "opts", ".", "ID", ",", "s", ".", "opts", ".", "FTGroupName", ")", "\n", "s", ".", "ftHBCh", "=", "make", "(", "chan", "*", "nats", ".", "Msg", ")", "\n", "sub", ",", "err", ":=", "s", ".", "ftnc", ".", "Subscribe", "(", "s", ".", "ftSubject", ",", "func", "(", "m", "*", "nats", ".", "Msg", ")", "{", "// Dropping incoming FT HBs is not crucial, we will then check for", "// store lock.", "select", "{", "case", "s", ".", "ftHBCh", "<-", "m", ":", "default", ":", "}", "\n", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "err", ")", "\n", "}", "\n", "// We don't want to cause possible slow consumer error", "sub", ".", "SetPendingLimits", "(", "-", "1", ",", "-", "1", ")", "\n", "// Create channel to notify FT go routine to quit.", "s", ".", "ftQuit", "=", "make", "(", "chan", "struct", "{", "}", ",", "1", ")", "\n", "// Set the state as standby initially", "s", ".", "state", "=", "FTStandby", "\n", "return", "nil", "\n", "}" ]
// ftSetup checks that all required FT parameters have been specified and
// creates the channel required for shutdown.
// Note that FTGroupName has to be set before server invokes this function,
// so this parameter is not checked here.
[ "ftSetup", "checks", "that", "all", "required", "FT", "parameters", "have", "been", "specified", "and", "create", "the", "channel", "required", "for", "shutdown", ".", "Note", "that", "FTGroupName", "has", "to", "be", "set", "before", "server", "invokes", "this", "function", "so", "this", "parameter", "is", "not", "checked", "here", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/ft.go#L187-L225
train
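The heartbeat subscription hands each message to the FT loop with a non-blocking send, so the NATS callback never stalls; dropping a heartbeat is harmless because a standby will simply fall back to checking the store lock. The following is a generic sketch of that select/default pattern with no NATS dependency; msg and tryDeliver are placeholder names introduced only for the example.

package main

import (
	"fmt"
	"time"
)

type msg struct{ subject string }

// tryDeliver forwards m to ch without blocking; it returns false when the
// receiver is busy and the message is dropped, mirroring the select/default
// used in the FT heartbeat subscription callback.
func tryDeliver(ch chan<- *msg, m *msg) bool {
	select {
	case ch <- m:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan *msg) // unbuffered, like s.ftHBCh
	// Nobody is receiving yet, so this delivery is dropped instead of blocking.
	fmt.Println(tryDeliver(ch, &msg{subject: "ft.hb"})) // false

	done := make(chan struct{})
	go func() {
		fmt.Println("received:", (<-ch).subject)
		close(done)
	}()
	// Retry until the receiver is ready (the real loop just sends the next HB later).
	for !tryDeliver(ch, &msg{subject: "ft.hb"}) {
		time.Sleep(time.Millisecond)
	}
	<-done
}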
nats-io/nats-streaming-server
server/client.go
newClientStore
func newClientStore(store stores.Store) *clientStore {
	return &clientStore{
		clients:      make(map[string]*client),
		connIDs:      make(map[string]*client),
		knownInvalid: make(map[string]struct{}),
		store:        store,
	}
}
go
func newClientStore(store stores.Store) *clientStore {
	return &clientStore{
		clients:      make(map[string]*client),
		connIDs:      make(map[string]*client),
		knownInvalid: make(map[string]struct{}),
		store:        store,
	}
}
[ "func", "newClientStore", "(", "store", "stores", ".", "Store", ")", "*", "clientStore", "{", "return", "&", "clientStore", "{", "clients", ":", "make", "(", "map", "[", "string", "]", "*", "client", ")", ",", "connIDs", ":", "make", "(", "map", "[", "string", "]", "*", "client", ")", ",", "knownInvalid", ":", "make", "(", "map", "[", "string", "]", "struct", "{", "}", ")", ",", "store", ":", "store", ",", "}", "\n", "}" ]
// newClientStore creates a new clientStore instance using `store` as the backing storage.
[ "newClientStore", "creates", "a", "new", "clientStore", "instance", "using", "store", "as", "the", "backing", "storage", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L51-L58
train
nats-io/nats-streaming-server
server/client.go
getSubsCopy
func (c *client) getSubsCopy() []*subState {
	subs := make([]*subState, len(c.subs))
	copy(subs, c.subs)
	return subs
}
go
func (c *client) getSubsCopy() []*subState {
	subs := make([]*subState, len(c.subs))
	copy(subs, c.subs)
	return subs
}
[ "func", "(", "c", "*", "client", ")", "getSubsCopy", "(", ")", "[", "]", "*", "subState", "{", "subs", ":=", "make", "(", "[", "]", "*", "subState", ",", "len", "(", "c", ".", "subs", ")", ")", "\n", "copy", "(", "subs", ",", "c", ".", "subs", ")", "\n", "return", "subs", "\n", "}" ]
// getSubsCopy returns a copy of the client's subscribers array.
// At least Read-lock must be held by the caller.
[ "getSubsCopy", "returns", "a", "copy", "of", "the", "client", "s", "subscribers", "array", ".", "At", "least", "Read", "-", "lock", "must", "be", "held", "by", "the", "caller", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L62-L66
train
nats-io/nats-streaming-server
server/client.go
register
func (cs *clientStore) register(info *spb.ClientInfo) (*client, error) {
	cs.Lock()
	defer cs.Unlock()
	c := cs.clients[info.ID]
	if c != nil {
		return nil, ErrInvalidClient
	}
	sc, err := cs.store.AddClient(info)
	if err != nil {
		return nil, err
	}
	c = &client{info: sc, subs: make([]*subState, 0, 4)}
	cs.clients[c.info.ID] = c
	if len(c.info.ConnID) > 0 {
		cs.connIDs[string(c.info.ConnID)] = c
	}
	delete(cs.knownInvalid, getKnownInvalidKey(info.ID, info.ConnID))
	if cs.waitOnRegister != nil {
		ch := cs.waitOnRegister[c.info.ID]
		if ch != nil {
			ch <- struct{}{}
			delete(cs.waitOnRegister, c.info.ID)
		}
	}
	return c, nil
}
go
func (cs *clientStore) register(info *spb.ClientInfo) (*client, error) {
	cs.Lock()
	defer cs.Unlock()
	c := cs.clients[info.ID]
	if c != nil {
		return nil, ErrInvalidClient
	}
	sc, err := cs.store.AddClient(info)
	if err != nil {
		return nil, err
	}
	c = &client{info: sc, subs: make([]*subState, 0, 4)}
	cs.clients[c.info.ID] = c
	if len(c.info.ConnID) > 0 {
		cs.connIDs[string(c.info.ConnID)] = c
	}
	delete(cs.knownInvalid, getKnownInvalidKey(info.ID, info.ConnID))
	if cs.waitOnRegister != nil {
		ch := cs.waitOnRegister[c.info.ID]
		if ch != nil {
			ch <- struct{}{}
			delete(cs.waitOnRegister, c.info.ID)
		}
	}
	return c, nil
}
[ "func", "(", "cs", "*", "clientStore", ")", "register", "(", "info", "*", "spb", ".", "ClientInfo", ")", "(", "*", "client", ",", "error", ")", "{", "cs", ".", "Lock", "(", ")", "\n", "defer", "cs", ".", "Unlock", "(", ")", "\n", "c", ":=", "cs", ".", "clients", "[", "info", ".", "ID", "]", "\n", "if", "c", "!=", "nil", "{", "return", "nil", ",", "ErrInvalidClient", "\n", "}", "\n", "sc", ",", "err", ":=", "cs", ".", "store", ".", "AddClient", "(", "info", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "c", "=", "&", "client", "{", "info", ":", "sc", ",", "subs", ":", "make", "(", "[", "]", "*", "subState", ",", "0", ",", "4", ")", "}", "\n", "cs", ".", "clients", "[", "c", ".", "info", ".", "ID", "]", "=", "c", "\n", "if", "len", "(", "c", ".", "info", ".", "ConnID", ")", ">", "0", "{", "cs", ".", "connIDs", "[", "string", "(", "c", ".", "info", ".", "ConnID", ")", "]", "=", "c", "\n", "}", "\n", "delete", "(", "cs", ".", "knownInvalid", ",", "getKnownInvalidKey", "(", "info", ".", "ID", ",", "info", ".", "ConnID", ")", ")", "\n", "if", "cs", ".", "waitOnRegister", "!=", "nil", "{", "ch", ":=", "cs", ".", "waitOnRegister", "[", "c", ".", "info", ".", "ID", "]", "\n", "if", "ch", "!=", "nil", "{", "ch", "<-", "struct", "{", "}", "{", "}", "\n", "delete", "(", "cs", ".", "waitOnRegister", ",", "c", ".", "info", ".", "ID", ")", "\n", "}", "\n", "}", "\n", "return", "c", ",", "nil", "\n", "}" ]
// Register a new client. Returns ErrInvalidClient if client is already registered.
[ "Register", "a", "new", "client", ".", "Returns", "ErrInvalidClient", "if", "client", "is", "already", "registered", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L76-L101
train
nats-io/nats-streaming-server
server/client.go
unregister
func (cs *clientStore) unregister(ID string) (*client, error) {
	cs.Lock()
	defer cs.Unlock()
	c := cs.clients[ID]
	if c == nil {
		return nil, nil
	}
	c.Lock()
	if c.hbt != nil {
		c.hbt.Stop()
		c.hbt = nil
	}
	connID := c.info.ConnID
	c.Unlock()
	delete(cs.clients, ID)
	if len(connID) > 0 {
		delete(cs.connIDs, string(connID))
	}
	if cs.waitOnRegister != nil {
		delete(cs.waitOnRegister, ID)
	}
	err := cs.store.DeleteClient(ID)
	return c, err
}
go
func (cs *clientStore) unregister(ID string) (*client, error) {
	cs.Lock()
	defer cs.Unlock()
	c := cs.clients[ID]
	if c == nil {
		return nil, nil
	}
	c.Lock()
	if c.hbt != nil {
		c.hbt.Stop()
		c.hbt = nil
	}
	connID := c.info.ConnID
	c.Unlock()
	delete(cs.clients, ID)
	if len(connID) > 0 {
		delete(cs.connIDs, string(connID))
	}
	if cs.waitOnRegister != nil {
		delete(cs.waitOnRegister, ID)
	}
	err := cs.store.DeleteClient(ID)
	return c, err
}
[ "func", "(", "cs", "*", "clientStore", ")", "unregister", "(", "ID", "string", ")", "(", "*", "client", ",", "error", ")", "{", "cs", ".", "Lock", "(", ")", "\n", "defer", "cs", ".", "Unlock", "(", ")", "\n", "c", ":=", "cs", ".", "clients", "[", "ID", "]", "\n", "if", "c", "==", "nil", "{", "return", "nil", ",", "nil", "\n", "}", "\n", "c", ".", "Lock", "(", ")", "\n", "if", "c", ".", "hbt", "!=", "nil", "{", "c", ".", "hbt", ".", "Stop", "(", ")", "\n", "c", ".", "hbt", "=", "nil", "\n", "}", "\n", "connID", ":=", "c", ".", "info", ".", "ConnID", "\n", "c", ".", "Unlock", "(", ")", "\n", "delete", "(", "cs", ".", "clients", ",", "ID", ")", "\n", "if", "len", "(", "connID", ")", ">", "0", "{", "delete", "(", "cs", ".", "connIDs", ",", "string", "(", "connID", ")", ")", "\n", "}", "\n", "if", "cs", ".", "waitOnRegister", "!=", "nil", "{", "delete", "(", "cs", ".", "waitOnRegister", ",", "ID", ")", "\n", "}", "\n", "err", ":=", "cs", ".", "store", ".", "DeleteClient", "(", "ID", ")", "\n", "return", "c", ",", "err", "\n", "}" ]
// Unregister a client.
[ "Unregister", "a", "client", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L104-L127
train
nats-io/nats-streaming-server
server/client.go
isValid
func (cs *clientStore) isValid(ID string, connID []byte) bool {
	cs.RLock()
	valid := cs.lookupByConnIDOrID(ID, connID) != nil
	cs.RUnlock()
	return valid
}
go
func (cs *clientStore) isValid(ID string, connID []byte) bool {
	cs.RLock()
	valid := cs.lookupByConnIDOrID(ID, connID) != nil
	cs.RUnlock()
	return valid
}
[ "func", "(", "cs", "*", "clientStore", ")", "isValid", "(", "ID", "string", ",", "connID", "[", "]", "byte", ")", "bool", "{", "cs", ".", "RLock", "(", ")", "\n", "valid", ":=", "cs", ".", "lookupByConnIDOrID", "(", "ID", ",", "connID", ")", "!=", "nil", "\n", "cs", ".", "RUnlock", "(", ")", "\n", "return", "valid", "\n", "}" ]
// IsValid returns true if the client is registered, false otherwise.
[ "IsValid", "returns", "true", "if", "the", "client", "is", "registered", "false", "otherwise", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L130-L135
train
nats-io/nats-streaming-server
server/client.go
lookupByConnIDOrID
func (cs *clientStore) lookupByConnIDOrID(ID string, connID []byte) *client {
	var c *client
	if len(connID) > 0 {
		c = cs.connIDs[string(connID)]
	} else {
		c = cs.clients[ID]
	}
	return c
}
go
func (cs *clientStore) lookupByConnIDOrID(ID string, connID []byte) *client {
	var c *client
	if len(connID) > 0 {
		c = cs.connIDs[string(connID)]
	} else {
		c = cs.clients[ID]
	}
	return c
}
[ "func", "(", "cs", "*", "clientStore", ")", "lookupByConnIDOrID", "(", "ID", "string", ",", "connID", "[", "]", "byte", ")", "*", "client", "{", "var", "c", "*", "client", "\n", "if", "len", "(", "connID", ")", ">", "0", "{", "c", "=", "cs", ".", "connIDs", "[", "string", "(", "connID", ")", "]", "\n", "}", "else", "{", "c", "=", "cs", ".", "clients", "[", "ID", "]", "\n", "}", "\n", "return", "c", "\n", "}" ]
// Lookup client by ConnID if not nil, otherwise by clientID.
// Assume at least clientStore RLock is held on entry.
[ "Lookup", "client", "by", "ConnID", "if", "not", "nil", "otherwise", "by", "clientID", ".", "Assume", "at", "least", "clientStore", "RLock", "is", "held", "on", "entry", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L197-L205
train
nats-io/nats-streaming-server
server/client.go
lookup
func (cs *clientStore) lookup(ID string) *client {
	cs.RLock()
	c := cs.clients[ID]
	cs.RUnlock()
	return c
}
go
func (cs *clientStore) lookup(ID string) *client {
	cs.RLock()
	c := cs.clients[ID]
	cs.RUnlock()
	return c
}
[ "func", "(", "cs", "*", "clientStore", ")", "lookup", "(", "ID", "string", ")", "*", "client", "{", "cs", ".", "RLock", "(", ")", "\n", "c", ":=", "cs", ".", "clients", "[", "ID", "]", "\n", "cs", ".", "RUnlock", "(", ")", "\n", "return", "c", "\n", "}" ]
// Lookup a client
[ "Lookup", "a", "client" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L208-L213
train
nats-io/nats-streaming-server
server/client.go
lookupByConnID
func (cs *clientStore) lookupByConnID(connID []byte) *client {
	cs.RLock()
	c := cs.connIDs[string(connID)]
	cs.RUnlock()
	return c
}
go
func (cs *clientStore) lookupByConnID(connID []byte) *client {
	cs.RLock()
	c := cs.connIDs[string(connID)]
	cs.RUnlock()
	return c
}
[ "func", "(", "cs", "*", "clientStore", ")", "lookupByConnID", "(", "connID", "[", "]", "byte", ")", "*", "client", "{", "cs", ".", "RLock", "(", ")", "\n", "c", ":=", "cs", ".", "connIDs", "[", "string", "(", "connID", ")", "]", "\n", "cs", ".", "RUnlock", "(", ")", "\n", "return", "c", "\n", "}" ]
// Lookup a client by connection ID
[ "Lookup", "a", "client", "by", "connection", "ID" ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L216-L221
train
nats-io/nats-streaming-server
server/client.go
getSubs
func (cs *clientStore) getSubs(ID string) []*subState {
	cs.RLock()
	defer cs.RUnlock()
	c := cs.clients[ID]
	if c == nil {
		return nil
	}
	c.RLock()
	subs := c.getSubsCopy()
	c.RUnlock()
	return subs
}
go
func (cs *clientStore) getSubs(ID string) []*subState {
	cs.RLock()
	defer cs.RUnlock()
	c := cs.clients[ID]
	if c == nil {
		return nil
	}
	c.RLock()
	subs := c.getSubsCopy()
	c.RUnlock()
	return subs
}
[ "func", "(", "cs", "*", "clientStore", ")", "getSubs", "(", "ID", "string", ")", "[", "]", "*", "subState", "{", "cs", ".", "RLock", "(", ")", "\n", "defer", "cs", ".", "RUnlock", "(", ")", "\n", "c", ":=", "cs", ".", "clients", "[", "ID", "]", "\n", "if", "c", "==", "nil", "{", "return", "nil", "\n", "}", "\n", "c", ".", "RLock", "(", ")", "\n", "subs", ":=", "c", ".", "getSubsCopy", "(", ")", "\n", "c", ".", "RUnlock", "(", ")", "\n", "return", "subs", "\n", "}" ]
// GetSubs returns the list of subscriptions for the client identified by ID,
// or nil if such client is not found.
[ "GetSubs", "returns", "the", "list", "of", "subscriptions", "for", "the", "client", "identified", "by", "ID", "or", "nil", "if", "such", "client", "is", "not", "found", "." ]
57c6c84265c0012a1efef365703c221329804d4c
https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/client.go#L225-L236
train
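getSubs takes the store's read lock to find the client and then the client's own read lock to snapshot its subscription slice, so callers can iterate the returned copy without holding either lock. Below is a condensed, self-contained sketch of that two-level read-lock/copy pattern; the sub, client and clientStore types here are simplified stand-ins for the server's real types.

package main

import (
	"fmt"
	"sync"
)

type sub struct{ inbox string }

type client struct {
	sync.RWMutex
	subs []*sub
}

type clientStore struct {
	sync.RWMutex
	clients map[string]*client
}

// getSubs returns a copy of the client's subscriptions, or nil if the
// client is unknown. Returning a copy lets callers range over it lock-free.
func (cs *clientStore) getSubs(id string) []*sub {
	cs.RLock()
	defer cs.RUnlock()
	c := cs.clients[id]
	if c == nil {
		return nil
	}
	c.RLock()
	subs := make([]*sub, len(c.subs))
	copy(subs, c.subs)
	c.RUnlock()
	return subs
}

func main() {
	cs := &clientStore{clients: map[string]*client{
		"me": {subs: []*sub{{inbox: "_INBOX.1"}, {inbox: "_INBOX.2"}}},
	}}
	for _, s := range cs.getSubs("me") {
		fmt.Println(s.inbox)
	}
	fmt.Println(cs.getSubs("unknown") == nil) // true
}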