Column schema for the rows below (types and observed ranges):

    column            type     range
    id                int32    values 0 to 167k
    repo              string   length 5 to 54
    path              string   length 4 to 155
    func_name         string   length 1 to 118
    original_string   string   length 52 to 85.5k
    language          string   1 distinct class (go)
    code              string   length 52 to 85.5k
    code_tokens       list     length 21 to 1.41k
    docstring         string   length 6 to 2.61k
    docstring_tokens  list     length 3 to 215
    sha               string   length 40 to 40
    url               string   length 85 to 252
13,400 · stripe/veneur · worker.go · ImportMetric · go
func (w *Worker) ImportMetric(other samplers.JSONMetric) {
    w.mutex.Lock()
    defer w.mutex.Unlock()

    // we don't increment the processed metric counter here, it was already
    // counted by the original veneur that sent this to us
    w.imported++
    if other.Type == counterTypeName || other.Type == gaugeTypeName {
        // this is an odd special case -- counters that are imported are global
        w.wm.Upsert(other.MetricKey, samplers.GlobalOnly, other.Tags)
    } else {
        w.wm.Upsert(other.MetricKey, samplers.MixedScope, other.Tags)
    }

    switch other.Type {
    case counterTypeName:
        if err := w.wm.globalCounters[other.MetricKey].Combine(other.Value); err != nil {
            log.WithError(err).Error("Could not merge counters")
        }
    case gaugeTypeName:
        if err := w.wm.globalGauges[other.MetricKey].Combine(other.Value); err != nil {
            log.WithError(err).Error("Could not merge gauges")
        }
    case setTypeName:
        if err := w.wm.sets[other.MetricKey].Combine(other.Value); err != nil {
            log.WithError(err).Error("Could not merge sets")
        }
    case histogramTypeName:
        if err := w.wm.histograms[other.MetricKey].Combine(other.Value); err != nil {
            log.WithError(err).Error("Could not merge histograms")
        }
    case timerTypeName:
        if err := w.wm.timers[other.MetricKey].Combine(other.Value); err != nil {
            log.WithError(err).Error("Could not merge timers")
        }
    default:
        log.WithField("type", other.Type).Error("Unknown metric type for importing")
    }
}
// ImportMetric receives a metric from another veneur instance
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L336-L374
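The row above shows a recurring veneur pattern: take the worker's lock, do the bookkeeping, then dispatch on the metric's type string to the matching sampler map. A minimal, self-contained sketch of that shape follows; the Worker, Metric, and merge rules here are illustrative stand-ins, not veneur's actual types.

package main

import (
    "fmt"
    "sync"
)

// Metric is a stand-in for a wire-format metric with a type tag and a value.
type Metric struct {
    Name  string
    Type  string
    Value float64
}

// Worker merges imported metrics into per-type maps under a single mutex.
type Worker struct {
    mu       sync.Mutex
    imported int
    counters map[string]float64
    gauges   map[string]float64
}

func (w *Worker) Import(m Metric) error {
    w.mu.Lock()
    defer w.mu.Unlock()

    w.imported++
    switch m.Type {
    case "counter":
        // counters combine by addition
        w.counters[m.Name] += m.Value
    case "gauge":
        // gauges combine by last-write-wins
        w.gauges[m.Name] = m.Value
    default:
        return fmt.Errorf("unknown metric type for importing: %q", m.Type)
    }
    return nil
}

func main() {
    w := &Worker{counters: map[string]float64{}, gauges: map[string]float64{}}
    w.Import(Metric{Name: "hits", Type: "counter", Value: 3})
    w.Import(Metric{Name: "hits", Type: "counter", Value: 2})
    fmt.Println(w.counters["hits"]) // 5
}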
13,401 · stripe/veneur · worker.go · ImportMetricGRPC · go
func (w *Worker) ImportMetricGRPC(other *metricpb.Metric) (err error) {
    w.mutex.Lock()
    defer w.mutex.Unlock()

    key := samplers.NewMetricKeyFromMetric(other)

    scope := samplers.ScopeFromPB(other.Scope)
    if other.Type == metricpb.Type_Counter || other.Type == metricpb.Type_Gauge {
        scope = samplers.GlobalOnly
    }

    if scope == samplers.LocalOnly {
        return fmt.Errorf("gRPC import does not accept local metrics")
    }

    w.wm.Upsert(key, scope, other.Tags)
    w.imported++

    switch v := other.GetValue().(type) {
    case *metricpb.Metric_Counter:
        w.wm.globalCounters[key].Merge(v.Counter)
    case *metricpb.Metric_Gauge:
        w.wm.globalGauges[key].Merge(v.Gauge)
    case *metricpb.Metric_Set:
        if merr := w.wm.sets[key].Merge(v.Set); merr != nil {
            err = fmt.Errorf("could not merge a set: %v", merr)
        }
    case *metricpb.Metric_Histogram:
        switch other.Type {
        case metricpb.Type_Histogram:
            if other.Scope == metricpb.Scope_Mixed {
                w.wm.histograms[key].Merge(v.Histogram)
            } else if other.Scope == metricpb.Scope_Global {
                w.wm.globalHistograms[key].Merge(v.Histogram)
            }
        case metricpb.Type_Timer:
            if other.Scope == metricpb.Scope_Mixed {
                w.wm.timers[key].Merge(v.Histogram)
            } else if other.Scope == metricpb.Scope_Global {
                w.wm.globalTimers[key].Merge(v.Histogram)
            }
        }
    case nil:
        err = errors.New("Can't import a metric with a nil value")
    default:
        err = fmt.Errorf("Unknown metric type for importing: %T", v)
    }

    if err != nil {
        log.WithError(err).WithFields(logrus.Fields{
            "type":     other.Type,
            "name":     other.Name,
            "protocol": "grpc",
        }).Error("Failed to import a metric")
    }

    return err
}
// ImportMetricGRPC receives a metric from another veneur instance over gRPC.
//
// In practice, this is only called when in the aggregation tier, so we don't
// handle LocalOnly scope.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L380-L437
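One detail worth noting in ImportMetricGRPC is the named err return: every branch of the type switch assigns to it and falls through to a single logging site before returning. A stripped-down sketch of that control flow (the importValue function and its cases are invented for illustration):

package main

import (
    "errors"
    "fmt"
    "log"
)

func importValue(v interface{}) (err error) {
    switch v := v.(type) {
    case int:
        // merge succeeded; nothing to record
    case nil:
        err = errors.New("can't import a metric with a nil value")
    default:
        err = fmt.Errorf("unknown metric type for importing: %T", v)
    }

    // one logging site covers every failure branch above
    if err != nil {
        log.Printf("failed to import a metric: %v", err)
    }
    return err
}

func main() {
    importValue(42)
    importValue(nil)
    importValue("oops")
}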
13,402 · stripe/veneur · worker.go · Flush · go
func (w *Worker) Flush() WorkerMetrics {
    // This is a critical spot. The worker can't process metrics while this
    // mutex is held! So we try and minimize it by copying the maps of values
    // and assigning new ones.
    wm := NewWorkerMetrics()
    w.mutex.Lock()
    ret := w.wm
    processed := w.processed
    imported := w.imported

    w.wm = wm
    w.processed = 0
    w.imported = 0
    w.mutex.Unlock()

    w.stats.Count("worker.metrics_processed_total", processed, []string{}, 1.0)
    w.stats.Count("worker.metrics_imported_total", imported, []string{}, 1.0)

    return ret
}
// Flush resets the worker's internal metrics and returns their contents.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L440-L459
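The comment in Flush describes a swap-under-lock pattern: hold the mutex only long enough to exchange the accumulating state for fresh state, then report from the detached copy at leisure. A self-contained sketch, using a plain map of counters rather than veneur's WorkerMetrics:

package main

import (
    "fmt"
    "sync"
)

type Counter struct {
    mu     sync.Mutex
    counts map[string]int
}

func (c *Counter) Incr(k string) {
    c.mu.Lock()
    c.counts[k]++
    c.mu.Unlock()
}

// Flush holds the lock only long enough to swap in a fresh map,
// then does all reporting work on the detached copy.
func (c *Counter) Flush() map[string]int {
    fresh := make(map[string]int)

    c.mu.Lock()
    old := c.counts
    c.counts = fresh
    c.mu.Unlock()

    return old
}

func main() {
    c := &Counter{counts: make(map[string]int)}
    c.Incr("a")
    c.Incr("a")
    fmt.Println(c.Flush()) // map[a:2]
    fmt.Println(c.Flush()) // map[]
}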
13,403 · stripe/veneur · worker.go · NewEventWorker · go
func NewEventWorker(cl *trace.Client, stats *statsd.Client) *EventWorker {
    return &EventWorker{
        sampleChan:  make(chan ssf.SSFSample),
        mutex:       &sync.Mutex{},
        traceClient: cl,
        stats:       stats,
    }
}
// NewEventWorker creates an EventWorker ready to collect events and service checks.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L478-L485
13,404 · stripe/veneur · worker.go · Work · go
func (ew *EventWorker) Work() {
    for {
        select {
        case s := <-ew.sampleChan:
            ew.mutex.Lock()
            ew.samples = append(ew.samples, s)
            ew.mutex.Unlock()
        }
    }
}
// Work will start the EventWorker listening for events and service checks.
// This function will never return.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L489-L498
13,405 · stripe/veneur · worker.go · Flush · go
func (ew *EventWorker) Flush() []ssf.SSFSample {
    ew.mutex.Lock()

    retsamples := ew.samples
    // these slices will be allocated again at append time
    ew.samples = nil

    ew.mutex.Unlock()
    if len(retsamples) != 0 {
        ew.stats.Count("worker.other_samples_flushed_total", int64(len(retsamples)), nil, 1.0)
    }
    return retsamples
}
// Flush returns the EventWorker's stored events and service checks and
// resets the stored contents.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L502-L514
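Taken together, the two EventWorker rows above form a buffer-and-drain pair: Work appends incoming samples under a mutex, and Flush steals the whole slice by nil-ing it out. A compact sketch of the pair; unlike the real Work, this drain loop exits when the channel closes, so the example terminates:

package main

import (
    "fmt"
    "sync"
)

type buffer struct {
    mu    sync.Mutex
    items []string
    in    chan string
}

// work appends received items under the lock until the channel closes.
func (b *buffer) work() {
    for s := range b.in {
        b.mu.Lock()
        b.items = append(b.items, s)
        b.mu.Unlock()
    }
}

// flush hands back everything buffered so far and resets the slice;
// setting it to nil lets append allocate a fresh backing array later.
func (b *buffer) flush() []string {
    b.mu.Lock()
    out := b.items
    b.items = nil
    b.mu.Unlock()
    return out
}

func main() {
    b := &buffer{in: make(chan string, 2)}
    b.in <- "event"
    b.in <- "check"
    close(b.in)
    b.work() // run the drain loop inline; it exits when the channel closes
    fmt.Println(len(b.flush())) // 2
}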
13,406 · stripe/veneur · worker.go · NewSpanWorker · go
func NewSpanWorker(sinks []sinks.SpanSink, cl *trace.Client, statsd *statsd.Client, spanChan <-chan *ssf.SSFSpan, commonTags map[string]string) *SpanWorker {
    tags := make([]map[string]string, len(sinks))
    for i, sink := range sinks {
        tags[i] = map[string]string{
            "sink": sink.Name(),
        }
    }

    return &SpanWorker{
        SpanChan:        spanChan,
        sinks:           sinks,
        sinkTags:        tags,
        commonTags:      commonTags,
        cumulativeTimes: make([]int64, len(sinks)),
        traceClient:     cl,
        statsd:          statsd,
    }
}
// NewSpanWorker creates a SpanWorker ready to process spans.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L532-L549
13,407 · stripe/veneur · worker.go · Flush · go
func (tw *SpanWorker) Flush() {
    samples := &ssf.Samples{}

    // Flush and time each sink.
    for i, s := range tw.sinks {
        tags := make([]string, 0, len(tw.sinkTags[i]))
        for k, v := range tw.sinkTags[i] {
            tags = append(tags, fmt.Sprintf("%s:%s", k, v))
        }
        sinkFlushStart := time.Now()
        s.Flush()
        tw.statsd.Timing("worker.span.flush_duration_ns", time.Since(sinkFlushStart), tags, 1.0)

        // cumulative time is measured in nanoseconds
        cumulative := time.Duration(atomic.SwapInt64(&tw.cumulativeTimes[i], 0)) * time.Nanosecond
        tw.statsd.Timing(sinks.MetricKeySpanIngestDuration, cumulative, tags, 1.0)
    }

    metrics.Report(tw.traceClient, samples)
    tw.statsd.Count("worker.span.hit_chan_cap", atomic.SwapInt64(&tw.capCount, 0), nil, 1.0)
    tw.statsd.Count("worker.ssf.empty_total", atomic.SwapInt64(&tw.emptySSFCount, 0), nil, 1.0)
}
// Flush invokes flush on each sink.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/worker.go#L640-L661
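SpanWorker.Flush reads and resets its counters with atomic.SwapInt64, which returns the old value while storing the new one in a single atomic step, so increments landing between a separate read and reset can't be lost. A small sketch of that read-and-zero idiom:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

type worker struct {
    cumulativeNs int64 // nanoseconds of work since the last flush
}

// record adds the duration of one unit of work.
func (w *worker) record(d time.Duration) {
    atomic.AddInt64(&w.cumulativeNs, int64(d))
}

// flush atomically reads and zeroes the counter in one step, so no
// increments are lost between the read and the reset.
func (w *worker) flush() time.Duration {
    return time.Duration(atomic.SwapInt64(&w.cumulativeNs, 0))
}

func main() {
    w := &worker{}
    w.record(150 * time.Millisecond)
    w.record(50 * time.Millisecond)
    fmt.Println(w.flush()) // 200ms
    fmt.Println(w.flush()) // 0s
}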
13,408 · stripe/veneur · config_parse.go · ReadProxyConfig · go
func ReadProxyConfig(path string) (c ProxyConfig, err error) {
    f, err := os.Open(path)
    if err != nil {
        return c, err
    }
    defer f.Close()
    c, err = readProxyConfig(f)
    c.applyDefaults()
    return
}
// ReadProxyConfig unmarshals the proxy config file and slurps in its data.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/config_parse.go#L33-L42
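Because ReadProxyConfig uses named return values, applyDefaults runs even when decoding fails, and the caller gets the defaulted zero-value config alongside the error. A self-contained sketch of the same shape; the JSON encoding, field names, and default value are assumptions for illustration, since readProxyConfig's actual format isn't shown in this row:

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "strings"
)

type proxyConfig struct {
    ListenAddr string `json:"listen_addr"`
    Timeout    string `json:"timeout"`
}

func (c *proxyConfig) applyDefaults() {
    if c.Timeout == "" {
        c.Timeout = "10s" // hypothetical default
    }
}

func readProxyConfig(r io.Reader) (c proxyConfig, err error) {
    err = json.NewDecoder(r).Decode(&c)
    c.applyDefaults() // runs even on decode error, mirroring the original
    return
}

func main() {
    c, err := readProxyConfig(strings.NewReader(`{"listen_addr": ":8080"}`))
    fmt.Println(c.ListenAddr, c.Timeout, err) // :8080 10s <nil>
}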
13,409 · stripe/veneur · config_parse.go · ParseInterval · go
func (c Config) ParseInterval() (time.Duration, error) {
    return time.ParseDuration(c.Interval)
}
// ParseInterval handles parsing the flush interval as a time.Duration
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/config_parse.go#L229-L231
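ParseInterval is a thin wrapper over the standard library. time.ParseDuration accepts strings such as "300ms", "10s", or "1m30s", and rejects anything else with an error:

package main

import (
    "fmt"
    "time"
)

func main() {
    // time.ParseDuration accepts values like "300ms", "10s", or "1m30s".
    d, err := time.ParseDuration("10s")
    if err != nil {
        panic(err)
    }
    fmt.Println(d) // 10s

    // Malformed intervals surface as errors for the caller to handle.
    _, err = time.ParseDuration("ten seconds")
    fmt.Println(err != nil) // true
}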
13,410 · stripe/veneur · proxy.go · RefreshDestinations · go
func (p *Proxy) RefreshDestinations(serviceName string, ring *consistent.Consistent, mtx *sync.Mutex) {
    samples := &ssf.Samples{}
    defer metrics.Report(p.TraceClient, samples)
    srvTags := map[string]string{"service": serviceName}

    start := time.Now()
    destinations, err := p.Discoverer.GetDestinationsForService(serviceName)
    samples.Add(ssf.Timing("discoverer.update_duration_ns", time.Since(start), time.Nanosecond, srvTags))
    log.WithFields(logrus.Fields{
        "destinations": destinations,
        "service":      serviceName,
    }).Debug("Got destinations")

    if err != nil || len(destinations) == 0 {
        log.WithError(err).WithFields(logrus.Fields{
            "service":         serviceName,
            "errorType":       reflect.TypeOf(err),
            "numDestinations": len(destinations),
        }).Error("Discoverer found zero destinations and/or returned an error. Destinations may be stale!")
        samples.Add(ssf.Count("discoverer.errors", 1, srvTags))
        // Return since we got no hosts. We don't want to zero out the list. This
        // should result in us leaving the "last good" values in the ring.
        return
    }

    mtx.Lock()
    ring.Set(destinations)
    mtx.Unlock()
    samples.Add(ssf.Gauge("discoverer.destination_number", float32(len(destinations)), srvTags))
}
// RefreshDestinations updates the server's list of valid destinations
// for flushing. This should be called periodically to ensure we have
// the latest data.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/proxy.go#L484-L514
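The early return in RefreshDestinations is the interesting part: on an error or an empty result, the ring is left untouched, so traffic keeps flowing to the last-known-good destinations. A sketch of that policy, with a plain slice standing in for the consistent-hash ring:

package main

import (
    "errors"
    "fmt"
    "sync"
)

type ring struct {
    mu    sync.Mutex
    hosts []string
}

// refresh replaces the host list only when discovery returns something
// usable; on error or an empty result it leaves the last-good list alone.
func (r *ring) refresh(discover func() ([]string, error)) {
    hosts, err := discover()
    if err != nil || len(hosts) == 0 {
        // Keep serving the stale-but-working destinations.
        return
    }
    r.mu.Lock()
    r.hosts = hosts
    r.mu.Unlock()
}

func main() {
    r := &ring{hosts: []string{"10.0.0.1:8080"}}
    r.refresh(func() ([]string, error) { return nil, errors.New("discovery down") })
    fmt.Println(r.hosts) // [10.0.0.1:8080] (unchanged)
    r.refresh(func() ([]string, error) { return []string{"10.0.0.2:8080"}, nil })
    fmt.Println(r.hosts) // [10.0.0.2:8080]
}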
13,411 · stripe/veneur · proxy.go · ProxyMetrics · go
func (p *Proxy) ProxyMetrics(ctx context.Context, jsonMetrics []samplers.JSONMetric, origin string) {
    span, _ := trace.StartSpanFromContext(ctx, "veneur.opentracing.proxy.proxy_metrics")
    defer span.ClientFinish(p.TraceClient)

    if p.ForwardTimeout > 0 {
        var cancel func()
        ctx, cancel = context.WithTimeout(ctx, p.ForwardTimeout)
        defer cancel()
    }
    metricCount := len(jsonMetrics)
    span.Add(ssf.RandomlySample(0.1,
        ssf.Count("import.metrics_total", float32(metricCount), map[string]string{
            "remote_addr":      origin,
            "veneurglobalonly": "",
        }),
    )...)

    jsonMetricsByDestination := make(map[string][]samplers.JSONMetric)
    for _, h := range p.ForwardDestinations.Members() {
        jsonMetricsByDestination[h] = make([]samplers.JSONMetric, 0)
    }

    for _, jm := range jsonMetrics {
        dest, _ := p.ForwardDestinations.Get(jm.MetricKey.String())
        jsonMetricsByDestination[dest] = append(jsonMetricsByDestination[dest], jm)
    }

    // nb The response has already been returned at this point, because we
    wg := sync.WaitGroup{}
    wg.Add(len(jsonMetricsByDestination)) // Make our waitgroup the size of our destinations

    for dest, batch := range jsonMetricsByDestination {
        go p.doPost(ctx, &wg, dest, batch)
    }
    wg.Wait() // Wait for all the above goroutines to complete
    log.WithField("count", metricCount).Debug("Completed forward")

    span.Add(ssf.RandomlySample(0.1,
        ssf.Timing("proxy.duration_ns", time.Since(span.Start), time.Nanosecond, nil),
        ssf.Count("proxy.proxied_metrics_total", float32(len(jsonMetrics)), nil),
    )...)
}
// ProxyMetrics takes a slice of JSONMetrics and breaks them up into
// multiple HTTP requests by MetricKey using the hash ring.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/proxy.go#L580-L621
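ProxyMetrics partitions metrics by hashing each MetricKey onto a destination, then posts every batch concurrently and waits on a WaitGroup sized to the number of batches. The sketch below uses plain FNV-modulo hashing where the real proxy uses a consistent-hash ring, and prints instead of POSTing:

package main

import (
    "fmt"
    "hash/fnv"
    "sync"
)

// pick deterministically maps a key onto one of the destinations.
func pick(key string, dests []string) string {
    h := fnv.New32a()
    h.Write([]byte(key))
    return dests[h.Sum32()%uint32(len(dests))]
}

func main() {
    dests := []string{"global-1:8127", "global-2:8127"}
    keys := []string{"api.latency", "db.errors", "cache.hits"}

    // Partition keys by destination...
    batches := make(map[string][]string)
    for _, k := range keys {
        d := pick(k, dests)
        batches[d] = append(batches[d], k)
    }

    // ...then post every batch concurrently and wait for all of them.
    var wg sync.WaitGroup
    wg.Add(len(batches))
    for dest, batch := range batches {
        go func(dest string, batch []string) {
            defer wg.Done()
            fmt.Printf("would POST %d metrics to %s\n", len(batch), dest)
        }(dest, batch)
    }
    wg.Wait()
}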
13,412 · stripe/veneur · http.go · ImportMetrics · go
func (s *Server) ImportMetrics(ctx context.Context, jsonMetrics []samplers.JSONMetric) {
    span, _ := trace.StartSpanFromContext(ctx, "veneur.opentracing.import.import_metrics")
    defer span.Finish()

    // we have a slice of json metrics that we need to divide up across the workers
    // we don't want to push one metric at a time (too much channel contention
    // and goroutine switching) and we also don't want to allocate a temp
    // slice for each worker (which we'll have to append to, therefore lots
    // of allocations)
    // instead, we'll compute the fnv hash of every metric in the array,
    // and sort the array by the hashes
    sortedIter := newJSONMetricsByWorker(jsonMetrics, len(s.Workers))
    for sortedIter.Next() {
        nextChunk, workerIndex := sortedIter.Chunk()
        s.Workers[workerIndex].ImportChan <- nextChunk
    }
    metrics.ReportOne(s.TraceClient, ssf.Timing("import.response_duration_ns",
        time.Since(span.Start), time.Nanosecond, map[string]string{"part": "merge"}))
}
// ImportMetrics feeds a slice of json metrics to the server's workers
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/http.go#L54-L71
13,413 · stripe/veneur · http.go · newJSONMetricsByWorker · go
func newJSONMetricsByWorker(metrics []samplers.JSONMetric, numWorkers int) *jsonMetricsByWorker {
    ret := &jsonMetricsByWorker{
        sjm: newSortableJSONMetrics(metrics, numWorkers),
    }
    sort.Sort(ret.sjm)
    return ret
}
// iterate over a sorted set of jsonmetrics, returning them in contiguous
// nonempty chunks such that each chunk corresponds to a single worker.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/http.go#L115-L121
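The comment block in ImportMetrics (row 13,412) explains the design that newJSONMetricsByWorker implements: hash every metric to a worker index, sort once by that index, then walk the sorted slice in contiguous chunks, avoiding both per-metric channel sends and per-worker temporary slices. A self-contained sketch of the hash-sort-chunk sequence:

package main

import (
    "fmt"
    "hash/fnv"
    "sort"
)

// workerIndex maps a metric name onto one of numWorkers via an FNV hash.
func workerIndex(name string, numWorkers int) int {
    h := fnv.New32a()
    h.Write([]byte(name))
    return int(h.Sum32() % uint32(numWorkers))
}

func main() {
    metrics := []string{"a.latency", "b.errors", "c.hits", "d.timers", "e.gauges"}
    numWorkers := 3

    // Sort once by destination worker so each worker's metrics are contiguous.
    sort.Slice(metrics, func(i, j int) bool {
        return workerIndex(metrics[i], numWorkers) < workerIndex(metrics[j], numWorkers)
    })

    // Walk the sorted slice, emitting one chunk per worker without
    // allocating any per-worker slices.
    for start := 0; start < len(metrics); {
        w := workerIndex(metrics[start], numWorkers)
        end := start
        for end < len(metrics) && workerIndex(metrics[end], numWorkers) == w {
            end++
        }
        fmt.Printf("worker %d gets %v\n", w, metrics[start:end])
        start = end
    }
}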
13,414 · stripe/veneur · sinks/splunk/splunk.go · submitter · go
func (sss *splunkSpanSink) submitter(sync chan struct{}, ready chan struct{}) {
    ctx := context.Background()
    for {
        exit := sss.submitBatch(ctx, sync, ready)
        if exit {
            return
        }
    }
}
// submitter runs for the lifetime of the sink and performs batch-wise
// submission to the HEC sink.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/splunk/splunk.go#L182-L190
13,415 · stripe/veneur · sinks/splunk/splunk.go · setupHTTPRequest · go
func (sss *splunkSpanSink) setupHTTPRequest(ctx context.Context) (context.CancelFunc, *hecRequest, io.Writer, error) {
    ctx, cancel := context.WithCancel(ctx)
    hecReq := sss.hec.newRequest()
    req, w, err := hecReq.Start(ctx)
    if err != nil {
        cancel()
        return nil, nil, nil, err
    }

    // At this point, we have a workable HTTP connection;
    // open it in the background:
    go sss.makeHTTPRequest(req, cancel)
    return cancel, hecReq, w, nil
}
// setupHTTPRequest sets up and kicks off an HTTP request. It returns
// the elements of it that are necessary in sending a single batch to
// the HEC.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/splunk/splunk.go#L206-L219
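setupHTTPRequest shows the cancel-on-error convention for context.WithCancel: if setup fails, the function must call cancel itself to release the context; if it succeeds, ownership of cancel passes to the caller. A minimal sketch of that hand-off (the start function and its fail flag are invented for illustration):

package main

import (
    "context"
    "fmt"
    "time"
)

// start sets up a cancellable unit of background work. On setup failure it
// must call cancel itself to release the context's resources; on success it
// hands cancel back to the caller.
func start(ctx context.Context, fail bool) (context.CancelFunc, error) {
    ctx, cancel := context.WithCancel(ctx)
    if fail {
        cancel() // don't leak the context
        return nil, fmt.Errorf("setup failed")
    }
    go func() {
        <-ctx.Done() // background work runs until cancelled
        fmt.Println("request torn down:", ctx.Err())
    }()
    return cancel, nil
}

func main() {
    cancel, err := start(context.Background(), false)
    if err != nil {
        panic(err)
    }
    cancel()
    time.Sleep(10 * time.Millisecond) // let the goroutine print
}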
13,416 · stripe/veneur · sinks/splunk/splunk.go · submitOneEvent · go
func (sss *splunkSpanSink) submitOneEvent(ctx context.Context, w io.Writer, ev *Event) error {
    if sss.sendTimeout > 0 {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, sss.sendTimeout)
        defer cancel()
    }
    encodeErrors := make(chan error)
    enc := json.NewEncoder(w)
    go func() {
        err := enc.Encode(ev)
        select {
        case encodeErrors <- err:
        case <-ctx.Done():
        }
    }()
    select {
    case <-ctx.Done():
        return ctx.Err()
    case err := <-encodeErrors:
        return err
    }
}
// submitOneEvent takes one event and submits it to an HEC HTTP
// connection. It observes the configured splunk_hec_ingest_timeout -
// if the timeout is exceeded, it returns an error. If the timeout is
// 0, it waits forever to submit the event.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/splunk/splunk.go#L292-L313
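submitOneEvent bounds a potentially blocking Encode by running it in a goroutine and racing the result channel against ctx.Done(). The inner select stops the goroutine from leaking if the deadline wins. A self-contained restatement of that pattern against a deliberately slow writer:

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "time"
)

// encodeWithTimeout bounds a potentially blocking Encode call: the work
// runs in a goroutine, and whichever finishes first (the encode or the
// context deadline) decides the return value.
func encodeWithTimeout(ctx context.Context, w io.Writer, v interface{}, timeout time.Duration) error {
    if timeout > 0 {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }
    errs := make(chan error)
    go func() {
        err := json.NewEncoder(w).Encode(v)
        select {
        case errs <- err:
        case <-ctx.Done(): // nobody is waiting anymore; exit quietly
        }
    }()
    select {
    case <-ctx.Done():
        return ctx.Err()
    case err := <-errs:
        return err
    }
}

type slowWriter struct{}

func (slowWriter) Write(p []byte) (int, error) {
    time.Sleep(time.Second) // simulate a stalled connection
    return len(p), nil
}

func main() {
    err := encodeWithTimeout(context.Background(), slowWriter{}, map[string]string{"k": "v"}, 50*time.Millisecond)
    fmt.Println(err) // context deadline exceeded
}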
13,417 · stripe/veneur · sinks/splunk/splunk.go · Flush · go
func (sss *splunkSpanSink) Flush() {
    // report the sink stats:
    samples := &ssf.Samples{}
    samples.Add(
        ssf.Count(
            sinks.MetricKeyTotalSpansFlushed,
            float32(atomic.SwapUint32(&sss.ingestedSpans, 0)),
            map[string]string{"sink": sss.Name()}),
        ssf.Count(
            sinks.MetricKeyTotalSpansDropped,
            float32(atomic.SwapUint32(&sss.droppedSpans, 0)),
            map[string]string{"sink": sss.Name()},
        ),
        ssf.Count(
            sinks.MetricKeyTotalSpansSkipped,
            float32(atomic.SwapUint32(&sss.skippedSpans, 0)),
            map[string]string{"sink": sss.Name()},
        ),
    )

    metrics.Report(sss.traceClient, samples)
}
// Flush takes the batched-up events and sends them to the HEC
// endpoint for ingestion. If set, it uses the send timeout configured
// for the span batch.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/splunk/splunk.go#L407-L428
13,418 · stripe/veneur · sinks/lightstep/lightstep.go · NewLightStepSpanSink · go
func NewLightStepSpanSink(collector string, reconnectPeriod string, maximumSpans int, numClients int, accessToken string, log *logrus.Logger) (*LightStepSpanSink, error) {
    var host *url.URL
    host, err := url.Parse(collector)
    if err != nil {
        log.WithError(err).WithField(
            "host", collector,
        ).Error("Error parsing LightStep collector URL")
        return &LightStepSpanSink{}, err
    }

    port, err := strconv.Atoi(host.Port())
    if err != nil {
        log.WithError(err).WithFields(logrus.Fields{
            "port":         port,
            "default_port": lightstepDefaultPort,
        }).Warn("Error parsing LightStep port, using default")
        port = lightstepDefaultPort
    }

    reconPeriod := lightstepDefaultInterval
    if reconnectPeriod != "" {
        reconPeriod, err = time.ParseDuration(reconnectPeriod)
        if err != nil {
            log.WithError(err).WithFields(logrus.Fields{
                "interval":         reconnectPeriod,
                "default_interval": lightstepDefaultInterval,
            }).Warn("Failed to parse reconnect duration, using default.")
            reconPeriod = lightstepDefaultInterval
        }
    }

    log.WithFields(logrus.Fields{
        "Host": host.Hostname(),
        "Port": port,
    }).Info("Dialing lightstep host")

    lightstepMultiplexTracerNum := numClients
    // If config value is missing, this value should default to one client
    if lightstepMultiplexTracerNum <= 0 {
        lightstepMultiplexTracerNum = 1
    }

    tracers := make([]opentracing.Tracer, 0, lightstepMultiplexTracerNum)

    plaintext := false
    if host.Scheme == "http" {
        plaintext = true
    }

    for i := 0; i < lightstepMultiplexTracerNum; i++ {
        tracers = append(tracers, lightstep.NewTracer(lightstep.Options{
            AccessToken:     accessToken,
            ReconnectPeriod: reconPeriod,
            Collector: lightstep.Endpoint{
                Host:      host.Hostname(),
                Port:      port,
                Plaintext: plaintext,
            },
            UseGRPC:          true,
            MaxBufferedSpans: maximumSpans,
        }))
    }

    return &LightStepSpanSink{
        tracers:      tracers,
        serviceCount: sync.Map{},
        mutex:        &sync.Mutex{},
        log:          log,
    }, nil
}
// NewLightStepSpanSink creates a new instance of a LightStepSpanSink.
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/lightstep/lightstep.go#L42-L111
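A hedged construction sketch for the sink above. Everything in it is illustrative: the collector URL, access token, and tuning values are placeholders, and the import path assumes the sink lives in this repository's sinks/lightstep package. An "http" scheme on the collector URL selects plaintext transport, and a non-positive numClients falls back to a single tracer client.

package main

import (
	"github.com/sirupsen/logrus"
	"github.com/stripe/veneur/sinks/lightstep"
)

func main() {
	log := logrus.New()
	sink, err := lightstep.NewLightStepSpanSink(
		"https://collector.lightstep.example:8080", // placeholder collector URL
		"5m",    // reconnect period; unparseable values fall back to the default interval
		10000,   // maximum buffered spans per tracer
		4,       // number of multiplexed tracer clients; <= 0 defaults to 1
		"example-access-token", // placeholder token
		log,
	)
	if err != nil {
		log.WithError(err).Fatal("could not construct LightStep span sink")
	}
	_ = sink
}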
13,419
stripe/veneur
sinks/lightstep/lightstep.go
Ingest
func (ls *LightStepSpanSink) Ingest(ssfSpan *ssf.SSFSpan) error { if err := protocol.ValidateTrace(ssfSpan); err != nil { return err } parentID := ssfSpan.ParentId if parentID <= 0 { parentID = 0 } var errorCode int64 if ssfSpan.Error { errorCode = 1 } timestamp := time.Unix(ssfSpan.StartTimestamp/1e9, ssfSpan.StartTimestamp%1e9) if len(ls.tracers) == 0 { err := fmt.Errorf("No lightstep tracer clients initialized") ls.log.Error(err) return err } // pick the tracer to use tracerIndex := ssfSpan.TraceId % int64(len(ls.tracers)) tracer := ls.tracers[tracerIndex] sp := tracer.StartSpan( ssfSpan.Name, opentracing.StartTime(timestamp), lightstep.SetTraceID(uint64(ssfSpan.TraceId)), lightstep.SetSpanID(uint64(ssfSpan.Id)), lightstep.SetParentSpanID(uint64(parentID))) sp.SetTag(trace.ResourceKey, ssfSpan.Tags[trace.ResourceKey]) // TODO Why is this here? sp.SetTag(lightstep.ComponentNameKey, ssfSpan.Service) sp.SetTag(indicatorSpanTagName, strconv.FormatBool(ssfSpan.Indicator)) // TODO don't hardcode sp.SetTag("type", "http") sp.SetTag("error-code", errorCode) for k, v := range ssfSpan.Tags { sp.SetTag(k, v) } // TODO add metrics as tags to the span as well? if errorCode > 0 { // Note: this sets the OT-standard "error" tag, which // LightStep uses to flag error spans. ext.Error.Set(sp, true) } endTime := time.Unix(ssfSpan.EndTimestamp/1e9, ssfSpan.EndTimestamp%1e9) finishOpts := opentracing.FinishOptions{FinishTime: endTime} sp.FinishWithOptions(finishOpts) service := ssfSpan.Service if service == "" { service = "unknown" } count, ok := ls.serviceCount.Load(service) if !ok { // ensure the value is in the map // we only do this if the value was not found in the map once already, to save an // allocation and more expensive operation in the typical case var c int64 = 0 count, _ = ls.serviceCount.LoadOrStore(service, &c) } c, ok := count.(*int64) if !ok { ls.log.WithField("type", reflect.TypeOf(count)).Debug(unexpectedCountTypeErr.Error()) return unexpectedCountTypeErr } atomic.AddInt64(c, 1) return nil }
go
func (ls *LightStepSpanSink) Ingest(ssfSpan *ssf.SSFSpan) error { if err := protocol.ValidateTrace(ssfSpan); err != nil { return err } parentID := ssfSpan.ParentId if parentID <= 0 { parentID = 0 } var errorCode int64 if ssfSpan.Error { errorCode = 1 } timestamp := time.Unix(ssfSpan.StartTimestamp/1e9, ssfSpan.StartTimestamp%1e9) if len(ls.tracers) == 0 { err := fmt.Errorf("No lightstep tracer clients initialized") ls.log.Error(err) return err } // pick the tracer to use tracerIndex := ssfSpan.TraceId % int64(len(ls.tracers)) tracer := ls.tracers[tracerIndex] sp := tracer.StartSpan( ssfSpan.Name, opentracing.StartTime(timestamp), lightstep.SetTraceID(uint64(ssfSpan.TraceId)), lightstep.SetSpanID(uint64(ssfSpan.Id)), lightstep.SetParentSpanID(uint64(parentID))) sp.SetTag(trace.ResourceKey, ssfSpan.Tags[trace.ResourceKey]) // TODO Why is this here? sp.SetTag(lightstep.ComponentNameKey, ssfSpan.Service) sp.SetTag(indicatorSpanTagName, strconv.FormatBool(ssfSpan.Indicator)) // TODO don't hardcode sp.SetTag("type", "http") sp.SetTag("error-code", errorCode) for k, v := range ssfSpan.Tags { sp.SetTag(k, v) } // TODO add metrics as tags to the span as well? if errorCode > 0 { // Note: this sets the OT-standard "error" tag, which // LightStep uses to flag error spans. ext.Error.Set(sp, true) } endTime := time.Unix(ssfSpan.EndTimestamp/1e9, ssfSpan.EndTimestamp%1e9) finishOpts := opentracing.FinishOptions{FinishTime: endTime} sp.FinishWithOptions(finishOpts) service := ssfSpan.Service if service == "" { service = "unknown" } count, ok := ls.serviceCount.Load(service) if !ok { // ensure the value is in the map // we only do this if the value was not found in the map once already, to save an // allocation and more expensive operation in the typical case var c int64 = 0 count, _ = ls.serviceCount.LoadOrStore(service, &c) } c, ok := count.(*int64) if !ok { ls.log.WithField("type", reflect.TypeOf(count)).Debug(unexpectedCountTypeErr.Error()) return unexpectedCountTypeErr } atomic.AddInt64(c, 1) return nil }
[ "func", "(", "ls", "*", "LightStepSpanSink", ")", "Ingest", "(", "ssfSpan", "*", "ssf", ".", "SSFSpan", ")", "error", "{", "if", "err", ":=", "protocol", ".", "ValidateTrace", "(", "ssfSpan", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n\n", "parentID", ":=", "ssfSpan", ".", "ParentId", "\n", "if", "parentID", "<=", "0", "{", "parentID", "=", "0", "\n", "}", "\n\n", "var", "errorCode", "int64", "\n", "if", "ssfSpan", ".", "Error", "{", "errorCode", "=", "1", "\n", "}", "\n\n", "timestamp", ":=", "time", ".", "Unix", "(", "ssfSpan", ".", "StartTimestamp", "/", "1e9", ",", "ssfSpan", ".", "StartTimestamp", "%", "1e9", ")", "\n\n", "if", "len", "(", "ls", ".", "tracers", ")", "==", "0", "{", "err", ":=", "fmt", ".", "Errorf", "(", "\"", "\"", ")", "\n", "ls", ".", "log", ".", "Error", "(", "err", ")", "\n", "return", "err", "\n", "}", "\n", "// pick the tracer to use", "tracerIndex", ":=", "ssfSpan", ".", "TraceId", "%", "int64", "(", "len", "(", "ls", ".", "tracers", ")", ")", "\n", "tracer", ":=", "ls", ".", "tracers", "[", "tracerIndex", "]", "\n\n", "sp", ":=", "tracer", ".", "StartSpan", "(", "ssfSpan", ".", "Name", ",", "opentracing", ".", "StartTime", "(", "timestamp", ")", ",", "lightstep", ".", "SetTraceID", "(", "uint64", "(", "ssfSpan", ".", "TraceId", ")", ")", ",", "lightstep", ".", "SetSpanID", "(", "uint64", "(", "ssfSpan", ".", "Id", ")", ")", ",", "lightstep", ".", "SetParentSpanID", "(", "uint64", "(", "parentID", ")", ")", ")", "\n\n", "sp", ".", "SetTag", "(", "trace", ".", "ResourceKey", ",", "ssfSpan", ".", "Tags", "[", "trace", ".", "ResourceKey", "]", ")", "// TODO Why is this here?", "\n", "sp", ".", "SetTag", "(", "lightstep", ".", "ComponentNameKey", ",", "ssfSpan", ".", "Service", ")", "\n", "sp", ".", "SetTag", "(", "indicatorSpanTagName", ",", "strconv", ".", "FormatBool", "(", "ssfSpan", ".", "Indicator", ")", ")", "\n", "// TODO don't hardcode", "sp", ".", "SetTag", "(", "\"", "\"", ",", "\"", "\"", ")", "\n", "sp", ".", "SetTag", "(", "\"", "\"", ",", "errorCode", ")", "\n", "for", "k", ",", "v", ":=", "range", "ssfSpan", ".", "Tags", "{", "sp", ".", "SetTag", "(", "k", ",", "v", ")", "\n", "}", "\n", "// TODO add metrics as tags to the span as well?", "if", "errorCode", ">", "0", "{", "// Note: this sets the OT-standard \"error\" tag, which", "// LightStep uses to flag error spans.", "ext", ".", "Error", ".", "Set", "(", "sp", ",", "true", ")", "\n", "}", "\n\n", "endTime", ":=", "time", ".", "Unix", "(", "ssfSpan", ".", "EndTimestamp", "/", "1e9", ",", "ssfSpan", ".", "EndTimestamp", "%", "1e9", ")", "\n", "finishOpts", ":=", "opentracing", ".", "FinishOptions", "{", "FinishTime", ":", "endTime", "}", "\n", "sp", ".", "FinishWithOptions", "(", "finishOpts", ")", "\n\n", "service", ":=", "ssfSpan", ".", "Service", "\n", "if", "service", "==", "\"", "\"", "{", "service", "=", "\"", "\"", "\n", "}", "\n\n", "count", ",", "ok", ":=", "ls", ".", "serviceCount", ".", "Load", "(", "service", ")", "\n", "if", "!", "ok", "{", "// ensure the value is in the map", "// we only do this if the value was not found in the map once already, to save an", "// allocation and more expensive operation in the typical case", "var", "c", "int64", "=", "0", "\n", "count", ",", "_", "=", "ls", ".", "serviceCount", ".", "LoadOrStore", "(", "service", ",", "&", "c", ")", "\n", "}", "\n\n", "c", ",", "ok", ":=", "count", ".", "(", "*", "int64", ")", "\n", "if", "!", "ok", "{", "ls", ".", "log", ".", "WithField", "(", "\"", "\"", ",", "reflect", ".", "TypeOf", "(", 
"count", ")", ")", ".", "Debug", "(", "unexpectedCountTypeErr", ".", "Error", "(", ")", ")", "\n", "return", "unexpectedCountTypeErr", "\n", "}", "\n", "atomic", ".", "AddInt64", "(", "c", ",", "1", ")", "\n", "return", "nil", "\n", "}" ]
// Ingest takes in a span and passes it along to the LS client after // some sanity checks and improvements are made.
[ "Ingest", "takes", "in", "a", "span", "and", "passes", "it", "along", "to", "the", "LS", "client", "after", "some", "sanity", "checks", "and", "improvements", "are", "made", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/lightstep/lightstep.go#L126-L201
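A sketch of feeding one span into the sink above. The field values are invented, and the exact set of fields required is inferred from the call to protocol.ValidateTrace rather than confirmed here; treat this as illustrative only. It assumes `sink` was built as in the earlier sketch and that the ssf and time packages are imported.

func ingestExample(sink *lightstep.LightStepSpanSink, log *logrus.Logger) {
	span := &ssf.SSFSpan{
		Id:             4242, // placeholder span ID
		TraceId:        1337, // selects tracers[TraceId % len(tracers)]
		Name:           "http.request",
		Service:        "example-service",
		StartTimestamp: time.Now().Add(-50 * time.Millisecond).UnixNano(),
		EndTimestamp:   time.Now().UnixNano(),
		Tags:           map[string]string{"resource": "GET /health"},
	}
	if err := sink.Ingest(span); err != nil {
		log.WithError(err).Warn("span failed validation or ingestion")
	}
}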
13,420
stripe/veneur
sinks/lightstep/lightstep.go
Flush
func (ls *LightStepSpanSink) Flush() { ls.mutex.Lock() defer ls.mutex.Unlock() samples := &ssf.Samples{} defer metrics.Report(ls.traceClient, samples) totalCount := int64(0) ls.serviceCount.Range(func(keyI, valueI interface{}) bool { service, ok := keyI.(string) if !ok { ls.log.WithFields(logrus.Fields{ "key": keyI, "type": reflect.TypeOf(keyI), }).Error("Invalid key type in map when flushing Lightstep client") return true } value, ok := valueI.(*int64) if !ok { ls.log.WithFields(logrus.Fields{ "value": valueI, "type": reflect.TypeOf(valueI), }).Error("Invalid value type in map when flushing Lightstep client") return true } count := atomic.SwapInt64(value, 0) totalCount += count samples.Add(ssf.Count(sinks.MetricKeyTotalSpansFlushed, float32(count), map[string]string{"sink": ls.Name(), "service": service})) return true }) ls.log.WithField("total_spans", totalCount).Debug("Checkpointing flushed spans for Lightstep") }
go
func (ls *LightStepSpanSink) Flush() { ls.mutex.Lock() defer ls.mutex.Unlock() samples := &ssf.Samples{} defer metrics.Report(ls.traceClient, samples) totalCount := int64(0) ls.serviceCount.Range(func(keyI, valueI interface{}) bool { service, ok := keyI.(string) if !ok { ls.log.WithFields(logrus.Fields{ "key": keyI, "type": reflect.TypeOf(keyI), }).Error("Invalid key type in map when flushing Lightstep client") return true } value, ok := valueI.(*int64) if !ok { ls.log.WithFields(logrus.Fields{ "value": valueI, "type": reflect.TypeOf(valueI), }).Error("Invalid value type in map when flushing Lightstep client") return true } count := atomic.SwapInt64(value, 0) totalCount += count samples.Add(ssf.Count(sinks.MetricKeyTotalSpansFlushed, float32(count), map[string]string{"sink": ls.Name(), "service": service})) return true }) ls.log.WithField("total_spans", totalCount).Debug("Checkpointing flushed spans for Lightstep") }
[ "func", "(", "ls", "*", "LightStepSpanSink", ")", "Flush", "(", ")", "{", "ls", ".", "mutex", ".", "Lock", "(", ")", "\n", "defer", "ls", ".", "mutex", ".", "Unlock", "(", ")", "\n\n", "samples", ":=", "&", "ssf", ".", "Samples", "{", "}", "\n", "defer", "metrics", ".", "Report", "(", "ls", ".", "traceClient", ",", "samples", ")", "\n\n", "totalCount", ":=", "int64", "(", "0", ")", "\n\n", "ls", ".", "serviceCount", ".", "Range", "(", "func", "(", "keyI", ",", "valueI", "interface", "{", "}", ")", "bool", "{", "service", ",", "ok", ":=", "keyI", ".", "(", "string", ")", "\n", "if", "!", "ok", "{", "ls", ".", "log", ".", "WithFields", "(", "logrus", ".", "Fields", "{", "\"", "\"", ":", "keyI", ",", "\"", "\"", ":", "reflect", ".", "TypeOf", "(", "keyI", ")", ",", "}", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "return", "true", "\n", "}", "\n\n", "value", ",", "ok", ":=", "valueI", ".", "(", "*", "int64", ")", "\n", "if", "!", "ok", "{", "ls", ".", "log", ".", "WithFields", "(", "logrus", ".", "Fields", "{", "\"", "\"", ":", "valueI", ",", "\"", "\"", ":", "reflect", ".", "TypeOf", "(", "valueI", ")", ",", "}", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "return", "true", "\n", "}", "\n\n", "count", ":=", "atomic", ".", "SwapInt64", "(", "value", ",", "0", ")", "\n", "totalCount", "+=", "count", "\n", "samples", ".", "Add", "(", "ssf", ".", "Count", "(", "sinks", ".", "MetricKeyTotalSpansFlushed", ",", "float32", "(", "count", ")", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "ls", ".", "Name", "(", ")", ",", "\"", "\"", ":", "service", "}", ")", ")", "\n\n", "return", "true", "\n", "}", ")", "\n\n", "ls", ".", "log", ".", "WithField", "(", "\"", "\"", ",", "totalCount", ")", ".", "Debug", "(", "\"", "\"", ")", "\n", "}" ]
// Flush doesn't need to do anything to the LS tracer, so we emit metrics // instead.
[ "Flush", "doesn", "t", "need", "to", "do", "anything", "to", "the", "LS", "tracer", "so", "we", "emit", "metrics", "instead", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/lightstep/lightstep.go#L205-L242
13,421
stripe/veneur
consul.go
NewConsul
func NewConsul(config *api.Config) (*Consul, error) { consulClient, err := api.NewClient(config) if err != nil { return nil, err } return &Consul{ ConsulHealth: consulClient.Health(), }, nil }
go
func NewConsul(config *api.Config) (*Consul, error) { consulClient, err := api.NewClient(config) if err != nil { return nil, err } return &Consul{ ConsulHealth: consulClient.Health(), }, nil }
[ "func", "NewConsul", "(", "config", "*", "api", ".", "Config", ")", "(", "*", "Consul", ",", "error", ")", "{", "consulClient", ",", "err", ":=", "api", ".", "NewClient", "(", "config", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n\n", "return", "&", "Consul", "{", "ConsulHealth", ":", "consulClient", ".", "Health", "(", ")", ",", "}", ",", "nil", "\n", "}" ]
// NewConsul creates a new instance of a Consul Discoverer
[ "NewConsul", "creates", "a", "new", "instance", "of", "a", "Consul", "Discoverer" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/consul.go#L17-L26
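A minimal usage sketch for the constructor above, assuming the standard github.com/hashicorp/consul/api client configuration; the agent address is a placeholder.

func discoverExample() (*Consul, error) {
	cfg := api.DefaultConfig()     // github.com/hashicorp/consul/api
	cfg.Address = "127.0.0.1:8500" // placeholder Consul agent address
	return NewConsul(cfg)
}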
13,422
stripe/veneur
sinks/ssfmetrics/metrics.go
sendMetrics
func (m *metricExtractionSink) sendMetrics(metrics []samplers.UDPMetric) { for _, metric := range metrics { m.workers[metric.Digest%uint32(len(m.workers))].IngestUDP(metric) } }
go
func (m *metricExtractionSink) sendMetrics(metrics []samplers.UDPMetric) { for _, metric := range metrics { m.workers[metric.Digest%uint32(len(m.workers))].IngestUDP(metric) } }
[ "func", "(", "m", "*", "metricExtractionSink", ")", "sendMetrics", "(", "metrics", "[", "]", "samplers", ".", "UDPMetric", ")", "{", "for", "_", ",", "metric", ":=", "range", "metrics", "{", "m", ".", "workers", "[", "metric", ".", "Digest", "%", "uint32", "(", "len", "(", "m", ".", "workers", ")", ")", "]", ".", "IngestUDP", "(", "metric", ")", "\n", "}", "\n", "}" ]
// sendMetrics enqueues the metrics into the worker channels
[ "sendMetrics", "enqueues", "the", "metrics", "into", "the", "worker", "channels" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/ssfmetrics/metrics.go#L65-L69
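The modulo on metric.Digest is what pins a given metric name/tag combination to a single worker, so the same metric is never sampled concurrently from two goroutines. A toy illustration of the same routing rule, with invented digests:

func shardExample() {
	numWorkers := uint32(4)
	digests := []uint32{7, 12, 7} // two updates to the same metric share a digest
	for _, d := range digests {
		fmt.Printf("digest %d -> worker %d\n", d, d%numWorkers) // 7 maps to worker 3 both times
	}
}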
13,423
stripe/veneur
sinks/ssfmetrics/metrics.go
Ingest
func (m *metricExtractionSink) Ingest(span *ssf.SSFSpan) error { var metricsCount int defer func() { atomic.AddInt64(&m.metricsGenerated, int64(metricsCount)) atomic.AddInt64(&m.spansProcessed, 1) }() metrics, err := samplers.ConvertMetrics(span) if err != nil { if _, ok := err.(samplers.InvalidMetrics); ok { m.log.WithError(err). Warn("Could not parse metrics from SSF Message") m.SendSample(ssf.Count("ssf.error_total", 1, map[string]string{ "packet_type": "ssf_metric", "step": "extract_metrics", "reason": "invalid_metrics", })) } else { m.log.WithError(err).Error("Unexpected error extracting metrics from SSF Message") m.SendSample(ssf.Count("ssf.error_total", 1, map[string]string{ "packet_type": "ssf_metric", "step": "extract_metrics", "reason": "unexpected_error", "error": err.Error(), })) return err } } metricsCount += len(metrics) m.sendMetrics(metrics) if err := protocol.ValidateTrace(span); err != nil { return err } // If we made it here, we are dealing with a fully-fledged // trace span, not just a mere carrier for Samples: indicatorMetrics, err := samplers.ConvertIndicatorMetrics(span, m.indicatorSpanTimerName, m.objectiveSpanTimerName) if err != nil { m.log.WithError(err). WithField("span_name", span.Name). Warn("Couldn't extract indicator metrics for span") return err } metricsCount += len(indicatorMetrics) spanMetrics, err := samplers.ConvertSpanUniquenessMetrics(span, 0.01) if err != nil { m.log.WithError(err). WithField("span_name", span.Name). Warn("Couldn't extract uniqueness metrics for span") return err } metricsCount += len(spanMetrics) m.sendMetrics(append(indicatorMetrics, spanMetrics...)) return nil }
go
func (m *metricExtractionSink) Ingest(span *ssf.SSFSpan) error { var metricsCount int defer func() { atomic.AddInt64(&m.metricsGenerated, int64(metricsCount)) atomic.AddInt64(&m.spansProcessed, 1) }() metrics, err := samplers.ConvertMetrics(span) if err != nil { if _, ok := err.(samplers.InvalidMetrics); ok { m.log.WithError(err). Warn("Could not parse metrics from SSF Message") m.SendSample(ssf.Count("ssf.error_total", 1, map[string]string{ "packet_type": "ssf_metric", "step": "extract_metrics", "reason": "invalid_metrics", })) } else { m.log.WithError(err).Error("Unexpected error extracting metrics from SSF Message") m.SendSample(ssf.Count("ssf.error_total", 1, map[string]string{ "packet_type": "ssf_metric", "step": "extract_metrics", "reason": "unexpected_error", "error": err.Error(), })) return err } } metricsCount += len(metrics) m.sendMetrics(metrics) if err := protocol.ValidateTrace(span); err != nil { return err } // If we made it here, we are dealing with a fully-fledged // trace span, not just a mere carrier for Samples: indicatorMetrics, err := samplers.ConvertIndicatorMetrics(span, m.indicatorSpanTimerName, m.objectiveSpanTimerName) if err != nil { m.log.WithError(err). WithField("span_name", span.Name). Warn("Couldn't extract indicator metrics for span") return err } metricsCount += len(indicatorMetrics) spanMetrics, err := samplers.ConvertSpanUniquenessMetrics(span, 0.01) if err != nil { m.log.WithError(err). WithField("span_name", span.Name). Warn("Couldn't extract uniqueness metrics for span") return err } metricsCount += len(spanMetrics) m.sendMetrics(append(indicatorMetrics, spanMetrics...)) return nil }
[ "func", "(", "m", "*", "metricExtractionSink", ")", "Ingest", "(", "span", "*", "ssf", ".", "SSFSpan", ")", "error", "{", "var", "metricsCount", "int", "\n", "defer", "func", "(", ")", "{", "atomic", ".", "AddInt64", "(", "&", "m", ".", "metricsGenerated", ",", "int64", "(", "metricsCount", ")", ")", "\n", "atomic", ".", "AddInt64", "(", "&", "m", ".", "spansProcessed", ",", "1", ")", "\n", "}", "(", ")", "\n", "metrics", ",", "err", ":=", "samplers", ".", "ConvertMetrics", "(", "span", ")", "\n", "if", "err", "!=", "nil", "{", "if", "_", ",", "ok", ":=", "err", ".", "(", "samplers", ".", "InvalidMetrics", ")", ";", "ok", "{", "m", ".", "log", ".", "WithError", "(", "err", ")", ".", "Warn", "(", "\"", "\"", ")", "\n", "m", ".", "SendSample", "(", "ssf", ".", "Count", "(", "\"", "\"", ",", "1", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "\"", "\"", ",", "}", ")", ")", "\n", "}", "else", "{", "m", ".", "log", ".", "WithError", "(", "err", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "m", ".", "SendSample", "(", "ssf", ".", "Count", "(", "\"", "\"", ",", "1", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "err", ".", "Error", "(", ")", ",", "}", ")", ")", "\n", "return", "err", "\n", "}", "\n", "}", "\n", "metricsCount", "+=", "len", "(", "metrics", ")", "\n", "m", ".", "sendMetrics", "(", "metrics", ")", "\n\n", "if", "err", ":=", "protocol", ".", "ValidateTrace", "(", "span", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "// If we made it here, we are dealing with a fully-fledged", "// trace span, not just a mere carrier for Samples:", "indicatorMetrics", ",", "err", ":=", "samplers", ".", "ConvertIndicatorMetrics", "(", "span", ",", "m", ".", "indicatorSpanTimerName", ",", "m", ".", "objectiveSpanTimerName", ")", "\n", "if", "err", "!=", "nil", "{", "m", ".", "log", ".", "WithError", "(", "err", ")", ".", "WithField", "(", "\"", "\"", ",", "span", ".", "Name", ")", ".", "Warn", "(", "\"", "\"", ")", "\n", "return", "err", "\n", "}", "\n", "metricsCount", "+=", "len", "(", "indicatorMetrics", ")", "\n\n", "spanMetrics", ",", "err", ":=", "samplers", ".", "ConvertSpanUniquenessMetrics", "(", "span", ",", "0.01", ")", "\n", "if", "err", "!=", "nil", "{", "m", ".", "log", ".", "WithError", "(", "err", ")", ".", "WithField", "(", "\"", "\"", ",", "span", ".", "Name", ")", ".", "Warn", "(", "\"", "\"", ")", "\n", "return", "err", "\n", "}", "\n", "metricsCount", "+=", "len", "(", "spanMetrics", ")", "\n\n", "m", ".", "sendMetrics", "(", "append", "(", "indicatorMetrics", ",", "spanMetrics", "...", ")", ")", "\n", "return", "nil", "\n", "}" ]
// Ingest extracts metrics from an SSF span, and feeds them into the // appropriate metric sinks.
[ "Ingest", "extracts", "metrics", "from", "an", "SSF", "span", "and", "feeds", "them", "into", "the", "appropriate", "metric", "sinks", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/ssfmetrics/metrics.go#L82-L138
13,424
stripe/veneur
socket.go
NewSocket
func NewSocket(addr *net.UDPAddr, recvBuf int, reuseport bool) (net.PacketConn, error) { if reuseport { panic("SO_REUSEPORT not supported on this platform") } serverConn, err := net.ListenUDP("udp", addr) if err != nil { return nil, err } if err := serverConn.SetReadBuffer(recvBuf); err != nil { return nil, err } return serverConn, nil }
go
func NewSocket(addr *net.UDPAddr, recvBuf int, reuseport bool) (net.PacketConn, error) { if reuseport { panic("SO_REUSEPORT not supported on this platform") } serverConn, err := net.ListenUDP("udp", addr) if err != nil { return nil, err } if err := serverConn.SetReadBuffer(recvBuf); err != nil { return nil, err } return serverConn, nil }
[ "func", "NewSocket", "(", "addr", "*", "net", ".", "UDPAddr", ",", "recvBuf", "int", ",", "reuseport", "bool", ")", "(", "net", ".", "PacketConn", ",", "error", ")", "{", "if", "reuseport", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "serverConn", ",", "err", ":=", "net", ".", "ListenUDP", "(", "\"", "\"", ",", "addr", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "if", "err", ":=", "serverConn", ".", "SetReadBuffer", "(", "recvBuf", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "return", "serverConn", ",", "nil", "\n", "}" ]
// NewSocket creates a socket which is intended for use by a single goroutine.
[ "NewSocket", "creates", "a", "socket", "which", "is", "intended", "for", "use", "by", "a", "single", "goroutine", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/socket.go#L10-L22
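A usage sketch; the listen address and buffer size are placeholders. Note that on this platform build, passing reuseport=true panics, so the example keeps it false.

func listenExample() (net.PacketConn, error) {
	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:8126") // placeholder address
	if err != nil {
		return nil, err
	}
	// Ask for a 2 MiB kernel receive buffer to absorb bursts.
	return NewSocket(addr, 2*1024*1024, false)
}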
13,425
stripe/veneur
plugins/localfile/localfile.go
Flush
func (p *Plugin) Flush(ctx context.Context, metrics []samplers.InterMetric) error { f, err := os.OpenFile(p.FilePath, os.O_RDWR|os.O_APPEND|os.O_CREATE, os.ModePerm) defer f.Close() if err != nil { return fmt.Errorf("couldn't open %s for appending: %s", p.FilePath, err) } appendToWriter(f, metrics, p.hostname, p.interval) return nil }
go
func (p *Plugin) Flush(ctx context.Context, metrics []samplers.InterMetric) error { f, err := os.OpenFile(p.FilePath, os.O_RDWR|os.O_APPEND|os.O_CREATE, os.ModePerm) defer f.Close() if err != nil { return fmt.Errorf("couldn't open %s for appending: %s", p.FilePath, err) } appendToWriter(f, metrics, p.hostname, p.interval) return nil }
[ "func", "(", "p", "*", "Plugin", ")", "Flush", "(", "ctx", "context", ".", "Context", ",", "metrics", "[", "]", "samplers", ".", "InterMetric", ")", "error", "{", "f", ",", "err", ":=", "os", ".", "OpenFile", "(", "p", ".", "FilePath", ",", "os", ".", "O_RDWR", "|", "os", ".", "O_APPEND", "|", "os", ".", "O_CREATE", ",", "os", ".", "ModePerm", ")", "\n", "defer", "f", ".", "Close", "(", ")", "\n\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", "Errorf", "(", "\"", "\"", ",", "p", ".", "FilePath", ",", "err", ")", "\n", "}", "\n", "appendToWriter", "(", "f", ",", "metrics", ",", "p", ".", "hostname", ",", "p", ".", "interval", ")", "\n", "return", "nil", "\n", "}" ]
// Flush the metrics from the LocalFilePlugin
[ "Flush", "the", "metrics", "from", "the", "LocalFilePlugin" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/plugins/localfile/localfile.go#L32-L41
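One detail worth flagging in the Flush above: f.Close() is deferred before the error check. Since (*os.File).Close on a nil receiver returns ErrInvalid rather than panicking, this is safe in practice, but the conventional ordering reads more clearly and avoids a pointless deferred call on the error path. A sketch of that ordering for the same body:

f, err := os.OpenFile(p.FilePath, os.O_RDWR|os.O_APPEND|os.O_CREATE, os.ModePerm)
if err != nil {
	return fmt.Errorf("couldn't open %s for appending: %s", p.FilePath, err)
}
defer f.Close() // deferred only once the file is known to be valid
appendToWriter(f, metrics, p.hostname, p.interval)
return nil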
13,426
stripe/veneur
protocol/errors.go
IsFramingError
func IsFramingError(err error) bool { switch err.(type) { case *errFrameVersion: return true case *errFramingIO: return true case *errFrameLength: return true } return false }
go
func IsFramingError(err error) bool { switch err.(type) { case *errFrameVersion: return true case *errFramingIO: return true case *errFrameLength: return true } return false }
[ "func", "IsFramingError", "(", "err", "error", ")", "bool", "{", "switch", "err", ".", "(", "type", ")", "{", "case", "*", "errFrameVersion", ":", "return", "true", "\n", "case", "*", "errFramingIO", ":", "return", "true", "\n", "case", "*", "errFrameLength", ":", "return", "true", "\n", "}", "\n", "return", "false", "\n", "}" ]
// IsFramingError returns true if an error is a wire protocol framing // error. This indicates that the stream can no longer be used for // reading SSF data and should be closed.
[ "IsFramingError", "returns", "true", "if", "an", "error", "is", "a", "wire", "protocol", "framing", "error", ".", "This", "indicates", "that", "the", "stream", "can", "no", "longer", "be", "used", "for", "reading", "SSF", "data", "and", "should", "be", "closed", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/protocol/errors.go#L32-L42
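A sketch of the calling pattern the docstring implies, for a loop reading SSF frames from a stream. The reader and handler names are hypothetical; the point is only the branch on IsFramingError: framing errors poison the stream and force a close, while anything else can be skipped and read past.

func readLoop(conn net.Conn) {
	for {
		span, err := readOneSSF(conn) // hypothetical frame reader, not part of this package
		if err != nil {
			if protocol.IsFramingError(err) {
				conn.Close() // the stream can't be trusted past a framing error
				return
			}
			continue // other errors leave the stream readable
		}
		handle(span) // hypothetical handler
	}
}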
13,427
stripe/veneur
ssf/samples.go
Add
func (s *Samples) Add(sample ...*SSFSample) { if s.Batch == nil { s.Batch = []*SSFSample{} } s.Batch = append(s.Batch, sample...) }
go
func (s *Samples) Add(sample ...*SSFSample) { if s.Batch == nil { s.Batch = []*SSFSample{} } s.Batch = append(s.Batch, sample...) }
[ "func", "(", "s", "*", "Samples", ")", "Add", "(", "sample", "...", "*", "SSFSample", ")", "{", "if", "s", ".", "Batch", "==", "nil", "{", "s", ".", "Batch", "=", "[", "]", "*", "SSFSample", "{", "}", "\n", "}", "\n", "s", ".", "Batch", "=", "append", "(", "s", ".", "Batch", ",", "sample", "...", ")", "\n", "}" ]
// Add appends a sample to the batch of samples.
[ "Add", "appends", "a", "sample", "to", "the", "batch", "of", "samples", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/ssf/samples.go#L28-L33
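A small batching sketch; the metric names are invented, and the reporting call mirrors how the LightStep Flush above uses the type (it assumes a traceClient and the same metrics package are in scope).

samples := &ssf.Samples{}
samples.Add(
	ssf.Count("example.spans.flushed", 12, map[string]string{"sink": "example"}),
	ssf.Gauge("example.queue.depth", 3, nil),
)
metrics.Report(traceClient, samples) // emit the whole batch at once, as Flush does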
13,428
stripe/veneur
ssf/samples.go
Timestamp
func Timestamp(ts time.Time) SampleOption { return func(s *SSFSample) { s.Timestamp = ts.UnixNano() } }
go
func Timestamp(ts time.Time) SampleOption { return func(s *SSFSample) { s.Timestamp = ts.UnixNano() } }
[ "func", "Timestamp", "(", "ts", "time", ".", "Time", ")", "SampleOption", "{", "return", "func", "(", "s", "*", "SSFSample", ")", "{", "s", ".", "Timestamp", "=", "ts", ".", "UnixNano", "(", ")", "\n", "}", "\n", "}" ]
// Timestamp is a functional option for creating an SSFSample. It sets // the timestamp field on the sample to the timestamp passed.
[ "Timestamp", "is", "a", "functional", "option", "for", "creating", "an", "SSFSample", ".", "It", "sets", "the", "timestamp", "field", "on", "the", "sample", "to", "the", "timestamp", "passed", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/ssf/samples.go#L57-L61
13,429
stripe/veneur
ssf/samples.go
TimeUnit
func TimeUnit(resolution time.Duration) SampleOption { return func(s *SSFSample) { if unit, ok := resolutions[resolution]; ok { s.Unit = unit } } }
go
func TimeUnit(resolution time.Duration) SampleOption { return func(s *SSFSample) { if unit, ok := resolutions[resolution]; ok { s.Unit = unit } } }
[ "func", "TimeUnit", "(", "resolution", "time", ".", "Duration", ")", "SampleOption", "{", "return", "func", "(", "s", "*", "SSFSample", ")", "{", "if", "unit", ",", "ok", ":=", "resolutions", "[", "resolution", "]", ";", "ok", "{", "s", ".", "Unit", "=", "unit", "\n", "}", "\n", "}", "\n", "}" ]
// TimeUnit sets the unit on a sample to the given resolution's SI // unit symbol. Valid resolutions are the time duration constants from // Nanosecond through Hour. The non-SI units "minute" and "hour" are // represented by "min" and "h" respectively. // // If a resolution is passed that does not correspond exactly to the // duration constants in package time, this option does not affect the // sample at all.
[ "TimeUnit", "sets", "the", "unit", "on", "a", "sample", "to", "the", "given", "resolution", "s", "SI", "unit", "symbol", ".", "Valid", "resolutions", "are", "the", "time", "duration", "constants", "from", "Nanosecond", "through", "Hour", ".", "The", "non", "-", "SI", "units", "minute", "and", "hour", "are", "represented", "by", "min", "and", "h", "respectively", ".", "If", "a", "resolution", "is", "passed", "that", "does", "not", "correspond", "exactly", "to", "the", "duration", "constants", "in", "package", "time", "this", "option", "does", "not", "affect", "the", "sample", "at", "all", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/ssf/samples.go#L93-L99
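A sketch combining the Timestamp and TimeUnit options from the two records above with the Histogram constructor defined below; the metric name and tags are illustrative.

start := time.Now()
// ... timed work ...
elapsed := time.Since(start)
sample := ssf.Histogram(
	"example.request.duration_ns", // placeholder metric name
	float32(elapsed.Nanoseconds()),
	map[string]string{"route": "/health"}, // placeholder tags
	ssf.TimeUnit(time.Nanosecond), // stamps the SI unit "ns" on the sample
	ssf.Timestamp(start),          // pins the sample to when the work began
)
_ = sample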
13,430
stripe/veneur
ssf/samples.go
Gauge
func Gauge(name string, value float32, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_GAUGE, Name: name, Value: value, Tags: tags, SampleRate: 1.0, }, opts) }
go
func Gauge(name string, value float32, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_GAUGE, Name: name, Value: value, Tags: tags, SampleRate: 1.0, }, opts) }
[ "func", "Gauge", "(", "name", "string", ",", "value", "float32", ",", "tags", "map", "[", "string", "]", "string", ",", "opts", "...", "SampleOption", ")", "*", "SSFSample", "{", "return", "create", "(", "&", "SSFSample", "{", "Metric", ":", "SSFSample_GAUGE", ",", "Name", ":", "name", ",", "Value", ":", "value", ",", "Tags", ":", "tags", ",", "SampleRate", ":", "1.0", ",", "}", ",", "opts", ")", "\n", "}" ]
// Gauge returns an SSFSample representing a gauge at a certain // value. It's a convenience wrapper around constructing SSFSample // objects.
[ "Gauge", "returns", "an", "SSFSample", "representing", "a", "gauge", "at", "a", "certain", "value", ".", "It", "s", "a", "convenience", "wrapper", "around", "constructing", "SSFSample", "objects", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/ssf/samples.go#L153-L161
13,431
stripe/veneur
ssf/samples.go
Histogram
func Histogram(name string, value float32, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_HISTOGRAM, Name: name, Value: value, Tags: tags, SampleRate: 1.0, }, opts) }
go
func Histogram(name string, value float32, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_HISTOGRAM, Name: name, Value: value, Tags: tags, SampleRate: 1.0, }, opts) }
[ "func", "Histogram", "(", "name", "string", ",", "value", "float32", ",", "tags", "map", "[", "string", "]", "string", ",", "opts", "...", "SampleOption", ")", "*", "SSFSample", "{", "return", "create", "(", "&", "SSFSample", "{", "Metric", ":", "SSFSample_HISTOGRAM", ",", "Name", ":", "name", ",", "Value", ":", "value", ",", "Tags", ":", "tags", ",", "SampleRate", ":", "1.0", ",", "}", ",", "opts", ")", "\n", "}" ]
// Histogram returns an SSFSample representing a value on a histogram, // like a timer or other range. It's a convenience wrapper around // constructing SSFSample objects.
[ "Histogram", "returns", "an", "SSFSample", "representing", "a", "value", "on", "a", "histogram", "like", "a", "timer", "or", "other", "range", ".", "It", "s", "a", "convenience", "wrapper", "around", "constructing", "SSFSample", "objects", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/ssf/samples.go#L166-L174
13,432
stripe/veneur
ssf/samples.go
Set
func Set(name string, value string, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_SET, Name: name, Message: value, Tags: tags, SampleRate: 1.0, }, opts) }
go
func Set(name string, value string, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_SET, Name: name, Message: value, Tags: tags, SampleRate: 1.0, }, opts) }
[ "func", "Set", "(", "name", "string", ",", "value", "string", ",", "tags", "map", "[", "string", "]", "string", ",", "opts", "...", "SampleOption", ")", "*", "SSFSample", "{", "return", "create", "(", "&", "SSFSample", "{", "Metric", ":", "SSFSample_SET", ",", "Name", ":", "name", ",", "Message", ":", "value", ",", "Tags", ":", "tags", ",", "SampleRate", ":", "1.0", ",", "}", ",", "opts", ")", "\n", "}" ]
// Set returns an SSFSample representing a value on a set, useful for // counting the unique values that occur in a certain time bound.
[ "Set", "returns", "an", "SSFSample", "representing", "a", "value", "on", "a", "set", "useful", "for", "counting", "the", "unique", "values", "that", "occur", "in", "a", "certain", "time", "bound", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/ssf/samples.go#L178-L186
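Unlike the numeric constructors, Set carries its value in the Message field as a string, which is what makes it suitable for cardinality counting. A sketch with an invented user ID:

sample := ssf.Set("example.users.active", "user-1234", map[string]string{"region": "us-east-1"})
_ = sample // the set counts distinct Message values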
13,433
stripe/veneur
ssf/samples.go
Status
func Status(name string, state SSFSample_Status, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_STATUS, Name: name, Status: state, Tags: tags, SampleRate: 1.0, }, opts) }
go
func Status(name string, state SSFSample_Status, tags map[string]string, opts ...SampleOption) *SSFSample { return create(&SSFSample{ Metric: SSFSample_STATUS, Name: name, Status: state, Tags: tags, SampleRate: 1.0, }, opts) }
[ "func", "Status", "(", "name", "string", ",", "state", "SSFSample_Status", ",", "tags", "map", "[", "string", "]", "string", ",", "opts", "...", "SampleOption", ")", "*", "SSFSample", "{", "return", "create", "(", "&", "SSFSample", "{", "Metric", ":", "SSFSample_STATUS", ",", "Name", ":", "name", ",", "Status", ":", "state", ",", "Tags", ":", "tags", ",", "SampleRate", ":", "1.0", ",", "}", ",", "opts", ")", "\n", "}" ]
// Status returns an SSFSample capturing the reported state // of a service
[ "Status", "returns", "an", "SSFSample", "capturing", "the", "reported", "state", "of", "a", "service" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/ssf/samples.go#L197-L205
13,434
stripe/veneur
sinks/sinks.go
IsAcceptableMetric
func IsAcceptableMetric(metric samplers.InterMetric, sink MetricSink) bool { if metric.Sinks == nil { return true } return metric.Sinks.RouteTo(sink.Name()) }
go
func IsAcceptableMetric(metric samplers.InterMetric, sink MetricSink) bool { if metric.Sinks == nil { return true } return metric.Sinks.RouteTo(sink.Name()) }
[ "func", "IsAcceptableMetric", "(", "metric", "samplers", ".", "InterMetric", ",", "sink", "MetricSink", ")", "bool", "{", "if", "metric", ".", "Sinks", "==", "nil", "{", "return", "true", "\n", "}", "\n", "return", "metric", ".", "Sinks", ".", "RouteTo", "(", "sink", ".", "Name", "(", ")", ")", "\n", "}" ]
// IsAcceptableMetric returns true if a metric is meant to be ingested // by a given sink.
[ "IsAcceptableMetric", "returns", "true", "if", "a", "metric", "is", "meant", "to", "be", "ingested", "by", "a", "given", "sink", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/sinks.go#L51-L56
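A sketch of the filtering loop a sink's Flush would typically run over its input batch; `sink` stands in for any MetricSink implementation.

for _, m := range metrics {
	if !sinks.IsAcceptableMetric(m, sink) {
		continue // the metric asked to be routed to other sinks
	}
	// ... convert and enqueue m for this sink ...
}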
13,435
stripe/veneur
tdigest/analysis/main.go
runOnce
func runOnce(distribution func() float64, compression float64, samples int, distname string, run int, deviations, centroidErrors, errors, sizes *csv.Writer) { td := tdigest.NewMerging(compression, true) allSamples := make([]float64, samples) for i := 0; i < samples; i++ { sample := distribution() td.Add(sample, 1) allSamples[i] = sample } sort.Float64s(allSamples) centroids := td.Centroids() for i, centroid := range centroids { // compute the approximate cdf for this centroid's approximate mean // this is roughly equivalent to the sum of all previous centroids' // weights, plus half this centroid's weight, divided by the total weight // https://github.com/tdunning/t-digest/blob/master/src/test/java/com/tdunning/math/stats/TDigestTest.java#L357 thisCDF := td.CDF(centroid.Mean) // compute the cdf of the centroid's approximate mean, but over the real sample set realCDF := floatCDF(allSamples, centroid.Mean) // find the real sample that matches this centroid's approximate cdf // this should be close to the centroid's real mean realMean := floatQuantile(allSamples, thisCDF) // compute distances to previous and next centroids (ie the range // that this centroid is expected to cover) distanceToPrev := centroid.Mean - td.Min() if i > 0 { distanceToPrev = centroid.Mean - centroids[i-1].Mean } distanceToNext := td.Max() - centroid.Mean if i < len(centroids)-1 { distanceToNext = centroids[i+1].Mean - centroid.Mean } // compute the centroid's real mean using its sample set sampledMean := 0.0 for _, sample := range centroid.Samples { sampledMean += sample // equivalent to deviations.csv from dunning's tests deviations.Write(stringifySlice( distname, run, thisCDF, centroid.Weight, sample, centroid.Mean, distanceToPrev, distanceToNext, // where is this sample, as a proportion of the range covered by its centroid? (sample-centroid.Mean)/(distanceToNext+distanceToPrev), )) } sampledMean /= float64(len(centroid.Samples)) // and compute the CDF corresponding to this value sampledCDF := floatCDF(allSamples, sampledMean) // this csv is equivalent to errors.csv from dunning's tests, but // instead of testing a fixed range of quantiles, we test every centroid centroidErrors.Write(stringifySlice( distname, run, centroid.Mean, realMean, // this column is equivalent to the quantile section sampledMean, thisCDF, realCDF, // this column is equivalent to the cdf section sampledCDF, centroid.Weight, distanceToPrev, distanceToNext, )) // this csv is equivalent to sizes.csv from dunning's tests sizes.Write(stringifySlice( distname, run, i, thisCDF, centroid.Weight, )) } // now we compute errors for a fixed set of quantiles, as with errors.csv // in dunning's tests // we cover a wider range of quantiles just for the sake of completeness for i := 0; i <= 1000; i++ { quantile := float64(i) / 1000.0 // find the real sample for the target quantile realQuantile := floatQuantile(allSamples, quantile) // find the estimated location of the target quantile estimatedQuantile := td.Quantile(quantile) // find the estimated cdf of the real sample estimatedCDF := td.CDF(realQuantile) errors.Write(stringifySlice( distname, run, quantile, estimatedCDF, // this column is equivalent to the cdf section realQuantile, estimatedQuantile, // this column is equivalent to the quantile section )) } }
go
func runOnce(distribution func() float64, compression float64, samples int, distname string, run int, deviations, centroidErrors, errors, sizes *csv.Writer) { td := tdigest.NewMerging(compression, true) allSamples := make([]float64, samples) for i := 0; i < samples; i++ { sample := distribution() td.Add(sample, 1) allSamples[i] = sample } sort.Float64s(allSamples) centroids := td.Centroids() for i, centroid := range centroids { // compute the approximate cdf for this centroid's approximate mean // this is roughly equivalent to the sum of all previous centroids' // weights, plus half this centroid's weight, divided by the total weight // https://github.com/tdunning/t-digest/blob/master/src/test/java/com/tdunning/math/stats/TDigestTest.java#L357 thisCDF := td.CDF(centroid.Mean) // compute the cdf of the centroid's approximate mean, but over the real sample set realCDF := floatCDF(allSamples, centroid.Mean) // find the real sample that matches this centroid's approximate cdf // this should be close to the centroid's real mean realMean := floatQuantile(allSamples, thisCDF) // compute distances to previous and next centroids (ie the range // that this centroid is expected to cover) distanceToPrev := centroid.Mean - td.Min() if i > 0 { distanceToPrev = centroid.Mean - centroids[i-1].Mean } distanceToNext := td.Max() - centroid.Mean if i < len(centroids)-1 { distanceToNext = centroids[i+1].Mean - centroid.Mean } // compute the centroid's real mean using its sample set sampledMean := 0.0 for _, sample := range centroid.Samples { sampledMean += sample // equivalent to deviations.csv from dunning's tests deviations.Write(stringifySlice( distname, run, thisCDF, centroid.Weight, sample, centroid.Mean, distanceToPrev, distanceToNext, // where is this sample, as a proportion of the range covered by its centroid? (sample-centroid.Mean)/(distanceToNext+distanceToPrev), )) } sampledMean /= float64(len(centroid.Samples)) // and compute the CDF corresponding to this value sampledCDF := floatCDF(allSamples, sampledMean) // this csv is equivalent to errors.csv from dunning's tests, but // instead of testing a fixed range of quantiles, we test every centroid centroidErrors.Write(stringifySlice( distname, run, centroid.Mean, realMean, // this column is equivalent to the quantile section sampledMean, thisCDF, realCDF, // this column is equivalent to the cdf section sampledCDF, centroid.Weight, distanceToPrev, distanceToNext, )) // this csv is equivalent to sizes.csv from dunning's tests sizes.Write(stringifySlice( distname, run, i, thisCDF, centroid.Weight, )) } // now we compute errors for a fixed set of quantiles, as with errors.csv // in dunning's tests // we cover a wider range of quantiles just for the sake of completeness for i := 0; i <= 1000; i++ { quantile := float64(i) / 1000.0 // find the real sample for the target quantile realQuantile := floatQuantile(allSamples, quantile) // find the estimated location of the target quantile estimatedQuantile := td.Quantile(quantile) // find the estimated cdf of the real sample estimatedCDF := td.CDF(realQuantile) errors.Write(stringifySlice( distname, run, quantile, estimatedCDF, // this column is equivalent to the cdf section realQuantile, estimatedQuantile, // this column is equivalent to the quantile section )) } }
[ "func", "runOnce", "(", "distribution", "func", "(", ")", "float64", ",", "compression", "float64", ",", "samples", "int", ",", "distname", "string", ",", "run", "int", ",", "deviations", ",", "centroidErrors", ",", "errors", ",", "sizes", "*", "csv", ".", "Writer", ")", "{", "td", ":=", "tdigest", ".", "NewMerging", "(", "compression", ",", "true", ")", "\n\n", "allSamples", ":=", "make", "(", "[", "]", "float64", ",", "samples", ")", "\n", "for", "i", ":=", "0", ";", "i", "<", "samples", ";", "i", "++", "{", "sample", ":=", "distribution", "(", ")", "\n", "td", ".", "Add", "(", "sample", ",", "1", ")", "\n", "allSamples", "[", "i", "]", "=", "sample", "\n", "}", "\n", "sort", ".", "Float64s", "(", "allSamples", ")", "\n\n", "centroids", ":=", "td", ".", "Centroids", "(", ")", "\n", "for", "i", ",", "centroid", ":=", "range", "centroids", "{", "// compute the approximate cdf for this centroid's approximate mean", "// this is roughly equivalent to the sum of all previous centroids'", "// weights, plus half this centroid's weight, divided by the total weight", "// https://github.com/tdunning/t-digest/blob/master/src/test/java/com/tdunning/math/stats/TDigestTest.java#L357", "thisCDF", ":=", "td", ".", "CDF", "(", "centroid", ".", "Mean", ")", "\n\n", "// compute the cdf of the centroid's approximate mean, but over the real sample set", "realCDF", ":=", "floatCDF", "(", "allSamples", ",", "centroid", ".", "Mean", ")", "\n\n", "// find the real sample that matches this centroid's approximate cdf", "// this should be close to the centroid's real mean", "realMean", ":=", "floatQuantile", "(", "allSamples", ",", "thisCDF", ")", "\n\n", "// compute distances to previous and next centroids (ie the range", "// that this centroid is expected to cover)", "distanceToPrev", ":=", "centroid", ".", "Mean", "-", "td", ".", "Min", "(", ")", "\n", "if", "i", ">", "0", "{", "distanceToPrev", "=", "centroid", ".", "Mean", "-", "centroids", "[", "i", "-", "1", "]", ".", "Mean", "\n", "}", "\n", "distanceToNext", ":=", "td", ".", "Max", "(", ")", "-", "centroid", ".", "Mean", "\n", "if", "i", "<", "len", "(", "centroids", ")", "-", "1", "{", "distanceToNext", "=", "centroids", "[", "i", "+", "1", "]", ".", "Mean", "-", "centroid", ".", "Mean", "\n", "}", "\n\n", "// compute the centroid's real mean using its sample set", "sampledMean", ":=", "0.0", "\n", "for", "_", ",", "sample", ":=", "range", "centroid", ".", "Samples", "{", "sampledMean", "+=", "sample", "\n", "// equivalent to deviations.csv from dunning's tests", "deviations", ".", "Write", "(", "stringifySlice", "(", "distname", ",", "run", ",", "thisCDF", ",", "centroid", ".", "Weight", ",", "sample", ",", "centroid", ".", "Mean", ",", "distanceToPrev", ",", "distanceToNext", ",", "// where is this sample, as a proportion of the range covered by its centroid?", "(", "sample", "-", "centroid", ".", "Mean", ")", "/", "(", "distanceToNext", "+", "distanceToPrev", ")", ",", ")", ")", "\n", "}", "\n", "sampledMean", "/=", "float64", "(", "len", "(", "centroid", ".", "Samples", ")", ")", "\n", "// and compute the CDF corresopnding to this value", "sampledCDF", ":=", "floatCDF", "(", "allSamples", ",", "sampledMean", ")", "\n\n", "// this csv is equivalent to errors.csv from dunning's tests, but", "// instead of testing a fixed range of quantiles, we test every centroid", "centroidErrors", ".", "Write", "(", "stringifySlice", "(", "distname", ",", "run", ",", "centroid", ".", "Mean", ",", "realMean", ",", "// this column is equivalent to the quantile 
section", "sampledMean", ",", "thisCDF", ",", "realCDF", ",", "// this column is equivalent to the cdf section", "sampledCDF", ",", "centroid", ".", "Weight", ",", "distanceToPrev", ",", "distanceToNext", ",", ")", ")", "\n\n", "// this csv is equivalent to sizes.csv from dunning's tests", "sizes", ".", "Write", "(", "stringifySlice", "(", "distname", ",", "run", ",", "i", ",", "thisCDF", ",", "centroid", ".", "Weight", ",", ")", ")", "\n", "}", "\n\n", "// now we compute errors for a fixed set of quantiles, as with errors.csv", "// in dunning's tests", "// we cover a wider range of quantiles just for the sake of completeness", "for", "i", ":=", "0", ";", "i", "<=", "1000", ";", "i", "++", "{", "quantile", ":=", "float64", "(", "i", ")", "/", "1000.0", "\n", "// find the real sample for the target quantile", "realQuantile", ":=", "floatQuantile", "(", "allSamples", ",", "quantile", ")", "\n", "// find the estimated location of the target quantile", "estimatedQuantile", ":=", "td", ".", "Quantile", "(", "quantile", ")", "\n", "// find the estimated cdf of the real sample", "estimatedCDF", ":=", "td", ".", "CDF", "(", "realQuantile", ")", "\n", "errors", ".", "Write", "(", "stringifySlice", "(", "distname", ",", "run", ",", "quantile", ",", "estimatedCDF", ",", "// this column is equivalent to the cdf section", "realQuantile", ",", "estimatedQuantile", ",", "// this column is equivalent to the quantile section", ")", ")", "\n", "}", "\n", "}" ]
// populates a single t-digest, of a given compression, with a given number of // samples, drawn from the given distribution function, // then writes various statistics to the given CSVs
[ "populates", "a", "single", "t", "-", "digest", "of", "a", "given", "compression", "with", "a", "given", "number", "of", "samples", "drawn", "from", "the", "given", "distribution", "function", "then", "writes", "various", "statistics", "to", "the", "given", "CSVs" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/analysis/main.go#L19-L124
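A hedged invocation sketch: rand.NormFloat64 satisfies the distribution parameter, and the four csv.Writer arguments all point at stdout purely for illustration (their output would interleave; real runs would use separate files).

package main

import (
	"encoding/csv"
	"math/rand"
	"os"
)

func main() {
	w := func() *csv.Writer { return csv.NewWriter(os.Stdout) }
	deviations, centroidErrors, errs, sizes := w(), w(), w(), w()
	// 10,000 standard-normal samples into a digest with compression 100.
	runOnce(rand.NormFloat64, 100, 10000, "normal", 0, deviations, centroidErrors, errs, sizes)
	for _, cw := range []*csv.Writer{deviations, centroidErrors, errs, sizes} {
		cw.Flush()
	}
}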
13,436
stripe/veneur
proxysrv/client_conn_map.go
Get
func (m *clientConnMap) Get(dest string) (conn *grpc.ClientConn, ok bool) { m.RLock() conn, ok = m.conns[dest] m.RUnlock() return }
go
func (m *clientConnMap) Get(dest string) (conn *grpc.ClientConn, ok bool) { m.RLock() conn, ok = m.conns[dest] m.RUnlock() return }
[ "func", "(", "m", "*", "clientConnMap", ")", "Get", "(", "dest", "string", ")", "(", "conn", "*", "grpc", ".", "ClientConn", ",", "ok", "bool", ")", "{", "m", ".", "RLock", "(", ")", "\n", "conn", ",", "ok", "=", "m", ".", "conns", "[", "dest", "]", "\n", "m", ".", "RUnlock", "(", ")", "\n\n", "return", "\n", "}" ]
// Return a gRPC connection for the input destination. The ok value indicates // if the key was found in the map.
[ "Return", "a", "gRPC", "connection", "for", "the", "input", "destination", ".", "The", "ok", "value", "indicates", "if", "the", "key", "was", "found", "in", "the", "map", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/proxysrv/client_conn_map.go#L27-L33
13,437
stripe/veneur
proxysrv/client_conn_map.go
Add
func (m *clientConnMap) Add(dest string) error { // If the connection already exists, just exit early if _, ok := m.Get(dest); ok { return nil } conn, err := grpc.Dial(dest, m.options...) m.Lock() _, ok := m.conns[dest] if !ok && err == nil { m.conns[dest] = conn } m.Unlock() if ok && err == nil { _ = conn.Close() } return err }
go
func (m *clientConnMap) Add(dest string) error { // If the connection already exists, just exit early if _, ok := m.Get(dest); ok { return nil } conn, err := grpc.Dial(dest, m.options...) m.Lock() _, ok := m.conns[dest] if !ok && err == nil { m.conns[dest] = conn } m.Unlock() if ok && err == nil { _ = conn.Close() } return err }
[ "func", "(", "m", "*", "clientConnMap", ")", "Add", "(", "dest", "string", ")", "error", "{", "// If the connection already exists, just exit early", "if", "_", ",", "ok", ":=", "m", ".", "Get", "(", "dest", ")", ";", "ok", "{", "return", "nil", "\n", "}", "\n\n", "conn", ",", "err", ":=", "grpc", ".", "Dial", "(", "dest", ",", "m", ".", "options", "...", ")", "\n\n", "m", ".", "Lock", "(", ")", "\n", "_", ",", "ok", ":=", "m", ".", "conns", "[", "dest", "]", "\n", "if", "!", "ok", "&&", "err", "==", "nil", "{", "m", ".", "conns", "[", "dest", "]", "=", "conn", "\n", "}", "\n", "m", ".", "Unlock", "(", ")", "\n\n", "if", "ok", "&&", "err", "==", "nil", "{", "_", "=", "conn", ".", "Close", "(", ")", "\n", "}", "\n\n", "return", "err", "\n", "}" ]
// Add the destination to the map, and open a new connection to it. If the // destination already exists, this is a no-op.
[ "Add", "the", "destination", "to", "the", "map", "and", "open", "a", "new", "connection", "to", "it", ".", "If", "the", "destination", "already", "exists", "this", "is", "a", "no", "-", "op", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/proxysrv/client_conn_map.go#L37-L57
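The shape of Add is worth a note: grpc.Dial runs outside the lock so a slow dial cannot block readers, and the map is re-checked under the lock because another goroutine may have added the same destination between the Get and the Lock; the loser of that race closes its now-redundant connection. One subtlety: grpc.Dial is non-blocking by default, so the error path here mostly catches bad targets or options rather than unreachable hosts. A caller sketch with a placeholder destination:

if err := conns.Add("veneur-global-1.example:8128"); err != nil {
	log.WithError(err).Error("failed to dial proxy destination")
}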
13,438
stripe/veneur
proxysrv/client_conn_map.go
Delete
func (m *clientConnMap) Delete(dest string) { m.Lock() if conn, ok := m.conns[dest]; ok { _ = conn.Close() } delete(m.conns, dest) m.Unlock() }
go
func (m *clientConnMap) Delete(dest string) { m.Lock() if conn, ok := m.conns[dest]; ok { _ = conn.Close() } delete(m.conns, dest) m.Unlock() }
[ "func", "(", "m", "*", "clientConnMap", ")", "Delete", "(", "dest", "string", ")", "{", "m", ".", "Lock", "(", ")", "\n\n", "if", "conn", ",", "ok", ":=", "m", ".", "conns", "[", "dest", "]", ";", "ok", "{", "_", "=", "conn", ".", "Close", "(", ")", "\n", "}", "\n", "delete", "(", "m", ".", "conns", ",", "dest", ")", "\n\n", "m", ".", "Unlock", "(", ")", "\n", "}" ]
// Delete a destination from the map and close the associated connection. This // is a no-op if the destination doesn't exist.
[ "Delete", "a", "destination", "from", "the", "map", "and", "close", "the", "associated", "connection", ".", "This", "is", "a", "no", "-", "op", "if", "the", "destination", "doesn", "t", "exist", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/proxysrv/client_conn_map.go#L61-L70
13,439
stripe/veneur
proxysrv/client_conn_map.go
Keys
func (m *clientConnMap) Keys() []string { m.RLock() res := make([]string, 0, len(m.conns)) for k := range m.conns { res = append(res, k) } m.RUnlock() return res }
go
func (m *clientConnMap) Keys() []string { m.RLock() res := make([]string, 0, len(m.conns)) for k := range m.conns { res = append(res, k) } m.RUnlock() return res }
[ "func", "(", "m", "*", "clientConnMap", ")", "Keys", "(", ")", "[", "]", "string", "{", "m", ".", "RLock", "(", ")", "\n\n", "res", ":=", "make", "(", "[", "]", "string", ",", "0", ",", "len", "(", "m", ".", "conns", ")", ")", "\n", "for", "k", ":=", "range", "m", ".", "conns", "{", "res", "=", "append", "(", "res", ",", "k", ")", "\n", "}", "\n\n", "m", ".", "RUnlock", "(", ")", "\n", "return", "res", "\n", "}" ]
// Keys returns all of the destinations in the map.
[ "Keys", "returns", "all", "of", "the", "destinations", "in", "the", "map", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/proxysrv/client_conn_map.go#L73-L83
13,440
stripe/veneur
proxysrv/client_conn_map.go
Clear
func (m *clientConnMap) Clear() { m.Lock() for _, conn := range m.conns { _ = conn.Close() } m.conns = make(map[string]*grpc.ClientConn) m.Unlock() }
go
func (m *clientConnMap) Clear() { m.Lock() for _, conn := range m.conns { _ = conn.Close() } m.conns = make(map[string]*grpc.ClientConn) m.Unlock() }
[ "func", "(", "m", "*", "clientConnMap", ")", "Clear", "(", ")", "{", "m", ".", "Lock", "(", ")", "\n\n", "for", "_", ",", "conn", ":=", "range", "m", ".", "conns", "{", "_", "=", "conn", ".", "Close", "(", ")", "\n", "}", "\n\n", "m", ".", "conns", "=", "make", "(", "map", "[", "string", "]", "*", "grpc", ".", "ClientConn", ")", "\n", "m", ".", "Unlock", "(", ")", "\n", "}" ]
// Clear removes all keys from the map and closes each associated connection.
[ "Clear", "removes", "all", "keys", "from", "the", "map", "and", "closes", "each", "associated", "connection", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/proxysrv/client_conn_map.go#L86-L95
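A sketch tying the map's methods together, as a service-discovery refresh loop might; the destinations are placeholders and the logger is assumed to be in scope.

// Reconcile the connection map against a freshly discovered destination set.
fresh := map[string]bool{"dest-a.example:8128": true, "dest-b.example:8128": true} // placeholders
for _, dest := range conns.Keys() {
	if !fresh[dest] {
		conns.Delete(dest) // closes the stale connection
	}
}
for dest := range fresh {
	if err := conns.Add(dest); err != nil { // no-op for destinations already present
		log.WithError(err).WithField("dest", dest).Error("dial failed")
	}
}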
13,441
stripe/veneur
sinks/signalfx/signalfx.go
NewClient
func NewClient(endpoint, apiKey string, client *http.Client) DPClient { baseURL, err := url.Parse(endpoint) if err != nil { panic(fmt.Sprintf("Could not parse endpoint base URL %q: %v", endpoint, err)) } httpSink := sfxclient.NewHTTPSink() httpSink.AuthToken = apiKey httpSink.DatapointEndpoint = baseURL.ResolveReference(datapointURL).String() httpSink.EventEndpoint = baseURL.ResolveReference(eventURL).String() httpSink.Client = client return httpSink }
go
func NewClient(endpoint, apiKey string, client *http.Client) DPClient { baseURL, err := url.Parse(endpoint) if err != nil { panic(fmt.Sprintf("Could not parse endpoint base URL %q: %v", endpoint, err)) } httpSink := sfxclient.NewHTTPSink() httpSink.AuthToken = apiKey httpSink.DatapointEndpoint = baseURL.ResolveReference(datapointURL).String() httpSink.EventEndpoint = baseURL.ResolveReference(eventURL).String() httpSink.Client = client return httpSink }
[ "func", "NewClient", "(", "endpoint", ",", "apiKey", "string", ",", "client", "*", "http", ".", "Client", ")", "DPClient", "{", "baseURL", ",", "err", ":=", "url", ".", "Parse", "(", "endpoint", ")", "\n", "if", "err", "!=", "nil", "{", "panic", "(", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "endpoint", ",", "err", ")", ")", "\n", "}", "\n", "httpSink", ":=", "sfxclient", ".", "NewHTTPSink", "(", ")", "\n", "httpSink", ".", "AuthToken", "=", "apiKey", "\n", "httpSink", ".", "DatapointEndpoint", "=", "baseURL", ".", "ResolveReference", "(", "datapointURL", ")", ".", "String", "(", ")", "\n", "httpSink", ".", "EventEndpoint", "=", "baseURL", ".", "ResolveReference", "(", "eventURL", ")", ".", "String", "(", ")", "\n", "httpSink", ".", "Client", "=", "client", "\n", "return", "httpSink", "\n", "}" ]
// NewClient constructs a new signalfx HTTP client for the given
// endpoint and API token.
[ "NewClient", "constructs", "a", "new", "signalfx", "HTTP", "client", "for", "the", "given", "endpoint", "and", "API", "token", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/signalfx/signalfx.go#L156-L167
13,442
stripe/veneur
sinks/signalfx/signalfx.go
NewSignalFxSink
func NewSignalFxSink(hostnameTag string, hostname string, commonDimensions map[string]string, log *logrus.Logger, client DPClient, varyBy string, perTagClients map[string]DPClient, metricNamePrefixDrops []string, metricTagPrefixDrops []string, derivedMetrics samplers.DerivedMetricsProcessor, maxPointsInBatch int) (*SignalFxSink, error) {
    return &SignalFxSink{
        defaultClient:         client,
        clientsByTagValue:     perTagClients,
        hostnameTag:           hostnameTag,
        hostname:              hostname,
        commonDimensions:      commonDimensions,
        log:                   log,
        varyBy:                varyBy,
        metricNamePrefixDrops: metricNamePrefixDrops,
        metricTagPrefixDrops:  metricTagPrefixDrops,
        derivedMetrics:        derivedMetrics,
        maxPointsInBatch:      maxPointsInBatch,
    }, nil
}
go
func NewSignalFxSink(hostnameTag string, hostname string, commonDimensions map[string]string, log *logrus.Logger, client DPClient, varyBy string, perTagClients map[string]DPClient, metricNamePrefixDrops []string, metricTagPrefixDrops []string, derivedMetrics samplers.DerivedMetricsProcessor, maxPointsInBatch int) (*SignalFxSink, error) {
    return &SignalFxSink{
        defaultClient:         client,
        clientsByTagValue:     perTagClients,
        hostnameTag:           hostnameTag,
        hostname:              hostname,
        commonDimensions:      commonDimensions,
        log:                   log,
        varyBy:                varyBy,
        metricNamePrefixDrops: metricNamePrefixDrops,
        metricTagPrefixDrops:  metricTagPrefixDrops,
        derivedMetrics:        derivedMetrics,
        maxPointsInBatch:      maxPointsInBatch,
    }, nil
}
[ "func", "NewSignalFxSink", "(", "hostnameTag", "string", ",", "hostname", "string", ",", "commonDimensions", "map", "[", "string", "]", "string", ",", "log", "*", "logrus", ".", "Logger", ",", "client", "DPClient", ",", "varyBy", "string", ",", "perTagClients", "map", "[", "string", "]", "DPClient", ",", "metricNamePrefixDrops", "[", "]", "string", ",", "metricTagPrefixDrops", "[", "]", "string", ",", "derivedMetrics", "samplers", ".", "DerivedMetricsProcessor", ",", "maxPointsInBatch", "int", ")", "(", "*", "SignalFxSink", ",", "error", ")", "{", "return", "&", "SignalFxSink", "{", "defaultClient", ":", "client", ",", "clientsByTagValue", ":", "perTagClients", ",", "hostnameTag", ":", "hostnameTag", ",", "hostname", ":", "hostname", ",", "commonDimensions", ":", "commonDimensions", ",", "log", ":", "log", ",", "varyBy", ":", "varyBy", ",", "metricNamePrefixDrops", ":", "metricNamePrefixDrops", ",", "metricTagPrefixDrops", ":", "metricTagPrefixDrops", ",", "derivedMetrics", ":", "derivedMetrics", ",", "maxPointsInBatch", ":", "maxPointsInBatch", ",", "}", ",", "nil", "\n", "}" ]
// NewSignalFxSink creates a new SignalFx sink for metrics.
[ "NewSignalFxSink", "creates", "a", "new", "SignalFx", "sink", "for", "metrics", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/signalfx/signalfx.go#L170-L184
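Taken together, the two signalfx constructors above wire up roughly as follows. This is a hedged sketch: the endpoint, token, dimensions, and batch size are placeholder assumptions, and the DerivedMetricsProcessor argument is elided as nil rather than showing a real processor.

package main

import (
    "net/http"
    "time"

    "github.com/sirupsen/logrus"
    "github.com/stripe/veneur/sinks/signalfx"
)

func main() {
    // NewClient panics if the endpoint fails to parse, so the URL must be valid.
    dp := signalfx.NewClient("https://ingest.signalfx.com", "SFX_API_TOKEN",
        &http.Client{Timeout: 10 * time.Second})

    // All values below are placeholders; a real caller would also pass a
    // samplers.DerivedMetricsProcessor instead of nil.
    sink, err := signalfx.NewSignalFxSink(
        "host",                          // hostnameTag
        "web-01",                        // hostname
        map[string]string{"env": "dev"}, // commonDimensions
        logrus.New(),
        dp,   // default DPClient
        "",   // varyBy: no per-tag fan-out
        nil,  // perTagClients
        nil,  // metricNamePrefixDrops
        nil,  // metricTagPrefixDrops
        nil,  // derivedMetrics, elided in this sketch
        1000, // maxPointsInBatch
    )
    if err != nil {
        panic(err)
    }
    _ = sink
}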
13,443
stripe/veneur
sinks/signalfx/signalfx.go
Start
func (sfx *SignalFxSink) Start(traceClient *trace.Client) error {
    sfx.traceClient = traceClient
    return nil
}
go
func (sfx *SignalFxSink) Start(traceClient *trace.Client) error {
    sfx.traceClient = traceClient
    return nil
}
[ "func", "(", "sfx", "*", "SignalFxSink", ")", "Start", "(", "traceClient", "*", "trace", ".", "Client", ")", "error", "{", "sfx", ".", "traceClient", "=", "traceClient", "\n", "return", "nil", "\n", "}" ]
// Start begins the sink. For SignalFx this is a noop.
[ "Start", "begins", "the", "sink", ".", "For", "SignalFx", "this", "is", "a", "noop", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/signalfx/signalfx.go#L192-L195
13,444
stripe/veneur
sinks/signalfx/signalfx.go
client
func (sfx *SignalFxSink) client(key string) DPClient {
    if cl, ok := sfx.clientsByTagValue[key]; ok {
        return cl
    }
    return sfx.defaultClient
}
go
func (sfx *SignalFxSink) client(key string) DPClient {
    if cl, ok := sfx.clientsByTagValue[key]; ok {
        return cl
    }
    return sfx.defaultClient
}
[ "func", "(", "sfx", "*", "SignalFxSink", ")", "client", "(", "key", "string", ")", "DPClient", "{", "if", "cl", ",", "ok", ":=", "sfx", ".", "clientsByTagValue", "[", "key", "]", ";", "ok", "{", "return", "cl", "\n", "}", "\n", "return", "sfx", ".", "defaultClient", "\n", "}" ]
// client returns a client that can be used to submit to vary-by tag's
// value. If no client is specified for that tag value, the default
// client is returned.
[ "client", "returns", "a", "client", "that", "can", "be", "used", "to", "submit", "to", "vary", "-", "by", "tag", "s", "value", ".", "If", "no", "client", "is", "specified", "for", "that", "tag", "value", "the", "default", "client", "is", "returned", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/signalfx/signalfx.go#L200-L205
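The fallback in client is the ordinary comma-ok map lookup. A self-contained sketch of the same shape with hypothetical names:

package main

import "fmt"

// pickClient mirrors the lookup in client(): return the per-key value when
// one was registered, otherwise the shared default.
func pickClient(byTag map[string]string, def, key string) string {
    if cl, ok := byTag[key]; ok {
        return cl
    }
    return def
}

func main() {
    byTag := map[string]string{"team-a": "client-A"} // hypothetical registry
    fmt.Println(pickClient(byTag, "default-client", "team-a")) // client-A
    fmt.Println(pickClient(byTag, "default-client", "team-b")) // default-client
}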
13,445
stripe/veneur
sinks/signalfx/signalfx.go
newPointCollection
func (sfx *SignalFxSink) newPointCollection() *collection {
    return &collection{
        sink:        sfx,
        points:      []*datapoint.Datapoint{},
        pointsByKey: map[string][]*datapoint.Datapoint{},
    }
}
go
func (sfx *SignalFxSink) newPointCollection() *collection {
    return &collection{
        sink:        sfx,
        points:      []*datapoint.Datapoint{},
        pointsByKey: map[string][]*datapoint.Datapoint{},
    }
}
[ "func", "(", "sfx", "*", "SignalFxSink", ")", "newPointCollection", "(", ")", "*", "collection", "{", "return", "&", "collection", "{", "sink", ":", "sfx", ",", "points", ":", "[", "]", "*", "datapoint", ".", "Datapoint", "{", "}", ",", "pointsByKey", ":", "map", "[", "string", "]", "[", "]", "*", "datapoint", ".", "Datapoint", "{", "}", ",", "}", "\n", "}" ]
// newPointCollection creates an empty collection object and returns it
[ "newPointCollection", "creates", "an", "empty", "collection", "object", "and", "returns", "it" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/signalfx/signalfx.go#L208-L214
13,446
stripe/veneur
sinks/signalfx/signalfx.go
FlushOtherSamples
func (sfx *SignalFxSink) FlushOtherSamples(ctx context.Context, samples []ssf.SSFSample) {
    span, _ := trace.StartSpanFromContext(ctx, "")
    defer span.ClientFinish(sfx.traceClient)
    var countFailed = 0
    var countSuccess = 0
    for _, sample := range samples {
        if _, ok := sample.Tags[dogstatsd.EventIdentifierKey]; ok {
            err := sfx.reportEvent(ctx, &sample)
            if err != nil {
                countFailed++
            } else {
                countSuccess++
            }
        }
    }
    if countSuccess > 0 {
        span.Add(ssf.Count(sinks.EventReportedCount, float32(countSuccess), successSpanTags))
    }
    if countFailed > 0 {
        span.Add(ssf.Count(sinks.EventReportedCount, float32(countFailed), failureSpanTags))
    }
}
go
func (sfx *SignalFxSink) FlushOtherSamples(ctx context.Context, samples []ssf.SSFSample) {
    span, _ := trace.StartSpanFromContext(ctx, "")
    defer span.ClientFinish(sfx.traceClient)
    var countFailed = 0
    var countSuccess = 0
    for _, sample := range samples {
        if _, ok := sample.Tags[dogstatsd.EventIdentifierKey]; ok {
            err := sfx.reportEvent(ctx, &sample)
            if err != nil {
                countFailed++
            } else {
                countSuccess++
            }
        }
    }
    if countSuccess > 0 {
        span.Add(ssf.Count(sinks.EventReportedCount, float32(countSuccess), successSpanTags))
    }
    if countFailed > 0 {
        span.Add(ssf.Count(sinks.EventReportedCount, float32(countFailed), failureSpanTags))
    }
}
[ "func", "(", "sfx", "*", "SignalFxSink", ")", "FlushOtherSamples", "(", "ctx", "context", ".", "Context", ",", "samples", "[", "]", "ssf", ".", "SSFSample", ")", "{", "span", ",", "_", ":=", "trace", ".", "StartSpanFromContext", "(", "ctx", ",", "\"", "\"", ")", "\n", "defer", "span", ".", "ClientFinish", "(", "sfx", ".", "traceClient", ")", "\n", "var", "countFailed", "=", "0", "\n", "var", "countSuccess", "=", "0", "\n", "for", "_", ",", "sample", ":=", "range", "samples", "{", "if", "_", ",", "ok", ":=", "sample", ".", "Tags", "[", "dogstatsd", ".", "EventIdentifierKey", "]", ";", "ok", "{", "err", ":=", "sfx", ".", "reportEvent", "(", "ctx", ",", "&", "sample", ")", "\n", "if", "err", "!=", "nil", "{", "countFailed", "++", "\n", "}", "else", "{", "countSuccess", "++", "\n", "}", "\n", "}", "\n", "}", "\n", "if", "countSuccess", ">", "0", "{", "span", ".", "Add", "(", "ssf", ".", "Count", "(", "sinks", ".", "EventReportedCount", ",", "float32", "(", "countSuccess", ")", ",", "successSpanTags", ")", ")", "\n", "}", "\n", "if", "countFailed", ">", "0", "{", "span", ".", "Add", "(", "ssf", ".", "Count", "(", "sinks", ".", "EventReportedCount", ",", "float32", "(", "countFailed", ")", ",", "failureSpanTags", ")", ")", "\n", "}", "\n", "}" ]
// FlushOtherSamples sends events to SignalFx. Event type samples will be serialized as SFX
// Events directly. All other metric types are ignored
[ "FlushOtherSamples", "sends", "events", "to", "SignalFx", ".", "Event", "type", "samples", "will", "be", "serialized", "as", "SFX", "Events", "directly", ".", "All", "other", "metric", "types", "are", "ignored" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/signalfx/signalfx.go#L315-L336
13,447
stripe/veneur
server.go
HandleTracePacket
func (s *Server) HandleTracePacket(packet []byte) {
    samples := &ssf.Samples{}
    defer metrics.Report(s.TraceClient, samples)

    // Unlike metrics, protobuf shouldn't have an issue with 0-length packets
    if len(packet) == 0 {
        s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:unknown", "reason:zerolength"}, 1.0)
        log.Warn("received zero-length trace packet")
        return
    }

    s.Statsd.Histogram("ssf.packet_size", float64(len(packet)), nil, .1)

    span, err := protocol.ParseSSF(packet)
    if err != nil {
        reason := "reason:" + err.Error()
        s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:ssf_metric", reason}, 1.0)
        log.WithError(err).Warn("ParseSSF")
        return
    }
    // we want to keep track of this, because it's a client problem, but still
    // handle the span normally
    if span.Id == 0 {
        reason := "reason:" + "empty_id"
        s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:ssf_metric", reason}, 1.0)
        log.WithError(err).Warn("ParseSSF")
    }

    s.handleSSF(span, "packet")
}
go
func (s *Server) HandleTracePacket(packet []byte) {
    samples := &ssf.Samples{}
    defer metrics.Report(s.TraceClient, samples)

    // Unlike metrics, protobuf shouldn't have an issue with 0-length packets
    if len(packet) == 0 {
        s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:unknown", "reason:zerolength"}, 1.0)
        log.Warn("received zero-length trace packet")
        return
    }

    s.Statsd.Histogram("ssf.packet_size", float64(len(packet)), nil, .1)

    span, err := protocol.ParseSSF(packet)
    if err != nil {
        reason := "reason:" + err.Error()
        s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:ssf_metric", reason}, 1.0)
        log.WithError(err).Warn("ParseSSF")
        return
    }
    // we want to keep track of this, because it's a client problem, but still
    // handle the span normally
    if span.Id == 0 {
        reason := "reason:" + "empty_id"
        s.Statsd.Count("ssf.error_total", 1, []string{"ssf_format:packet", "packet_type:ssf_metric", reason}, 1.0)
        log.WithError(err).Warn("ParseSSF")
    }

    s.handleSSF(span, "packet")
}
[ "func", "(", "s", "*", "Server", ")", "HandleTracePacket", "(", "packet", "[", "]", "byte", ")", "{", "samples", ":=", "&", "ssf", ".", "Samples", "{", "}", "\n", "defer", "metrics", ".", "Report", "(", "s", ".", "TraceClient", ",", "samples", ")", "\n\n", "// Unlike metrics, protobuf shouldn't have an issue with 0-length packets", "if", "len", "(", "packet", ")", "==", "0", "{", "s", ".", "Statsd", ".", "Count", "(", "\"", "\"", ",", "1", ",", "[", "]", "string", "{", "\"", "\"", ",", "\"", "\"", ",", "\"", "\"", "}", ",", "1.0", ")", "\n", "log", ".", "Warn", "(", "\"", "\"", ")", "\n", "return", "\n", "}", "\n\n", "s", ".", "Statsd", ".", "Histogram", "(", "\"", "\"", ",", "float64", "(", "len", "(", "packet", ")", ")", ",", "nil", ",", ".1", ")", "\n\n", "span", ",", "err", ":=", "protocol", ".", "ParseSSF", "(", "packet", ")", "\n", "if", "err", "!=", "nil", "{", "reason", ":=", "\"", "\"", "+", "err", ".", "Error", "(", ")", "\n", "s", ".", "Statsd", ".", "Count", "(", "\"", "\"", ",", "1", ",", "[", "]", "string", "{", "\"", "\"", ",", "\"", "\"", ",", "reason", "}", ",", "1.0", ")", "\n", "log", ".", "WithError", "(", "err", ")", ".", "Warn", "(", "\"", "\"", ")", "\n", "return", "\n", "}", "\n", "// we want to keep track of this, because it's a client problem, but still", "// handle the span normally", "if", "span", ".", "Id", "==", "0", "{", "reason", ":=", "\"", "\"", "+", "\"", "\"", "\n", "s", ".", "Statsd", ".", "Count", "(", "\"", "\"", ",", "1", ",", "[", "]", "string", "{", "\"", "\"", ",", "\"", "\"", ",", "reason", "}", ",", "1.0", ")", "\n", "log", ".", "WithError", "(", "err", ")", ".", "Warn", "(", "\"", "\"", ")", "\n", "}", "\n\n", "s", ".", "handleSSF", "(", "span", ",", "\"", "\"", ")", "\n", "}" ]
// HandleTracePacket accepts an incoming packet as bytes and sends it to the
// appropriate worker.
[ "HandleTracePacket", "accepts", "an", "incoming", "packet", "as", "bytes", "and", "sends", "it", "to", "the", "appropriate", "worker", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L854-L883
13,448
stripe/veneur
server.go
ReadMetricSocket
func (s *Server) ReadMetricSocket(serverConn net.PacketConn, packetPool *sync.Pool) {
    for {
        buf := packetPool.Get().([]byte)
        n, _, err := serverConn.ReadFrom(buf)
        if err != nil {
            log.WithError(err).Error("Error reading from UDP metrics socket")
            continue
        }
        s.processMetricPacket(n, buf, packetPool)
    }
}
go
func (s *Server) ReadMetricSocket(serverConn net.PacketConn, packetPool *sync.Pool) {
    for {
        buf := packetPool.Get().([]byte)
        n, _, err := serverConn.ReadFrom(buf)
        if err != nil {
            log.WithError(err).Error("Error reading from UDP metrics socket")
            continue
        }
        s.processMetricPacket(n, buf, packetPool)
    }
}
[ "func", "(", "s", "*", "Server", ")", "ReadMetricSocket", "(", "serverConn", "net", ".", "PacketConn", ",", "packetPool", "*", "sync", ".", "Pool", ")", "{", "for", "{", "buf", ":=", "packetPool", ".", "Get", "(", ")", ".", "(", "[", "]", "byte", ")", "\n", "n", ",", "_", ",", "err", ":=", "serverConn", ".", "ReadFrom", "(", "buf", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "WithError", "(", "err", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "continue", "\n", "}", "\n", "s", ".", "processMetricPacket", "(", "n", ",", "buf", ",", "packetPool", ")", "\n", "}", "\n", "}" ]
// ReadMetricSocket listens for available packets to handle.
[ "ReadMetricSocket", "listens", "for", "available", "packets", "to", "handle", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L931-L941
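ReadMetricSocket expects its caller to own both the socket and the buffer pool. A hedged setup sketch; the address and buffer size are placeholders, and the pool's New function must return []byte because the loop type-asserts the result of Get:

package main

import (
    "net"
    "sync"
    "time"
)

func main() {
    const maxPacket = 8192 // placeholder size; veneur derives this from config

    // The pool must hand out []byte values, because the read loop
    // type-asserts packetPool.Get().([]byte).
    packetPool := &sync.Pool{
        New: func() interface{} { return make([]byte, maxPacket) },
    }

    conn, err := net.ListenPacket("udp", "127.0.0.1:0") // placeholder address
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // One iteration of the loop that (*Server).ReadMetricSocket runs forever;
    // the deadline just keeps this sketch from blocking.
    conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
    buf := packetPool.Get().([]byte)
    if n, _, err := conn.ReadFrom(buf); err == nil {
        _ = buf[:n] // a real server hands this to processMetricPacket
    }
    packetPool.Put(buf)
}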
13,449
stripe/veneur
server.go
processMetricPacket
func (s *Server) processMetricPacket(numBytes int, buf []byte, packetPool *sync.Pool) {
    if numBytes > s.metricMaxLength {
        metrics.ReportOne(s.TraceClient, ssf.Count("packet.error_total", 1, map[string]string{"packet_type": "unknown", "reason": "toolong"}))
        return
    }

    // statsd allows multiple packets to be joined by newlines and sent as
    // one larger packet
    // note that spurious newlines are not allowed in this format, it has
    // to be exactly one newline between each packet, with no leading or
    // trailing newlines
    splitPacket := samplers.NewSplitBytes(buf[:numBytes], '\n')
    for splitPacket.Next() {
        s.HandleMetricPacket(splitPacket.Chunk())
    }

    // the Metric struct created by HandleMetricPacket has no byte slices in it,
    // only strings
    // therefore there are no outstanding references to this byte slice, we
    // can return it to the pool
    packetPool.Put(buf)
}
go
func (s *Server) processMetricPacket(numBytes int, buf []byte, packetPool *sync.Pool) {
    if numBytes > s.metricMaxLength {
        metrics.ReportOne(s.TraceClient, ssf.Count("packet.error_total", 1, map[string]string{"packet_type": "unknown", "reason": "toolong"}))
        return
    }

    // statsd allows multiple packets to be joined by newlines and sent as
    // one larger packet
    // note that spurious newlines are not allowed in this format, it has
    // to be exactly one newline between each packet, with no leading or
    // trailing newlines
    splitPacket := samplers.NewSplitBytes(buf[:numBytes], '\n')
    for splitPacket.Next() {
        s.HandleMetricPacket(splitPacket.Chunk())
    }

    // the Metric struct created by HandleMetricPacket has no byte slices in it,
    // only strings
    // therefore there are no outstanding references to this byte slice, we
    // can return it to the pool
    packetPool.Put(buf)
}
[ "func", "(", "s", "*", "Server", ")", "processMetricPacket", "(", "numBytes", "int", ",", "buf", "[", "]", "byte", ",", "packetPool", "*", "sync", ".", "Pool", ")", "{", "if", "numBytes", ">", "s", ".", "metricMaxLength", "{", "metrics", ".", "ReportOne", "(", "s", ".", "TraceClient", ",", "ssf", ".", "Count", "(", "\"", "\"", ",", "1", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "\"", "\"", "}", ")", ")", "\n", "return", "\n", "}", "\n\n", "// statsd allows multiple packets to be joined by newlines and sent as", "// one larger packet", "// note that spurious newlines are not allowed in this format, it has", "// to be exactly one newline between each packet, with no leading or", "// trailing newlines", "splitPacket", ":=", "samplers", ".", "NewSplitBytes", "(", "buf", "[", ":", "numBytes", "]", ",", "'\\n'", ")", "\n", "for", "splitPacket", ".", "Next", "(", ")", "{", "s", ".", "HandleMetricPacket", "(", "splitPacket", ".", "Chunk", "(", ")", ")", "\n", "}", "\n\n", "// the Metric struct created by HandleMetricPacket has no byte slices in it,", "// only strings", "// therefore there are no outstanding references to this byte slice, we", "// can return it to the pool", "packetPool", ".", "Put", "(", "buf", ")", "\n", "}" ]
// Splits the read metric packet into multiple metrics and handles them
[ "Splits", "the", "read", "metric", "packet", "into", "multiple", "metrics", "and", "handles", "them" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L944-L965
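The newline framing those comments describe can be exercised directly with samplers.NewSplitBytes, the same helper the function uses; a small sketch with made-up metric lines:

package main

import (
    "fmt"

    "github.com/stripe/veneur/samplers"
)

func main() {
    // Two statsd packets joined by exactly one newline, with no leading
    // or trailing newlines, per the framing rules above.
    packet := []byte("pages.hits:1|c\npages.latency:320|ms")

    sp := samplers.NewSplitBytes(packet, '\n')
    for sp.Next() {
        fmt.Printf("%s\n", sp.Chunk())
    }
    // Output:
    // pages.hits:1|c
    // pages.latency:320|ms
}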
13,450
stripe/veneur
server.go
ReadStatsdDatagramSocket
func (s *Server) ReadStatsdDatagramSocket(serverConn *net.UnixConn, packetPool *sync.Pool) {
    for {
        buf := packetPool.Get().([]byte)
        n, _, err := serverConn.ReadFromUnix(buf)
        if err != nil {
            select {
            case <-s.shutdown:
                log.WithError(err).Info("Ignoring ReadFrom error while shutting down")
                return
            default:
                log.WithError(err).Error("Error reading packet from Unix domain socket")
                continue
            }
        }

        s.processMetricPacket(n, buf, packetPool)
    }
}
go
func (s *Server) ReadStatsdDatagramSocket(serverConn *net.UnixConn, packetPool *sync.Pool) {
    for {
        buf := packetPool.Get().([]byte)
        n, _, err := serverConn.ReadFromUnix(buf)
        if err != nil {
            select {
            case <-s.shutdown:
                log.WithError(err).Info("Ignoring ReadFrom error while shutting down")
                return
            default:
                log.WithError(err).Error("Error reading packet from Unix domain socket")
                continue
            }
        }

        s.processMetricPacket(n, buf, packetPool)
    }
}
[ "func", "(", "s", "*", "Server", ")", "ReadStatsdDatagramSocket", "(", "serverConn", "*", "net", ".", "UnixConn", ",", "packetPool", "*", "sync", ".", "Pool", ")", "{", "for", "{", "buf", ":=", "packetPool", ".", "Get", "(", ")", ".", "(", "[", "]", "byte", ")", "\n", "n", ",", "_", ",", "err", ":=", "serverConn", ".", "ReadFromUnix", "(", "buf", ")", "\n", "if", "err", "!=", "nil", "{", "select", "{", "case", "<-", "s", ".", "shutdown", ":", "log", ".", "WithError", "(", "err", ")", ".", "Info", "(", "\"", "\"", ")", "\n", "return", "\n", "default", ":", "log", ".", "WithError", "(", "err", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "continue", "\n", "}", "\n", "}", "\n\n", "s", ".", "processMetricPacket", "(", "n", ",", "buf", ",", "packetPool", ")", "\n", "}", "\n", "}" ]
// ReadStatsdDatagramSocket reads statsd metrics packets from connection off a unix datagram socket.
[ "ReadStatsdDatagramSocket", "reads", "statsd", "metrics", "packets", "from", "connection", "off", "a", "unix", "datagram", "socket", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L968-L985
13,451
stripe/veneur
server.go
ReadSSFPacketSocket
func (s *Server) ReadSSFPacketSocket(serverConn net.PacketConn, packetPool *sync.Pool) {
    // TODO This is duplicated from ReadMetricSocket and feels like it could be it's
    // own function?
    p := packetPool.Get().([]byte)
    if len(p) == 0 {
        log.WithField("len", len(p)).Fatal(
            "packetPool making empty slices: trace_max_length_bytes must be >= 0")
    }
    packetPool.Put(p)

    for {
        buf := packetPool.Get().([]byte)
        n, _, err := serverConn.ReadFrom(buf)
        if err != nil {
            // In tests, the probably-best way to
            // terminate this reader is to issue a shutdown and close the listening
            // socket, which returns an error, so let's handle it here:
            select {
            case <-s.shutdown:
                log.WithError(err).Info("Ignoring ReadFrom error while shutting down")
                return
            default:
                log.WithError(err).Error("Error reading from UDP trace socket")
                continue
            }
        }

        s.HandleTracePacket(buf[:n])
        packetPool.Put(buf)
    }
}
go
func (s *Server) ReadSSFPacketSocket(serverConn net.PacketConn, packetPool *sync.Pool) {
    // TODO This is duplicated from ReadMetricSocket and feels like it could be it's
    // own function?
    p := packetPool.Get().([]byte)
    if len(p) == 0 {
        log.WithField("len", len(p)).Fatal(
            "packetPool making empty slices: trace_max_length_bytes must be >= 0")
    }
    packetPool.Put(p)

    for {
        buf := packetPool.Get().([]byte)
        n, _, err := serverConn.ReadFrom(buf)
        if err != nil {
            // In tests, the probably-best way to
            // terminate this reader is to issue a shutdown and close the listening
            // socket, which returns an error, so let's handle it here:
            select {
            case <-s.shutdown:
                log.WithError(err).Info("Ignoring ReadFrom error while shutting down")
                return
            default:
                log.WithError(err).Error("Error reading from UDP trace socket")
                continue
            }
        }

        s.HandleTracePacket(buf[:n])
        packetPool.Put(buf)
    }
}
[ "func", "(", "s", "*", "Server", ")", "ReadSSFPacketSocket", "(", "serverConn", "net", ".", "PacketConn", ",", "packetPool", "*", "sync", ".", "Pool", ")", "{", "// TODO This is duplicated from ReadMetricSocket and feels like it could be it's", "// own function?", "p", ":=", "packetPool", ".", "Get", "(", ")", ".", "(", "[", "]", "byte", ")", "\n", "if", "len", "(", "p", ")", "==", "0", "{", "log", ".", "WithField", "(", "\"", "\"", ",", "len", "(", "p", ")", ")", ".", "Fatal", "(", "\"", "\"", ")", "\n", "}", "\n", "packetPool", ".", "Put", "(", "p", ")", "\n\n", "for", "{", "buf", ":=", "packetPool", ".", "Get", "(", ")", ".", "(", "[", "]", "byte", ")", "\n", "n", ",", "_", ",", "err", ":=", "serverConn", ".", "ReadFrom", "(", "buf", ")", "\n", "if", "err", "!=", "nil", "{", "// In tests, the probably-best way to", "// terminate this reader is to issue a shutdown and close the listening", "// socket, which returns an error, so let's handle it here:", "select", "{", "case", "<-", "s", ".", "shutdown", ":", "log", ".", "WithError", "(", "err", ")", ".", "Info", "(", "\"", "\"", ")", "\n", "return", "\n", "default", ":", "log", ".", "WithError", "(", "err", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "continue", "\n", "}", "\n", "}", "\n\n", "s", ".", "HandleTracePacket", "(", "buf", "[", ":", "n", "]", ")", "\n", "packetPool", ".", "Put", "(", "buf", ")", "\n", "}", "\n", "}" ]
// ReadSSFPacketSocket reads SSF packets off a packet connection.
[ "ReadSSFPacketSocket", "reads", "SSF", "packets", "off", "a", "packet", "connection", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L988-L1018
13,452
stripe/veneur
server.go
ReadTCPSocket
func (s *Server) ReadTCPSocket(listener net.Listener) {
    for {
        conn, err := listener.Accept()
        if err != nil {
            select {
            case <-s.shutdown:
                // occurs when cleanly shutting down the server e.g. in tests; ignore errors
                log.WithError(err).Info("Ignoring Accept error while shutting down")
                return
            default:
                log.WithError(err).Fatal("TCP accept failed")
            }
        }

        go s.handleTCPGoroutine(conn)
    }
}
go
func (s *Server) ReadTCPSocket(listener net.Listener) {
    for {
        conn, err := listener.Accept()
        if err != nil {
            select {
            case <-s.shutdown:
                // occurs when cleanly shutting down the server e.g. in tests; ignore errors
                log.WithError(err).Info("Ignoring Accept error while shutting down")
                return
            default:
                log.WithError(err).Fatal("TCP accept failed")
            }
        }

        go s.handleTCPGoroutine(conn)
    }
}
[ "func", "(", "s", "*", "Server", ")", "ReadTCPSocket", "(", "listener", "net", ".", "Listener", ")", "{", "for", "{", "conn", ",", "err", ":=", "listener", ".", "Accept", "(", ")", "\n", "if", "err", "!=", "nil", "{", "select", "{", "case", "<-", "s", ".", "shutdown", ":", "// occurs when cleanly shutting down the server e.g. in tests; ignore errors", "log", ".", "WithError", "(", "err", ")", ".", "Info", "(", "\"", "\"", ")", "\n", "return", "\n", "default", ":", "log", ".", "WithError", "(", "err", ")", ".", "Fatal", "(", "\"", "\"", ")", "\n", "}", "\n", "}", "\n\n", "go", "s", ".", "handleTCPGoroutine", "(", "conn", ")", "\n", "}", "\n", "}" ]
// ReadTCPSocket listens on Server.TCPAddr for new connections, starting a goroutine for each.
[ "ReadTCPSocket", "listens", "on", "Server", ".", "TCPAddr", "for", "new", "connections", "starting", "a", "goroutine", "for", "each", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L1146-L1162
13,453
stripe/veneur
server.go
HTTPServe
func (s *Server) HTTPServe() {
    var prf interface {
        Stop()
    }

    // We want to make sure the profile is stopped
    // exactly once (and only once), even if the
    // shutdown pre-hook does not run (which it may not)
    profileStopOnce := sync.Once{}

    if s.enableProfiling {
        profileStartOnce.Do(func() {
            prf = profile.Start()
        })

        defer func() {
            profileStopOnce.Do(prf.Stop)
        }()
    }
    httpSocket := bind.Socket(s.HTTPAddr)
    graceful.Timeout(10 * time.Second)
    graceful.PreHook(func() {
        if prf != nil {
            profileStopOnce.Do(prf.Stop)
        }

        log.Info("Terminating HTTP listener")
    })

    // Ensure that the server responds to SIGUSR2 even
    // when *not* running under einhorn.
    graceful.AddSignal(syscall.SIGUSR2, syscall.SIGHUP)
    graceful.HandleSignals()
    gracefulSocket := graceful.WrapListener(httpSocket)
    log.WithField("address", s.HTTPAddr).Info("HTTP server listening")

    // Signal that the HTTP server is starting
    atomic.AddInt32(s.numListeningHTTP, 1)
    defer atomic.AddInt32(s.numListeningHTTP, -1)
    bind.Ready()

    if err := http.Serve(gracefulSocket, s.Handler()); err != nil {
        log.WithError(err).Error("HTTP server shut down due to error")
    }
    log.Info("Stopped HTTP server")

    graceful.Shutdown()
}
go
func (s *Server) HTTPServe() {
    var prf interface {
        Stop()
    }

    // We want to make sure the profile is stopped
    // exactly once (and only once), even if the
    // shutdown pre-hook does not run (which it may not)
    profileStopOnce := sync.Once{}

    if s.enableProfiling {
        profileStartOnce.Do(func() {
            prf = profile.Start()
        })

        defer func() {
            profileStopOnce.Do(prf.Stop)
        }()
    }
    httpSocket := bind.Socket(s.HTTPAddr)
    graceful.Timeout(10 * time.Second)
    graceful.PreHook(func() {
        if prf != nil {
            profileStopOnce.Do(prf.Stop)
        }

        log.Info("Terminating HTTP listener")
    })

    // Ensure that the server responds to SIGUSR2 even
    // when *not* running under einhorn.
    graceful.AddSignal(syscall.SIGUSR2, syscall.SIGHUP)
    graceful.HandleSignals()
    gracefulSocket := graceful.WrapListener(httpSocket)
    log.WithField("address", s.HTTPAddr).Info("HTTP server listening")

    // Signal that the HTTP server is starting
    atomic.AddInt32(s.numListeningHTTP, 1)
    defer atomic.AddInt32(s.numListeningHTTP, -1)
    bind.Ready()

    if err := http.Serve(gracefulSocket, s.Handler()); err != nil {
        log.WithError(err).Error("HTTP server shut down due to error")
    }
    log.Info("Stopped HTTP server")

    graceful.Shutdown()
}
[ "func", "(", "s", "*", "Server", ")", "HTTPServe", "(", ")", "{", "var", "prf", "interface", "{", "Stop", "(", ")", "\n", "}", "\n\n", "// We want to make sure the profile is stopped", "// exactly once (and only once), even if the", "// shutdown pre-hook does not run (which it may not)", "profileStopOnce", ":=", "sync", ".", "Once", "{", "}", "\n\n", "if", "s", ".", "enableProfiling", "{", "profileStartOnce", ".", "Do", "(", "func", "(", ")", "{", "prf", "=", "profile", ".", "Start", "(", ")", "\n", "}", ")", "\n\n", "defer", "func", "(", ")", "{", "profileStopOnce", ".", "Do", "(", "prf", ".", "Stop", ")", "\n", "}", "(", ")", "\n", "}", "\n", "httpSocket", ":=", "bind", ".", "Socket", "(", "s", ".", "HTTPAddr", ")", "\n", "graceful", ".", "Timeout", "(", "10", "*", "time", ".", "Second", ")", "\n", "graceful", ".", "PreHook", "(", "func", "(", ")", "{", "if", "prf", "!=", "nil", "{", "profileStopOnce", ".", "Do", "(", "prf", ".", "Stop", ")", "\n", "}", "\n\n", "log", ".", "Info", "(", "\"", "\"", ")", "\n", "}", ")", "\n\n", "// Ensure that the server responds to SIGUSR2 even", "// when *not* running under einhorn.", "graceful", ".", "AddSignal", "(", "syscall", ".", "SIGUSR2", ",", "syscall", ".", "SIGHUP", ")", "\n", "graceful", ".", "HandleSignals", "(", ")", "\n", "gracefulSocket", ":=", "graceful", ".", "WrapListener", "(", "httpSocket", ")", "\n", "log", ".", "WithField", "(", "\"", "\"", ",", "s", ".", "HTTPAddr", ")", ".", "Info", "(", "\"", "\"", ")", "\n\n", "// Signal that the HTTP server is starting", "atomic", ".", "AddInt32", "(", "s", ".", "numListeningHTTP", ",", "1", ")", "\n", "defer", "atomic", ".", "AddInt32", "(", "s", ".", "numListeningHTTP", ",", "-", "1", ")", "\n", "bind", ".", "Ready", "(", ")", "\n\n", "if", "err", ":=", "http", ".", "Serve", "(", "gracefulSocket", ",", "s", ".", "Handler", "(", ")", ")", ";", "err", "!=", "nil", "{", "log", ".", "WithError", "(", "err", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "}", "\n", "log", ".", "Info", "(", "\"", "\"", ")", "\n\n", "graceful", ".", "Shutdown", "(", ")", "\n", "}" ]
// HTTPServe starts the HTTP server and listens perpetually until it encounters an unrecoverable error.
[ "HTTPServe", "starts", "the", "HTTP", "server", "and", "listens", "perpetually", "until", "it", "encounters", "an", "unrecoverable", "error", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L1190-L1238
13,454
stripe/veneur
server.go
registerPlugin
func (s *Server) registerPlugin(p plugins.Plugin) {
    s.pluginMtx.Lock()
    defer s.pluginMtx.Unlock()
    s.plugins = append(s.plugins, p)
}
go
func (s *Server) registerPlugin(p plugins.Plugin) {
    s.pluginMtx.Lock()
    defer s.pluginMtx.Unlock()
    s.plugins = append(s.plugins, p)
}
[ "func", "(", "s", "*", "Server", ")", "registerPlugin", "(", "p", "plugins", ".", "Plugin", ")", "{", "s", ".", "pluginMtx", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "pluginMtx", ".", "Unlock", "(", ")", "\n", "s", ".", "plugins", "=", "append", "(", "s", ".", "plugins", ",", "p", ")", "\n", "}" ]
// registerPlugin registers a plugin for use
// on the veneur server. It is blocking
// and not threadsafe.
[ "registerPlugin", "registers", "a", "plugin", "for", "use", "on", "the", "veneur", "server", ".", "It", "is", "blocking", "and", "not", "threadsafe", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L1309-L1313
13,455
stripe/veneur
server.go
CalculateTickDelay
func CalculateTickDelay(interval time.Duration, t time.Time) time.Duration {
    return t.Truncate(interval).Add(interval).Sub(t)
}
go
func CalculateTickDelay(interval time.Duration, t time.Time) time.Duration {
    return t.Truncate(interval).Add(interval).Sub(t)
}
[ "func", "CalculateTickDelay", "(", "interval", "time", ".", "Duration", ",", "t", "time", ".", "Time", ")", "time", ".", "Duration", "{", "return", "t", ".", "Truncate", "(", "interval", ")", ".", "Add", "(", "interval", ")", ".", "Sub", "(", "t", ")", "\n", "}" ]
// CalculateTickDelay takes the provided time, `Truncate`s it to a rounded-down
// multiple of `interval`, then adds `interval` back to find the "next" tick.
[ "CalculateTickDelay", "takes", "the", "provided", "time", "Truncate", "s", "it", "to", "a", "rounded", "-", "down", "multiple", "of", "interval", "then", "adds", "interval", "back", "to", "find", "the", "next", "tick", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L1325-L1327
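A worked example of the truncate-then-add arithmetic with a 10s interval: for t = 12:00:07, Truncate yields 12:00:00, adding the interval yields 12:00:10, so the returned delay is 3s. As a runnable check:

package main

import (
    "fmt"
    "time"
)

func main() {
    interval := 10 * time.Second
    t := time.Date(2019, 1, 1, 12, 0, 7, 0, time.UTC)

    // Same expression as CalculateTickDelay(interval, t).
    delay := t.Truncate(interval).Add(interval).Sub(t)
    fmt.Println(delay) // 3s
}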
13,456
stripe/veneur
server.go
setSinkExcludedTags
func setSinkExcludedTags(excludeRules []string, metricSinks []sinks.MetricSink) {
    type excludableSink interface {
        SetExcludedTags([]string)
    }

    for _, sink := range metricSinks {
        if s, ok := sink.(excludableSink); ok {
            excludedTags := generateExcludedTags(excludeRules, sink.Name())
            log.WithFields(logrus.Fields{
                "sink":         sink.Name(),
                "excludedTags": excludedTags,
            }).Debug("Setting excluded tags on sink")
            s.SetExcludedTags(excludedTags)
        }
    }
}
go
func setSinkExcludedTags(excludeRules []string, metricSinks []sinks.MetricSink) {
    type excludableSink interface {
        SetExcludedTags([]string)
    }

    for _, sink := range metricSinks {
        if s, ok := sink.(excludableSink); ok {
            excludedTags := generateExcludedTags(excludeRules, sink.Name())
            log.WithFields(logrus.Fields{
                "sink":         sink.Name(),
                "excludedTags": excludedTags,
            }).Debug("Setting excluded tags on sink")
            s.SetExcludedTags(excludedTags)
        }
    }
}
[ "func", "setSinkExcludedTags", "(", "excludeRules", "[", "]", "string", ",", "metricSinks", "[", "]", "sinks", ".", "MetricSink", ")", "{", "type", "excludableSink", "interface", "{", "SetExcludedTags", "(", "[", "]", "string", ")", "\n", "}", "\n\n", "for", "_", ",", "sink", ":=", "range", "metricSinks", "{", "if", "s", ",", "ok", ":=", "sink", ".", "(", "excludableSink", ")", ";", "ok", "{", "excludedTags", ":=", "generateExcludedTags", "(", "excludeRules", ",", "sink", ".", "Name", "(", ")", ")", "\n", "log", ".", "WithFields", "(", "logrus", ".", "Fields", "{", "\"", "\"", ":", "sink", ".", "Name", "(", ")", ",", "\"", "\"", ":", "excludedTags", ",", "}", ")", ".", "Debug", "(", "\"", "\"", ")", "\n", "s", ".", "SetExcludedTags", "(", "excludedTags", ")", "\n", "}", "\n", "}", "\n", "}" ]
// Set the list of tags to exclude on each sink
[ "Set", "the", "list", "of", "tags", "to", "exclude", "on", "each", "sink" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/server.go#L1330-L1345
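The anonymous excludableSink interface above is the usual Go idiom for probing an optional capability at runtime. A self-contained sketch with hypothetical sink types (not veneur's):

package main

import "fmt"

type sink interface{ Name() string }

// loudSink supports exclusion; quietSink does not.
type loudSink struct{ excluded []string }

func (l *loudSink) Name() string               { return "loud" }
func (l *loudSink) SetExcludedTags(t []string) { l.excluded = t }

type quietSink struct{}

func (quietSink) Name() string { return "quiet" }

func main() {
    type excludable interface{ SetExcludedTags([]string) }

    for _, s := range []sink{&loudSink{}, quietSink{}} {
        if e, ok := s.(excludable); ok {
            e.SetExcludedTags([]string{"host"})
            fmt.Println(s.Name(), "accepted excluded tags")
        } else {
            fmt.Println(s.Name(), "has no SetExcludedTags")
        }
    }
}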
13,457
stripe/veneur
protocol/wire.go
ValidTrace
func ValidTrace(span *ssf.SSFSpan) bool {
    return span.Id != 0 &&
        span.TraceId != 0 &&
        span.StartTimestamp != 0 &&
        span.EndTimestamp != 0 &&
        span.Name != ""
}
go
func ValidTrace(span *ssf.SSFSpan) bool {
    return span.Id != 0 &&
        span.TraceId != 0 &&
        span.StartTimestamp != 0 &&
        span.EndTimestamp != 0 &&
        span.Name != ""
}
[ "func", "ValidTrace", "(", "span", "*", "ssf", ".", "SSFSpan", ")", "bool", "{", "return", "span", ".", "Id", "!=", "0", "&&", "span", ".", "TraceId", "!=", "0", "&&", "span", ".", "StartTimestamp", "!=", "0", "&&", "span", ".", "EndTimestamp", "!=", "0", "&&", "span", ".", "Name", "!=", "\"", "\"", "\n", "}" ]
// ValidTrace returns true if an SSFSpan contains all data necessary
// to synthesize a span that can be used as part of a trace.
[ "ValidTrace", "returns", "true", "if", "an", "SSFSpan", "contains", "all", "data", "necessary", "to", "synthesize", "a", "span", "that", "can", "be", "used", "as", "part", "of", "a", "trace", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/protocol/wire.go#L82-L88
13,458
stripe/veneur
protocol/wire.go
ValidateTrace
func ValidateTrace(span *ssf.SSFSpan) error {
    if !ValidTrace(span) {
        return &InvalidTrace{span}
    }
    return nil
}
go
func ValidateTrace(span *ssf.SSFSpan) error {
    if !ValidTrace(span) {
        return &InvalidTrace{span}
    }
    return nil
}
[ "func", "ValidateTrace", "(", "span", "*", "ssf", ".", "SSFSpan", ")", "error", "{", "if", "!", "ValidTrace", "(", "span", ")", "{", "return", "&", "InvalidTrace", "{", "span", "}", "\n", "}", "\n", "return", "nil", "\n", "}" ]
// ValidateTrace is identical to ValidTrace, except instead of returning
// a boolean, it returns a non-nil error if the SSFSpan cannot be interpreted
// as a span, and nil otherwise.
[ "ValidateTrace", "is", "identical", "to", "ValidTrace", "except", "instead", "of", "returning", "a", "boolean", "it", "returns", "a", "non", "-", "nil", "error", "if", "the", "SSFSpan", "cannot", "be", "interpreted", "as", "a", "span", "and", "nil", "otherwise", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/protocol/wire.go#L93-L98
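A hedged usage sketch of the two validators; the field values are arbitrary, and the SSFSpan field types are assumed to follow the generated SSF protobuf, but every field ValidTrace checks is populated, so the first call returns nil:

package main

import (
    "fmt"
    "time"

    "github.com/stripe/veneur/protocol"
    "github.com/stripe/veneur/ssf"
)

func main() {
    now := time.Now()
    span := &ssf.SSFSpan{
        Id:             1,
        TraceId:        1,
        StartTimestamp: now.UnixNano(),
        EndTimestamp:   now.Add(time.Millisecond).UnixNano(),
        Name:           "example.operation",
    }
    fmt.Println(protocol.ValidateTrace(span)) // <nil>

    span.Name = "" // drop a required field
    fmt.Println(protocol.ValidateTrace(span)) // non-nil *InvalidTrace error
}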
13,459
stripe/veneur
protocol/wire.go
WriteSSF
func WriteSSF(out io.Writer, ssf *ssf.SSFSpan) (int, error) {
    pbuf := pbufPool.Get().(*proto.Buffer)
    err := pbuf.Marshal(ssf)
    if err != nil {
        // This is not a framing error, as we haven't written
        // anything to the stream yet.
        return 0, err
    }
    defer func() {
        // Make sure we reset the scratch protobuffer (by default, it
        // would retain its contents) and put it back into the pool:
        pbuf.Reset()
        pbufPool.Put(pbuf)
    }()

    if err = binary.Write(out, binary.BigEndian, version0); err != nil {
        return 0, &errFramingIO{err}
    }
    if err = binary.Write(out, binary.BigEndian, uint32(len(pbuf.Bytes()))); err != nil {
        return 0, &errFramingIO{err}
    }
    n, err := out.Write(pbuf.Bytes())
    if err != nil {
        return n, &errFramingIO{err}
    }
    return n, nil
}
go
func WriteSSF(out io.Writer, ssf *ssf.SSFSpan) (int, error) {
    pbuf := pbufPool.Get().(*proto.Buffer)
    err := pbuf.Marshal(ssf)
    if err != nil {
        // This is not a framing error, as we haven't written
        // anything to the stream yet.
        return 0, err
    }
    defer func() {
        // Make sure we reset the scratch protobuffer (by default, it
        // would retain its contents) and put it back into the pool:
        pbuf.Reset()
        pbufPool.Put(pbuf)
    }()

    if err = binary.Write(out, binary.BigEndian, version0); err != nil {
        return 0, &errFramingIO{err}
    }
    if err = binary.Write(out, binary.BigEndian, uint32(len(pbuf.Bytes()))); err != nil {
        return 0, &errFramingIO{err}
    }
    n, err := out.Write(pbuf.Bytes())
    if err != nil {
        return n, &errFramingIO{err}
    }
    return n, nil
}
[ "func", "WriteSSF", "(", "out", "io", ".", "Writer", ",", "ssf", "*", "ssf", ".", "SSFSpan", ")", "(", "int", ",", "error", ")", "{", "pbuf", ":=", "pbufPool", ".", "Get", "(", ")", ".", "(", "*", "proto", ".", "Buffer", ")", "\n", "err", ":=", "pbuf", ".", "Marshal", "(", "ssf", ")", "\n", "if", "err", "!=", "nil", "{", "// This is not a framing error, as we haven't written", "// anything to the stream yet.", "return", "0", ",", "err", "\n", "}", "\n", "defer", "func", "(", ")", "{", "// Make sure we reset the scratch protobuffer (by default, it", "// would retain its contents) and put it back into the pool:", "pbuf", ".", "Reset", "(", ")", "\n", "pbufPool", ".", "Put", "(", "pbuf", ")", "\n", "}", "(", ")", "\n\n", "if", "err", "=", "binary", ".", "Write", "(", "out", ",", "binary", ".", "BigEndian", ",", "version0", ")", ";", "err", "!=", "nil", "{", "return", "0", ",", "&", "errFramingIO", "{", "err", "}", "\n", "}", "\n", "if", "err", "=", "binary", ".", "Write", "(", "out", ",", "binary", ".", "BigEndian", ",", "uint32", "(", "len", "(", "pbuf", ".", "Bytes", "(", ")", ")", ")", ")", ";", "err", "!=", "nil", "{", "return", "0", ",", "&", "errFramingIO", "{", "err", "}", "\n", "}", "\n", "n", ",", "err", ":=", "out", ".", "Write", "(", "pbuf", ".", "Bytes", "(", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "n", ",", "&", "errFramingIO", "{", "err", "}", "\n", "}", "\n", "return", "n", ",", "nil", "\n", "}" ]
// WriteSSF writes an SSF span with a preceding v0 frame onto a stream
// and returns the number of bytes written, as well as an error.
//
// If the error matches IsFramingError, the stream must be considered
// poisoned and should not be re-used.
[ "WriteSSF", "writes", "an", "SSF", "span", "with", "a", "preceding", "v0", "frame", "onto", "a", "stream", "and", "returns", "the", "number", "of", "bytes", "written", "as", "well", "as", "an", "error", ".", "If", "the", "error", "matches", "IsFramingError", "the", "stream", "must", "be", "considered", "poisoned", "and", "should", "not", "be", "re", "-", "used", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/protocol/wire.go#L186-L212
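Since WriteSSF accepts any io.Writer, the framing can be sketched against a bytes.Buffer. The frame layout (a version marker followed by a big-endian uint32 length, then the protobuf payload) is read off the writes above; the span contents here are arbitrary:

package main

import (
    "bytes"
    "fmt"

    "github.com/stripe/veneur/protocol"
    "github.com/stripe/veneur/ssf"
)

func main() {
    var buf bytes.Buffer
    span := &ssf.SSFSpan{Id: 1, TraceId: 1, Name: "example"}

    // n counts only the protobuf payload; the version and length prefix
    // written before it are not included in the return value.
    n, err := protocol.WriteSSF(&buf, span)
    if err != nil {
        // Per the docstring, a framing error poisons the stream.
        fmt.Println("framing error?", protocol.IsFramingError(err))
        return
    }
    fmt.Printf("payload %d bytes, framed %d bytes\n", n, buf.Len())
}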
13,460
stripe/veneur
sinks/datadog/datadog.go
NewDatadogMetricSink
func NewDatadogMetricSink(interval float64, flushMaxPerBody int, hostname string, tags []string, ddHostname string, apiKey string, httpClient *http.Client, log *logrus.Logger) (*DatadogMetricSink, error) {
    return &DatadogMetricSink{
        HTTPClient:      httpClient,
        APIKey:          apiKey,
        DDHostname:      ddHostname,
        interval:        interval,
        flushMaxPerBody: flushMaxPerBody,
        hostname:        hostname,
        tags:            tags,
        log:             log,
    }, nil
}
go
func NewDatadogMetricSink(interval float64, flushMaxPerBody int, hostname string, tags []string, ddHostname string, apiKey string, httpClient *http.Client, log *logrus.Logger) (*DatadogMetricSink, error) {
    return &DatadogMetricSink{
        HTTPClient:      httpClient,
        APIKey:          apiKey,
        DDHostname:      ddHostname,
        interval:        interval,
        flushMaxPerBody: flushMaxPerBody,
        hostname:        hostname,
        tags:            tags,
        log:             log,
    }, nil
}
[ "func", "NewDatadogMetricSink", "(", "interval", "float64", ",", "flushMaxPerBody", "int", ",", "hostname", "string", ",", "tags", "[", "]", "string", ",", "ddHostname", "string", ",", "apiKey", "string", ",", "httpClient", "*", "http", ".", "Client", ",", "log", "*", "logrus", ".", "Logger", ")", "(", "*", "DatadogMetricSink", ",", "error", ")", "{", "return", "&", "DatadogMetricSink", "{", "HTTPClient", ":", "httpClient", ",", "APIKey", ":", "apiKey", ",", "DDHostname", ":", "ddHostname", ",", "interval", ":", "interval", ",", "flushMaxPerBody", ":", "flushMaxPerBody", ",", "hostname", ":", "hostname", ",", "tags", ":", "tags", ",", "log", ":", "log", ",", "}", ",", "nil", "\n", "}" ]
// NewDatadogMetricSink creates a new Datadog sink for metrics.
[ "NewDatadogMetricSink", "creates", "a", "new", "Datadog", "sink", "for", "metrics", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/datadog/datadog.go#L83-L94
13,461
stripe/veneur
sinks/datadog/datadog.go
Flush
func (dd *DatadogMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error {
    span, _ := trace.StartSpanFromContext(ctx, "")
    defer span.ClientFinish(dd.traceClient)

    ddmetrics, checks := dd.finalizeMetrics(interMetrics)

    if len(checks) != 0 {
        // this endpoint is not documented to take an array... but it does
        // another curious constraint of this endpoint is that it does not
        // support "Content-Encoding: deflate"
        err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/api/v1/check_run?api_key=%s", dd.DDHostname, dd.APIKey), checks, "flush_checks", false, map[string]string{"sink": "datadog"}, dd.log)
        if err == nil {
            dd.log.WithField("checks", len(checks)).Info("Completed flushing service checks to Datadog")
        } else {
            dd.log.WithFields(logrus.Fields{
                "checks":        len(checks),
                logrus.ErrorKey: err}).Warn("Error flushing checks to Datadog")
        }
    }

    // break the metrics into chunks of approximately equal size, such that
    // each chunk is less than the limit
    // we compute the chunks using rounding-up integer division
    workers := ((len(ddmetrics) - 1) / dd.flushMaxPerBody) + 1
    chunkSize := ((len(ddmetrics) - 1) / workers) + 1
    dd.log.WithField("workers", workers).Debug("Worker count chosen")
    dd.log.WithField("chunkSize", chunkSize).Debug("Chunk size chosen")
    var wg sync.WaitGroup
    flushStart := time.Now()
    for i := 0; i < workers; i++ {
        chunk := ddmetrics[i*chunkSize:]
        if i < workers-1 {
            // trim to chunk size unless this is the last one
            chunk = chunk[:chunkSize]
        }
        wg.Add(1)
        go dd.flushPart(span.Attach(ctx), chunk, &wg)
    }
    wg.Wait()
    tags := map[string]string{"sink": dd.Name()}
    span.Add(
        ssf.Timing(sinks.MetricKeyMetricFlushDuration, time.Since(flushStart), time.Nanosecond, tags),
        ssf.Count(sinks.MetricKeyTotalMetricsFlushed, float32(len(ddmetrics)), tags),
    )
    dd.log.WithField("metrics", len(ddmetrics)).Info("Completed flush to Datadog")
    return nil
}
go
func (dd *DatadogMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error {
    span, _ := trace.StartSpanFromContext(ctx, "")
    defer span.ClientFinish(dd.traceClient)

    ddmetrics, checks := dd.finalizeMetrics(interMetrics)

    if len(checks) != 0 {
        // this endpoint is not documented to take an array... but it does
        // another curious constraint of this endpoint is that it does not
        // support "Content-Encoding: deflate"
        err := vhttp.PostHelper(context.TODO(), dd.HTTPClient, dd.traceClient, http.MethodPost, fmt.Sprintf("%s/api/v1/check_run?api_key=%s", dd.DDHostname, dd.APIKey), checks, "flush_checks", false, map[string]string{"sink": "datadog"}, dd.log)
        if err == nil {
            dd.log.WithField("checks", len(checks)).Info("Completed flushing service checks to Datadog")
        } else {
            dd.log.WithFields(logrus.Fields{
                "checks":        len(checks),
                logrus.ErrorKey: err}).Warn("Error flushing checks to Datadog")
        }
    }

    // break the metrics into chunks of approximately equal size, such that
    // each chunk is less than the limit
    // we compute the chunks using rounding-up integer division
    workers := ((len(ddmetrics) - 1) / dd.flushMaxPerBody) + 1
    chunkSize := ((len(ddmetrics) - 1) / workers) + 1
    dd.log.WithField("workers", workers).Debug("Worker count chosen")
    dd.log.WithField("chunkSize", chunkSize).Debug("Chunk size chosen")
    var wg sync.WaitGroup
    flushStart := time.Now()
    for i := 0; i < workers; i++ {
        chunk := ddmetrics[i*chunkSize:]
        if i < workers-1 {
            // trim to chunk size unless this is the last one
            chunk = chunk[:chunkSize]
        }
        wg.Add(1)
        go dd.flushPart(span.Attach(ctx), chunk, &wg)
    }
    wg.Wait()
    tags := map[string]string{"sink": dd.Name()}
    span.Add(
        ssf.Timing(sinks.MetricKeyMetricFlushDuration, time.Since(flushStart), time.Nanosecond, tags),
        ssf.Count(sinks.MetricKeyTotalMetricsFlushed, float32(len(ddmetrics)), tags),
    )
    dd.log.WithField("metrics", len(ddmetrics)).Info("Completed flush to Datadog")
    return nil
}
[ "func", "(", "dd", "*", "DatadogMetricSink", ")", "Flush", "(", "ctx", "context", ".", "Context", ",", "interMetrics", "[", "]", "samplers", ".", "InterMetric", ")", "error", "{", "span", ",", "_", ":=", "trace", ".", "StartSpanFromContext", "(", "ctx", ",", "\"", "\"", ")", "\n", "defer", "span", ".", "ClientFinish", "(", "dd", ".", "traceClient", ")", "\n\n", "ddmetrics", ",", "checks", ":=", "dd", ".", "finalizeMetrics", "(", "interMetrics", ")", "\n\n", "if", "len", "(", "checks", ")", "!=", "0", "{", "// this endpoint is not documented to take an array... but it does", "// another curious constraint of this endpoint is that it does not", "// support \"Content-Encoding: deflate\"", "err", ":=", "vhttp", ".", "PostHelper", "(", "context", ".", "TODO", "(", ")", ",", "dd", ".", "HTTPClient", ",", "dd", ".", "traceClient", ",", "http", ".", "MethodPost", ",", "fmt", ".", "Sprintf", "(", "\"", "\"", ",", "dd", ".", "DDHostname", ",", "dd", ".", "APIKey", ")", ",", "checks", ",", "\"", "\"", ",", "false", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", "}", ",", "dd", ".", "log", ")", "\n", "if", "err", "==", "nil", "{", "dd", ".", "log", ".", "WithField", "(", "\"", "\"", ",", "len", "(", "checks", ")", ")", ".", "Info", "(", "\"", "\"", ")", "\n", "}", "else", "{", "dd", ".", "log", ".", "WithFields", "(", "logrus", ".", "Fields", "{", "\"", "\"", ":", "len", "(", "checks", ")", ",", "logrus", ".", "ErrorKey", ":", "err", "}", ")", ".", "Warn", "(", "\"", "\"", ")", "\n", "}", "\n", "}", "\n\n", "// break the metrics into chunks of approximately equal size, such that", "// each chunk is less than the limit", "// we compute the chunks using rounding-up integer division", "workers", ":=", "(", "(", "len", "(", "ddmetrics", ")", "-", "1", ")", "/", "dd", ".", "flushMaxPerBody", ")", "+", "1", "\n", "chunkSize", ":=", "(", "(", "len", "(", "ddmetrics", ")", "-", "1", ")", "/", "workers", ")", "+", "1", "\n", "dd", ".", "log", ".", "WithField", "(", "\"", "\"", ",", "workers", ")", ".", "Debug", "(", "\"", "\"", ")", "\n", "dd", ".", "log", ".", "WithField", "(", "\"", "\"", ",", "chunkSize", ")", ".", "Debug", "(", "\"", "\"", ")", "\n", "var", "wg", "sync", ".", "WaitGroup", "\n", "flushStart", ":=", "time", ".", "Now", "(", ")", "\n", "for", "i", ":=", "0", ";", "i", "<", "workers", ";", "i", "++", "{", "chunk", ":=", "ddmetrics", "[", "i", "*", "chunkSize", ":", "]", "\n", "if", "i", "<", "workers", "-", "1", "{", "// trim to chunk size unless this is the last one", "chunk", "=", "chunk", "[", ":", "chunkSize", "]", "\n", "}", "\n", "wg", ".", "Add", "(", "1", ")", "\n", "go", "dd", ".", "flushPart", "(", "span", ".", "Attach", "(", "ctx", ")", ",", "chunk", ",", "&", "wg", ")", "\n", "}", "\n", "wg", ".", "Wait", "(", ")", "\n", "tags", ":=", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "dd", ".", "Name", "(", ")", "}", "\n", "span", ".", "Add", "(", "ssf", ".", "Timing", "(", "sinks", ".", "MetricKeyMetricFlushDuration", ",", "time", ".", "Since", "(", "flushStart", ")", ",", "time", ".", "Nanosecond", ",", "tags", ")", ",", "ssf", ".", "Count", "(", "sinks", ".", "MetricKeyTotalMetricsFlushed", ",", "float32", "(", "len", "(", "ddmetrics", ")", ")", ",", "tags", ")", ",", ")", "\n", "dd", ".", "log", ".", "WithField", "(", "\"", "\"", ",", "len", "(", "ddmetrics", ")", ")", ".", "Info", "(", "\"", "\"", ")", "\n", "return", "nil", "\n", "}" ]
// Flush sends metrics to Datadog
[ "Flush", "sends", "metrics", "to", "Datadog" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/datadog/datadog.go#L108-L154
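The two rounding-up integer divisions above merit a worked example. With 2500 finalized metrics and flushMaxPerBody = 1000: workers = ((2500-1)/1000)+1 = 3 and chunkSize = ((2500-1)/3)+1 = 834, so the goroutines receive 834, 834, and 832 metrics, each under the limit. A runnable check of the same slicing:

package main

import "fmt"

func main() {
    metrics := make([]int, 2500)
    flushMaxPerBody := 1000

    workers := ((len(metrics) - 1) / flushMaxPerBody) + 1
    chunkSize := ((len(metrics) - 1) / workers) + 1

    for i := 0; i < workers; i++ {
        chunk := metrics[i*chunkSize:]
        if i < workers-1 {
            // trim to chunk size unless this is the last one
            chunk = chunk[:chunkSize]
        }
        fmt.Println("worker", i, "gets", len(chunk)) // 834, 834, 832
    }
}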
13,462
stripe/veneur
sinks/datadog/datadog.go
NewDatadogSpanSink
func NewDatadogSpanSink(address string, bufferSize int, httpClient *http.Client, log *logrus.Logger) (*DatadogSpanSink, error) {
    if bufferSize == 0 {
        bufferSize = datadogSpanBufferSize
    }

    return &DatadogSpanSink{
        HTTPClient:   httpClient,
        bufferSize:   bufferSize,
        buffer:       ring.New(bufferSize),
        mutex:        &sync.Mutex{},
        traceAddress: address,
        log:          log,
    }, nil
}
go
func NewDatadogSpanSink(address string, bufferSize int, httpClient *http.Client, log *logrus.Logger) (*DatadogSpanSink, error) {
    if bufferSize == 0 {
        bufferSize = datadogSpanBufferSize
    }

    return &DatadogSpanSink{
        HTTPClient:   httpClient,
        bufferSize:   bufferSize,
        buffer:       ring.New(bufferSize),
        mutex:        &sync.Mutex{},
        traceAddress: address,
        log:          log,
    }, nil
}
[ "func", "NewDatadogSpanSink", "(", "address", "string", ",", "bufferSize", "int", ",", "httpClient", "*", "http", ".", "Client", ",", "log", "*", "logrus", ".", "Logger", ")", "(", "*", "DatadogSpanSink", ",", "error", ")", "{", "if", "bufferSize", "==", "0", "{", "bufferSize", "=", "datadogSpanBufferSize", "\n", "}", "\n\n", "return", "&", "DatadogSpanSink", "{", "HTTPClient", ":", "httpClient", ",", "bufferSize", ":", "bufferSize", ",", "buffer", ":", "ring", ".", "New", "(", "bufferSize", ")", ",", "mutex", ":", "&", "sync", ".", "Mutex", "{", "}", ",", "traceAddress", ":", "address", ",", "log", ":", "log", ",", "}", ",", "nil", "\n", "}" ]
// NewDatadogSpanSink creates a new Datadog sink for trace spans.
[ "NewDatadogSpanSink", "creates", "a", "new", "Datadog", "sink", "for", "trace", "spans", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/datadog/datadog.go#L390-L403
13,463
stripe/veneur
sinks/datadog/datadog.go
Ingest
func (dd *DatadogSpanSink) Ingest(span *ssf.SSFSpan) error {
    if err := protocol.ValidateTrace(span); err != nil {
        return err
    }
    dd.mutex.Lock()
    defer dd.mutex.Unlock()

    dd.buffer.Value = span
    dd.buffer = dd.buffer.Next()
    return nil
}
go
func (dd *DatadogSpanSink) Ingest(span *ssf.SSFSpan) error {
    if err := protocol.ValidateTrace(span); err != nil {
        return err
    }
    dd.mutex.Lock()
    defer dd.mutex.Unlock()

    dd.buffer.Value = span
    dd.buffer = dd.buffer.Next()
    return nil
}
[ "func", "(", "dd", "*", "DatadogSpanSink", ")", "Ingest", "(", "span", "*", "ssf", ".", "SSFSpan", ")", "error", "{", "if", "err", ":=", "protocol", ".", "ValidateTrace", "(", "span", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "dd", ".", "mutex", ".", "Lock", "(", ")", "\n", "defer", "dd", ".", "mutex", ".", "Unlock", "(", ")", "\n\n", "dd", ".", "buffer", ".", "Value", "=", "span", "\n", "dd", ".", "buffer", "=", "dd", ".", "buffer", ".", "Next", "(", ")", "\n", "return", "nil", "\n", "}" ]
// Ingest takes the span and adds it to the ringbuffer.
[ "Ingest", "takes", "the", "span", "and", "adds", "it", "to", "the", "ringbuffer", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/sinks/datadog/datadog.go#L417-L427
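Ingest leans on container/ring's cursor semantics: store at Value, then advance with Next, so once the ring is full the oldest span is overwritten. A standalone sketch of that behavior with plain ints:

package main

import (
    "container/ring"
    "fmt"
)

func main() {
    r := ring.New(3) // a 3-slot buffer, like buffer: ring.New(bufferSize)

    for i := 1; i <= 5; i++ {
        r.Value = i  // store at the cursor
        r = r.Next() // advance; after 3 writes we overwrite the oldest
    }

    // Walk the ring: only the 3 most recent values survive.
    r.Do(func(v interface{}) { fmt.Println(v) }) // 3, 4, 5
}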
13,464
stripe/veneur
trace/client.go
Capacity
func Capacity(n uint) ClientParam {
    return func(cl *Client) error {
        cl.cap = n
        return nil
    }
}
go
func Capacity(n uint) ClientParam {
    return func(cl *Client) error {
        cl.cap = n
        return nil
    }
}
[ "func", "Capacity", "(", "n", "uint", ")", "ClientParam", "{", "return", "func", "(", "cl", "*", "Client", ")", "error", "{", "cl", ".", "cap", "=", "n", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// Capacity indicates how many spans a client's channel should
// accommodate. This parameter can be used on both generic and
// networked backends.
[ "Capacity", "indicates", "how", "many", "spans", "a", "client", "s", "channel", "should", "accommodate", ".", "This", "parameter", "can", "be", "used", "on", "both", "generic", "and", "networked", "backends", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L137-L142
13,465
stripe/veneur
trace/client.go
BufferedSize
func BufferedSize(size uint) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams != nil {
            cl.backendParams.bufferSize = size
            return nil
        }
        return ErrClientNotNetworked
    }
}
go
func BufferedSize(size uint) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams != nil {
            cl.backendParams.bufferSize = size
            return nil
        }
        return ErrClientNotNetworked
    }
}
[ "func", "BufferedSize", "(", "size", "uint", ")", "ClientParam", "{", "return", "func", "(", "cl", "*", "Client", ")", "error", "{", "if", "cl", ".", "backendParams", "!=", "nil", "{", "cl", ".", "backendParams", ".", "bufferSize", "=", "size", "\n", "return", "nil", "\n", "}", "\n", "return", "ErrClientNotNetworked", "\n", "}", "\n", "}" ]
// BufferedSize indicates that a client should have a buffer size
// bytes large. See the note on the Buffered option about flushing the
// buffer.
[ "BufferedSize", "indicates", "that", "a", "client", "should", "have", "a", "buffer", "size", "bytes", "large", ".", "See", "the", "note", "on", "the", "Buffered", "option", "about", "flushing", "the", "buffer", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L163-L171
13,466
stripe/veneur
trace/client.go
FlushInterval
func FlushInterval(interval time.Duration) ClientParam {
    t := time.NewTicker(interval)
    return FlushChannel(t.C, t.Stop)
}
go
func FlushInterval(interval time.Duration) ClientParam {
    t := time.NewTicker(interval)
    return FlushChannel(t.C, t.Stop)
}
[ "func", "FlushInterval", "(", "interval", "time", ".", "Duration", ")", "ClientParam", "{", "t", ":=", "time", ".", "NewTicker", "(", "interval", ")", "\n", "return", "FlushChannel", "(", "t", ".", "C", ",", "t", ".", "Stop", ")", "\n", "}" ]
// FlushInterval sets up a buffered client to perform one synchronous
// flush per time interval in a new goroutine. The goroutine closes
// down when the Client's Close method is called.
//
// This uses a time.Ticker to trigger the flush, so will not trigger
// multiple times if flushing should be slower than the trigger
// interval.
[ "FlushInterval", "sets", "up", "a", "buffered", "client", "to", "perform", "one", "synchronous", "flush", "per", "time", "interval", "in", "a", "new", "goroutine", ".", "The", "goroutine", "closes", "down", "when", "the", "Client", "s", "Close", "method", "is", "called", ".", "This", "uses", "a", "time", ".", "Ticker", "to", "trigger", "the", "flush", "so", "will", "not", "trigger", "multiple", "times", "if", "flushing", "should", "be", "slower", "than", "the", "trigger", "interval", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L180-L183
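Usage note: FlushInterval (and the FlushChannel option it wraps, quoted below) only takes effect on networked clients; on anything else the option returns ErrClientNotNetworked. A minimal periodic-flush sketch, assuming a hypothetical newNetworkedClient helper in place of whatever constructor configures backendParams, and assuming the Buffered option referenced by the BufferedSize docstring above:

// Sketch only: newNetworkedClient is hypothetical, standing in for a
// constructor that sets up a buffered, networked backend.
cl, err := newNetworkedClient(
    trace.Buffered,                            // assumed option, per the BufferedSize note
    trace.FlushInterval(500*time.Millisecond), // flush twice a second until Close
)
if err != nil {
    log.Fatal(err)
}
defer cl.Close()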
13,467
stripe/veneur
trace/client.go
FlushChannel
func FlushChannel(ch <-chan time.Time, stop func()) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams == nil {
            return ErrClientNotNetworked
        }
        cl.flush = func(ctx context.Context) {
            defer stop()
            for {
                select {
                case <-ch:
                    _ = Flush(cl)
                case <-ctx.Done():
                    return
                }
            }
        }
        return nil
    }
}
go
func FlushChannel(ch <-chan time.Time, stop func()) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams == nil {
            return ErrClientNotNetworked
        }
        cl.flush = func(ctx context.Context) {
            defer stop()
            for {
                select {
                case <-ch:
                    _ = Flush(cl)
                case <-ctx.Done():
                    return
                }
            }
        }
        return nil
    }
}
[ "func", "FlushChannel", "(", "ch", "<-", "chan", "time", ".", "Time", ",", "stop", "func", "(", ")", ")", "ClientParam", "{", "return", "func", "(", "cl", "*", "Client", ")", "error", "{", "if", "cl", ".", "backendParams", "==", "nil", "{", "return", "ErrClientNotNetworked", "\n", "}", "\n", "cl", ".", "flush", "=", "func", "(", "ctx", "context", ".", "Context", ")", "{", "defer", "stop", "(", ")", "\n", "for", "{", "select", "{", "case", "<-", "ch", ":", "_", "=", "Flush", "(", "cl", ")", "\n", "case", "<-", "ctx", ".", "Done", "(", ")", ":", "return", "\n", "}", "\n", "}", "\n", "}", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// FlushChannel sets up a buffered client to perform one synchronous
// flush any time the given channel has a Time element ready. When the
// Client is closed, FlushWith invokes the passed stop function.
//
// This functional option is mostly useful for tests; code intended to
// be used in production should rely on FlushInterval instead, as
// time.Ticker is set up to deal with slow flushes.
[ "FlushChannel", "sets", "up", "a", "buffered", "client", "to", "perform", "one", "synchronous", "flush", "any", "time", "the", "given", "channel", "has", "a", "Time", "element", "ready", ".", "When", "the", "Client", "is", "closed", "FlushWith", "invokes", "the", "passed", "stop", "function", ".", "This", "functional", "option", "is", "mostly", "useful", "for", "tests", ";", "code", "intended", "to", "be", "used", "in", "production", "should", "rely", "on", "FlushInterval", "instead", "as", "time", ".", "Ticker", "is", "set", "up", "to", "deal", "with", "slow", "flushes", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L192-L210
13,468
stripe/veneur
trace/client.go
MaxBackoffTime
func MaxBackoffTime(t time.Duration) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams != nil {
            cl.backendParams.maxBackoff = t
            return nil
        }
        return ErrClientNotNetworked
    }
}
go
func MaxBackoffTime(t time.Duration) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams != nil {
            cl.backendParams.maxBackoff = t
            return nil
        }
        return ErrClientNotNetworked
    }
}
[ "func", "MaxBackoffTime", "(", "t", "time", ".", "Duration", ")", "ClientParam", "{", "return", "func", "(", "cl", "*", "Client", ")", "error", "{", "if", "cl", ".", "backendParams", "!=", "nil", "{", "cl", ".", "backendParams", ".", "maxBackoff", "=", "t", "\n", "return", "nil", "\n", "}", "\n", "return", "ErrClientNotNetworked", "\n", "}", "\n", "}" ]
// MaxBackoffTime sets the maximum time duration waited between
// reconnection attempts. If this option is not used, the backend uses
// DefaultMaxBackoff.
[ "MaxBackoffTime", "sets", "the", "maximum", "time", "duration", "waited", "between", "reconnection", "attempts", ".", "If", "this", "option", "is", "not", "used", "the", "backend", "uses", "DefaultMaxBackoff", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L228-L236
13,469
stripe/veneur
trace/client.go
ParallelBackends
func ParallelBackends(nBackends uint) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams == nil {
            return ErrClientNotNetworked
        }
        cl.nBackends = nBackends
        return nil
    }
}
go
func ParallelBackends(nBackends uint) ClientParam {
    return func(cl *Client) error {
        if cl.backendParams == nil {
            return ErrClientNotNetworked
        }
        cl.nBackends = nBackends
        return nil
    }
}
[ "func", "ParallelBackends", "(", "nBackends", "uint", ")", "ClientParam", "{", "return", "func", "(", "cl", "*", "Client", ")", "error", "{", "if", "cl", ".", "backendParams", "==", "nil", "{", "return", "ErrClientNotNetworked", "\n", "}", "\n", "cl", ".", "nBackends", "=", "nBackends", "\n", "return", "nil", "\n", "}", "\n", "}" ]
// ParallelBackends sets the number of parallel network backend
// connections to send spans with. Each backend holds a connection to
// an SSF receiver open.
[ "ParallelBackends", "sets", "the", "number", "of", "parallel", "network", "backend", "connections", "to", "send", "spans", "with", ".", "Each", "backend", "holds", "a", "connection", "to", "an", "SSF", "receiver", "open", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L278-L286
13,470
stripe/veneur
trace/client.go
NewChannelClient
func NewChannelClient(spanChan chan<- *ssf.SSFSpan, opts ...ClientParam) (*Client, error) {
    cl := &Client{}
    cl.flushBackends = []flushNotifier{}
    cl.spans = spanChan

    for _, opt := range opts {
        if err := opt(cl); err != nil {
            return nil, err
        }
    }

    ctx := context.Background()
    ctx, cl.cancel = context.WithCancel(ctx)

    cl.run(ctx)
    return cl, nil
}
go
func NewChannelClient(spanChan chan<- *ssf.SSFSpan, opts ...ClientParam) (*Client, error) {
    cl := &Client{}
    cl.flushBackends = []flushNotifier{}
    cl.spans = spanChan

    for _, opt := range opts {
        if err := opt(cl); err != nil {
            return nil, err
        }
    }

    ctx := context.Background()
    ctx, cl.cancel = context.WithCancel(ctx)

    cl.run(ctx)
    return cl, nil
}
[ "func", "NewChannelClient", "(", "spanChan", "chan", "<-", "*", "ssf", ".", "SSFSpan", ",", "opts", "...", "ClientParam", ")", "(", "*", "Client", ",", "error", ")", "{", "cl", ":=", "&", "Client", "{", "}", "\n", "cl", ".", "flushBackends", "=", "[", "]", "flushNotifier", "{", "}", "\n", "cl", ".", "spans", "=", "spanChan", "\n\n", "for", "_", ",", "opt", ":=", "range", "opts", "{", "if", "err", ":=", "opt", "(", "cl", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "}", "\n\n", "ctx", ":=", "context", ".", "Background", "(", ")", "\n", "ctx", ",", "cl", ".", "cancel", "=", "context", ".", "WithCancel", "(", "ctx", ")", "\n\n", "cl", ".", "run", "(", "ctx", ")", "\n", "return", "cl", ",", "nil", "\n", "}" ]
// NewChannelClient constructs and returns a Client that can send
// directly into a span receiver channel. It provides an alternative
// interface to NewBackendClient for constructing internal and
// test-only clients.
[ "NewChannelClient", "constructs", "and", "returns", "a", "Client", "that", "can", "send", "directly", "into", "a", "span", "receiver", "channel", ".", "It", "provides", "an", "alternative", "interface", "to", "NewBackendClient", "for", "constructing", "internal", "and", "test", "-", "only", "clients", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L372-L388
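The constructors and options quoted above compose directly; a minimal sketch of a test-style channel client (ssf is github.com/stripe/veneur/ssf):

spanCh := make(chan *ssf.SSFSpan, 64)
cl, err := trace.NewChannelClient(spanCh, trace.Capacity(64))
if err != nil {
    log.Fatal(err)
}
defer cl.Close()

// Channel clients have no backendParams, so networked-only options such as
// BufferedSize fail at construction time:
if _, err := trace.NewChannelClient(spanCh, trace.BufferedSize(1<<16)); err == trace.ErrClientNotNetworked {
    // expected for non-networked clients
}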
13,471
stripe/veneur
trace/client.go
SetDefaultClient
func SetDefaultClient(client *Client) {
    oldClient := DefaultClient
    DefaultClient = client

    // Ensure the old client is closed so it does not leak connections
    if oldClient != nil {
        oldClient.Close()
    }
}
go
func SetDefaultClient(client *Client) {
    oldClient := DefaultClient
    DefaultClient = client

    // Ensure the old client is closed so it does not leak connections
    if oldClient != nil {
        oldClient.Close()
    }
}
[ "func", "SetDefaultClient", "(", "client", "*", "Client", ")", "{", "oldClient", ":=", "DefaultClient", "\n", "DefaultClient", "=", "client", "\n\n", "// Ensure the old client is closed so it does not leak connections", "if", "oldClient", "!=", "nil", "{", "oldClient", ".", "Close", "(", ")", "\n", "}", "\n", "}" ]
// SetDefaultClient overrides the default client used for recording
// traces, and gracefully closes the existing one.
// This is not safe to run concurrently with other goroutines.
[ "SetDefaultClient", "overrides", "the", "default", "client", "used", "for", "recording", "traces", "and", "gracefully", "closes", "the", "existing", "one", ".", "This", "is", "not", "safe", "to", "run", "concurrently", "with", "other", "goroutines", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L393-L401
13,472
stripe/veneur
trace/client.go
NeutralizeClient
func NeutralizeClient(client *Client) {
    client.Close()
    client.records = nil
    client.spans = nil
    client.flushBackends = []flushNotifier{}
}
go
func NeutralizeClient(client *Client) {
    client.Close()
    client.records = nil
    client.spans = nil
    client.flushBackends = []flushNotifier{}
}
[ "func", "NeutralizeClient", "(", "client", "*", "Client", ")", "{", "client", ".", "Close", "(", ")", "\n", "client", ".", "records", "=", "nil", "\n", "client", ".", "spans", "=", "nil", "\n", "client", ".", "flushBackends", "=", "[", "]", "flushNotifier", "{", "}", "\n", "}" ]
// NeutralizeClient sets up a client such that all Record or Flush
// operations result in ErrWouldBlock. It dashes all hope of a Client
// ever successfully recording or flushing spans, and is mostly useful
// in tests.
[ "NeutralizeClient", "sets", "up", "a", "client", "such", "that", "all", "Record", "or", "Flush", "operations", "result", "in", "ErrWouldBlock", ".", "It", "dashes", "all", "hope", "of", "a", "Client", "ever", "successfully", "recording", "or", "flushing", "spans", "and", "is", "mostly", "useful", "in", "tests", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L407-L412
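SetDefaultClient and NeutralizeClient combine into a common test-setup idiom; a short sketch using the channel constructor quoted above:

spanCh := make(chan *ssf.SSFSpan)
cl, _ := trace.NewChannelClient(spanCh)
trace.NeutralizeClient(cl) // every Record or Flush on cl now yields ErrWouldBlock
trace.SetDefaultClient(cl) // gracefully closes the previous default client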
13,473
stripe/veneur
trace/client.go
Record
func Record(cl *Client, span *ssf.SSFSpan, done chan<- error) error {
    if cl == nil {
        return ErrNoClient
    }

    op := &recordOp{span: span, result: done}
    select {
    case cl.spans <- span:
        atomic.AddInt64(&cl.successfulRecords, 1)
        if done != nil {
            go func() { done <- nil }()
        }
        return nil
    case cl.records <- op:
        atomic.AddInt64(&cl.successfulRecords, 1)
        return nil
    default:
    }
    atomic.AddInt64(&cl.failedRecords, 1)
    return ErrWouldBlock
}
go
func Record(cl *Client, span *ssf.SSFSpan, done chan<- error) error {
    if cl == nil {
        return ErrNoClient
    }

    op := &recordOp{span: span, result: done}
    select {
    case cl.spans <- span:
        atomic.AddInt64(&cl.successfulRecords, 1)
        if done != nil {
            go func() { done <- nil }()
        }
        return nil
    case cl.records <- op:
        atomic.AddInt64(&cl.successfulRecords, 1)
        return nil
    default:
    }
    atomic.AddInt64(&cl.failedRecords, 1)
    return ErrWouldBlock
}
[ "func", "Record", "(", "cl", "*", "Client", ",", "span", "*", "ssf", ".", "SSFSpan", ",", "done", "chan", "<-", "error", ")", "error", "{", "if", "cl", "==", "nil", "{", "return", "ErrNoClient", "\n", "}", "\n\n", "op", ":=", "&", "recordOp", "{", "span", ":", "span", ",", "result", ":", "done", "}", "\n", "select", "{", "case", "cl", ".", "spans", "<-", "span", ":", "atomic", ".", "AddInt64", "(", "&", "cl", ".", "successfulRecords", ",", "1", ")", "\n", "if", "done", "!=", "nil", "{", "go", "func", "(", ")", "{", "done", "<-", "nil", "}", "(", ")", "\n", "}", "\n", "return", "nil", "\n", "case", "cl", ".", "records", "<-", "op", ":", "atomic", ".", "AddInt64", "(", "&", "cl", ".", "successfulRecords", ",", "1", ")", "\n", "return", "nil", "\n", "default", ":", "}", "\n", "atomic", ".", "AddInt64", "(", "&", "cl", ".", "failedRecords", ",", "1", ")", "\n", "return", "ErrWouldBlock", "\n", "}" ]
// Record instructs the client to serialize and send a span. It does
// not wait for a delivery attempt, instead the Client will send the
// result from serializing and submitting the span to the channel
// done, if it is non-nil.
//
// Record returns ErrNoClient if client is nil and ErrWouldBlock if
// the client is not able to accomodate another span.
[ "Record", "instructs", "the", "client", "to", "serialize", "and", "send", "a", "span", ".", "It", "does", "not", "wait", "for", "a", "delivery", "attempt", "instead", "the", "Client", "will", "send", "the", "result", "from", "serializing", "and", "submitting", "the", "span", "to", "the", "channel", "done", "if", "it", "is", "non", "-", "nil", ".", "Record", "returns", "ErrNoClient", "if", "client", "is", "nil", "and", "ErrWouldBlock", "if", "the", "client", "is", "not", "able", "to", "accomodate", "another", "span", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/trace/client.go#L469-L489
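A sketch of Record's non-blocking contract, assuming cl is any client constructed as above; the SSFSpan literal is left empty for brevity:

done := make(chan error, 1)
if err := trace.Record(cl, &ssf.SSFSpan{}, done); err != nil {
    // ErrNoClient if cl was nil, ErrWouldBlock if the client is at capacity
    log.Println("span dropped:", err)
} else {
    // the serialization/submission result arrives asynchronously on done
    log.Println("record result:", <-done)
}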
13,474
stripe/veneur
tdigest/merging_digest.go
NewMerging
func NewMerging(compression float64, debug bool) *MergingDigest {
    // this is a provable upper bound on the size of the centroid list
    // TODO: derive it myself
    sizeBound := int((math.Pi * compression / 2) + 0.5)

    return &MergingDigest{
        compression:   compression,
        mainCentroids: make([]Centroid, 0, sizeBound),
        tempCentroids: make([]Centroid, 0, estimateTempBuffer(compression)),
        min:           math.Inf(+1),
        max:           math.Inf(-1),
        debug:         debug,
    }
}
go
func NewMerging(compression float64, debug bool) *MergingDigest {
    // this is a provable upper bound on the size of the centroid list
    // TODO: derive it myself
    sizeBound := int((math.Pi * compression / 2) + 0.5)

    return &MergingDigest{
        compression:   compression,
        mainCentroids: make([]Centroid, 0, sizeBound),
        tempCentroids: make([]Centroid, 0, estimateTempBuffer(compression)),
        min:           math.Inf(+1),
        max:           math.Inf(-1),
        debug:         debug,
    }
}
[ "func", "NewMerging", "(", "compression", "float64", ",", "debug", "bool", ")", "*", "MergingDigest", "{", "// this is a provable upper bound on the size of the centroid list", "// TODO: derive it myself", "sizeBound", ":=", "int", "(", "(", "math", ".", "Pi", "*", "compression", "/", "2", ")", "+", "0.5", ")", "\n\n", "return", "&", "MergingDigest", "{", "compression", ":", "compression", ",", "mainCentroids", ":", "make", "(", "[", "]", "Centroid", ",", "0", ",", "sizeBound", ")", ",", "tempCentroids", ":", "make", "(", "[", "]", "Centroid", ",", "0", ",", "estimateTempBuffer", "(", "compression", ")", ")", ",", "min", ":", "math", ".", "Inf", "(", "+", "1", ")", ",", "max", ":", "math", ".", "Inf", "(", "-", "1", ")", ",", "debug", ":", "debug", ",", "}", "\n", "}" ]
// Initializes a new merging t-digest using the given compression parameter.
// Lower compression values result in reduced memory consumption and less
// precision, especially at the median. Values from 20 to 1000 are recommended
// in Dunning's paper.
//
// The debug flag adds a list to each centroid, which stores all the samples
// that have gone into that centroid. While this is useful for statistical
// analysis, it makes the t-digest significantly slower and requires it to
// store every sample. This defeats the purpose of using an approximating
// histogram at all, so this feature should only be used in tests.
[ "Initializes", "a", "new", "merging", "t", "-", "digest", "using", "the", "given", "compression", "parameter", ".", "Lower", "compression", "values", "result", "in", "reduced", "memory", "consumption", "and", "less", "precision", "especially", "at", "the", "median", ".", "Values", "from", "20", "to", "1000", "are", "recommended", "in", "Dunning", "s", "paper", ".", "The", "debug", "flag", "adds", "a", "list", "to", "each", "centroid", "which", "stores", "all", "the", "samples", "that", "have", "gone", "into", "that", "centroid", ".", "While", "this", "is", "useful", "for", "statistical", "analysis", "it", "makes", "the", "t", "-", "digest", "significantly", "slower", "and", "requires", "it", "to", "store", "every", "sample", ".", "This", "defeats", "the", "purpose", "of", "using", "an", "approximating", "histogram", "at", "all", "so", "this", "feature", "should", "only", "be", "used", "in", "tests", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L68-L81
13,475
stripe/veneur
tdigest/merging_digest.go
NewMergingFromData
func NewMergingFromData(d *MergingDigestData) *MergingDigest {
    td := &MergingDigest{
        compression:   d.Compression,
        mainCentroids: d.MainCentroids,
        tempCentroids: make([]Centroid, 0, estimateTempBuffer(d.Compression)),
        min:           d.Min,
        max:           d.Max,
        reciprocalSum: d.ReciprocalSum,
    }

    // Initialize the weight to the sum of the weights of the centroids
    td.mainWeight = 0
    for _, c := range td.mainCentroids {
        td.mainWeight += c.Weight
    }

    return td
}
go
func NewMergingFromData(d *MergingDigestData) *MergingDigest {
    td := &MergingDigest{
        compression:   d.Compression,
        mainCentroids: d.MainCentroids,
        tempCentroids: make([]Centroid, 0, estimateTempBuffer(d.Compression)),
        min:           d.Min,
        max:           d.Max,
        reciprocalSum: d.ReciprocalSum,
    }

    // Initialize the weight to the sum of the weights of the centroids
    td.mainWeight = 0
    for _, c := range td.mainCentroids {
        td.mainWeight += c.Weight
    }

    return td
}
[ "func", "NewMergingFromData", "(", "d", "*", "MergingDigestData", ")", "*", "MergingDigest", "{", "td", ":=", "&", "MergingDigest", "{", "compression", ":", "d", ".", "Compression", ",", "mainCentroids", ":", "d", ".", "MainCentroids", ",", "tempCentroids", ":", "make", "(", "[", "]", "Centroid", ",", "0", ",", "estimateTempBuffer", "(", "d", ".", "Compression", ")", ")", ",", "min", ":", "d", ".", "Min", ",", "max", ":", "d", ".", "Max", ",", "reciprocalSum", ":", "d", ".", "ReciprocalSum", ",", "}", "\n\n", "// Initialize the weight to the sum of the weights of the centroids", "td", ".", "mainWeight", "=", "0", "\n", "for", "_", ",", "c", ":=", "range", "td", ".", "mainCentroids", "{", "td", ".", "mainWeight", "+=", "c", ".", "Weight", "\n", "}", "\n\n", "return", "td", "\n", "}" ]
// NewMergingFromData returns a MergingDigest with values initialized from
// MergingDigestData. This should be the way to generate a MergingDigest
// from a serialized protobuf.
[ "NewMergingFromData", "returns", "a", "MergingDigest", "with", "values", "initialized", "from", "MergingDigestData", ".", "This", "should", "be", "the", "way", "to", "generate", "a", "MergingDigest", "from", "a", "serialized", "protobuf", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L86-L103
13,476
stripe/veneur
tdigest/merging_digest.go
Add
func (td *MergingDigest) Add(value float64, weight float64) {
    if math.IsNaN(value) || math.IsInf(value, 0) || weight <= 0 {
        panic("invalid value added")
    }

    if len(td.tempCentroids) == cap(td.tempCentroids) {
        td.mergeAllTemps()
    }

    td.min = math.Min(td.min, value)
    td.max = math.Max(td.max, value)
    td.reciprocalSum += (1 / value) * weight

    next := Centroid{
        Mean:   value,
        Weight: weight,
    }
    if td.debug {
        next.Samples = []float64{value}
    }
    td.tempCentroids = append(td.tempCentroids, next)
    td.tempWeight += weight
}
go
func (td *MergingDigest) Add(value float64, weight float64) {
    if math.IsNaN(value) || math.IsInf(value, 0) || weight <= 0 {
        panic("invalid value added")
    }

    if len(td.tempCentroids) == cap(td.tempCentroids) {
        td.mergeAllTemps()
    }

    td.min = math.Min(td.min, value)
    td.max = math.Max(td.max, value)
    td.reciprocalSum += (1 / value) * weight

    next := Centroid{
        Mean:   value,
        Weight: weight,
    }
    if td.debug {
        next.Samples = []float64{value}
    }
    td.tempCentroids = append(td.tempCentroids, next)
    td.tempWeight += weight
}
[ "func", "(", "td", "*", "MergingDigest", ")", "Add", "(", "value", "float64", ",", "weight", "float64", ")", "{", "if", "math", ".", "IsNaN", "(", "value", ")", "||", "math", ".", "IsInf", "(", "value", ",", "0", ")", "||", "weight", "<=", "0", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n\n", "if", "len", "(", "td", ".", "tempCentroids", ")", "==", "cap", "(", "td", ".", "tempCentroids", ")", "{", "td", ".", "mergeAllTemps", "(", ")", "\n", "}", "\n\n", "td", ".", "min", "=", "math", ".", "Min", "(", "td", ".", "min", ",", "value", ")", "\n", "td", ".", "max", "=", "math", ".", "Max", "(", "td", ".", "max", ",", "value", ")", "\n", "td", ".", "reciprocalSum", "+=", "(", "1", "/", "value", ")", "*", "weight", "\n\n", "next", ":=", "Centroid", "{", "Mean", ":", "value", ",", "Weight", ":", "weight", ",", "}", "\n", "if", "td", ".", "debug", "{", "next", ".", "Samples", "=", "[", "]", "float64", "{", "value", "}", "\n", "}", "\n", "td", ".", "tempCentroids", "=", "append", "(", "td", ".", "tempCentroids", ",", "next", ")", "\n", "td", ".", "tempWeight", "+=", "weight", "\n", "}" ]
// Adds a new value to the t-digest, with a given weight that must be positive.
// Infinities and NaN cannot be added.
[ "Adds", "a", "new", "value", "to", "the", "t", "-", "digest", "with", "a", "given", "weight", "that", "must", "be", "positive", ".", "Infinities", "and", "NaN", "cannot", "be", "added", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L115-L137
13,477
stripe/veneur
tdigest/merging_digest.go
mergeAllTemps
func (td *MergingDigest) mergeAllTemps() {
    // this optimization is really important! if you remove it, the main list
    // will get merged into itself every time this is called
    if len(td.tempCentroids) == 0 {
        return
    }

    // we iterate over both centroid lists from least to greatest mean, so first
    // we have to sort this one
    sort.Sort(centroidList(td.tempCentroids))
    tempIndex := 0

    // total weight that the final t-digest will have, after everything is merged
    totalWeight := td.mainWeight + td.tempWeight
    // how much weight has been merged so far
    mergedWeight := 0.0
    // the index of the last quantile to be merged into the previous centroid
    // this value gets updated each time we split a new centroid out instead of
    // merging into the current one
    lastMergedIndex := 0.0
    // since we will be merging in-place into td.mainCentroids, we need to keep
    // track of the indices of the remaining elements
    actualMainCentroids := td.mainCentroids
    td.mainCentroids = td.mainCentroids[:0]
    // to facilitate the in-place merge, we will need a place to store the main
    // centroids that would be overwritten - we will use space from the start
    // of tempCentroids for this
    swappedCentroids := td.tempCentroids[:0]

    for len(actualMainCentroids)+len(swappedCentroids) != 0 || tempIndex < len(td.tempCentroids) {
        nextTemp := Centroid{
            Mean:   math.Inf(+1),
            Weight: 0,
        }
        if tempIndex < len(td.tempCentroids) {
            nextTemp = td.tempCentroids[tempIndex]
        }

        nextMain := Centroid{
            Mean:   math.Inf(+1),
            Weight: 0,
        }
        if len(swappedCentroids) != 0 {
            nextMain = swappedCentroids[0]
        } else if len(actualMainCentroids) != 0 {
            nextMain = actualMainCentroids[0]
        }

        if nextMain.Mean < nextTemp.Mean {
            if len(actualMainCentroids) != 0 {
                if len(swappedCentroids) != 0 {
                    // if this came from swap, before merging, we have to save
                    // the next main centroid at the end
                    // this copy is probably the most expensive part of the
                    // in-place merge, compared to merging into a separate buffer
                    copy(swappedCentroids, swappedCentroids[1:])
                    swappedCentroids[len(swappedCentroids)-1] = actualMainCentroids[0]
                }
                actualMainCentroids = actualMainCentroids[1:]
            } else {
                // the real main has been completely exhausted, so we're just
                // cleaning out swapped mains now
                swappedCentroids = swappedCentroids[1:]
            }

            lastMergedIndex = td.mergeOne(mergedWeight, totalWeight, lastMergedIndex, nextMain)
            mergedWeight += nextMain.Weight
        } else {
            // before merging, we have to save the next main centroid somewhere
            // else, so that we don't overwrite it
            if len(actualMainCentroids) != 0 {
                swappedCentroids = append(swappedCentroids, actualMainCentroids[0])
                actualMainCentroids = actualMainCentroids[1:]
            }
            tempIndex++

            lastMergedIndex = td.mergeOne(mergedWeight, totalWeight, lastMergedIndex, nextTemp)
            mergedWeight += nextTemp.Weight
        }
    }

    td.tempCentroids = td.tempCentroids[:0]
    td.tempWeight = 0
    td.mainWeight = totalWeight
}
go
func (td *MergingDigest) mergeAllTemps() {
    // this optimization is really important! if you remove it, the main list
    // will get merged into itself every time this is called
    if len(td.tempCentroids) == 0 {
        return
    }

    // we iterate over both centroid lists from least to greatest mean, so first
    // we have to sort this one
    sort.Sort(centroidList(td.tempCentroids))
    tempIndex := 0

    // total weight that the final t-digest will have, after everything is merged
    totalWeight := td.mainWeight + td.tempWeight
    // how much weight has been merged so far
    mergedWeight := 0.0
    // the index of the last quantile to be merged into the previous centroid
    // this value gets updated each time we split a new centroid out instead of
    // merging into the current one
    lastMergedIndex := 0.0
    // since we will be merging in-place into td.mainCentroids, we need to keep
    // track of the indices of the remaining elements
    actualMainCentroids := td.mainCentroids
    td.mainCentroids = td.mainCentroids[:0]
    // to facilitate the in-place merge, we will need a place to store the main
    // centroids that would be overwritten - we will use space from the start
    // of tempCentroids for this
    swappedCentroids := td.tempCentroids[:0]

    for len(actualMainCentroids)+len(swappedCentroids) != 0 || tempIndex < len(td.tempCentroids) {
        nextTemp := Centroid{
            Mean:   math.Inf(+1),
            Weight: 0,
        }
        if tempIndex < len(td.tempCentroids) {
            nextTemp = td.tempCentroids[tempIndex]
        }

        nextMain := Centroid{
            Mean:   math.Inf(+1),
            Weight: 0,
        }
        if len(swappedCentroids) != 0 {
            nextMain = swappedCentroids[0]
        } else if len(actualMainCentroids) != 0 {
            nextMain = actualMainCentroids[0]
        }

        if nextMain.Mean < nextTemp.Mean {
            if len(actualMainCentroids) != 0 {
                if len(swappedCentroids) != 0 {
                    // if this came from swap, before merging, we have to save
                    // the next main centroid at the end
                    // this copy is probably the most expensive part of the
                    // in-place merge, compared to merging into a separate buffer
                    copy(swappedCentroids, swappedCentroids[1:])
                    swappedCentroids[len(swappedCentroids)-1] = actualMainCentroids[0]
                }
                actualMainCentroids = actualMainCentroids[1:]
            } else {
                // the real main has been completely exhausted, so we're just
                // cleaning out swapped mains now
                swappedCentroids = swappedCentroids[1:]
            }

            lastMergedIndex = td.mergeOne(mergedWeight, totalWeight, lastMergedIndex, nextMain)
            mergedWeight += nextMain.Weight
        } else {
            // before merging, we have to save the next main centroid somewhere
            // else, so that we don't overwrite it
            if len(actualMainCentroids) != 0 {
                swappedCentroids = append(swappedCentroids, actualMainCentroids[0])
                actualMainCentroids = actualMainCentroids[1:]
            }
            tempIndex++

            lastMergedIndex = td.mergeOne(mergedWeight, totalWeight, lastMergedIndex, nextTemp)
            mergedWeight += nextTemp.Weight
        }
    }

    td.tempCentroids = td.tempCentroids[:0]
    td.tempWeight = 0
    td.mainWeight = totalWeight
}
[ "func", "(", "td", "*", "MergingDigest", ")", "mergeAllTemps", "(", ")", "{", "// this optimization is really important! if you remove it, the main list", "// will get merged into itself every time this is called", "if", "len", "(", "td", ".", "tempCentroids", ")", "==", "0", "{", "return", "\n", "}", "\n\n", "// we iterate over both centroid lists from least to greatest mean, so first", "// we have to sort this one", "sort", ".", "Sort", "(", "centroidList", "(", "td", ".", "tempCentroids", ")", ")", "\n", "tempIndex", ":=", "0", "\n\n", "// total weight that the final t-digest will have, after everything is merged", "totalWeight", ":=", "td", ".", "mainWeight", "+", "td", ".", "tempWeight", "\n", "// how much weight has been merged so far", "mergedWeight", ":=", "0.0", "\n", "// the index of the last quantile to be merged into the previous centroid", "// this value gets updated each time we split a new centroid out instead of", "// merging into the current one", "lastMergedIndex", ":=", "0.0", "\n", "// since we will be merging in-place into td.mainCentroids, we need to keep", "// track of the indices of the remaining elements", "actualMainCentroids", ":=", "td", ".", "mainCentroids", "\n", "td", ".", "mainCentroids", "=", "td", ".", "mainCentroids", "[", ":", "0", "]", "\n", "// to facilitate the in-place merge, we will need a place to store the main", "// centroids that would be overwritten - we will use space from the start", "// of tempCentroids for this", "swappedCentroids", ":=", "td", ".", "tempCentroids", "[", ":", "0", "]", "\n\n", "for", "len", "(", "actualMainCentroids", ")", "+", "len", "(", "swappedCentroids", ")", "!=", "0", "||", "tempIndex", "<", "len", "(", "td", ".", "tempCentroids", ")", "{", "nextTemp", ":=", "Centroid", "{", "Mean", ":", "math", ".", "Inf", "(", "+", "1", ")", ",", "Weight", ":", "0", ",", "}", "\n", "if", "tempIndex", "<", "len", "(", "td", ".", "tempCentroids", ")", "{", "nextTemp", "=", "td", ".", "tempCentroids", "[", "tempIndex", "]", "\n", "}", "\n\n", "nextMain", ":=", "Centroid", "{", "Mean", ":", "math", ".", "Inf", "(", "+", "1", ")", ",", "Weight", ":", "0", ",", "}", "\n", "if", "len", "(", "swappedCentroids", ")", "!=", "0", "{", "nextMain", "=", "swappedCentroids", "[", "0", "]", "\n", "}", "else", "if", "len", "(", "actualMainCentroids", ")", "!=", "0", "{", "nextMain", "=", "actualMainCentroids", "[", "0", "]", "\n", "}", "\n\n", "if", "nextMain", ".", "Mean", "<", "nextTemp", ".", "Mean", "{", "if", "len", "(", "actualMainCentroids", ")", "!=", "0", "{", "if", "len", "(", "swappedCentroids", ")", "!=", "0", "{", "// if this came from swap, before merging, we have to save", "// the next main centroid at the end", "// this copy is probably the most expensive part of the", "// in-place merge, compared to merging into a separate buffer", "copy", "(", "swappedCentroids", ",", "swappedCentroids", "[", "1", ":", "]", ")", "\n", "swappedCentroids", "[", "len", "(", "swappedCentroids", ")", "-", "1", "]", "=", "actualMainCentroids", "[", "0", "]", "\n", "}", "\n", "actualMainCentroids", "=", "actualMainCentroids", "[", "1", ":", "]", "\n", "}", "else", "{", "// the real main has been completely exhausted, so we're just", "// cleaning out swapped mains now", "swappedCentroids", "=", "swappedCentroids", "[", "1", ":", "]", "\n", "}", "\n\n", "lastMergedIndex", "=", "td", ".", "mergeOne", "(", "mergedWeight", ",", "totalWeight", ",", "lastMergedIndex", ",", "nextMain", ")", "\n", "mergedWeight", "+=", "nextMain", ".", "Weight", "\n", "}", 
"else", "{", "// before merging, we have to save the next main centroid somewhere", "// else, so that we don't overwrite it", "if", "len", "(", "actualMainCentroids", ")", "!=", "0", "{", "swappedCentroids", "=", "append", "(", "swappedCentroids", ",", "actualMainCentroids", "[", "0", "]", ")", "\n", "actualMainCentroids", "=", "actualMainCentroids", "[", "1", ":", "]", "\n", "}", "\n", "tempIndex", "++", "\n\n", "lastMergedIndex", "=", "td", ".", "mergeOne", "(", "mergedWeight", ",", "totalWeight", ",", "lastMergedIndex", ",", "nextTemp", ")", "\n", "mergedWeight", "+=", "nextTemp", ".", "Weight", "\n", "}", "\n", "}", "\n\n", "td", ".", "tempCentroids", "=", "td", ".", "tempCentroids", "[", ":", "0", "]", "\n", "td", ".", "tempWeight", "=", "0", "\n", "td", ".", "mainWeight", "=", "totalWeight", "\n", "}" ]
// combine the mainCentroids and tempCentroids in-place into mainCentroids
[ "combine", "the", "mainCentroids", "and", "tempCentroids", "in", "-", "place", "into", "mainCentroids" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L140-L224
13,478
stripe/veneur
tdigest/merging_digest.go
indexEstimate
func (td *MergingDigest) indexEstimate(quantile float64) float64 {
    // TODO: a polynomial approximation of arcsine should be a lot faster
    return td.compression * ((math.Asin(2*quantile-1) / math.Pi) + 0.5)
}
go
func (td *MergingDigest) indexEstimate(quantile float64) float64 {
    // TODO: a polynomial approximation of arcsine should be a lot faster
    return td.compression * ((math.Asin(2*quantile-1) / math.Pi) + 0.5)
}
[ "func", "(", "td", "*", "MergingDigest", ")", "indexEstimate", "(", "quantile", "float64", ")", "float64", "{", "// TODO: a polynomial approximation of arcsine should be a lot faster", "return", "td", ".", "compression", "*", "(", "(", "math", ".", "Asin", "(", "2", "*", "quantile", "-", "1", ")", "/", "math", ".", "Pi", ")", "+", "0.5", ")", "\n", "}" ]
// given a quantile, estimate the index of the centroid that contains it using
// the given compression
[ "given", "a", "quantile", "estimate", "the", "index", "of", "the", "centroid", "that", "contains", "it", "using", "the", "given", "compression" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L259-L262
13,479
stripe/veneur
tdigest/merging_digest.go
Quantile
func (td *MergingDigest) Quantile(quantile float64) float64 {
    if quantile < 0 || quantile > 1 {
        panic("quantile out of bounds")
    }
    td.mergeAllTemps()

    // add up the weights of centroids in ascending order until we reach a
    // centroid that pushes us over the quantile
    q := quantile * td.mainWeight
    weightSoFar := 0.0
    lowerBound := td.min
    for i, c := range td.mainCentroids {
        upperBound := td.centroidUpperBound(i)
        if q <= weightSoFar+c.Weight {
            // the target quantile is somewhere inside this centroid
            // we compute how much of this centroid's weight falls into the quantile
            proportion := (q - weightSoFar) / c.Weight
            // and interpolate what value that corresponds to inside a uniform
            // distribution
            return lowerBound + (proportion * (upperBound - lowerBound))
        }

        // the quantile is above this centroid, so sum the weight and carry on
        weightSoFar += c.Weight
        lowerBound = upperBound
    }

    // should never be reached unless empty, since the final comparison is
    // q <= td.mainWeight
    return math.NaN()
}
go
func (td *MergingDigest) Quantile(quantile float64) float64 {
    if quantile < 0 || quantile > 1 {
        panic("quantile out of bounds")
    }
    td.mergeAllTemps()

    // add up the weights of centroids in ascending order until we reach a
    // centroid that pushes us over the quantile
    q := quantile * td.mainWeight
    weightSoFar := 0.0
    lowerBound := td.min
    for i, c := range td.mainCentroids {
        upperBound := td.centroidUpperBound(i)
        if q <= weightSoFar+c.Weight {
            // the target quantile is somewhere inside this centroid
            // we compute how much of this centroid's weight falls into the quantile
            proportion := (q - weightSoFar) / c.Weight
            // and interpolate what value that corresponds to inside a uniform
            // distribution
            return lowerBound + (proportion * (upperBound - lowerBound))
        }

        // the quantile is above this centroid, so sum the weight and carry on
        weightSoFar += c.Weight
        lowerBound = upperBound
    }

    // should never be reached unless empty, since the final comparison is
    // q <= td.mainWeight
    return math.NaN()
}
[ "func", "(", "td", "*", "MergingDigest", ")", "Quantile", "(", "quantile", "float64", ")", "float64", "{", "if", "quantile", "<", "0", "||", "quantile", ">", "1", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "td", ".", "mergeAllTemps", "(", ")", "\n\n", "// add up the weights of centroids in ascending order until we reach a", "// centroid that pushes us over the quantile", "q", ":=", "quantile", "*", "td", ".", "mainWeight", "\n", "weightSoFar", ":=", "0.0", "\n", "lowerBound", ":=", "td", ".", "min", "\n", "for", "i", ",", "c", ":=", "range", "td", ".", "mainCentroids", "{", "upperBound", ":=", "td", ".", "centroidUpperBound", "(", "i", ")", "\n", "if", "q", "<=", "weightSoFar", "+", "c", ".", "Weight", "{", "// the target quantile is somewhere inside this centroid", "// we compute how much of this centroid's weight falls into the quantile", "proportion", ":=", "(", "q", "-", "weightSoFar", ")", "/", "c", ".", "Weight", "\n", "// and interpolate what value that corresponds to inside a uniform", "// distribution", "return", "lowerBound", "+", "(", "proportion", "*", "(", "upperBound", "-", "lowerBound", ")", ")", "\n", "}", "\n\n", "// the quantile is above this centroid, so sum the weight and carry on", "weightSoFar", "+=", "c", ".", "Weight", "\n", "lowerBound", "=", "upperBound", "\n", "}", "\n\n", "// should never be reached unless empty, since the final comparison is", "// q <= td.mainWeight", "return", "math", ".", "NaN", "(", ")", "\n", "}" ]
// Returns a value such that the fraction of values in td below that value is
// approximately equal to quantile. Returns NaN if the digest is empty.
[ "Returns", "a", "value", "such", "that", "the", "fraction", "of", "values", "in", "td", "below", "that", "value", "is", "approximately", "equal", "to", "quantile", ".", "Returns", "NaN", "if", "the", "digest", "is", "empty", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L302-L332
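NewMerging, Add, and Quantile compose as follows; a self-contained sketch, assuming the import path github.com/stripe/veneur/tdigest:

package main

import (
    "fmt"

    "github.com/stripe/veneur/tdigest"
)

func main() {
    td := tdigest.NewMerging(100, false) // compression 100, debug sampling off
    for i := 1; i <= 1000; i++ {
        td.Add(float64(i), 1) // unit-weight samples 1..1000
    }
    // Answers are approximate: the true median is 500.5, the true p99 is ~990.
    fmt.Println(td.Quantile(0.5), td.Quantile(0.99))
}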
13,480
stripe/veneur
tdigest/merging_digest.go
Merge
func (td *MergingDigest) Merge(other *MergingDigest) {
    oldReciprocalSum := td.reciprocalSum
    shuffledIndices := rand.Perm(len(other.mainCentroids))

    for _, i := range shuffledIndices {
        td.Add(other.mainCentroids[i].Mean, other.mainCentroids[i].Weight)
    }

    // we did not merge other's temps, so we need to add those too
    // they're unsorted so there's no need to shuffle them
    for i := range other.tempCentroids {
        td.Add(other.tempCentroids[i].Mean, other.tempCentroids[i].Weight)
    }

    td.reciprocalSum = oldReciprocalSum + other.reciprocalSum
}
go
func (td *MergingDigest) Merge(other *MergingDigest) {
    oldReciprocalSum := td.reciprocalSum
    shuffledIndices := rand.Perm(len(other.mainCentroids))

    for _, i := range shuffledIndices {
        td.Add(other.mainCentroids[i].Mean, other.mainCentroids[i].Weight)
    }

    // we did not merge other's temps, so we need to add those too
    // they're unsorted so there's no need to shuffle them
    for i := range other.tempCentroids {
        td.Add(other.tempCentroids[i].Mean, other.tempCentroids[i].Weight)
    }

    td.reciprocalSum = oldReciprocalSum + other.reciprocalSum
}
[ "func", "(", "td", "*", "MergingDigest", ")", "Merge", "(", "other", "*", "MergingDigest", ")", "{", "oldReciprocalSum", ":=", "td", ".", "reciprocalSum", "\n", "shuffledIndices", ":=", "rand", ".", "Perm", "(", "len", "(", "other", ".", "mainCentroids", ")", ")", "\n\n", "for", "_", ",", "i", ":=", "range", "shuffledIndices", "{", "td", ".", "Add", "(", "other", ".", "mainCentroids", "[", "i", "]", ".", "Mean", ",", "other", ".", "mainCentroids", "[", "i", "]", ".", "Weight", ")", "\n", "}", "\n\n", "// we did not merge other's temps, so we need to add those too", "// they're unsorted so there's no need to shuffle them", "for", "i", ":=", "range", "other", ".", "tempCentroids", "{", "td", ".", "Add", "(", "other", ".", "tempCentroids", "[", "i", "]", ".", "Mean", ",", "other", ".", "tempCentroids", "[", "i", "]", ".", "Weight", ")", "\n", "}", "\n\n", "td", ".", "reciprocalSum", "=", "oldReciprocalSum", "+", "other", ".", "reciprocalSum", "\n", "}" ]
// Merge another digest into this one. Neither td nor other can be shared
// concurrently during the execution of this method.
[ "Merge", "another", "digest", "into", "this", "one", ".", "Neither", "td", "nor", "other", "can", "be", "shared", "concurrently", "during", "the", "execution", "of", "this", "method", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L374-L389
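Merge supports fan-in of per-worker digests at an aggregator; a sketch where shardDigests is a hypothetical []*tdigest.MergingDigest built elsewhere:

global := tdigest.NewMerging(100, false)
for _, shard := range shardDigests {
    global.Merge(shard) // neither digest may be used concurrently during this call
}
fmt.Println(global.Quantile(0.5))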
13,481
stripe/veneur
tdigest/merging_digest.go
Centroids
func (td *MergingDigest) Centroids() []Centroid {
    if !td.debug {
        panic("must enable debug to call Centroids()")
    }
    td.mergeAllTemps()
    return td.mainCentroids
}
go
func (td *MergingDigest) Centroids() []Centroid {
    if !td.debug {
        panic("must enable debug to call Centroids()")
    }
    td.mergeAllTemps()
    return td.mainCentroids
}
[ "func", "(", "td", "*", "MergingDigest", ")", "Centroids", "(", ")", "[", "]", "Centroid", "{", "if", "!", "td", ".", "debug", "{", "panic", "(", "\"", "\"", ")", "\n", "}", "\n", "td", ".", "mergeAllTemps", "(", ")", "\n", "return", "td", ".", "mainCentroids", "\n", "}" ]
// This function provides direct access to the internal list of centroids in
// this t-digest. Having access to this list is very important for analyzing the
// t-digest's statistical properties. However, since it violates the encapsulation
// of the t-digest, it should be used sparingly. Mutating the returned slice can
// result in undefined behavior.
//
// This function will panic if debug is not enabled for this t-digest.
[ "This", "function", "provides", "direct", "access", "to", "the", "internal", "list", "of", "centroids", "in", "this", "t", "-", "digest", ".", "Having", "access", "to", "this", "list", "is", "very", "important", "for", "analyzing", "the", "t", "-", "digest", "s", "statistical", "properties", ".", "However", "since", "it", "violates", "the", "encapsulation", "of", "the", "t", "-", "digest", "it", "should", "be", "used", "sparingly", ".", "Mutating", "the", "returned", "slice", "can", "result", "in", "undefined", "behavior", ".", "This", "function", "will", "panic", "if", "debug", "is", "not", "enabled", "for", "this", "t", "-", "digest", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/tdigest/merging_digest.go#L463-L469
13,482
stripe/veneur
flusher.go
tallyMetrics
func (s *Server) tallyMetrics(percentiles []float64) ([]WorkerMetrics, metricsSummary) {
    // allocating this long array to count up the sizes is cheaper than appending
    // the []WorkerMetrics together one at a time
    tempMetrics := make([]WorkerMetrics, 0, len(s.Workers))

    ms := metricsSummary{}

    for i, w := range s.Workers {
        log.WithField("worker", i).Debug("Flushing")
        wm := w.Flush()
        tempMetrics = append(tempMetrics, wm)

        ms.totalCounters += len(wm.counters)
        ms.totalGauges += len(wm.gauges)
        ms.totalHistograms += len(wm.histograms)
        ms.totalSets += len(wm.sets)
        ms.totalTimers += len(wm.timers)

        ms.totalGlobalCounters += len(wm.globalCounters)
        ms.totalGlobalGauges += len(wm.globalGauges)
        ms.totalGlobalHistograms += len(wm.globalHistograms)
        ms.totalGlobalTimers += len(wm.globalTimers)

        ms.totalLocalHistograms += len(wm.localHistograms)
        ms.totalLocalSets += len(wm.localSets)
        ms.totalLocalTimers += len(wm.localTimers)

        ms.totalLocalStatusChecks += len(wm.localStatusChecks)
    }

    ms.totalLength = ms.totalCounters + ms.totalGauges +
        // histograms and timers each report a metric point for each percentile
        // plus a point for each of their aggregates
        (ms.totalTimers+ms.totalHistograms)*(s.HistogramAggregates.Count+len(percentiles)) +
        // local-only histograms will be flushed with percentiles, so we intentionally
        // use the original percentile list here.
        // remember that both the global veneur and the local instances have
        // 'local-only' histograms.
        ms.totalLocalSets +
        (ms.totalLocalTimers+ms.totalLocalHistograms)*(s.HistogramAggregates.Count+len(s.HistogramPercentiles))

    // Global instances also flush sets and global counters, so be sure and add
    // them to the total size
    if !s.IsLocal() {
        ms.totalLength += ms.totalSets
        ms.totalLength += ms.totalGlobalCounters
        ms.totalLength += ms.totalGlobalGauges
        ms.totalLength += ms.totalGlobalHistograms * (s.HistogramAggregates.Count + len(s.HistogramPercentiles))
        ms.totalLength += ms.totalGlobalTimers * (s.HistogramAggregates.Count + len(s.HistogramPercentiles))
    }

    return tempMetrics, ms
}
go
func (s *Server) tallyMetrics(percentiles []float64) ([]WorkerMetrics, metricsSummary) {
    // allocating this long array to count up the sizes is cheaper than appending
    // the []WorkerMetrics together one at a time
    tempMetrics := make([]WorkerMetrics, 0, len(s.Workers))

    ms := metricsSummary{}

    for i, w := range s.Workers {
        log.WithField("worker", i).Debug("Flushing")
        wm := w.Flush()
        tempMetrics = append(tempMetrics, wm)

        ms.totalCounters += len(wm.counters)
        ms.totalGauges += len(wm.gauges)
        ms.totalHistograms += len(wm.histograms)
        ms.totalSets += len(wm.sets)
        ms.totalTimers += len(wm.timers)

        ms.totalGlobalCounters += len(wm.globalCounters)
        ms.totalGlobalGauges += len(wm.globalGauges)
        ms.totalGlobalHistograms += len(wm.globalHistograms)
        ms.totalGlobalTimers += len(wm.globalTimers)

        ms.totalLocalHistograms += len(wm.localHistograms)
        ms.totalLocalSets += len(wm.localSets)
        ms.totalLocalTimers += len(wm.localTimers)

        ms.totalLocalStatusChecks += len(wm.localStatusChecks)
    }

    ms.totalLength = ms.totalCounters + ms.totalGauges +
        // histograms and timers each report a metric point for each percentile
        // plus a point for each of their aggregates
        (ms.totalTimers+ms.totalHistograms)*(s.HistogramAggregates.Count+len(percentiles)) +
        // local-only histograms will be flushed with percentiles, so we intentionally
        // use the original percentile list here.
        // remember that both the global veneur and the local instances have
        // 'local-only' histograms.
        ms.totalLocalSets +
        (ms.totalLocalTimers+ms.totalLocalHistograms)*(s.HistogramAggregates.Count+len(s.HistogramPercentiles))

    // Global instances also flush sets and global counters, so be sure and add
    // them to the total size
    if !s.IsLocal() {
        ms.totalLength += ms.totalSets
        ms.totalLength += ms.totalGlobalCounters
        ms.totalLength += ms.totalGlobalGauges
        ms.totalLength += ms.totalGlobalHistograms * (s.HistogramAggregates.Count + len(s.HistogramPercentiles))
        ms.totalLength += ms.totalGlobalTimers * (s.HistogramAggregates.Count + len(s.HistogramPercentiles))
    }

    return tempMetrics, ms
}
[ "func", "(", "s", "*", "Server", ")", "tallyMetrics", "(", "percentiles", "[", "]", "float64", ")", "(", "[", "]", "WorkerMetrics", ",", "metricsSummary", ")", "{", "// allocating this long array to count up the sizes is cheaper than appending", "// the []WorkerMetrics together one at a time", "tempMetrics", ":=", "make", "(", "[", "]", "WorkerMetrics", ",", "0", ",", "len", "(", "s", ".", "Workers", ")", ")", "\n\n", "ms", ":=", "metricsSummary", "{", "}", "\n\n", "for", "i", ",", "w", ":=", "range", "s", ".", "Workers", "{", "log", ".", "WithField", "(", "\"", "\"", ",", "i", ")", ".", "Debug", "(", "\"", "\"", ")", "\n", "wm", ":=", "w", ".", "Flush", "(", ")", "\n", "tempMetrics", "=", "append", "(", "tempMetrics", ",", "wm", ")", "\n\n", "ms", ".", "totalCounters", "+=", "len", "(", "wm", ".", "counters", ")", "\n", "ms", ".", "totalGauges", "+=", "len", "(", "wm", ".", "gauges", ")", "\n", "ms", ".", "totalHistograms", "+=", "len", "(", "wm", ".", "histograms", ")", "\n", "ms", ".", "totalSets", "+=", "len", "(", "wm", ".", "sets", ")", "\n", "ms", ".", "totalTimers", "+=", "len", "(", "wm", ".", "timers", ")", "\n\n", "ms", ".", "totalGlobalCounters", "+=", "len", "(", "wm", ".", "globalCounters", ")", "\n", "ms", ".", "totalGlobalGauges", "+=", "len", "(", "wm", ".", "globalGauges", ")", "\n", "ms", ".", "totalGlobalHistograms", "+=", "len", "(", "wm", ".", "globalHistograms", ")", "\n", "ms", ".", "totalGlobalTimers", "+=", "len", "(", "wm", ".", "globalTimers", ")", "\n\n", "ms", ".", "totalLocalHistograms", "+=", "len", "(", "wm", ".", "localHistograms", ")", "\n", "ms", ".", "totalLocalSets", "+=", "len", "(", "wm", ".", "localSets", ")", "\n", "ms", ".", "totalLocalTimers", "+=", "len", "(", "wm", ".", "localTimers", ")", "\n\n", "ms", ".", "totalLocalStatusChecks", "+=", "len", "(", "wm", ".", "localStatusChecks", ")", "\n", "}", "\n\n", "ms", ".", "totalLength", "=", "ms", ".", "totalCounters", "+", "ms", ".", "totalGauges", "+", "// histograms and timers each report a metric point for each percentile", "// plus a point for each of their aggregates", "(", "ms", ".", "totalTimers", "+", "ms", ".", "totalHistograms", ")", "*", "(", "s", ".", "HistogramAggregates", ".", "Count", "+", "len", "(", "percentiles", ")", ")", "+", "// local-only histograms will be flushed with percentiles, so we intentionally", "// use the original percentile list here.", "// remember that both the global veneur and the local instances have", "// 'local-only' histograms.", "ms", ".", "totalLocalSets", "+", "(", "ms", ".", "totalLocalTimers", "+", "ms", ".", "totalLocalHistograms", ")", "*", "(", "s", ".", "HistogramAggregates", ".", "Count", "+", "len", "(", "s", ".", "HistogramPercentiles", ")", ")", "\n\n", "// Global instances also flush sets and global counters, so be sure and add", "// them to the total size", "if", "!", "s", ".", "IsLocal", "(", ")", "{", "ms", ".", "totalLength", "+=", "ms", ".", "totalSets", "\n", "ms", ".", "totalLength", "+=", "ms", ".", "totalGlobalCounters", "\n", "ms", ".", "totalLength", "+=", "ms", ".", "totalGlobalGauges", "\n", "ms", ".", "totalLength", "+=", "ms", ".", "totalGlobalHistograms", "*", "(", "s", ".", "HistogramAggregates", ".", "Count", "+", "len", "(", "s", ".", "HistogramPercentiles", ")", ")", "\n", "ms", ".", "totalLength", "+=", "ms", ".", "totalGlobalTimers", "*", "(", "s", ".", "HistogramAggregates", ".", "Count", "+", "len", "(", "s", ".", "HistogramPercentiles", ")", ")", "\n", "}", "\n\n", "return", "tempMetrics", ",", "ms", "\n", "}" ]
// tallyMetrics gives a slight overestimate of the number
// of metrics we'll be reporting, so that we can pre-allocate
// a slice of the correct length instead of constantly appending
// for performance
[ "tallyMetrics", "gives", "a", "slight", "overestimate", "of", "the", "number", "of", "metrics", "we", "ll", "be", "reporting", "so", "that", "we", "can", "pre", "-", "allocate", "a", "slice", "of", "the", "correct", "length", "instead", "of", "constantly", "appending", "for", "performance" ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/flusher.go#L153-L204
13,483
stripe/veneur
flusher.go
forwardGRPC
func (s *Server) forwardGRPC(ctx context.Context, wms []WorkerMetrics) {
    span, _ := trace.StartSpanFromContext(ctx, "")
    span.SetTag("protocol", "grpc")
    defer span.ClientFinish(s.TraceClient)

    exportStart := time.Now()

    // Collect all of the forwardable metrics from the various WorkerMetrics.
    var metrics []*metricpb.Metric
    for _, wm := range wms {
        metrics = append(metrics, wm.ForwardableMetrics(s.TraceClient)...)
    }

    span.Add(
        ssf.Timing("forward.duration_ns", time.Since(exportStart), time.Nanosecond, map[string]string{"part": "export"}),
        ssf.Gauge("forward.metrics_total", float32(len(metrics)), nil),
        // Maintain compatibility with metrics used in HTTP-based forwarding
        ssf.Count("forward.post_metrics_total", float32(len(metrics)), nil),
    )

    if len(metrics) == 0 {
        log.Debug("Nothing to forward, skipping.")
        return
    }

    entry := log.WithFields(logrus.Fields{
        "metrics":     len(metrics),
        "destination": s.ForwardAddr,
        "protocol":    "grpc",
        "grpcstate":   s.grpcForwardConn.GetState().String(),
    })

    c := forwardrpc.NewForwardClient(s.grpcForwardConn)

    grpcStart := time.Now()
    _, err := c.SendMetrics(ctx, &forwardrpc.MetricList{Metrics: metrics})
    if err != nil {
        if ctx.Err() != nil {
            // We exceeded the deadline of the flush context.
            span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "deadline_exceeded"}))
        } else if statErr, ok := status.FromError(err); ok &&
            (statErr.Message() == "all SubConns are in TransientFailure" ||
                statErr.Message() == "transport is closing") {
            // We could check statErr.Code() == codes.Unavailable, but we don't know all of the cases that
            // could return that code. These two particular cases are fairly safe and usually associated
            // with connection rebalancing or host replacement, so we don't want them going to sentry.
            span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "transient_unavailable"}))
        } else {
            span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "send"}))
            entry.WithError(err).Error("Failed to forward to an upstream Veneur")
        }
    } else {
        entry.Info("Completed forward to an upstream Veneur")
    }

    span.Add(
        ssf.Timing("forward.duration_ns", time.Since(grpcStart), time.Nanosecond, map[string]string{"part": "grpc"}),
        ssf.Count("forward.error_total", 0, nil),
    )
}
go
func (s *Server) forwardGRPC(ctx context.Context, wms []WorkerMetrics) {
    span, _ := trace.StartSpanFromContext(ctx, "")
    span.SetTag("protocol", "grpc")
    defer span.ClientFinish(s.TraceClient)

    exportStart := time.Now()

    // Collect all of the forwardable metrics from the various WorkerMetrics.
    var metrics []*metricpb.Metric
    for _, wm := range wms {
        metrics = append(metrics, wm.ForwardableMetrics(s.TraceClient)...)
    }

    span.Add(
        ssf.Timing("forward.duration_ns", time.Since(exportStart), time.Nanosecond, map[string]string{"part": "export"}),
        ssf.Gauge("forward.metrics_total", float32(len(metrics)), nil),
        // Maintain compatibility with metrics used in HTTP-based forwarding
        ssf.Count("forward.post_metrics_total", float32(len(metrics)), nil),
    )

    if len(metrics) == 0 {
        log.Debug("Nothing to forward, skipping.")
        return
    }

    entry := log.WithFields(logrus.Fields{
        "metrics":     len(metrics),
        "destination": s.ForwardAddr,
        "protocol":    "grpc",
        "grpcstate":   s.grpcForwardConn.GetState().String(),
    })

    c := forwardrpc.NewForwardClient(s.grpcForwardConn)

    grpcStart := time.Now()
    _, err := c.SendMetrics(ctx, &forwardrpc.MetricList{Metrics: metrics})
    if err != nil {
        if ctx.Err() != nil {
            // We exceeded the deadline of the flush context.
            span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "deadline_exceeded"}))
        } else if statErr, ok := status.FromError(err); ok &&
            (statErr.Message() == "all SubConns are in TransientFailure" ||
                statErr.Message() == "transport is closing") {
            // We could check statErr.Code() == codes.Unavailable, but we don't know all of the cases that
            // could return that code. These two particular cases are fairly safe and usually associated
            // with connection rebalancing or host replacement, so we don't want them going to sentry.
            span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "transient_unavailable"}))
        } else {
            span.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "send"}))
            entry.WithError(err).Error("Failed to forward to an upstream Veneur")
        }
    } else {
        entry.Info("Completed forward to an upstream Veneur")
    }

    span.Add(
        ssf.Timing("forward.duration_ns", time.Since(grpcStart), time.Nanosecond, map[string]string{"part": "grpc"}),
        ssf.Count("forward.error_total", 0, nil),
    )
}
[ "func", "(", "s", "*", "Server", ")", "forwardGRPC", "(", "ctx", "context", ".", "Context", ",", "wms", "[", "]", "WorkerMetrics", ")", "{", "span", ",", "_", ":=", "trace", ".", "StartSpanFromContext", "(", "ctx", ",", "\"", "\"", ")", "\n", "span", ".", "SetTag", "(", "\"", "\"", ",", "\"", "\"", ")", "\n", "defer", "span", ".", "ClientFinish", "(", "s", ".", "TraceClient", ")", "\n\n", "exportStart", ":=", "time", ".", "Now", "(", ")", "\n\n", "// Collect all of the forwardable metrics from the various WorkerMetrics.", "var", "metrics", "[", "]", "*", "metricpb", ".", "Metric", "\n", "for", "_", ",", "wm", ":=", "range", "wms", "{", "metrics", "=", "append", "(", "metrics", ",", "wm", ".", "ForwardableMetrics", "(", "s", ".", "TraceClient", ")", "...", ")", "\n", "}", "\n\n", "span", ".", "Add", "(", "ssf", ".", "Timing", "(", "\"", "\"", ",", "time", ".", "Since", "(", "exportStart", ")", ",", "time", ".", "Nanosecond", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", "}", ")", ",", "ssf", ".", "Gauge", "(", "\"", "\"", ",", "float32", "(", "len", "(", "metrics", ")", ")", ",", "nil", ")", ",", "// Maintain compatibility with metrics used in HTTP-based forwarding", "ssf", ".", "Count", "(", "\"", "\"", ",", "float32", "(", "len", "(", "metrics", ")", ")", ",", "nil", ")", ",", ")", "\n\n", "if", "len", "(", "metrics", ")", "==", "0", "{", "log", ".", "Debug", "(", "\"", "\"", ")", "\n", "return", "\n", "}", "\n\n", "entry", ":=", "log", ".", "WithFields", "(", "logrus", ".", "Fields", "{", "\"", "\"", ":", "len", "(", "metrics", ")", ",", "\"", "\"", ":", "s", ".", "ForwardAddr", ",", "\"", "\"", ":", "\"", "\"", ",", "\"", "\"", ":", "s", ".", "grpcForwardConn", ".", "GetState", "(", ")", ".", "String", "(", ")", ",", "}", ")", "\n\n", "c", ":=", "forwardrpc", ".", "NewForwardClient", "(", "s", ".", "grpcForwardConn", ")", "\n\n", "grpcStart", ":=", "time", ".", "Now", "(", ")", "\n", "_", ",", "err", ":=", "c", ".", "SendMetrics", "(", "ctx", ",", "&", "forwardrpc", ".", "MetricList", "{", "Metrics", ":", "metrics", "}", ")", "\n", "if", "err", "!=", "nil", "{", "if", "ctx", ".", "Err", "(", ")", "!=", "nil", "{", "// We exceeded the deadline of the flush context.", "span", ".", "Add", "(", "ssf", ".", "Count", "(", "\"", "\"", ",", "1", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", "}", ")", ")", "\n", "}", "else", "if", "statErr", ",", "ok", ":=", "status", ".", "FromError", "(", "err", ")", ";", "ok", "&&", "(", "statErr", ".", "Message", "(", ")", "==", "\"", "\"", "||", "statErr", ".", "Message", "(", ")", "==", "\"", "\"", ")", "{", "// We could check statErr.Code() == codes.Unavailable, but we don't know all of the cases that", "// could return that code. 
These two particular cases are fairly safe and usually associated", "// with connection rebalancing or host replacement, so we don't want them going to sentry.", "span", ".", "Add", "(", "ssf", ".", "Count", "(", "\"", "\"", ",", "1", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", "}", ")", ")", "\n", "}", "else", "{", "span", ".", "Add", "(", "ssf", ".", "Count", "(", "\"", "\"", ",", "1", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", "}", ")", ")", "\n", "entry", ".", "WithError", "(", "err", ")", ".", "Error", "(", "\"", "\"", ")", "\n", "}", "\n", "}", "else", "{", "entry", ".", "Info", "(", "\"", "\"", ")", "\n", "}", "\n\n", "span", ".", "Add", "(", "ssf", ".", "Timing", "(", "\"", "\"", ",", "time", ".", "Since", "(", "grpcStart", ")", ",", "time", ".", "Nanosecond", ",", "map", "[", "string", "]", "string", "{", "\"", "\"", ":", "\"", "\"", "}", ")", ",", "ssf", ".", "Count", "(", "\"", "\"", ",", "0", ",", "nil", ")", ",", ")", "\n", "}" ]
// forwardGRPC forwards all input metrics to a downstream Veneur, over gRPC.
[ "forwardGRPC", "forwards", "all", "input", "metrics", "to", "a", "downstream", "Veneur", "over", "gRPC", "." ]
748a3593cd11cfb4543fbe3a3a3b1614a393e3a7
https://github.com/stripe/veneur/blob/748a3593cd11cfb4543fbe3a3a3b1614a393e3a7/flusher.go#L458-L518
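A minimal sketch of the cause-tagging branch in this record, kept self-contained for reference. It assumes only the google.golang.org/grpc/status package; the classifyForwardError helper and its return strings are hypothetical stand-ins for the span counters in the record, not part of veneur.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/status"
)

// classifyForwardError mirrors forwardGRPC's error handling: deadline
// expiry first, then two known-transient gRPC failure messages, then
// everything else (which the original also logs and reports).
func classifyForwardError(ctx context.Context, err error) string {
	if ctx.Err() != nil {
		// The flush context expired before SendMetrics returned.
		return "deadline_exceeded"
	}
	if statErr, ok := status.FromError(err); ok &&
		(statErr.Message() == "all SubConns are in TransientFailure" ||
			statErr.Message() == "transport is closing") {
		// Connection rebalancing or host replacement; not worth alerting on.
		return "transient_unavailable"
	}
	return "send"
}

func main() {
	fmt.Println(classifyForwardError(context.Background(), fmt.Errorf("boom"))) // send
}

Matching on status messages rather than codes.Unavailable is deliberate in the original: the two strings cover rebalancing and host replacement without silencing every Unavailable error.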
13,484
moovweb/gokogiri
xpath/xpath.go
EvaluateAsNodeset
func (xpath *XPath) EvaluateAsNodeset(nodePtr unsafe.Pointer, xpathExpr *Expression) (nodes []unsafe.Pointer, err error) { if nodePtr == nil { //evaluating xpath on a nil node returns no result. return } err = xpath.Evaluate(nodePtr, xpathExpr) if err != nil { return } nodes, err = xpath.ResultAsNodeset() return }
go
func (xpath *XPath) EvaluateAsNodeset(nodePtr unsafe.Pointer, xpathExpr *Expression) (nodes []unsafe.Pointer, err error) { if nodePtr == nil { //evaluating xpath on a nil node returns no result. return } err = xpath.Evaluate(nodePtr, xpathExpr) if err != nil { return } nodes, err = xpath.ResultAsNodeset() return }
[ "func", "(", "xpath", "*", "XPath", ")", "EvaluateAsNodeset", "(", "nodePtr", "unsafe", ".", "Pointer", ",", "xpathExpr", "*", "Expression", ")", "(", "nodes", "[", "]", "unsafe", ".", "Pointer", ",", "err", "error", ")", "{", "if", "nodePtr", "==", "nil", "{", "//evaluating xpath on a nil node returns no result.", "return", "\n", "}", "\n\n", "err", "=", "xpath", ".", "Evaluate", "(", "nodePtr", ",", "xpathExpr", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\n", "}", "\n\n", "nodes", ",", "err", "=", "xpath", ".", "ResultAsNodeset", "(", ")", "\n", "return", "\n", "}" ]
// Evaluate an XPath and attempt to consume the result as a nodeset.
[ "Evaluate", "an", "XPath", "and", "attempt", "to", "consume", "the", "result", "as", "a", "nodeset", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xpath/xpath.go#L107-L120
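A usage sketch for EvaluateAsNodeset, mirroring what Search (record 13,498 below) does internally: compile once, evaluate against a raw node pointer, wrap each result with NewNode. The gokogiri.ParseXml entry point, Document.Root, and Node.String are assumptions from the library's public surface; everything else appears in these records.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
	"github.com/moovweb/gokogiri/xml"
	"github.com/moovweb/gokogiri/xpath"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte("<root><a/><a/></root>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	// Compile once; free the expression when done.
	expr := xpath.Compile("//a")
	defer expr.Free()

	ctx := doc.DocXPathCtx()
	ptrs, err := ctx.EvaluateAsNodeset(doc.Root().NodePtr(), expr)
	if err != nil {
		panic(err)
	}
	for _, p := range ptrs {
		// Wrap each raw libxml2 pointer back into a typed Node.
		fmt.Println(xml.NewNode(p, doc).String())
	}
}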
13,485
moovweb/gokogiri
xpath/xpath.go
ResultAsNodeset
func (xpath *XPath) ResultAsNodeset() (nodes []unsafe.Pointer, err error) { if xpath.ResultPtr == nil { return } if xpath.ReturnType() != XPATH_NODESET { err = errors.New("Cannot convert XPath result to nodeset") } if nodesetPtr := xpath.ResultPtr.nodesetval; nodesetPtr != nil { if nodesetSize := int(nodesetPtr.nodeNr); nodesetSize > 0 { nodes = make([]unsafe.Pointer, nodesetSize) for i := 0; i < nodesetSize; i++ { nodes[i] = unsafe.Pointer(C.fetchNode(nodesetPtr, C.int(i))) } } } return }
go
func (xpath *XPath) ResultAsNodeset() (nodes []unsafe.Pointer, err error) { if xpath.ResultPtr == nil { return } if xpath.ReturnType() != XPATH_NODESET { err = errors.New("Cannot convert XPath result to nodeset") } if nodesetPtr := xpath.ResultPtr.nodesetval; nodesetPtr != nil { if nodesetSize := int(nodesetPtr.nodeNr); nodesetSize > 0 { nodes = make([]unsafe.Pointer, nodesetSize) for i := 0; i < nodesetSize; i++ { nodes[i] = unsafe.Pointer(C.fetchNode(nodesetPtr, C.int(i))) } } } return }
[ "func", "(", "xpath", "*", "XPath", ")", "ResultAsNodeset", "(", ")", "(", "nodes", "[", "]", "unsafe", ".", "Pointer", ",", "err", "error", ")", "{", "if", "xpath", ".", "ResultPtr", "==", "nil", "{", "return", "\n", "}", "\n\n", "if", "xpath", ".", "ReturnType", "(", ")", "!=", "XPATH_NODESET", "{", "err", "=", "errors", ".", "New", "(", "\"", "\"", ")", "\n", "}", "\n\n", "if", "nodesetPtr", ":=", "xpath", ".", "ResultPtr", ".", "nodesetval", ";", "nodesetPtr", "!=", "nil", "{", "if", "nodesetSize", ":=", "int", "(", "nodesetPtr", ".", "nodeNr", ")", ";", "nodesetSize", ">", "0", "{", "nodes", "=", "make", "(", "[", "]", "unsafe", ".", "Pointer", ",", "nodesetSize", ")", "\n", "for", "i", ":=", "0", ";", "i", "<", "nodesetSize", ";", "i", "++", "{", "nodes", "[", "i", "]", "=", "unsafe", ".", "Pointer", "(", "C", ".", "fetchNode", "(", "nodesetPtr", ",", "C", ".", "int", "(", "i", ")", ")", ")", "\n", "}", "\n", "}", "\n", "}", "\n", "return", "\n", "}" ]
// Get the XPath result as a nodeset.
[ "Get", "the", "XPath", "result", "as", "a", "nodeset", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xpath/xpath.go#L164-L182
13,486
moovweb/gokogiri
xpath/xpath.go
ResultAsString
func (xpath *XPath) ResultAsString() (val string, err error) { if xpath.ReturnType() != XPATH_STRING { xpath.ResultPtr = C.xmlXPathConvertString(xpath.ResultPtr) } val = C.GoString((*C.char)(unsafe.Pointer(xpath.ResultPtr.stringval))) return }
go
func (xpath *XPath) ResultAsString() (val string, err error) { if xpath.ReturnType() != XPATH_STRING { xpath.ResultPtr = C.xmlXPathConvertString(xpath.ResultPtr) } val = C.GoString((*C.char)(unsafe.Pointer(xpath.ResultPtr.stringval))) return }
[ "func", "(", "xpath", "*", "XPath", ")", "ResultAsString", "(", ")", "(", "val", "string", ",", "err", "error", ")", "{", "if", "xpath", ".", "ReturnType", "(", ")", "!=", "XPATH_STRING", "{", "xpath", ".", "ResultPtr", "=", "C", ".", "xmlXPathConvertString", "(", "xpath", ".", "ResultPtr", ")", "\n", "}", "\n", "val", "=", "C", ".", "GoString", "(", "(", "*", "C", ".", "char", ")", "(", "unsafe", ".", "Pointer", "(", "xpath", ".", "ResultPtr", ".", "stringval", ")", ")", ")", "\n", "return", "\n", "}" ]
// Coerce the result into a string
[ "Coerce", "the", "result", "into", "a", "string" ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xpath/xpath.go#L185-L191
13,487
moovweb/gokogiri
xpath/xpath.go
ResultAsNumber
func (xpath *XPath) ResultAsNumber() (val float64, err error) { if xpath.ReturnType() != XPATH_NUMBER { xpath.ResultPtr = C.xmlXPathConvertNumber(xpath.ResultPtr) } val = float64(xpath.ResultPtr.floatval) return }
go
func (xpath *XPath) ResultAsNumber() (val float64, err error) { if xpath.ReturnType() != XPATH_NUMBER { xpath.ResultPtr = C.xmlXPathConvertNumber(xpath.ResultPtr) } val = float64(xpath.ResultPtr.floatval) return }
[ "func", "(", "xpath", "*", "XPath", ")", "ResultAsNumber", "(", ")", "(", "val", "float64", ",", "err", "error", ")", "{", "if", "xpath", ".", "ReturnType", "(", ")", "!=", "XPATH_NUMBER", "{", "xpath", ".", "ResultPtr", "=", "C", ".", "xmlXPathConvertNumber", "(", "xpath", ".", "ResultPtr", ")", "\n", "}", "\n", "val", "=", "float64", "(", "xpath", ".", "ResultPtr", ".", "floatval", ")", "\n", "return", "\n", "}" ]
// Coerce the result into a number
[ "Coerce", "the", "result", "into", "a", "number" ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xpath/xpath.go#L194-L200
13,488
moovweb/gokogiri
xpath/xpath.go
ResultAsBoolean
func (xpath *XPath) ResultAsBoolean() (val bool, err error) { xpath.ResultPtr = C.xmlXPathConvertBoolean(xpath.ResultPtr) val = xpath.ResultPtr.boolval != 0 return }
go
func (xpath *XPath) ResultAsBoolean() (val bool, err error) { xpath.ResultPtr = C.xmlXPathConvertBoolean(xpath.ResultPtr) val = xpath.ResultPtr.boolval != 0 return }
[ "func", "(", "xpath", "*", "XPath", ")", "ResultAsBoolean", "(", ")", "(", "val", "bool", ",", "err", "error", ")", "{", "xpath", ".", "ResultPtr", "=", "C", ".", "xmlXPathConvertBoolean", "(", "xpath", ".", "ResultPtr", ")", "\n", "val", "=", "xpath", ".", "ResultPtr", ".", "boolval", "!=", "0", "\n", "return", "\n", "}" ]
// Coerce the result into a boolean
[ "Coerce", "the", "result", "into", "a", "boolean" ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xpath/xpath.go#L203-L207
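Records 13,486 through 13,488 share one shape: Evaluate stores a result pointer, and each coercer converts it in place. A combined sketch, under the same ParseXml/Root assumptions as above; the coercion outcomes follow the standard XPath casting rules.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
	"github.com/moovweb/gokogiri/xpath"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte("<r><a/><a/></r>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	expr := xpath.Compile("count(//a)")
	defer expr.Free()

	ctx := doc.DocXPathCtx()
	if err := ctx.Evaluate(doc.Root().NodePtr(), expr); err != nil {
		panic(err)
	}

	// Each call converts ResultPtr in place, so later coercions see the
	// converted value, not the original number.
	n, _ := ctx.ResultAsNumber()  // 2
	s, _ := ctx.ResultAsString()  // "2"
	b, _ := ctx.ResultAsBoolean() // true: non-empty string
	fmt.Println(n, s, b)
}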
13,489
moovweb/gokogiri
xpath/xpath.go
SetResolver
func (xpath *XPath) SetResolver(v VariableScope) { C.set_var_lookup(xpath.ContextPtr, unsafe.Pointer(&v)) C.set_function_lookup(xpath.ContextPtr, unsafe.Pointer(&v)) }
go
func (xpath *XPath) SetResolver(v VariableScope) { C.set_var_lookup(xpath.ContextPtr, unsafe.Pointer(&v)) C.set_function_lookup(xpath.ContextPtr, unsafe.Pointer(&v)) }
[ "func", "(", "xpath", "*", "XPath", ")", "SetResolver", "(", "v", "VariableScope", ")", "{", "C", ".", "set_var_lookup", "(", "xpath", ".", "ContextPtr", ",", "unsafe", ".", "Pointer", "(", "&", "v", ")", ")", "\n", "C", ".", "set_function_lookup", "(", "xpath", ".", "ContextPtr", ",", "unsafe", ".", "Pointer", "(", "&", "v", ")", ")", "\n", "}" ]
// Add a variable resolver.
[ "Add", "a", "variable", "resolver", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xpath/xpath.go#L210-L213
13,490
moovweb/gokogiri
xml/node.go
NewNode
func NewNode(nodePtr unsafe.Pointer, document Document) (node Node) { if nodePtr == nil { return nil } xmlNode := &XmlNode{ Ptr: (*C.xmlNode)(nodePtr), Document: document, valid: true, } nodeType := NodeType(C.getNodeType((*C.xmlNode)(nodePtr))) switch nodeType { default: node = xmlNode case XML_ATTRIBUTE_NODE: node = &AttributeNode{XmlNode: xmlNode} case XML_ELEMENT_NODE: node = &ElementNode{XmlNode: xmlNode} case XML_CDATA_SECTION_NODE: node = &CDataNode{XmlNode: xmlNode} case XML_COMMENT_NODE: node = &CommentNode{XmlNode: xmlNode} case XML_PI_NODE: node = &ProcessingInstructionNode{XmlNode: xmlNode} case XML_TEXT_NODE: node = &TextNode{XmlNode: xmlNode} } return }
go
func NewNode(nodePtr unsafe.Pointer, document Document) (node Node) { if nodePtr == nil { return nil } xmlNode := &XmlNode{ Ptr: (*C.xmlNode)(nodePtr), Document: document, valid: true, } nodeType := NodeType(C.getNodeType((*C.xmlNode)(nodePtr))) switch nodeType { default: node = xmlNode case XML_ATTRIBUTE_NODE: node = &AttributeNode{XmlNode: xmlNode} case XML_ELEMENT_NODE: node = &ElementNode{XmlNode: xmlNode} case XML_CDATA_SECTION_NODE: node = &CDataNode{XmlNode: xmlNode} case XML_COMMENT_NODE: node = &CommentNode{XmlNode: xmlNode} case XML_PI_NODE: node = &ProcessingInstructionNode{XmlNode: xmlNode} case XML_TEXT_NODE: node = &TextNode{XmlNode: xmlNode} } return }
[ "func", "NewNode", "(", "nodePtr", "unsafe", ".", "Pointer", ",", "document", "Document", ")", "(", "node", "Node", ")", "{", "if", "nodePtr", "==", "nil", "{", "return", "nil", "\n", "}", "\n", "xmlNode", ":=", "&", "XmlNode", "{", "Ptr", ":", "(", "*", "C", ".", "xmlNode", ")", "(", "nodePtr", ")", ",", "Document", ":", "document", ",", "valid", ":", "true", ",", "}", "\n", "nodeType", ":=", "NodeType", "(", "C", ".", "getNodeType", "(", "(", "*", "C", ".", "xmlNode", ")", "(", "nodePtr", ")", ")", ")", "\n\n", "switch", "nodeType", "{", "default", ":", "node", "=", "xmlNode", "\n", "case", "XML_ATTRIBUTE_NODE", ":", "node", "=", "&", "AttributeNode", "{", "XmlNode", ":", "xmlNode", "}", "\n", "case", "XML_ELEMENT_NODE", ":", "node", "=", "&", "ElementNode", "{", "XmlNode", ":", "xmlNode", "}", "\n", "case", "XML_CDATA_SECTION_NODE", ":", "node", "=", "&", "CDataNode", "{", "XmlNode", ":", "xmlNode", "}", "\n", "case", "XML_COMMENT_NODE", ":", "node", "=", "&", "CommentNode", "{", "XmlNode", ":", "xmlNode", "}", "\n", "case", "XML_PI_NODE", ":", "node", "=", "&", "ProcessingInstructionNode", "{", "XmlNode", ":", "xmlNode", "}", "\n", "case", "XML_TEXT_NODE", ":", "node", "=", "&", "TextNode", "{", "XmlNode", ":", "xmlNode", "}", "\n", "}", "\n", "return", "\n", "}" ]
// NewNode takes a C pointer from the libxml2 library and returns a Node instance of // the appropriate type.
[ "NewNode", "takes", "a", "C", "pointer", "from", "the", "libxml2", "library", "and", "returns", "a", "Node", "instance", "of", "the", "appropriate", "type", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L170-L198
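Because NewNode returns the Node interface, callers usually type-switch to reach the concrete types listed in the record. A short sketch; the node types come from the switch above, while ParseXml and Root are assumed as before.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
	"github.com/moovweb/gokogiri/xml"
)

// describe reports which concrete wrapper NewNode produced.
func describe(n xml.Node) string {
	switch n.(type) {
	case *xml.ElementNode:
		return "element"
	case *xml.TextNode:
		return "text"
	case *xml.CommentNode:
		return "comment"
	default:
		return "other"
	}
}

func main() {
	doc, err := gokogiri.ParseXml([]byte("<r>hi<!-- note --></r>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()
	fmt.Println(describe(doc.Root())) // element
}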
13,491
moovweb/gokogiri
xml/node.go
AddChild
func (xmlNode *XmlNode) AddChild(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addChild(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addChild(node); err != nil { break } } } case Node: err = xmlNode.addChild(t) } return }
go
func (xmlNode *XmlNode) AddChild(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addChild(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addChild(node); err != nil { break } } } case Node: err = xmlNode.addChild(t) } return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "AddChild", "(", "data", "interface", "{", "}", ")", "(", "err", "error", ")", "{", "switch", "t", ":=", "data", ".", "(", "type", ")", "{", "default", ":", "if", "nodes", ",", "err", ":=", "xmlNode", ".", "coerce", "(", "data", ")", ";", "err", "==", "nil", "{", "for", "_", ",", "node", ":=", "range", "nodes", "{", "if", "err", "=", "xmlNode", ".", "addChild", "(", "node", ")", ";", "err", "!=", "nil", "{", "break", "\n", "}", "\n", "}", "\n", "}", "\n", "case", "*", "DocumentFragment", ":", "if", "nodes", ",", "err", ":=", "xmlNode", ".", "coerce", "(", "data", ")", ";", "err", "==", "nil", "{", "for", "_", ",", "node", ":=", "range", "nodes", "{", "if", "err", "=", "xmlNode", ".", "addChild", "(", "node", ")", ";", "err", "!=", "nil", "{", "break", "\n", "}", "\n", "}", "\n", "}", "\n", "case", "Node", ":", "err", "=", "xmlNode", ".", "addChild", "(", "t", ")", "\n", "}", "\n", "return", "\n", "}" ]
// Add a node as a child of the current node. // Passing in a nodeset will add all the nodes as children of the current node.
[ "Add", "a", "node", "as", "a", "child", "of", "the", "current", "node", ".", "Passing", "in", "a", "nodeset", "will", "add", "all", "the", "nodes", "as", "children", "of", "the", "current", "node", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L228-L250
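AddChild takes interface{}: a Node is appended directly, while other inputs go through coerce. A hedged sketch; it assumes coerce parses markup strings into document fragments (the coerce helper is not shown in these records) and that Node.String serializes a subtree.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte("<inventory/>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	root := doc.Root()
	// Assumption: the string is coerced into a two-node fragment,
	// appended in order as children of <inventory>.
	if err := root.AddChild("<item>a</item><item>b</item>"); err != nil {
		panic(err)
	}
	fmt.Println(root.String())
}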
13,492
moovweb/gokogiri
xml/node.go
AddPreviousSibling
func (xmlNode *XmlNode) AddPreviousSibling(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addPreviousSibling(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addPreviousSibling(node); err != nil { break } } } case Node: err = xmlNode.addPreviousSibling(t) } return }
go
func (xmlNode *XmlNode) AddPreviousSibling(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addPreviousSibling(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for _, node := range nodes { if err = xmlNode.addPreviousSibling(node); err != nil { break } } } case Node: err = xmlNode.addPreviousSibling(t) } return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "AddPreviousSibling", "(", "data", "interface", "{", "}", ")", "(", "err", "error", ")", "{", "switch", "t", ":=", "data", ".", "(", "type", ")", "{", "default", ":", "if", "nodes", ",", "err", ":=", "xmlNode", ".", "coerce", "(", "data", ")", ";", "err", "==", "nil", "{", "for", "_", ",", "node", ":=", "range", "nodes", "{", "if", "err", "=", "xmlNode", ".", "addPreviousSibling", "(", "node", ")", ";", "err", "!=", "nil", "{", "break", "\n", "}", "\n", "}", "\n", "}", "\n", "case", "*", "DocumentFragment", ":", "if", "nodes", ",", "err", ":=", "xmlNode", ".", "coerce", "(", "data", ")", ";", "err", "==", "nil", "{", "for", "_", ",", "node", ":=", "range", "nodes", "{", "if", "err", "=", "xmlNode", ".", "addPreviousSibling", "(", "node", ")", ";", "err", "!=", "nil", "{", "break", "\n", "}", "\n", "}", "\n", "}", "\n", "case", "Node", ":", "err", "=", "xmlNode", ".", "addPreviousSibling", "(", "t", ")", "\n", "}", "\n", "return", "\n", "}" ]
// Insert a node immediately before this node in the document. // Passing in a nodeset will add all the nodes, in order.
[ "Insert", "a", "node", "immediately", "before", "this", "node", "in", "the", "document", ".", "Passing", "in", "a", "nodeset", "will", "add", "all", "the", "nodes", "in", "order", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L254-L276
13,493
moovweb/gokogiri
xml/node.go
AddNextSibling
func (xmlNode *XmlNode) AddNextSibling(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] if err = xmlNode.addNextSibling(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] if err = xmlNode.addNextSibling(node); err != nil { break } } } case Node: err = xmlNode.addNextSibling(t) } return }
go
func (xmlNode *XmlNode) AddNextSibling(data interface{}) (err error) { switch t := data.(type) { default: if nodes, err := xmlNode.coerce(data); err == nil { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] if err = xmlNode.addNextSibling(node); err != nil { break } } } case *DocumentFragment: if nodes, err := xmlNode.coerce(data); err == nil { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] if err = xmlNode.addNextSibling(node); err != nil { break } } } case Node: err = xmlNode.addNextSibling(t) } return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "AddNextSibling", "(", "data", "interface", "{", "}", ")", "(", "err", "error", ")", "{", "switch", "t", ":=", "data", ".", "(", "type", ")", "{", "default", ":", "if", "nodes", ",", "err", ":=", "xmlNode", ".", "coerce", "(", "data", ")", ";", "err", "==", "nil", "{", "for", "i", ":=", "len", "(", "nodes", ")", "-", "1", ";", "i", ">=", "0", ";", "i", "--", "{", "node", ":=", "nodes", "[", "i", "]", "\n", "if", "err", "=", "xmlNode", ".", "addNextSibling", "(", "node", ")", ";", "err", "!=", "nil", "{", "break", "\n", "}", "\n", "}", "\n", "}", "\n", "case", "*", "DocumentFragment", ":", "if", "nodes", ",", "err", ":=", "xmlNode", ".", "coerce", "(", "data", ")", ";", "err", "==", "nil", "{", "for", "i", ":=", "len", "(", "nodes", ")", "-", "1", ";", "i", ">=", "0", ";", "i", "--", "{", "node", ":=", "nodes", "[", "i", "]", "\n", "if", "err", "=", "xmlNode", ".", "addNextSibling", "(", "node", ")", ";", "err", "!=", "nil", "{", "break", "\n", "}", "\n", "}", "\n", "}", "\n", "case", "Node", ":", "err", "=", "xmlNode", ".", "addNextSibling", "(", "t", ")", "\n", "}", "\n", "return", "\n", "}" ]
// Insert a node immediately after this node in the document. // Passing in a nodeset will add all the nodes, in order.
[ "Insert", "a", "node", "immediately", "after", "this", "node", "in", "the", "document", ".", "Passing", "in", "a", "nodeset", "will", "add", "all", "the", "nodes", "in", "order", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L280-L304
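Note the reversed loop in AddNextSibling: each node is inserted immediately after the anchor, so iterating backwards is what keeps a multi-node set in document order (AddPreviousSibling, record 13,492, can iterate forwards for the same reason). A sketch covering both, with the same string-coercion assumption as above; FirstChild is assumed from the Node interface.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte("<list><b/></list>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	b := doc.Root().FirstChild()
	b.AddPreviousSibling("<a/>")  // lands before <b/>
	b.AddNextSibling("<c/><d/>")  // the reverse loop keeps <c/> before <d/>
	fmt.Println(doc.Root().String()) // expected: <list><a/><b/><c/><d/></list>
}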
13,494
moovweb/gokogiri
xml/node.go
NodePtr
func (xmlNode *XmlNode) NodePtr() (p unsafe.Pointer) { p = unsafe.Pointer(xmlNode.Ptr) return }
go
func (xmlNode *XmlNode) NodePtr() (p unsafe.Pointer) { p = unsafe.Pointer(xmlNode.Ptr) return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "NodePtr", "(", ")", "(", "p", "unsafe", ".", "Pointer", ")", "{", "p", "=", "unsafe", ".", "Pointer", "(", "xmlNode", ".", "Ptr", ")", "\n", "return", "\n", "}" ]
// NodePtr returns a pointer to the underlying C struct.
[ "NodePtr", "returns", "a", "pointer", "to", "the", "underlying", "C", "struct", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L325-L328
13,495
moovweb/gokogiri
xml/node.go
Path
func (xmlNode *XmlNode) Path() (path string) { pathPtr := C.xmlGetNodePath(xmlNode.Ptr) if pathPtr != nil { p := (*C.char)(unsafe.Pointer(pathPtr)) defer C.xmlFreeChars(p) path = C.GoString(p) } return }
go
func (xmlNode *XmlNode) Path() (path string) { pathPtr := C.xmlGetNodePath(xmlNode.Ptr) if pathPtr != nil { p := (*C.char)(unsafe.Pointer(pathPtr)) defer C.xmlFreeChars(p) path = C.GoString(p) } return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "Path", "(", ")", "(", "path", "string", ")", "{", "pathPtr", ":=", "C", ".", "xmlGetNodePath", "(", "xmlNode", ".", "Ptr", ")", "\n", "if", "pathPtr", "!=", "nil", "{", "p", ":=", "(", "*", "C", ".", "char", ")", "(", "unsafe", ".", "Pointer", "(", "pathPtr", ")", ")", "\n", "defer", "C", ".", "xmlFreeChars", "(", "p", ")", "\n", "path", "=", "C", ".", "GoString", "(", "p", ")", "\n", "}", "\n", "return", "\n", "}" ]
// Path returns an XPath expression that can be used to // select this node in the document.
[ "Path", "returns", "an", "XPath", "expression", "that", "can", "be", "used", "to", "select", "this", "node", "in", "the", "document", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L337-L345
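Path pairs naturally with Search (record 13,498 below) for a round trip: serialize a node's location as an XPath, then reselect it later in the same document. Only ParseXml and Root are assumed here.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte("<r><a><b/></a></r>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	nodes, err := doc.Root().Search("//b")
	if err != nil || len(nodes) == 0 {
		panic("no match")
	}
	p := nodes[0].Path() // e.g. "/r/a/b"

	again, err := doc.Root().Search(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(p, len(again)) // the path selects the same single node
}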
13,496
moovweb/gokogiri
xml/node.go
Attribute
func (xmlNode *XmlNode) Attribute(name string) (attribute *AttributeNode) { if xmlNode.NodeType() != XML_ELEMENT_NODE { return } nameBytes := GetCString([]byte(name)) namePtr := unsafe.Pointer(&nameBytes[0]) attrPtr := C.xmlHasNsProp(xmlNode.Ptr, (*C.xmlChar)(namePtr), nil) if attrPtr == nil { return } else { node := NewNode(unsafe.Pointer(attrPtr), xmlNode.Document) if node, ok := node.(*AttributeNode); ok { attribute = node } } return }
go
func (xmlNode *XmlNode) Attribute(name string) (attribute *AttributeNode) { if xmlNode.NodeType() != XML_ELEMENT_NODE { return } nameBytes := GetCString([]byte(name)) namePtr := unsafe.Pointer(&nameBytes[0]) attrPtr := C.xmlHasNsProp(xmlNode.Ptr, (*C.xmlChar)(namePtr), nil) if attrPtr == nil { return } else { node := NewNode(unsafe.Pointer(attrPtr), xmlNode.Document) if node, ok := node.(*AttributeNode); ok { attribute = node } } return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "Attribute", "(", "name", "string", ")", "(", "attribute", "*", "AttributeNode", ")", "{", "if", "xmlNode", ".", "NodeType", "(", ")", "!=", "XML_ELEMENT_NODE", "{", "return", "\n", "}", "\n", "nameBytes", ":=", "GetCString", "(", "[", "]", "byte", "(", "name", ")", ")", "\n", "namePtr", ":=", "unsafe", ".", "Pointer", "(", "&", "nameBytes", "[", "0", "]", ")", "\n", "attrPtr", ":=", "C", ".", "xmlHasNsProp", "(", "xmlNode", ".", "Ptr", ",", "(", "*", "C", ".", "xmlChar", ")", "(", "namePtr", ")", ",", "nil", ")", "\n", "if", "attrPtr", "==", "nil", "{", "return", "\n", "}", "else", "{", "node", ":=", "NewNode", "(", "unsafe", ".", "Pointer", "(", "attrPtr", ")", ",", "xmlNode", ".", "Document", ")", "\n", "if", "node", ",", "ok", ":=", "node", ".", "(", "*", "AttributeNode", ")", ";", "ok", "{", "attribute", "=", "node", "\n", "}", "\n", "}", "\n", "return", "\n", "}" ]
// Return the attribute node, or nil if the attribute does not exist.
[ "Return", "the", "attribute", "node", "or", "nil", "if", "the", "attribute", "does", "not", "exist", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L480-L496
13,497
moovweb/gokogiri
xml/node.go
Attr
func (xmlNode *XmlNode) Attr(name string) (val string) { if xmlNode.NodeType() != XML_ELEMENT_NODE { return } nameBytes := GetCString([]byte(name)) namePtr := unsafe.Pointer(&nameBytes[0]) valPtr := C.xmlGetProp(xmlNode.Ptr, (*C.xmlChar)(namePtr)) if valPtr == nil { return } p := unsafe.Pointer(valPtr) defer C.xmlFreeChars((*C.char)(p)) val = C.GoString((*C.char)(p)) return }
go
func (xmlNode *XmlNode) Attr(name string) (val string) { if xmlNode.NodeType() != XML_ELEMENT_NODE { return } nameBytes := GetCString([]byte(name)) namePtr := unsafe.Pointer(&nameBytes[0]) valPtr := C.xmlGetProp(xmlNode.Ptr, (*C.xmlChar)(namePtr)) if valPtr == nil { return } p := unsafe.Pointer(valPtr) defer C.xmlFreeChars((*C.char)(p)) val = C.GoString((*C.char)(p)) return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "Attr", "(", "name", "string", ")", "(", "val", "string", ")", "{", "if", "xmlNode", ".", "NodeType", "(", ")", "!=", "XML_ELEMENT_NODE", "{", "return", "\n", "}", "\n", "nameBytes", ":=", "GetCString", "(", "[", "]", "byte", "(", "name", ")", ")", "\n", "namePtr", ":=", "unsafe", ".", "Pointer", "(", "&", "nameBytes", "[", "0", "]", ")", "\n", "valPtr", ":=", "C", ".", "xmlGetProp", "(", "xmlNode", ".", "Ptr", ",", "(", "*", "C", ".", "xmlChar", ")", "(", "namePtr", ")", ")", "\n", "if", "valPtr", "==", "nil", "{", "return", "\n", "}", "\n", "p", ":=", "unsafe", ".", "Pointer", "(", "valPtr", ")", "\n", "defer", "C", ".", "xmlFreeChars", "(", "(", "*", "C", ".", "char", ")", "(", "p", ")", ")", "\n", "val", "=", "C", ".", "GoString", "(", "(", "*", "C", ".", "char", ")", "(", "p", ")", ")", "\n", "return", "\n", "}" ]
// Attr returns the value of an attribute. // If you need to check for the existence of an attribute, // use Attribute.
[ "Attr", "returns", "the", "value", "of", "an", "attribute", ".", "If", "you", "need", "to", "check", "for", "the", "existence", "of", "an", "attribute", "use", "Attribute", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L502-L516
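The Attr/Attribute distinction in the two docstrings, made concrete: Attr returns the empty string for both a missing attribute and a present-but-empty one, while Attribute distinguishes them by returning nil for the missing case. ParseXml and Root are assumed as before.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte(`<node id="" class="x"/>`))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	n := doc.Root()
	fmt.Printf("%q %q\n", n.Attr("id"), n.Attr("missing")) // "" "": indistinguishable
	fmt.Println(n.Attribute("id") != nil)      // true: present but empty
	fmt.Println(n.Attribute("missing") != nil) // false: absent
}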
13,498
moovweb/gokogiri
xml/node.go
Search
func (xmlNode *XmlNode) Search(data interface{}) (result []Node, err error) { switch data := data.(type) { default: err = ERR_UNDEFINED_SEARCH_PARAM case string: if xpathExpr := xpath.Compile(data); xpathExpr != nil { defer xpathExpr.Free() result, err = xmlNode.Search(xpathExpr) } else { err = errors.New("cannot compile xpath: " + data) } case []byte: result, err = xmlNode.Search(string(data)) case *xpath.Expression: xpathCtx := xmlNode.Document.DocXPathCtx() nodePtrs, err := xpathCtx.EvaluateAsNodeset(unsafe.Pointer(xmlNode.Ptr), data) if nodePtrs == nil || err != nil { return nil, err } for _, nodePtr := range nodePtrs { result = append(result, NewNode(nodePtr, xmlNode.Document)) } } return }
go
func (xmlNode *XmlNode) Search(data interface{}) (result []Node, err error) { switch data := data.(type) { default: err = ERR_UNDEFINED_SEARCH_PARAM case string: if xpathExpr := xpath.Compile(data); xpathExpr != nil { defer xpathExpr.Free() result, err = xmlNode.Search(xpathExpr) } else { err = errors.New("cannot compile xpath: " + data) } case []byte: result, err = xmlNode.Search(string(data)) case *xpath.Expression: xpathCtx := xmlNode.Document.DocXPathCtx() nodePtrs, err := xpathCtx.EvaluateAsNodeset(unsafe.Pointer(xmlNode.Ptr), data) if nodePtrs == nil || err != nil { return nil, err } for _, nodePtr := range nodePtrs { result = append(result, NewNode(nodePtr, xmlNode.Document)) } } return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "Search", "(", "data", "interface", "{", "}", ")", "(", "result", "[", "]", "Node", ",", "err", "error", ")", "{", "switch", "data", ":=", "data", ".", "(", "type", ")", "{", "default", ":", "err", "=", "ERR_UNDEFINED_SEARCH_PARAM", "\n", "case", "string", ":", "if", "xpathExpr", ":=", "xpath", ".", "Compile", "(", "data", ")", ";", "xpathExpr", "!=", "nil", "{", "defer", "xpathExpr", ".", "Free", "(", ")", "\n", "result", ",", "err", "=", "xmlNode", ".", "Search", "(", "xpathExpr", ")", "\n", "}", "else", "{", "err", "=", "errors", ".", "New", "(", "\"", "\"", "+", "data", ")", "\n", "}", "\n", "case", "[", "]", "byte", ":", "result", ",", "err", "=", "xmlNode", ".", "Search", "(", "string", "(", "data", ")", ")", "\n", "case", "*", "xpath", ".", "Expression", ":", "xpathCtx", ":=", "xmlNode", ".", "Document", ".", "DocXPathCtx", "(", ")", "\n", "nodePtrs", ",", "err", ":=", "xpathCtx", ".", "EvaluateAsNodeset", "(", "unsafe", ".", "Pointer", "(", "xmlNode", ".", "Ptr", ")", ",", "data", ")", "\n", "if", "nodePtrs", "==", "nil", "||", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "for", "_", ",", "nodePtr", ":=", "range", "nodePtrs", "{", "result", "=", "append", "(", "result", ",", "NewNode", "(", "nodePtr", ",", "xmlNode", ".", "Document", ")", ")", "\n", "}", "\n", "}", "\n", "return", "\n", "}" ]
// Search for nodes that match an XPath. This is the simplest way to look for nodes.
[ "Search", "for", "nodes", "that", "match", "an", "XPath", ".", "This", "is", "the", "simplest", "way", "to", "look", "for", "nodes", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L572-L596
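The string case above compiles and frees the expression internally, which makes Search the usual entry point in practice. A sketch; ParseXml, Root, and Node.Content are assumptions from the library's public surface.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte("<books><book>Go</book><book>XML</book></books>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	nodes, err := doc.Root().Search("//book")
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Content()) // Go, then XML
	}
}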
13,499
moovweb/gokogiri
xml/node.go
EvalXPathAsBoolean
func (xmlNode *XmlNode) EvalXPathAsBoolean(data interface{}, v xpath.VariableScope) (result bool) { switch data := data.(type) { case string: if xpathExpr := xpath.Compile(data); xpathExpr != nil { defer xpathExpr.Free() result = xmlNode.EvalXPathAsBoolean(xpathExpr, v) } else { //err = errors.New("cannot compile xpath: " + data) } case []byte: result = xmlNode.EvalXPathAsBoolean(string(data), v) case *xpath.Expression: xpathCtx := xmlNode.Document.DocXPathCtx() xpathCtx.SetResolver(v) err := xpathCtx.Evaluate(unsafe.Pointer(xmlNode.Ptr), data) if err != nil { return false } result, _ = xpathCtx.ResultAsBoolean() default: //err = ERR_UNDEFINED_SEARCH_PARAM } return }
go
func (xmlNode *XmlNode) EvalXPathAsBoolean(data interface{}, v xpath.VariableScope) (result bool) { switch data := data.(type) { case string: if xpathExpr := xpath.Compile(data); xpathExpr != nil { defer xpathExpr.Free() result = xmlNode.EvalXPathAsBoolean(xpathExpr, v) } else { //err = errors.New("cannot compile xpath: " + data) } case []byte: result = xmlNode.EvalXPathAsBoolean(string(data), v) case *xpath.Expression: xpathCtx := xmlNode.Document.DocXPathCtx() xpathCtx.SetResolver(v) err := xpathCtx.Evaluate(unsafe.Pointer(xmlNode.Ptr), data) if err != nil { return false } result, _ = xpathCtx.ResultAsBoolean() default: //err = ERR_UNDEFINED_SEARCH_PARAM } return }
[ "func", "(", "xmlNode", "*", "XmlNode", ")", "EvalXPathAsBoolean", "(", "data", "interface", "{", "}", ",", "v", "xpath", ".", "VariableScope", ")", "(", "result", "bool", ")", "{", "switch", "data", ":=", "data", ".", "(", "type", ")", "{", "case", "string", ":", "if", "xpathExpr", ":=", "xpath", ".", "Compile", "(", "data", ")", ";", "xpathExpr", "!=", "nil", "{", "defer", "xpathExpr", ".", "Free", "(", ")", "\n", "result", "=", "xmlNode", ".", "EvalXPathAsBoolean", "(", "xpathExpr", ",", "v", ")", "\n", "}", "else", "{", "//err = errors.New(\"cannot compile xpath: \" + data)", "}", "\n", "case", "[", "]", "byte", ":", "result", "=", "xmlNode", ".", "EvalXPathAsBoolean", "(", "string", "(", "data", ")", ",", "v", ")", "\n", "case", "*", "xpath", ".", "Expression", ":", "xpathCtx", ":=", "xmlNode", ".", "Document", ".", "DocXPathCtx", "(", ")", "\n", "xpathCtx", ".", "SetResolver", "(", "v", ")", "\n", "err", ":=", "xpathCtx", ".", "Evaluate", "(", "unsafe", ".", "Pointer", "(", "xmlNode", ".", "Ptr", ")", ",", "data", ")", "\n", "if", "err", "!=", "nil", "{", "return", "false", "\n", "}", "\n", "result", ",", "_", "=", "xpathCtx", ".", "ResultAsBoolean", "(", ")", "\n", "default", ":", "//err = ERR_UNDEFINED_SEARCH_PARAM", "}", "\n", "return", "\n", "}" ]
// Evaluate an XPath and coerce the result to a boolean according to the // XPath rules. In the presence of an error, this function will return false // even if the expression cannot actually be evaluated. // In most cases you are better advised to call EvalXPath; this function is // intended for packages that implement XML standards and that are fully aware // of the consequences of suppressing a compilation error. // If a non-nil VariableScope is provided, any variables or registered functions present // in the xpath will be resolved.
[ "Evaluate", "an", "XPath", "and", "coerce", "the", "result", "to", "a", "boolean", "according", "to", "the", "XPath", "rules", ".", "In", "the", "presence", "of", "an", "error", "this", "function", "will", "return", "false", "even", "if", "the", "expression", "cannot", "actually", "be", "evaluated", ".", "In", "most", "cases", "you", "are", "better", "advised", "to", "call", "EvalXPath", ";", "this", "function", "is", "intended", "for", "packages", "that", "implement", "XML", "standards", "and", "that", "are", "fully", "aware", "of", "the", "consequences", "of", "suppressing", "a", "compilation", "error", ".", "If", "a", "non", "-", "nil", "VariableScope", "is", "provided", "any", "variables", "or", "registered", "functions", "present", "in", "the", "xpath", "will", "be", "resolved", "." ]
a1a828153468a7518b184e698f6265904108d957
https://github.com/moovweb/gokogiri/blob/a1a828153468a7518b184e698f6265904108d957/xml/node.go#L691-L714
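A minimal sketch of the boolean evaluator. Passing nil for the VariableScope assumes the resolver machinery tolerates an empty scope, which these records do not guarantee; with a real scope, variables and registered functions in the expression would resolve through it.

package main

import (
	"fmt"

	"github.com/moovweb/gokogiri"
)

func main() {
	doc, err := gokogiri.ParseXml([]byte("<cart><item/><item/><item/></cart>"))
	if err != nil {
		panic(err)
	}
	defer doc.Free()

	// Errors are swallowed by design: a bad expression simply yields false.
	ok := doc.Root().EvalXPathAsBoolean("count(//item) > 2", nil)
	fmt.Println(ok) // true
}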