id (int32, 0-167k) | repo (string, 5-54 chars) | path (string, 4-155 chars) | func_name (string, 1-118 chars) | original_string (string, 52-85.5k chars) | language (1 value: go) | code (string, 52-85.5k chars) | code_tokens (sequence) | docstring (string, 6-2.61k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 85-252 chars)
---|---|---|---|---|---|---|---|---|---|---|---|
12,800 | influxdata/platform | macro.go | Apply | func (u *MacroUpdate) Apply(m *Macro) error {
if u.Name != "" {
m.Name = u.Name
}
if u.Selected != nil {
m.Selected = u.Selected
}
if u.Arguments != nil {
m.Arguments = u.Arguments
}
return nil
} | go | func (u *MacroUpdate) Apply(m *Macro) error {
if u.Name != "" {
m.Name = u.Name
}
if u.Selected != nil {
m.Selected = u.Selected
}
if u.Arguments != nil {
m.Arguments = u.Arguments
}
return nil
} | [
"func",
"(",
"u",
"*",
"MacroUpdate",
")",
"Apply",
"(",
"m",
"*",
"Macro",
")",
"error",
"{",
"if",
"u",
".",
"Name",
"!=",
"\"",
"\"",
"{",
"m",
".",
"Name",
"=",
"u",
".",
"Name",
"\n",
"}",
"\n\n",
"if",
"u",
".",
"Selected",
"!=",
"nil",
"{",
"m",
".",
"Selected",
"=",
"u",
".",
"Selected",
"\n",
"}",
"\n\n",
"if",
"u",
".",
"Arguments",
"!=",
"nil",
"{",
"m",
".",
"Arguments",
"=",
"u",
".",
"Arguments",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // Apply applies non-zero fields from a MacroUpdate to a Macro | [
"Apply",
"applies",
"non",
"-",
"zero",
"fields",
"from",
"a",
"MacroUpdate",
"to",
"a",
"Macro"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/macro.go#L107-L121 |
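A minimal usage sketch for the `Apply` method in the row above. The `MacroUpdate` and `Macro` types come from the repo's root package; the import path and the field values shown are assumptions for illustration, not taken from the source.

```go
package main

import (
	"fmt"
	"log"

	platform "github.com/influxdata/platform"
)

func main() {
	// Only non-zero fields of the update are copied onto the macro.
	m := &platform.Macro{Name: "old-name"}
	upd := platform.MacroUpdate{Name: "region"}
	if err := upd.Apply(m); err != nil {
		log.Fatal(err) // Apply currently always returns nil, but check anyway.
	}
	fmt.Println(m.Name) // prints "region"
}
```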
12,801 | influxdata/platform | macro.go | UnmarshalJSON | func (a *MacroArguments) UnmarshalJSON(data []byte) error {
type Alias MacroArguments
aux := struct{ *Alias }{Alias: (*Alias)(a)}
err := json.Unmarshal(data, &aux)
if err != nil {
return err
}
// Decode the polymorphic MacroArguments.Values field into the approriate struct
switch aux.Type {
case "constant":
values, ok := aux.Values.([]interface{})
if !ok {
return fmt.Errorf("error parsing %v as MacroConstantArguments", aux.Values)
}
macroValues := make(MacroConstantValues, len(values))
for i, v := range values {
if _, ok := v.(string); !ok {
return fmt.Errorf("expected macro constant value to be string but received %T", v)
}
macroValues[i] = v.(string)
}
a.Values = macroValues
case "map":
values, ok := aux.Values.(map[string]interface{})
if !ok {
return fmt.Errorf("error parsing %v as MacroMapArguments", aux.Values)
}
macroValues := MacroMapValues{}
for k, v := range values {
if _, ok := v.(string); !ok {
return fmt.Errorf("expected macro map value to be string but received %T", v)
}
macroValues[k] = v.(string)
}
a.Values = macroValues
case "query":
values, ok := aux.Values.(map[string]interface{})
if !ok {
return fmt.Errorf("error parsing %v as MacroQueryArguments", aux.Values)
}
macroValues := MacroQueryValues{}
query, prs := values["query"]
if !prs {
return fmt.Errorf("\"query\" key not present in MacroQueryArguments")
}
if _, ok := query.(string); !ok {
return fmt.Errorf("expected \"query\" to be string but received %T", query)
}
language, prs := values["language"]
if !prs {
return fmt.Errorf("\"language\" key not present in MacroQueryArguments")
}
if _, ok := language.(string); !ok {
return fmt.Errorf("expected \"language\" to be string but received %T", language)
}
macroValues.Query = query.(string)
macroValues.Language = language.(string)
a.Values = macroValues
default:
return fmt.Errorf("unknown MacroArguments type %s", aux.Type)
}
return nil
} | go | func (a *MacroArguments) UnmarshalJSON(data []byte) error {
type Alias MacroArguments
aux := struct{ *Alias }{Alias: (*Alias)(a)}
err := json.Unmarshal(data, &aux)
if err != nil {
return err
}
// Decode the polymorphic MacroArguments.Values field into the approriate struct
switch aux.Type {
case "constant":
values, ok := aux.Values.([]interface{})
if !ok {
return fmt.Errorf("error parsing %v as MacroConstantArguments", aux.Values)
}
macroValues := make(MacroConstantValues, len(values))
for i, v := range values {
if _, ok := v.(string); !ok {
return fmt.Errorf("expected macro constant value to be string but received %T", v)
}
macroValues[i] = v.(string)
}
a.Values = macroValues
case "map":
values, ok := aux.Values.(map[string]interface{})
if !ok {
return fmt.Errorf("error parsing %v as MacroMapArguments", aux.Values)
}
macroValues := MacroMapValues{}
for k, v := range values {
if _, ok := v.(string); !ok {
return fmt.Errorf("expected macro map value to be string but received %T", v)
}
macroValues[k] = v.(string)
}
a.Values = macroValues
case "query":
values, ok := aux.Values.(map[string]interface{})
if !ok {
return fmt.Errorf("error parsing %v as MacroQueryArguments", aux.Values)
}
macroValues := MacroQueryValues{}
query, prs := values["query"]
if !prs {
return fmt.Errorf("\"query\" key not present in MacroQueryArguments")
}
if _, ok := query.(string); !ok {
return fmt.Errorf("expected \"query\" to be string but received %T", query)
}
language, prs := values["language"]
if !prs {
return fmt.Errorf("\"language\" key not present in MacroQueryArguments")
}
if _, ok := language.(string); !ok {
return fmt.Errorf("expected \"language\" to be string but received %T", language)
}
macroValues.Query = query.(string)
macroValues.Language = language.(string)
a.Values = macroValues
default:
return fmt.Errorf("unknown MacroArguments type %s", aux.Type)
}
return nil
} | [
"func",
"(",
"a",
"*",
"MacroArguments",
")",
"UnmarshalJSON",
"(",
"data",
"[",
"]",
"byte",
")",
"error",
"{",
"type",
"Alias",
"MacroArguments",
"\n",
"aux",
":=",
"struct",
"{",
"*",
"Alias",
"}",
"{",
"Alias",
":",
"(",
"*",
"Alias",
")",
"(",
"a",
")",
"}",
"\n\n",
"err",
":=",
"json",
".",
"Unmarshal",
"(",
"data",
",",
"&",
"aux",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Decode the polymorphic MacroArguments.Values field into the approriate struct",
"switch",
"aux",
".",
"Type",
"{",
"case",
"\"",
"\"",
":",
"values",
",",
"ok",
":=",
"aux",
".",
"Values",
".",
"(",
"[",
"]",
"interface",
"{",
"}",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"aux",
".",
"Values",
")",
"\n",
"}",
"\n\n",
"macroValues",
":=",
"make",
"(",
"MacroConstantValues",
",",
"len",
"(",
"values",
")",
")",
"\n",
"for",
"i",
",",
"v",
":=",
"range",
"values",
"{",
"if",
"_",
",",
"ok",
":=",
"v",
".",
"(",
"string",
")",
";",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"v",
")",
"\n",
"}",
"\n",
"macroValues",
"[",
"i",
"]",
"=",
"v",
".",
"(",
"string",
")",
"\n",
"}",
"\n\n",
"a",
".",
"Values",
"=",
"macroValues",
"\n",
"case",
"\"",
"\"",
":",
"values",
",",
"ok",
":=",
"aux",
".",
"Values",
".",
"(",
"map",
"[",
"string",
"]",
"interface",
"{",
"}",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"aux",
".",
"Values",
")",
"\n",
"}",
"\n\n",
"macroValues",
":=",
"MacroMapValues",
"{",
"}",
"\n",
"for",
"k",
",",
"v",
":=",
"range",
"values",
"{",
"if",
"_",
",",
"ok",
":=",
"v",
".",
"(",
"string",
")",
";",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"v",
")",
"\n",
"}",
"\n",
"macroValues",
"[",
"k",
"]",
"=",
"v",
".",
"(",
"string",
")",
"\n",
"}",
"\n\n",
"a",
".",
"Values",
"=",
"macroValues",
"\n",
"case",
"\"",
"\"",
":",
"values",
",",
"ok",
":=",
"aux",
".",
"Values",
".",
"(",
"map",
"[",
"string",
"]",
"interface",
"{",
"}",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"aux",
".",
"Values",
")",
"\n",
"}",
"\n\n",
"macroValues",
":=",
"MacroQueryValues",
"{",
"}",
"\n\n",
"query",
",",
"prs",
":=",
"values",
"[",
"\"",
"\"",
"]",
"\n",
"if",
"!",
"prs",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\\\"",
"\\\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"_",
",",
"ok",
":=",
"query",
".",
"(",
"string",
")",
";",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\\\"",
"\\\"",
"\"",
",",
"query",
")",
"\n",
"}",
"\n\n",
"language",
",",
"prs",
":=",
"values",
"[",
"\"",
"\"",
"]",
"\n",
"if",
"!",
"prs",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\\\"",
"\\\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"_",
",",
"ok",
":=",
"language",
".",
"(",
"string",
")",
";",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\\\"",
"\\\"",
"\"",
",",
"language",
")",
"\n",
"}",
"\n\n",
"macroValues",
".",
"Query",
"=",
"query",
".",
"(",
"string",
")",
"\n",
"macroValues",
".",
"Language",
"=",
"language",
".",
"(",
"string",
")",
"\n",
"a",
".",
"Values",
"=",
"macroValues",
"\n",
"default",
":",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"aux",
".",
"Type",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // UnmarshalJSON unmarshals json into a MacroArguments struct, using the `Type`
// field to assign the approriate struct to the `Values` field | [
"UnmarshalJSON",
"unmarshals",
"json",
"into",
"a",
"MacroArguments",
"struct",
"using",
"the",
"Type",
"field",
"to",
"assign",
"the",
"approriate",
"struct",
"to",
"the",
"Values",
"field"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/macro.go#L125-L198 |
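A hedged sketch of the three payload shapes the switch above accepts. The top-level `type`/`values` JSON keys are an assumption based on the field names (the struct tags are not shown in this row); the `constant`, `map`, and `query` type names and the inner `query`/`language` keys do appear in the code.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	platform "github.com/influxdata/platform"
)

func main() {
	// Plausible payloads for each argument type handled by UnmarshalJSON.
	payloads := []string{
		`{"type": "constant", "values": ["us-west", "us-east"]}`,
		`{"type": "map", "values": {"host-a": "10.0.0.1", "host-b": "10.0.0.2"}}`,
		`{"type": "query", "values": {"query": "buckets()", "language": "flux"}}`,
	}
	for _, p := range payloads {
		var args platform.MacroArguments
		if err := json.Unmarshal([]byte(p), &args); err != nil {
			log.Fatal(err)
		}
		// Values ends up as MacroConstantValues, MacroMapValues, or
		// MacroQueryValues depending on Type.
		fmt.Printf("%s -> %T\n", args.Type, args.Values)
	}
}
```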
12,802 | influxdata/platform | bolt/view.go | FindViewByID | func (c *Client) FindViewByID(ctx context.Context, id platform.ID) (*platform.View, error) {
var d *platform.View
err := c.db.View(func(tx *bolt.Tx) error {
dash, err := c.findViewByID(ctx, tx, id)
if err != nil {
return &platform.Error{
Err: err,
Op: getOp(platform.OpFindViewByID),
}
}
d = dash
return nil
})
return d, err
} | go | func (c *Client) FindViewByID(ctx context.Context, id platform.ID) (*platform.View, error) {
var d *platform.View
err := c.db.View(func(tx *bolt.Tx) error {
dash, err := c.findViewByID(ctx, tx, id)
if err != nil {
return &platform.Error{
Err: err,
Op: getOp(platform.OpFindViewByID),
}
}
d = dash
return nil
})
return d, err
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"FindViewByID",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
")",
"(",
"*",
"platform",
".",
"View",
",",
"error",
")",
"{",
"var",
"d",
"*",
"platform",
".",
"View",
"\n\n",
"err",
":=",
"c",
".",
"db",
".",
"View",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"dash",
",",
"err",
":=",
"c",
".",
"findViewByID",
"(",
"ctx",
",",
"tx",
",",
"id",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"getOp",
"(",
"platform",
".",
"OpFindViewByID",
")",
",",
"}",
"\n",
"}",
"\n",
"d",
"=",
"dash",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n\n",
"return",
"d",
",",
"err",
"\n",
"}"
] | // FindViewByID retrieves a view by id. | [
"FindViewByID",
"retrieves",
"a",
"view",
"by",
"id",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/view.go#L24-L40 |
12,803 | influxdata/platform | bolt/view.go | FindView | func (c *Client) FindView(ctx context.Context, filter platform.ViewFilter) (*platform.View, error) {
if filter.ID != nil {
return c.FindViewByID(ctx, *filter.ID)
}
var d *platform.View
err := c.db.View(func(tx *bolt.Tx) error {
filterFn := filterViewsFn(filter)
return c.forEachView(ctx, tx, func(dash *platform.View) bool {
if filterFn(dash) {
d = dash
return false
}
return true
})
})
if err != nil {
return nil, &platform.Error{
Err: err,
}
}
if d == nil {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: platform.ErrViewNotFound,
}
}
return d, nil
} | go | func (c *Client) FindView(ctx context.Context, filter platform.ViewFilter) (*platform.View, error) {
if filter.ID != nil {
return c.FindViewByID(ctx, *filter.ID)
}
var d *platform.View
err := c.db.View(func(tx *bolt.Tx) error {
filterFn := filterViewsFn(filter)
return c.forEachView(ctx, tx, func(dash *platform.View) bool {
if filterFn(dash) {
d = dash
return false
}
return true
})
})
if err != nil {
return nil, &platform.Error{
Err: err,
}
}
if d == nil {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: platform.ErrViewNotFound,
}
}
return d, nil
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"FindView",
"(",
"ctx",
"context",
".",
"Context",
",",
"filter",
"platform",
".",
"ViewFilter",
")",
"(",
"*",
"platform",
".",
"View",
",",
"error",
")",
"{",
"if",
"filter",
".",
"ID",
"!=",
"nil",
"{",
"return",
"c",
".",
"FindViewByID",
"(",
"ctx",
",",
"*",
"filter",
".",
"ID",
")",
"\n",
"}",
"\n\n",
"var",
"d",
"*",
"platform",
".",
"View",
"\n",
"err",
":=",
"c",
".",
"db",
".",
"View",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"filterFn",
":=",
"filterViewsFn",
"(",
"filter",
")",
"\n",
"return",
"c",
".",
"forEachView",
"(",
"ctx",
",",
"tx",
",",
"func",
"(",
"dash",
"*",
"platform",
".",
"View",
")",
"bool",
"{",
"if",
"filterFn",
"(",
"dash",
")",
"{",
"d",
"=",
"dash",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}",
")",
"\n",
"}",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"}",
"\n",
"}",
"\n\n",
"if",
"d",
"==",
"nil",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Code",
":",
"platform",
".",
"ENotFound",
",",
"Msg",
":",
"platform",
".",
"ErrViewNotFound",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"d",
",",
"nil",
"\n",
"}"
] | // FindView retrieves a view using an arbitrary view filter. | [
"FindView",
"retrieves",
"a",
"view",
"using",
"an",
"arbitrary",
"view",
"filter",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/view.go#L70-L101 |
12,804 | influxdata/platform | bolt/view.go | FindViews | func (c *Client) FindViews(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) {
ds := []*platform.View{}
op := getOp(platform.OpFindViews)
if filter.ID != nil {
d, err := c.FindViewByID(ctx, *filter.ID)
if err != nil && platform.ErrorCode(err) != platform.ENotFound {
return nil, 0, &platform.Error{
Err: err,
Op: op,
}
}
if d != nil {
ds = append(ds, d)
}
return ds, 1, nil
}
err := c.db.View(func(tx *bolt.Tx) error {
dashs, err := c.findViews(ctx, tx, filter)
if err != nil {
return &platform.Error{
Err: err,
Op: op,
}
}
ds = dashs
return nil
})
return ds, len(ds), err
} | go | func (c *Client) FindViews(ctx context.Context, filter platform.ViewFilter) ([]*platform.View, int, error) {
ds := []*platform.View{}
op := getOp(platform.OpFindViews)
if filter.ID != nil {
d, err := c.FindViewByID(ctx, *filter.ID)
if err != nil && platform.ErrorCode(err) != platform.ENotFound {
return nil, 0, &platform.Error{
Err: err,
Op: op,
}
}
if d != nil {
ds = append(ds, d)
}
return ds, 1, nil
}
err := c.db.View(func(tx *bolt.Tx) error {
dashs, err := c.findViews(ctx, tx, filter)
if err != nil {
return &platform.Error{
Err: err,
Op: op,
}
}
ds = dashs
return nil
})
return ds, len(ds), err
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"FindViews",
"(",
"ctx",
"context",
".",
"Context",
",",
"filter",
"platform",
".",
"ViewFilter",
")",
"(",
"[",
"]",
"*",
"platform",
".",
"View",
",",
"int",
",",
"error",
")",
"{",
"ds",
":=",
"[",
"]",
"*",
"platform",
".",
"View",
"{",
"}",
"\n",
"op",
":=",
"getOp",
"(",
"platform",
".",
"OpFindViews",
")",
"\n",
"if",
"filter",
".",
"ID",
"!=",
"nil",
"{",
"d",
",",
"err",
":=",
"c",
".",
"FindViewByID",
"(",
"ctx",
",",
"*",
"filter",
".",
"ID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"&&",
"platform",
".",
"ErrorCode",
"(",
"err",
")",
"!=",
"platform",
".",
"ENotFound",
"{",
"return",
"nil",
",",
"0",
",",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"op",
",",
"}",
"\n",
"}",
"\n",
"if",
"d",
"!=",
"nil",
"{",
"ds",
"=",
"append",
"(",
"ds",
",",
"d",
")",
"\n",
"}",
"\n\n",
"return",
"ds",
",",
"1",
",",
"nil",
"\n",
"}",
"\n\n",
"err",
":=",
"c",
".",
"db",
".",
"View",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"dashs",
",",
"err",
":=",
"c",
".",
"findViews",
"(",
"ctx",
",",
"tx",
",",
"filter",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"op",
",",
"}",
"\n",
"}",
"\n",
"ds",
"=",
"dashs",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n\n",
"return",
"ds",
",",
"len",
"(",
"ds",
")",
",",
"err",
"\n",
"}"
] | // FindViews retrives all views that match an arbitrary view filter. | [
"FindViews",
"retrives",
"all",
"views",
"that",
"match",
"an",
"arbitrary",
"view",
"filter",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/view.go#L125-L156 |
12,805 | influxdata/platform | bolt/view.go | CreateView | func (c *Client) CreateView(ctx context.Context, d *platform.View) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.createView(ctx, tx, d); pe != nil {
return &platform.Error{
Op: getOp(platform.OpCreateView),
Err: pe,
}
}
return nil
})
} | go | func (c *Client) CreateView(ctx context.Context, d *platform.View) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.createView(ctx, tx, d); pe != nil {
return &platform.Error{
Op: getOp(platform.OpCreateView),
Err: pe,
}
}
return nil
})
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"CreateView",
"(",
"ctx",
"context",
".",
"Context",
",",
"d",
"*",
"platform",
".",
"View",
")",
"error",
"{",
"return",
"c",
".",
"db",
".",
"Update",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"if",
"pe",
":=",
"c",
".",
"createView",
"(",
"ctx",
",",
"tx",
",",
"d",
")",
";",
"pe",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Op",
":",
"getOp",
"(",
"platform",
".",
"OpCreateView",
")",
",",
"Err",
":",
"pe",
",",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"}"
] | // CreateView creates a platform view and sets d.ID. | [
"CreateView",
"creates",
"a",
"platform",
"view",
"and",
"sets",
"d",
".",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/view.go#L177-L187 |
12,806 | influxdata/platform | bolt/view.go | PutView | func (c *Client) PutView(ctx context.Context, d *platform.View) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.putView(ctx, tx, d); pe != nil {
return pe
}
return nil
})
} | go | func (c *Client) PutView(ctx context.Context, d *platform.View) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.putView(ctx, tx, d); pe != nil {
return pe
}
return nil
})
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"PutView",
"(",
"ctx",
"context",
".",
"Context",
",",
"d",
"*",
"platform",
".",
"View",
")",
"error",
"{",
"return",
"c",
".",
"db",
".",
"Update",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"if",
"pe",
":=",
"c",
".",
"putView",
"(",
"ctx",
",",
"tx",
",",
"d",
")",
";",
"pe",
"!=",
"nil",
"{",
"return",
"pe",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"}"
] | // PutView will put a view without setting an ID. | [
"PutView",
"will",
"put",
"a",
"view",
"without",
"setting",
"an",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/view.go#L195-L202 |
12,807 | influxdata/platform | bolt/view.go | UpdateView | func (c *Client) UpdateView(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
var d *platform.View
err := c.db.Update(func(tx *bolt.Tx) error {
dash, pe := c.updateView(ctx, tx, id, upd)
if pe != nil {
return &platform.Error{
Err: pe,
Op: getOp(platform.OpUpdateView),
}
}
d = dash
return nil
})
return d, err
} | go | func (c *Client) UpdateView(ctx context.Context, id platform.ID, upd platform.ViewUpdate) (*platform.View, error) {
var d *platform.View
err := c.db.Update(func(tx *bolt.Tx) error {
dash, pe := c.updateView(ctx, tx, id, upd)
if pe != nil {
return &platform.Error{
Err: pe,
Op: getOp(platform.OpUpdateView),
}
}
d = dash
return nil
})
return d, err
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"UpdateView",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
",",
"upd",
"platform",
".",
"ViewUpdate",
")",
"(",
"*",
"platform",
".",
"View",
",",
"error",
")",
"{",
"var",
"d",
"*",
"platform",
".",
"View",
"\n",
"err",
":=",
"c",
".",
"db",
".",
"Update",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"dash",
",",
"pe",
":=",
"c",
".",
"updateView",
"(",
"ctx",
",",
"tx",
",",
"id",
",",
"upd",
")",
"\n",
"if",
"pe",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"pe",
",",
"Op",
":",
"getOp",
"(",
"platform",
".",
"OpUpdateView",
")",
",",
"}",
"\n",
"}",
"\n",
"d",
"=",
"dash",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n\n",
"return",
"d",
",",
"err",
"\n",
"}"
] | // UpdateView updates a view according the parameters set on upd. | [
"UpdateView",
"updates",
"a",
"view",
"according",
"the",
"parameters",
"set",
"on",
"upd",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/view.go#L242-L257 |
12,808 | influxdata/platform | bolt/view.go | DeleteView | func (c *Client) DeleteView(ctx context.Context, id platform.ID) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.deleteView(ctx, tx, id); pe != nil {
return &platform.Error{
Err: pe,
Op: getOp(platform.OpDeleteView),
}
}
return nil
})
} | go | func (c *Client) DeleteView(ctx context.Context, id platform.ID) error {
return c.db.Update(func(tx *bolt.Tx) error {
if pe := c.deleteView(ctx, tx, id); pe != nil {
return &platform.Error{
Err: pe,
Op: getOp(platform.OpDeleteView),
}
}
return nil
})
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"DeleteView",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
")",
"error",
"{",
"return",
"c",
".",
"db",
".",
"Update",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"if",
"pe",
":=",
"c",
".",
"deleteView",
"(",
"ctx",
",",
"tx",
",",
"id",
")",
";",
"pe",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"pe",
",",
"Op",
":",
"getOp",
"(",
"platform",
".",
"OpDeleteView",
")",
",",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"}"
] | // DeleteView deletes a view and prunes it from the index. | [
"DeleteView",
"deletes",
"a",
"view",
"and",
"prunes",
"it",
"from",
"the",
"index",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/view.go#L281-L291 |
12,809 | influxdata/platform | http/client.go | NewService | func NewService(addr, token string) *Service {
return &Service{
Addr: addr,
Token: token,
AuthorizationService: &AuthorizationService{
Addr: addr,
Token: token,
},
OrganizationService: &OrganizationService{
Addr: addr,
Token: token,
},
UserService: &UserService{
Addr: addr,
Token: token,
},
BucketService: &BucketService{
Addr: addr,
Token: token,
},
QueryService: &QueryService{
Addr: addr,
Token: token,
},
DashboardService: &DashboardService{
Addr: addr,
Token: token,
},
MacroService: &MacroService{
Addr: addr,
Token: token,
},
}
} | go | func NewService(addr, token string) *Service {
return &Service{
Addr: addr,
Token: token,
AuthorizationService: &AuthorizationService{
Addr: addr,
Token: token,
},
OrganizationService: &OrganizationService{
Addr: addr,
Token: token,
},
UserService: &UserService{
Addr: addr,
Token: token,
},
BucketService: &BucketService{
Addr: addr,
Token: token,
},
QueryService: &QueryService{
Addr: addr,
Token: token,
},
DashboardService: &DashboardService{
Addr: addr,
Token: token,
},
MacroService: &MacroService{
Addr: addr,
Token: token,
},
}
} | [
"func",
"NewService",
"(",
"addr",
",",
"token",
"string",
")",
"*",
"Service",
"{",
"return",
"&",
"Service",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"AuthorizationService",
":",
"&",
"AuthorizationService",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"}",
",",
"OrganizationService",
":",
"&",
"OrganizationService",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"}",
",",
"UserService",
":",
"&",
"UserService",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"}",
",",
"BucketService",
":",
"&",
"BucketService",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"}",
",",
"QueryService",
":",
"&",
"QueryService",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"}",
",",
"DashboardService",
":",
"&",
"DashboardService",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"}",
",",
"MacroService",
":",
"&",
"MacroService",
"{",
"Addr",
":",
"addr",
",",
"Token",
":",
"token",
",",
"}",
",",
"}",
"\n",
"}"
] | // NewService returns a service that is an HTTP
// client to a remote | [
"NewService",
"returns",
"a",
"service",
"that",
"is",
"an",
"HTTP",
"client",
"to",
"a",
"remote"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/client.go#L26-L59 |
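A brief usage sketch for the constructor above. The address and token are placeholders, and the import alias is an assumption based on the file living in the repo's `http` package; the `Addr` and sub-service fields are taken from the row itself.

```go
package main

import (
	"fmt"

	influxhttp "github.com/influxdata/platform/http"
)

func main() {
	// Every sub-service client is wired with the same address/token pair.
	svc := influxhttp.NewService("http://localhost:9999", "my-secret-token")
	fmt.Println(svc.Addr, svc.BucketService != nil)
}
```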
12,810 | influxdata/platform | http/query_handler.go | postFluxSpec | func (h *FluxHandler) postFluxSpec(w http.ResponseWriter, r *http.Request) {
var req langRequest
ctx := r.Context()
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
EncodeError(ctx, errors.MalformedDataf("invalid json: %v", err), w)
return
}
spec, err := flux.Compile(ctx, req.Query, h.Now())
if err != nil {
EncodeError(ctx, errors.InvalidDataf("invalid spec: %v", err), w)
return
}
res := postFluxSpecResponse{
Spec: spec,
}
if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | go | func (h *FluxHandler) postFluxSpec(w http.ResponseWriter, r *http.Request) {
var req langRequest
ctx := r.Context()
err := json.NewDecoder(r.Body).Decode(&req)
if err != nil {
EncodeError(ctx, errors.MalformedDataf("invalid json: %v", err), w)
return
}
spec, err := flux.Compile(ctx, req.Query, h.Now())
if err != nil {
EncodeError(ctx, errors.InvalidDataf("invalid spec: %v", err), w)
return
}
res := postFluxSpecResponse{
Spec: spec,
}
if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | [
"func",
"(",
"h",
"*",
"FluxHandler",
")",
"postFluxSpec",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"r",
"*",
"http",
".",
"Request",
")",
"{",
"var",
"req",
"langRequest",
"\n",
"ctx",
":=",
"r",
".",
"Context",
"(",
")",
"\n\n",
"err",
":=",
"json",
".",
"NewDecoder",
"(",
"r",
".",
"Body",
")",
".",
"Decode",
"(",
"&",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"errors",
".",
"MalformedDataf",
"(",
"\"",
"\"",
",",
"err",
")",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"spec",
",",
"err",
":=",
"flux",
".",
"Compile",
"(",
"ctx",
",",
"req",
".",
"Query",
",",
"h",
".",
"Now",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"errors",
".",
"InvalidDataf",
"(",
"\"",
"\"",
",",
"err",
")",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"res",
":=",
"postFluxSpecResponse",
"{",
"Spec",
":",
"spec",
",",
"}",
"\n\n",
"if",
"err",
":=",
"encodeResponse",
"(",
"ctx",
",",
"w",
",",
"http",
".",
"StatusOK",
",",
"res",
")",
";",
"err",
"!=",
"nil",
"{",
"logEncodingError",
"(",
"h",
".",
"Logger",
",",
"r",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}"
] | // postFluxSpec returns a flux Spec for provided flux string | [
"postFluxSpec",
"returns",
"a",
"flux",
"Spec",
"for",
"provided",
"flux",
"string"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/query_handler.go#L158-L182 |
12,811 | influxdata/platform | http/query_handler.go | getFluxSuggestion | func (h *FluxHandler) getFluxSuggestion(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
name := httprouter.ParamsFromContext(ctx).ByName("name")
completer := complete.DefaultCompleter()
suggestion, err := completer.FunctionSuggestion(name)
if err != nil {
EncodeError(ctx, err, w)
return
}
res := suggestionResponse{Name: name, Params: suggestion.Params}
if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | go | func (h *FluxHandler) getFluxSuggestion(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
name := httprouter.ParamsFromContext(ctx).ByName("name")
completer := complete.DefaultCompleter()
suggestion, err := completer.FunctionSuggestion(name)
if err != nil {
EncodeError(ctx, err, w)
return
}
res := suggestionResponse{Name: name, Params: suggestion.Params}
if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | [
"func",
"(",
"h",
"*",
"FluxHandler",
")",
"getFluxSuggestion",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"r",
"*",
"http",
".",
"Request",
")",
"{",
"ctx",
":=",
"r",
".",
"Context",
"(",
")",
"\n",
"name",
":=",
"httprouter",
".",
"ParamsFromContext",
"(",
"ctx",
")",
".",
"ByName",
"(",
"\"",
"\"",
")",
"\n",
"completer",
":=",
"complete",
".",
"DefaultCompleter",
"(",
")",
"\n\n",
"suggestion",
",",
"err",
":=",
"completer",
".",
"FunctionSuggestion",
"(",
"name",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"res",
":=",
"suggestionResponse",
"{",
"Name",
":",
"name",
",",
"Params",
":",
"suggestion",
".",
"Params",
"}",
"\n",
"if",
"err",
":=",
"encodeResponse",
"(",
"ctx",
",",
"w",
",",
"http",
".",
"StatusOK",
",",
"res",
")",
";",
"err",
"!=",
"nil",
"{",
"logEncodingError",
"(",
"h",
".",
"Logger",
",",
"r",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}"
] | // getFluxSuggestion returns the function parameters for the requested function | [
"getFluxSuggestion",
"returns",
"the",
"function",
"parameters",
"for",
"the",
"requested",
"function"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/query_handler.go#L234-L250 |
12,812 | influxdata/platform | http/query_handler.go | Query | func (s *FluxService) Query(ctx context.Context, w io.Writer, r *query.ProxyRequest) (int64, error) {
u, err := newURL(s.Addr, fluxPath)
if err != nil {
return 0, err
}
qreq, err := QueryRequestFromProxyRequest(r)
if err != nil {
return 0, err
}
var body bytes.Buffer
if err := json.NewEncoder(&body).Encode(qreq); err != nil {
return 0, err
}
hreq, err := http.NewRequest("POST", u.String(), &body)
if err != nil {
return 0, err
}
SetToken(s.Token, hreq)
hreq.Header.Set("Content-Type", "application/json")
hreq.Header.Set("Accept", "text/csv")
hreq = hreq.WithContext(ctx)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(hreq)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if err := CheckError(resp, true); err != nil {
return 0, err
}
if err := CheckError(resp); err != nil {
return 0, err
}
return io.Copy(w, resp.Body)
} | go | func (s *FluxService) Query(ctx context.Context, w io.Writer, r *query.ProxyRequest) (int64, error) {
u, err := newURL(s.Addr, fluxPath)
if err != nil {
return 0, err
}
qreq, err := QueryRequestFromProxyRequest(r)
if err != nil {
return 0, err
}
var body bytes.Buffer
if err := json.NewEncoder(&body).Encode(qreq); err != nil {
return 0, err
}
hreq, err := http.NewRequest("POST", u.String(), &body)
if err != nil {
return 0, err
}
SetToken(s.Token, hreq)
hreq.Header.Set("Content-Type", "application/json")
hreq.Header.Set("Accept", "text/csv")
hreq = hreq.WithContext(ctx)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(hreq)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if err := CheckError(resp, true); err != nil {
return 0, err
}
if err := CheckError(resp); err != nil {
return 0, err
}
return io.Copy(w, resp.Body)
} | [
"func",
"(",
"s",
"*",
"FluxService",
")",
"Query",
"(",
"ctx",
"context",
".",
"Context",
",",
"w",
"io",
".",
"Writer",
",",
"r",
"*",
"query",
".",
"ProxyRequest",
")",
"(",
"int64",
",",
"error",
")",
"{",
"u",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"fluxPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"qreq",
",",
"err",
":=",
"QueryRequestFromProxyRequest",
"(",
"r",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"var",
"body",
"bytes",
".",
"Buffer",
"\n",
"if",
"err",
":=",
"json",
".",
"NewEncoder",
"(",
"&",
"body",
")",
".",
"Encode",
"(",
"qreq",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"hreq",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"u",
".",
"String",
"(",
")",
",",
"&",
"body",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"hreq",
")",
"\n\n",
"hreq",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"hreq",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"hreq",
"=",
"hreq",
".",
"WithContext",
"(",
"ctx",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"u",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"hreq",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"defer",
"resp",
".",
"Body",
".",
"Close",
"(",
")",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"return",
"io",
".",
"Copy",
"(",
"w",
",",
"resp",
".",
"Body",
")",
"\n",
"}"
] | // Query runs a flux query against a influx server and sends the results to the io.Writer.
// Will use the token from the context over the token within the service struct. | [
"Query",
"runs",
"a",
"flux",
"query",
"against",
"a",
"influx",
"server",
"and",
"sends",
"the",
"results",
"to",
"the",
"io",
".",
"Writer",
".",
"Will",
"use",
"the",
"token",
"from",
"the",
"context",
"over",
"the",
"token",
"within",
"the",
"service",
"struct",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/query_handler.go#L269-L309 |
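A sketch of calling `FluxService.Query` from client code. The `Addr`/`Token` fields and the method signature come from the row above; the proxy request itself is left as a placeholder because building a real `query.Request` needs a compiler that is not shown here.

```go
package main

import (
	"context"
	"log"
	"os"

	influxhttp "github.com/influxdata/platform/http"
	"github.com/influxdata/platform/query"
)

func main() {
	svc := &influxhttp.FluxService{Addr: "http://localhost:9999", Token: "my-secret-token"}

	// Assume Request (and its compiler) and Dialect are populated elsewhere;
	// the fields appear in the FluxQueryService row below.
	var preq query.ProxyRequest

	n, err := svc.Query(context.Background(), os.Stdout, &preq)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes of CSV", n)
}
```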
12,813 | influxdata/platform | http/query_handler.go | Query | func (s *FluxQueryService) Query(ctx context.Context, r *query.Request) (flux.ResultIterator, error) {
u, err := newURL(s.Addr, fluxPath)
if err != nil {
return nil, err
}
params := url.Values{}
params.Set(OrgID, r.OrganizationID.String())
u.RawQuery = params.Encode()
preq := &query.ProxyRequest{
Request: *r,
Dialect: csv.DefaultDialect(),
}
qreq, err := QueryRequestFromProxyRequest(preq)
if err != nil {
return nil, err
}
var body bytes.Buffer
if err := json.NewEncoder(&body).Encode(qreq); err != nil {
return nil, err
}
hreq, err := http.NewRequest("POST", u.String(), &body)
if err != nil {
return nil, err
}
SetToken(s.Token, hreq)
hreq.Header.Set("Content-Type", "application/json")
hreq.Header.Set("Accept", "text/csv")
hreq = hreq.WithContext(ctx)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(hreq)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
decoder := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{})
return decoder.Decode(resp.Body)
} | go | func (s *FluxQueryService) Query(ctx context.Context, r *query.Request) (flux.ResultIterator, error) {
u, err := newURL(s.Addr, fluxPath)
if err != nil {
return nil, err
}
params := url.Values{}
params.Set(OrgID, r.OrganizationID.String())
u.RawQuery = params.Encode()
preq := &query.ProxyRequest{
Request: *r,
Dialect: csv.DefaultDialect(),
}
qreq, err := QueryRequestFromProxyRequest(preq)
if err != nil {
return nil, err
}
var body bytes.Buffer
if err := json.NewEncoder(&body).Encode(qreq); err != nil {
return nil, err
}
hreq, err := http.NewRequest("POST", u.String(), &body)
if err != nil {
return nil, err
}
SetToken(s.Token, hreq)
hreq.Header.Set("Content-Type", "application/json")
hreq.Header.Set("Accept", "text/csv")
hreq = hreq.WithContext(ctx)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(hreq)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
decoder := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{})
return decoder.Decode(resp.Body)
} | [
"func",
"(",
"s",
"*",
"FluxQueryService",
")",
"Query",
"(",
"ctx",
"context",
".",
"Context",
",",
"r",
"*",
"query",
".",
"Request",
")",
"(",
"flux",
".",
"ResultIterator",
",",
"error",
")",
"{",
"u",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"fluxPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"params",
":=",
"url",
".",
"Values",
"{",
"}",
"\n",
"params",
".",
"Set",
"(",
"OrgID",
",",
"r",
".",
"OrganizationID",
".",
"String",
"(",
")",
")",
"\n",
"u",
".",
"RawQuery",
"=",
"params",
".",
"Encode",
"(",
")",
"\n\n",
"preq",
":=",
"&",
"query",
".",
"ProxyRequest",
"{",
"Request",
":",
"*",
"r",
",",
"Dialect",
":",
"csv",
".",
"DefaultDialect",
"(",
")",
",",
"}",
"\n",
"qreq",
",",
"err",
":=",
"QueryRequestFromProxyRequest",
"(",
"preq",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"var",
"body",
"bytes",
".",
"Buffer",
"\n",
"if",
"err",
":=",
"json",
".",
"NewEncoder",
"(",
"&",
"body",
")",
".",
"Encode",
"(",
"qreq",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"hreq",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"u",
".",
"String",
"(",
")",
",",
"&",
"body",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"hreq",
")",
"\n\n",
"hreq",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"hreq",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"hreq",
"=",
"hreq",
".",
"WithContext",
"(",
"ctx",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"u",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"hreq",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"decoder",
":=",
"csv",
".",
"NewMultiResultDecoder",
"(",
"csv",
".",
"ResultDecoderConfig",
"{",
"}",
")",
"\n",
"return",
"decoder",
".",
"Decode",
"(",
"resp",
".",
"Body",
")",
"\n",
"}"
] | // Query runs a flux query against a influx server and decodes the result | [
"Query",
"runs",
"a",
"flux",
"query",
"against",
"a",
"influx",
"server",
"and",
"decodes",
"the",
"result"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/query_handler.go#L321-L366 |
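A sketch of consuming the `flux.ResultIterator` returned above. The `More`/`Next`/`Err`/`Release` methods are assumptions about the flux package's iterator interface; they are not shown in this row.

```go
package queryexample

import (
	"github.com/influxdata/flux"
)

// consumeResults drains a flux.ResultIterator such as the one returned by
// FluxQueryService.Query. The iterator methods used here are assumed from
// the flux package, not taken from this dataset row.
func consumeResults(it flux.ResultIterator) error {
	defer it.Release()
	for it.More() {
		res := it.Next()
		_ = res // a real caller would walk res.Tables() here
	}
	return it.Err()
}
```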
12,814 | influxdata/platform | mock/proto.go | FindProtos | func (s *ProtoService) FindProtos(ctx context.Context) ([]*platform.Proto, error) {
if s.FindProtosFn == nil {
return nil, errors.New("not implemented")
}
return s.FindProtosFn(ctx)
} | go | func (s *ProtoService) FindProtos(ctx context.Context) ([]*platform.Proto, error) {
if s.FindProtosFn == nil {
return nil, errors.New("not implemented")
}
return s.FindProtosFn(ctx)
} | [
"func",
"(",
"s",
"*",
"ProtoService",
")",
"FindProtos",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"[",
"]",
"*",
"platform",
".",
"Proto",
",",
"error",
")",
"{",
"if",
"s",
".",
"FindProtosFn",
"==",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"s",
".",
"FindProtosFn",
"(",
"ctx",
")",
"\n",
"}"
] | // FindProtos returns a list of protos. | [
"FindProtos",
"returns",
"a",
"list",
"of",
"protos",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/mock/proto.go#L18-L23 |
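A sketch of wiring this mock in a test. The `mock` package path is inferred from the file path above, and the stubbed return value is hypothetical; the `FindProtosFn` field and its signature are taken from the row.

```go
package main

import (
	"context"
	"fmt"
	"log"

	platform "github.com/influxdata/platform"
	"github.com/influxdata/platform/mock"
)

func main() {
	svc := &mock.ProtoService{
		// Override only the function field the test needs.
		FindProtosFn: func(ctx context.Context) ([]*platform.Proto, error) {
			return []*platform.Proto{}, nil
		},
	}
	protos, err := svc.FindProtos(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(protos))
}
```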
12,815 | influxdata/platform | mock/proto.go | CreateDashboardsFromProto | func (s *ProtoService) CreateDashboardsFromProto(ctx context.Context, protoID, orgID platform.ID) ([]*platform.Dashboard, error) {
if s.CreateDashboardsFromProtoFn == nil {
return nil, errors.New("not implemented")
}
return s.CreateDashboardsFromProtoFn(ctx, protoID, orgID)
} | go | func (s *ProtoService) CreateDashboardsFromProto(ctx context.Context, protoID, orgID platform.ID) ([]*platform.Dashboard, error) {
if s.CreateDashboardsFromProtoFn == nil {
return nil, errors.New("not implemented")
}
return s.CreateDashboardsFromProtoFn(ctx, protoID, orgID)
} | [
"func",
"(",
"s",
"*",
"ProtoService",
")",
"CreateDashboardsFromProto",
"(",
"ctx",
"context",
".",
"Context",
",",
"protoID",
",",
"orgID",
"platform",
".",
"ID",
")",
"(",
"[",
"]",
"*",
"platform",
".",
"Dashboard",
",",
"error",
")",
"{",
"if",
"s",
".",
"CreateDashboardsFromProtoFn",
"==",
"nil",
"{",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"s",
".",
"CreateDashboardsFromProtoFn",
"(",
"ctx",
",",
"protoID",
",",
"orgID",
")",
"\n",
"}"
] | // CreateDashboardsFromProto creates a new set of dashboards for a proto | [
"CreateDashboardsFromProto",
"creates",
"a",
"new",
"set",
"of",
"dashboards",
"for",
"a",
"proto"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/mock/proto.go#L26-L31 |
12,816 | influxdata/platform | task/backend/scheduler.go | Work | func (ts *taskScheduler) Work() {
for _, r := range ts.runners {
r.Start()
if r.IsIdle() {
// Ran out of jobs to start.
break
}
}
} | go | func (ts *taskScheduler) Work() {
for _, r := range ts.runners {
r.Start()
if r.IsIdle() {
// Ran out of jobs to start.
break
}
}
} | [
"func",
"(",
"ts",
"*",
"taskScheduler",
")",
"Work",
"(",
")",
"{",
"for",
"_",
",",
"r",
":=",
"range",
"ts",
".",
"runners",
"{",
"r",
".",
"Start",
"(",
")",
"\n",
"if",
"r",
".",
"IsIdle",
"(",
")",
"{",
"// Ran out of jobs to start.",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // Work begins a work cycle on the taskScheduler.
// As many runners are started as possible. | [
"Work",
"begins",
"a",
"work",
"cycle",
"on",
"the",
"taskScheduler",
".",
"As",
"many",
"runners",
"are",
"started",
"as",
"possible",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/task/backend/scheduler.go#L442-L450 |
12,817 | influxdata/platform | task/backend/scheduler.go | RestartRun | func (r *runner) RestartRun(qr QueuedRun) bool {
if !atomic.CompareAndSwapUint32(r.state, runnerIdle, runnerWorking) {
// already working
return false
}
// create a QueuedRun because we cant stm.CreateNextRun
runLogger := r.logger.With(zap.String("run_id", qr.RunID.String()), zap.Int64("now", qr.Now))
r.wg.Add(1)
r.ts.runningMu.Lock()
rCtx, ok := r.ts.running[qr.RunID]
if !ok {
ctx, cancel := context.WithCancel(context.TODO())
rCtx = runCtx{Context: ctx, CancelFunc: cancel}
r.ts.running[qr.RunID] = rCtx
}
r.ts.runningMu.Unlock()
go r.executeAndWait(rCtx.Context, qr, runLogger)
r.updateRunState(qr, RunStarted, runLogger)
return true
} | go | func (r *runner) RestartRun(qr QueuedRun) bool {
if !atomic.CompareAndSwapUint32(r.state, runnerIdle, runnerWorking) {
// already working
return false
}
// create a QueuedRun because we cant stm.CreateNextRun
runLogger := r.logger.With(zap.String("run_id", qr.RunID.String()), zap.Int64("now", qr.Now))
r.wg.Add(1)
r.ts.runningMu.Lock()
rCtx, ok := r.ts.running[qr.RunID]
if !ok {
ctx, cancel := context.WithCancel(context.TODO())
rCtx = runCtx{Context: ctx, CancelFunc: cancel}
r.ts.running[qr.RunID] = rCtx
}
r.ts.runningMu.Unlock()
go r.executeAndWait(rCtx.Context, qr, runLogger)
r.updateRunState(qr, RunStarted, runLogger)
return true
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"RestartRun",
"(",
"qr",
"QueuedRun",
")",
"bool",
"{",
"if",
"!",
"atomic",
".",
"CompareAndSwapUint32",
"(",
"r",
".",
"state",
",",
"runnerIdle",
",",
"runnerWorking",
")",
"{",
"// already working",
"return",
"false",
"\n",
"}",
"\n",
"// create a QueuedRun because we cant stm.CreateNextRun",
"runLogger",
":=",
"r",
".",
"logger",
".",
"With",
"(",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"qr",
".",
"RunID",
".",
"String",
"(",
")",
")",
",",
"zap",
".",
"Int64",
"(",
"\"",
"\"",
",",
"qr",
".",
"Now",
")",
")",
"\n",
"r",
".",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"r",
".",
"ts",
".",
"runningMu",
".",
"Lock",
"(",
")",
"\n",
"rCtx",
",",
"ok",
":=",
"r",
".",
"ts",
".",
"running",
"[",
"qr",
".",
"RunID",
"]",
"\n",
"if",
"!",
"ok",
"{",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"context",
".",
"TODO",
"(",
")",
")",
"\n",
"rCtx",
"=",
"runCtx",
"{",
"Context",
":",
"ctx",
",",
"CancelFunc",
":",
"cancel",
"}",
"\n",
"r",
".",
"ts",
".",
"running",
"[",
"qr",
".",
"RunID",
"]",
"=",
"rCtx",
"\n",
"}",
"\n",
"r",
".",
"ts",
".",
"runningMu",
".",
"Unlock",
"(",
")",
"\n",
"go",
"r",
".",
"executeAndWait",
"(",
"rCtx",
".",
"Context",
",",
"qr",
",",
"runLogger",
")",
"\n\n",
"r",
".",
"updateRunState",
"(",
"qr",
",",
"RunStarted",
",",
"runLogger",
")",
"\n",
"return",
"true",
"\n",
"}"
] | // RestartRun attempts to restart a queued run if the runner is available to do the work.
// If the runner was already busy we return false. | [
"RestartRun",
"attempts",
"to",
"restart",
"a",
"queued",
"run",
"if",
"the",
"runner",
"is",
"available",
"to",
"do",
"the",
"work",
".",
"If",
"the",
"runner",
"was",
"already",
"busy",
"we",
"return",
"false",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/task/backend/scheduler.go#L567-L587 |
12,818 | influxdata/platform | task/backend/scheduler.go | startFromWorking | func (r *runner) startFromWorking(now int64) {
if nextDue, hasQueue := r.ts.NextDue(); now < nextDue && !hasQueue {
// Not ready for a new run. Go idle again.
atomic.StoreUint32(r.state, runnerIdle)
return
}
ctx, cancel := context.WithCancel(r.ctx)
rc, err := r.desiredState.CreateNextRun(ctx, r.task.ID, now)
if err != nil {
r.logger.Info("Failed to create run", zap.Error(err))
atomic.StoreUint32(r.state, runnerIdle)
cancel() // cancel to prevent context leak
return
}
qr := rc.Created
r.ts.runningMu.Lock()
r.ts.running[qr.RunID] = runCtx{Context: ctx, CancelFunc: cancel}
r.ts.runningMu.Unlock()
r.ts.SetNextDue(rc.NextDue, rc.HasQueue, qr.Now)
// Create a new child logger for the individual run.
// We can't do r.logger = r.logger.With(zap.String("run_id", qr.RunID.String()) because zap doesn't deduplicate fields,
// and we'll quickly end up with many run_ids associated with the log.
runLogger := r.logger.With(zap.String("run_id", qr.RunID.String()), zap.Int64("now", qr.Now))
runLogger.Info("Created run; beginning execution")
r.wg.Add(1)
go r.executeAndWait(ctx, qr, runLogger)
r.updateRunState(qr, RunStarted, runLogger)
} | go | func (r *runner) startFromWorking(now int64) {
if nextDue, hasQueue := r.ts.NextDue(); now < nextDue && !hasQueue {
// Not ready for a new run. Go idle again.
atomic.StoreUint32(r.state, runnerIdle)
return
}
ctx, cancel := context.WithCancel(r.ctx)
rc, err := r.desiredState.CreateNextRun(ctx, r.task.ID, now)
if err != nil {
r.logger.Info("Failed to create run", zap.Error(err))
atomic.StoreUint32(r.state, runnerIdle)
cancel() // cancel to prevent context leak
return
}
qr := rc.Created
r.ts.runningMu.Lock()
r.ts.running[qr.RunID] = runCtx{Context: ctx, CancelFunc: cancel}
r.ts.runningMu.Unlock()
r.ts.SetNextDue(rc.NextDue, rc.HasQueue, qr.Now)
// Create a new child logger for the individual run.
// We can't do r.logger = r.logger.With(zap.String("run_id", qr.RunID.String()) because zap doesn't deduplicate fields,
// and we'll quickly end up with many run_ids associated with the log.
runLogger := r.logger.With(zap.String("run_id", qr.RunID.String()), zap.Int64("now", qr.Now))
runLogger.Info("Created run; beginning execution")
r.wg.Add(1)
go r.executeAndWait(ctx, qr, runLogger)
r.updateRunState(qr, RunStarted, runLogger)
} | [
"func",
"(",
"r",
"*",
"runner",
")",
"startFromWorking",
"(",
"now",
"int64",
")",
"{",
"if",
"nextDue",
",",
"hasQueue",
":=",
"r",
".",
"ts",
".",
"NextDue",
"(",
")",
";",
"now",
"<",
"nextDue",
"&&",
"!",
"hasQueue",
"{",
"// Not ready for a new run. Go idle again.",
"atomic",
".",
"StoreUint32",
"(",
"r",
".",
"state",
",",
"runnerIdle",
")",
"\n",
"return",
"\n",
"}",
"\n",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"r",
".",
"ctx",
")",
"\n",
"rc",
",",
"err",
":=",
"r",
".",
"desiredState",
".",
"CreateNextRun",
"(",
"ctx",
",",
"r",
".",
"task",
".",
"ID",
",",
"now",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"logger",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Error",
"(",
"err",
")",
")",
"\n",
"atomic",
".",
"StoreUint32",
"(",
"r",
".",
"state",
",",
"runnerIdle",
")",
"\n",
"cancel",
"(",
")",
"// cancel to prevent context leak",
"\n",
"return",
"\n",
"}",
"\n",
"qr",
":=",
"rc",
".",
"Created",
"\n",
"r",
".",
"ts",
".",
"runningMu",
".",
"Lock",
"(",
")",
"\n",
"r",
".",
"ts",
".",
"running",
"[",
"qr",
".",
"RunID",
"]",
"=",
"runCtx",
"{",
"Context",
":",
"ctx",
",",
"CancelFunc",
":",
"cancel",
"}",
"\n",
"r",
".",
"ts",
".",
"runningMu",
".",
"Unlock",
"(",
")",
"\n",
"r",
".",
"ts",
".",
"SetNextDue",
"(",
"rc",
".",
"NextDue",
",",
"rc",
".",
"HasQueue",
",",
"qr",
".",
"Now",
")",
"\n\n",
"// Create a new child logger for the individual run.",
"// We can't do r.logger = r.logger.With(zap.String(\"run_id\", qr.RunID.String()) because zap doesn't deduplicate fields,",
"// and we'll quickly end up with many run_ids associated with the log.",
"runLogger",
":=",
"r",
".",
"logger",
".",
"With",
"(",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"qr",
".",
"RunID",
".",
"String",
"(",
")",
")",
",",
"zap",
".",
"Int64",
"(",
"\"",
"\"",
",",
"qr",
".",
"Now",
")",
")",
"\n\n",
"runLogger",
".",
"Info",
"(",
"\"",
"\"",
")",
"\n",
"r",
".",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"r",
".",
"executeAndWait",
"(",
"ctx",
",",
"qr",
",",
"runLogger",
")",
"\n\n",
"r",
".",
"updateRunState",
"(",
"qr",
",",
"RunStarted",
",",
"runLogger",
")",
"\n",
"}"
] | // startFromWorking attempts to create a run if one is due, and then begins execution on a separate goroutine.
// r.state must be runnerWorking when this is called. | [
"startFromWorking",
"attempts",
"to",
"create",
"a",
"run",
"if",
"one",
"is",
"due",
"and",
"then",
"begins",
"execution",
"on",
"a",
"separate",
"goroutine",
".",
"r",
".",
"state",
"must",
"be",
"runnerWorking",
"when",
"this",
"is",
"called",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/task/backend/scheduler.go#L591-L621 |
12,819 | influxdata/platform | mock/basic_auth.go | NewBasicAuthService | func NewBasicAuthService(user, password string) *BasicAuthService {
return &BasicAuthService{
SetPasswordFn: func(context.Context, string, string) error { return fmt.Errorf("mock error") },
ComparePasswordFn: func(context.Context, string, string) error { return fmt.Errorf("mock error") },
CompareAndSetPasswordFn: func(context.Context, string, string, string) error { return fmt.Errorf("mock error") },
}
} | go | func NewBasicAuthService(user, password string) *BasicAuthService {
return &BasicAuthService{
SetPasswordFn: func(context.Context, string, string) error { return fmt.Errorf("mock error") },
ComparePasswordFn: func(context.Context, string, string) error { return fmt.Errorf("mock error") },
CompareAndSetPasswordFn: func(context.Context, string, string, string) error { return fmt.Errorf("mock error") },
}
} | [
"func",
"NewBasicAuthService",
"(",
"user",
",",
"password",
"string",
")",
"*",
"BasicAuthService",
"{",
"return",
"&",
"BasicAuthService",
"{",
"SetPasswordFn",
":",
"func",
"(",
"context",
".",
"Context",
",",
"string",
",",
"string",
")",
"error",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"}",
",",
"ComparePasswordFn",
":",
"func",
"(",
"context",
".",
"Context",
",",
"string",
",",
"string",
")",
"error",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"}",
",",
"CompareAndSetPasswordFn",
":",
"func",
"(",
"context",
".",
"Context",
",",
"string",
",",
"string",
",",
"string",
")",
"error",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"}",
",",
"}",
"\n",
"}"
] | // NewBasicAuthService returns a mock BasicAuthService where its methods will return
// zero values. | [
"NewBasicAuthService",
"returns",
"a",
"mock",
"BasicAuthService",
"where",
"its",
"methods",
"will",
"return",
"zero",
"values",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/mock/basic_auth.go#L18-L24 |
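The constructor above returns a mock whose methods all fail with "mock error", so a test normally overrides the function fields it cares about. A hypothetical sketch follows; `CompareAndSetPassword` itself appears in the next row.

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/platform/mock"
)

func main() {
	svc := mock.NewBasicAuthService("", "") // the arguments are currently ignored
	svc.CompareAndSetPasswordFn = func(ctx context.Context, name, old, new string) error {
		return nil // pretend the password change succeeded
	}
	err := svc.CompareAndSetPassword(context.Background(), "admin", "old-pass", "new-pass")
	fmt.Println(err) // <nil>
}
```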
12,820 | influxdata/platform | mock/basic_auth.go | CompareAndSetPassword | func (s *BasicAuthService) CompareAndSetPassword(ctx context.Context, name string, old string, new string) error {
return s.CompareAndSetPasswordFn(ctx, name, old, new)
} | go | func (s *BasicAuthService) CompareAndSetPassword(ctx context.Context, name string, old string, new string) error {
return s.CompareAndSetPasswordFn(ctx, name, old, new)
} | [
"func",
"(",
"s",
"*",
"BasicAuthService",
")",
"CompareAndSetPassword",
"(",
"ctx",
"context",
".",
"Context",
",",
"name",
"string",
",",
"old",
"string",
",",
"new",
"string",
")",
"error",
"{",
"return",
"s",
".",
"CompareAndSetPasswordFn",
"(",
"ctx",
",",
"name",
",",
"old",
",",
"new",
")",
"\n",
"}"
] | // CompareAndSetPassword compares the provided password and sets it to the new password. | [
"CompareAndSetPassword",
"compares",
"the",
"provided",
"password",
"and",
"sets",
"it",
"to",
"the",
"new",
"password",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/mock/basic_auth.go#L37-L39 |
12,821 | influxdata/platform | fs/proto.go | NewProtoService | func NewProtoService(dir string, logger *zap.Logger, s platform.DashboardService) *ProtoService {
return &ProtoService{
Dir: dir,
Logger: logger.With(zap.String("service", "proto")),
DashboardService: s,
}
} | go | func NewProtoService(dir string, logger *zap.Logger, s platform.DashboardService) *ProtoService {
return &ProtoService{
Dir: dir,
Logger: logger.With(zap.String("service", "proto")),
DashboardService: s,
}
} | [
"func",
"NewProtoService",
"(",
"dir",
"string",
",",
"logger",
"*",
"zap",
".",
"Logger",
",",
"s",
"platform",
".",
"DashboardService",
")",
"*",
"ProtoService",
"{",
"return",
"&",
"ProtoService",
"{",
"Dir",
":",
"dir",
",",
"Logger",
":",
"logger",
".",
"With",
"(",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
")",
",",
"DashboardService",
":",
"s",
",",
"}",
"\n",
"}"
] | // NewProtoService creates an instance of a ProtoService. | [
"NewProtoService",
"creates",
"an",
"instance",
"of",
"a",
"ProtoService",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/fs/proto.go#L28-L34 |
12,822 | influxdata/platform | fs/proto.go | Open | func (s *ProtoService) Open(ctx context.Context) error {
if _, err := os.Stat(s.Dir); os.IsNotExist(err) {
if err := os.Mkdir(s.Dir, 0600); err != nil {
return err
}
}
files, err := ioutil.ReadDir(s.Dir)
if err != nil {
return err
}
protos := []*platform.Proto{}
for _, file := range files {
filename := file.Name()
if path.Ext(filename) != protoFileExt {
s.Logger.Info("file extention did not match proto file extension", zap.String("file", filename))
continue
}
octets, err := ioutil.ReadFile(filepath.Join(s.Dir, filename))
if err != nil {
s.Logger.Info("error openeing file", zap.String("file", filename), zap.Error(err))
continue
}
proto := &platform.Proto{}
if err := json.Unmarshal(octets, proto); err != nil {
s.Logger.Info("error unmarshalling file into proto", zap.String("file", filename), zap.Error(err))
continue
}
// TODO(desa): ensure that the proto provided is a valid proto (e.g. that all viewID exists for all cells in a dashboard).
protos = append(protos, proto)
}
s.protos = protos
return nil
} | go | func (s *ProtoService) Open(ctx context.Context) error {
if _, err := os.Stat(s.Dir); os.IsNotExist(err) {
if err := os.Mkdir(s.Dir, 0600); err != nil {
return err
}
}
files, err := ioutil.ReadDir(s.Dir)
if err != nil {
return err
}
protos := []*platform.Proto{}
for _, file := range files {
filename := file.Name()
if path.Ext(filename) != protoFileExt {
s.Logger.Info("file extention did not match proto file extension", zap.String("file", filename))
continue
}
octets, err := ioutil.ReadFile(filepath.Join(s.Dir, filename))
if err != nil {
s.Logger.Info("error openeing file", zap.String("file", filename), zap.Error(err))
continue
}
proto := &platform.Proto{}
if err := json.Unmarshal(octets, proto); err != nil {
s.Logger.Info("error unmarshalling file into proto", zap.String("file", filename), zap.Error(err))
continue
}
// TODO(desa): ensure that the proto provided is a valid proto (e.g. that all viewID exists for all cells in a dashboard).
protos = append(protos, proto)
}
s.protos = protos
return nil
} | [
"func",
"(",
"s",
"*",
"ProtoService",
")",
"Open",
"(",
"ctx",
"context",
".",
"Context",
")",
"error",
"{",
"if",
"_",
",",
"err",
":=",
"os",
".",
"Stat",
"(",
"s",
".",
"Dir",
")",
";",
"os",
".",
"IsNotExist",
"(",
"err",
")",
"{",
"if",
"err",
":=",
"os",
".",
"Mkdir",
"(",
"s",
".",
"Dir",
",",
"0600",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"files",
",",
"err",
":=",
"ioutil",
".",
"ReadDir",
"(",
"s",
".",
"Dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"protos",
":=",
"[",
"]",
"*",
"platform",
".",
"Proto",
"{",
"}",
"\n",
"for",
"_",
",",
"file",
":=",
"range",
"files",
"{",
"filename",
":=",
"file",
".",
"Name",
"(",
")",
"\n",
"if",
"path",
".",
"Ext",
"(",
"filename",
")",
"!=",
"protoFileExt",
"{",
"s",
".",
"Logger",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"filename",
")",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"octets",
",",
"err",
":=",
"ioutil",
".",
"ReadFile",
"(",
"filepath",
".",
"Join",
"(",
"s",
".",
"Dir",
",",
"filename",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"Logger",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"filename",
")",
",",
"zap",
".",
"Error",
"(",
"err",
")",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"proto",
":=",
"&",
"platform",
".",
"Proto",
"{",
"}",
"\n",
"if",
"err",
":=",
"json",
".",
"Unmarshal",
"(",
"octets",
",",
"proto",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"Logger",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"filename",
")",
",",
"zap",
".",
"Error",
"(",
"err",
")",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// TODO(desa): ensure that the proto provided is a valid proto (e.g. that all viewID exists for all cells in a dashboard).",
"protos",
"=",
"append",
"(",
"protos",
",",
"proto",
")",
"\n",
"}",
"\n\n",
"s",
".",
"protos",
"=",
"protos",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // Open loads the protos from the file system and sets them on the service. | [
"Open",
"loads",
"the",
"protos",
"from",
"the",
"file",
"system",
"and",
"sets",
"them",
"on",
"the",
"service",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/fs/proto.go#L42-L82 |
12,823 | influxdata/platform | fs/proto.go | FindProtos | func (s *ProtoService) FindProtos(ctx context.Context) ([]*platform.Proto, error) {
protos := []*platform.Proto{}
for _, proto := range s.protos {
// easy way to make a deep copy
octets, err := json.Marshal(proto)
if err != nil {
return nil, err
}
p := &platform.Proto{}
if err := json.Unmarshal(octets, p); err != nil {
return nil, err
}
protos = append(protos, p)
}
return protos, nil
} | go | func (s *ProtoService) FindProtos(ctx context.Context) ([]*platform.Proto, error) {
protos := []*platform.Proto{}
for _, proto := range s.protos {
// easy way to make a deep copy
octets, err := json.Marshal(proto)
if err != nil {
return nil, err
}
p := &platform.Proto{}
if err := json.Unmarshal(octets, p); err != nil {
return nil, err
}
protos = append(protos, p)
}
return protos, nil
} | [
"func",
"(",
"s",
"*",
"ProtoService",
")",
"FindProtos",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"[",
"]",
"*",
"platform",
".",
"Proto",
",",
"error",
")",
"{",
"protos",
":=",
"[",
"]",
"*",
"platform",
".",
"Proto",
"{",
"}",
"\n",
"for",
"_",
",",
"proto",
":=",
"range",
"s",
".",
"protos",
"{",
"// easy way to make a deep copy",
"octets",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"proto",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"p",
":=",
"&",
"platform",
".",
"Proto",
"{",
"}",
"\n",
"if",
"err",
":=",
"json",
".",
"Unmarshal",
"(",
"octets",
",",
"p",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"protos",
"=",
"append",
"(",
"protos",
",",
"p",
")",
"\n",
"}",
"\n\n",
"return",
"protos",
",",
"nil",
"\n",
"}"
] | // FindProtos returns a list of protos from the file system. | [
"FindProtos",
"returns",
"a",
"list",
"of",
"protos",
"from",
"the",
"file",
"system",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/fs/proto.go#L85-L103 |
12,824 | influxdata/platform | fs/proto.go | CreateDashboardsFromProto | func (s *ProtoService) CreateDashboardsFromProto(ctx context.Context, protoID platform.ID, orgID platform.ID) ([]*platform.Dashboard, error) {
// TODO(desa): this should be done transactionally.
proto, err := s.findProto(ctx, protoID)
if err != nil {
return nil, err
}
dashes := []*platform.Dashboard{}
for _, protodash := range proto.Dashboards {
dash := &platform.Dashboard{}
*dash = protodash.Dashboard
// TODO(desa): add organization id here
dash.Cells = nil
if err := s.DashboardService.CreateDashboard(ctx, dash); err != nil {
return nil, err
}
cells := []*platform.Cell{}
for _, protocell := range protodash.Dashboard.Cells {
cell := &platform.Cell{}
*cell = *protocell
protoview, ok := protodash.Views[cell.ID]
if !ok {
return nil, &platform.Error{Msg: fmt.Sprintf("view for ID %q does not exist", cell.ID)}
}
view := &platform.View{}
*view = protoview
opts := platform.AddDashboardCellOptions{View: view}
if err := s.DashboardService.AddDashboardCell(ctx, dash.ID, cell, opts); err != nil {
return nil, err
}
cells = append(cells, cell)
}
dash.Cells = cells
dashes = append(dashes, dash)
}
return dashes, nil
} | go | func (s *ProtoService) CreateDashboardsFromProto(ctx context.Context, protoID platform.ID, orgID platform.ID) ([]*platform.Dashboard, error) {
// TODO(desa): this should be done transactionally.
proto, err := s.findProto(ctx, protoID)
if err != nil {
return nil, err
}
dashes := []*platform.Dashboard{}
for _, protodash := range proto.Dashboards {
dash := &platform.Dashboard{}
*dash = protodash.Dashboard
// TODO(desa): add organization id here
dash.Cells = nil
if err := s.DashboardService.CreateDashboard(ctx, dash); err != nil {
return nil, err
}
cells := []*platform.Cell{}
for _, protocell := range protodash.Dashboard.Cells {
cell := &platform.Cell{}
*cell = *protocell
protoview, ok := protodash.Views[cell.ID]
if !ok {
return nil, &platform.Error{Msg: fmt.Sprintf("view for ID %q does not exist", cell.ID)}
}
view := &platform.View{}
*view = protoview
opts := platform.AddDashboardCellOptions{View: view}
if err := s.DashboardService.AddDashboardCell(ctx, dash.ID, cell, opts); err != nil {
return nil, err
}
cells = append(cells, cell)
}
dash.Cells = cells
dashes = append(dashes, dash)
}
return dashes, nil
} | [
"func",
"(",
"s",
"*",
"ProtoService",
")",
"CreateDashboardsFromProto",
"(",
"ctx",
"context",
".",
"Context",
",",
"protoID",
"platform",
".",
"ID",
",",
"orgID",
"platform",
".",
"ID",
")",
"(",
"[",
"]",
"*",
"platform",
".",
"Dashboard",
",",
"error",
")",
"{",
"// TODO(desa): this should be done transactionally.",
"proto",
",",
"err",
":=",
"s",
".",
"findProto",
"(",
"ctx",
",",
"protoID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"dashes",
":=",
"[",
"]",
"*",
"platform",
".",
"Dashboard",
"{",
"}",
"\n\n",
"for",
"_",
",",
"protodash",
":=",
"range",
"proto",
".",
"Dashboards",
"{",
"dash",
":=",
"&",
"platform",
".",
"Dashboard",
"{",
"}",
"\n",
"*",
"dash",
"=",
"protodash",
".",
"Dashboard",
"\n",
"// TODO(desa): add organization id here",
"dash",
".",
"Cells",
"=",
"nil",
"\n\n",
"if",
"err",
":=",
"s",
".",
"DashboardService",
".",
"CreateDashboard",
"(",
"ctx",
",",
"dash",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"cells",
":=",
"[",
"]",
"*",
"platform",
".",
"Cell",
"{",
"}",
"\n",
"for",
"_",
",",
"protocell",
":=",
"range",
"protodash",
".",
"Dashboard",
".",
"Cells",
"{",
"cell",
":=",
"&",
"platform",
".",
"Cell",
"{",
"}",
"\n",
"*",
"cell",
"=",
"*",
"protocell",
"\n\n",
"protoview",
",",
"ok",
":=",
"protodash",
".",
"Views",
"[",
"cell",
".",
"ID",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Msg",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"cell",
".",
"ID",
")",
"}",
"\n",
"}",
"\n\n",
"view",
":=",
"&",
"platform",
".",
"View",
"{",
"}",
"\n",
"*",
"view",
"=",
"protoview",
"\n",
"opts",
":=",
"platform",
".",
"AddDashboardCellOptions",
"{",
"View",
":",
"view",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"DashboardService",
".",
"AddDashboardCell",
"(",
"ctx",
",",
"dash",
".",
"ID",
",",
"cell",
",",
"opts",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"cells",
"=",
"append",
"(",
"cells",
",",
"cell",
")",
"\n",
"}",
"\n\n",
"dash",
".",
"Cells",
"=",
"cells",
"\n\n",
"dashes",
"=",
"append",
"(",
"dashes",
",",
"dash",
")",
"\n",
"}",
"\n\n",
"return",
"dashes",
",",
"nil",
"\n",
"}"
] | // CreateDashboardsFromProto creates instances of each dashboard in a proto. | [
"CreateDashboardsFromProto",
"creates",
"instances",
"of",
"each",
"dashboard",
"in",
"a",
"proto",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/fs/proto.go#L116-L161 |
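To show how the method above is meant to be called, here is a small sketch that instantiates one proto's dashboards for an organization. The helper name, import paths and logging are illustrative assumptions rather than code from the repository; the IDs would normally arrive via an HTTP request.

package example

import (
	"context"
	"log"

	platform "github.com/influxdata/platform" // assumed import path
	"github.com/influxdata/platform/fs"       // assumed import path
)

// instantiateProto materialises every dashboard (and its cells and views)
// defined by the proto identified by protoID.
func instantiateProto(ctx context.Context, svc *fs.ProtoService, protoID, orgID platform.ID) error {
	dashboards, err := svc.CreateDashboardsFromProto(ctx, protoID, orgID)
	if err != nil {
		return err
	}
	for _, d := range dashboards {
		log.Printf("created dashboard %v with %d cells", d.ID, len(d.Cells))
	}
	return nil
}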
12,825 | influxdata/platform | gather/scheduler.go | NewScheduler | func NewScheduler(
numScrapers int,
l *zap.Logger,
targets platform.ScraperTargetStoreService,
p nats.Publisher,
s nats.Subscriber,
interval time.Duration,
timeout time.Duration,
) (*Scheduler, error) {
if interval == 0 {
interval = 60 * time.Second
}
if timeout == 0 {
timeout = 30 * time.Second
}
scheduler := &Scheduler{
Targets: targets,
Interval: interval,
Timeout: timeout,
Publisher: p,
Logger: l,
gather: make(chan struct{}, 100),
}
for i := 0; i < numScrapers; i++ {
err := s.Subscribe(promTargetSubject, "", &handler{
Scraper: new(prometheusScraper),
Publisher: p,
Logger: l,
})
if err != nil {
return nil, err
}
}
return scheduler, nil
} | go | func NewScheduler(
numScrapers int,
l *zap.Logger,
targets platform.ScraperTargetStoreService,
p nats.Publisher,
s nats.Subscriber,
interval time.Duration,
timeout time.Duration,
) (*Scheduler, error) {
if interval == 0 {
interval = 60 * time.Second
}
if timeout == 0 {
timeout = 30 * time.Second
}
scheduler := &Scheduler{
Targets: targets,
Interval: interval,
Timeout: timeout,
Publisher: p,
Logger: l,
gather: make(chan struct{}, 100),
}
for i := 0; i < numScrapers; i++ {
err := s.Subscribe(promTargetSubject, "", &handler{
Scraper: new(prometheusScraper),
Publisher: p,
Logger: l,
})
if err != nil {
return nil, err
}
}
return scheduler, nil
} | [
"func",
"NewScheduler",
"(",
"numScrapers",
"int",
",",
"l",
"*",
"zap",
".",
"Logger",
",",
"targets",
"platform",
".",
"ScraperTargetStoreService",
",",
"p",
"nats",
".",
"Publisher",
",",
"s",
"nats",
".",
"Subscriber",
",",
"interval",
"time",
".",
"Duration",
",",
"timeout",
"time",
".",
"Duration",
",",
")",
"(",
"*",
"Scheduler",
",",
"error",
")",
"{",
"if",
"interval",
"==",
"0",
"{",
"interval",
"=",
"60",
"*",
"time",
".",
"Second",
"\n",
"}",
"\n",
"if",
"timeout",
"==",
"0",
"{",
"timeout",
"=",
"30",
"*",
"time",
".",
"Second",
"\n",
"}",
"\n",
"scheduler",
":=",
"&",
"Scheduler",
"{",
"Targets",
":",
"targets",
",",
"Interval",
":",
"interval",
",",
"Timeout",
":",
"timeout",
",",
"Publisher",
":",
"p",
",",
"Logger",
":",
"l",
",",
"gather",
":",
"make",
"(",
"chan",
"struct",
"{",
"}",
",",
"100",
")",
",",
"}",
"\n\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"numScrapers",
";",
"i",
"++",
"{",
"err",
":=",
"s",
".",
"Subscribe",
"(",
"promTargetSubject",
",",
"\"",
"\"",
",",
"&",
"handler",
"{",
"Scraper",
":",
"new",
"(",
"prometheusScraper",
")",
",",
"Publisher",
":",
"p",
",",
"Logger",
":",
"l",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"scheduler",
",",
"nil",
"\n",
"}"
] | // NewScheduler creates a new Scheduler and subscriptions for scraper jobs. | [
"NewScheduler",
"creates",
"a",
"new",
"Scheduler",
"and",
"subscriptions",
"for",
"scraper",
"jobs",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/gather/scheduler.go#L38-L74 |
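A construction sketch for the scheduler above. Passing zero durations opts into the 60-second interval and 30-second timeout defaults applied inside NewScheduler; the worker count is arbitrary, and the gather and nats import paths are assumptions.

package example

import (
	"go.uber.org/zap"

	platform "github.com/influxdata/platform" // assumed import path
	"github.com/influxdata/platform/gather"   // assumed import path
	"github.com/influxdata/platform/nats"     // assumed import path
)

// newGatherScheduler starts ten scraper workers subscribed to the prometheus
// target subject registered by NewScheduler above.
func newGatherScheduler(
	logger *zap.Logger,
	targets platform.ScraperTargetStoreService,
	pub nats.Publisher,
	sub nats.Subscriber,
) (*gather.Scheduler, error) {
	// Zero interval/timeout fall back to the defaults inside NewScheduler.
	return gather.NewScheduler(10, logger, targets, pub, sub, 0, 0)
}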
12,826 | influxdata/platform | http/view_service.go | NewViewHandler | func NewViewHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *ViewHandler {
h := &ViewHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
UserResourceMappingService: mappingService,
LabelService: labelService,
UserService: userService,
}
h.HandlerFunc("POST", viewsPath, h.handlePostViews)
h.HandlerFunc("GET", viewsPath, h.handleGetViews)
h.HandlerFunc("GET", viewsIDPath, h.handleGetView)
h.HandlerFunc("DELETE", viewsIDPath, h.handleDeleteView)
h.HandlerFunc("PATCH", viewsIDPath, h.handlePatchView)
h.HandlerFunc("POST", viewsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Member))
h.HandlerFunc("GET", viewsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Member))
h.HandlerFunc("DELETE", viewsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
h.HandlerFunc("POST", viewsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Owner))
h.HandlerFunc("GET", viewsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Owner))
h.HandlerFunc("DELETE", viewsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
h.HandlerFunc("GET", viewsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", viewsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", viewsIDLabelsNamePath, newDeleteLabelHandler(h.LabelService))
h.HandlerFunc("PATCH", viewsIDLabelsNamePath, newPatchLabelHandler(h.LabelService))
return h
} | go | func NewViewHandler(mappingService platform.UserResourceMappingService, labelService platform.LabelService, userService platform.UserService) *ViewHandler {
h := &ViewHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
UserResourceMappingService: mappingService,
LabelService: labelService,
UserService: userService,
}
h.HandlerFunc("POST", viewsPath, h.handlePostViews)
h.HandlerFunc("GET", viewsPath, h.handleGetViews)
h.HandlerFunc("GET", viewsIDPath, h.handleGetView)
h.HandlerFunc("DELETE", viewsIDPath, h.handleDeleteView)
h.HandlerFunc("PATCH", viewsIDPath, h.handlePatchView)
h.HandlerFunc("POST", viewsIDMembersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Member))
h.HandlerFunc("GET", viewsIDMembersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Member))
h.HandlerFunc("DELETE", viewsIDMembersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Member))
h.HandlerFunc("POST", viewsIDOwnersPath, newPostMemberHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Owner))
h.HandlerFunc("GET", viewsIDOwnersPath, newGetMembersHandler(h.UserResourceMappingService, h.UserService, platform.DashboardsResource, platform.Owner))
h.HandlerFunc("DELETE", viewsIDOwnersIDPath, newDeleteMemberHandler(h.UserResourceMappingService, platform.Owner))
h.HandlerFunc("GET", viewsIDLabelsPath, newGetLabelsHandler(h.LabelService))
h.HandlerFunc("POST", viewsIDLabelsPath, newPostLabelHandler(h.LabelService))
h.HandlerFunc("DELETE", viewsIDLabelsNamePath, newDeleteLabelHandler(h.LabelService))
h.HandlerFunc("PATCH", viewsIDLabelsNamePath, newPatchLabelHandler(h.LabelService))
return h
} | [
"func",
"NewViewHandler",
"(",
"mappingService",
"platform",
".",
"UserResourceMappingService",
",",
"labelService",
"platform",
".",
"LabelService",
",",
"userService",
"platform",
".",
"UserService",
")",
"*",
"ViewHandler",
"{",
"h",
":=",
"&",
"ViewHandler",
"{",
"Router",
":",
"NewRouter",
"(",
")",
",",
"Logger",
":",
"zap",
".",
"NewNop",
"(",
")",
",",
"UserResourceMappingService",
":",
"mappingService",
",",
"LabelService",
":",
"labelService",
",",
"UserService",
":",
"userService",
",",
"}",
"\n\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsPath",
",",
"h",
".",
"handlePostViews",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsPath",
",",
"h",
".",
"handleGetViews",
")",
"\n\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDPath",
",",
"h",
".",
"handleGetView",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDPath",
",",
"h",
".",
"handleDeleteView",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDPath",
",",
"h",
".",
"handlePatchView",
")",
"\n\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDMembersPath",
",",
"newPostMemberHandler",
"(",
"h",
".",
"UserResourceMappingService",
",",
"h",
".",
"UserService",
",",
"platform",
".",
"DashboardsResource",
",",
"platform",
".",
"Member",
")",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDMembersPath",
",",
"newGetMembersHandler",
"(",
"h",
".",
"UserResourceMappingService",
",",
"h",
".",
"UserService",
",",
"platform",
".",
"DashboardsResource",
",",
"platform",
".",
"Member",
")",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDMembersIDPath",
",",
"newDeleteMemberHandler",
"(",
"h",
".",
"UserResourceMappingService",
",",
"platform",
".",
"Member",
")",
")",
"\n\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDOwnersPath",
",",
"newPostMemberHandler",
"(",
"h",
".",
"UserResourceMappingService",
",",
"h",
".",
"UserService",
",",
"platform",
".",
"DashboardsResource",
",",
"platform",
".",
"Owner",
")",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDOwnersPath",
",",
"newGetMembersHandler",
"(",
"h",
".",
"UserResourceMappingService",
",",
"h",
".",
"UserService",
",",
"platform",
".",
"DashboardsResource",
",",
"platform",
".",
"Owner",
")",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDOwnersIDPath",
",",
"newDeleteMemberHandler",
"(",
"h",
".",
"UserResourceMappingService",
",",
"platform",
".",
"Owner",
")",
")",
"\n\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDLabelsPath",
",",
"newGetLabelsHandler",
"(",
"h",
".",
"LabelService",
")",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDLabelsPath",
",",
"newPostLabelHandler",
"(",
"h",
".",
"LabelService",
")",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDLabelsNamePath",
",",
"newDeleteLabelHandler",
"(",
"h",
".",
"LabelService",
")",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"viewsIDLabelsNamePath",
",",
"newPatchLabelHandler",
"(",
"h",
".",
"LabelService",
")",
")",
"\n\n",
"return",
"h",
"\n",
"}"
] | // NewViewHandler returns a new instance of ViewHandler. | [
"NewViewHandler",
"returns",
"a",
"new",
"instance",
"of",
"ViewHandler",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/view_service.go#L41-L71 |
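A hedged sketch of serving the handler above with net/http. The import paths and port are placeholders, the ViewService is assigned after construction because the handlers in the following entries reach it through h.ViewService, and the embedded router is assumed to satisfy http.Handler, as the HandlerFunc registrations suggest.

package example

import (
	"net/http"

	platform "github.com/influxdata/platform"          // assumed import path
	platformhttp "github.com/influxdata/platform/http" // assumed import path
)

// serveViews mounts the view routes registered by NewViewHandler above.
func serveViews(
	mappings platform.UserResourceMappingService,
	labels platform.LabelService,
	users platform.UserService,
	views platform.ViewService,
) error {
	h := platformhttp.NewViewHandler(mappings, labels, users)
	h.ViewService = views // assigned separately from the constructor arguments
	return http.ListenAndServe(":9999", h)
}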
12,827 | influxdata/platform | http/view_service.go | handleGetViews | func (h *ViewHandler) handleGetViews(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := decodeGetViewsRequest(ctx, r)
views, _, err := h.ViewService.FindViews(ctx, req.filter)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newGetViewsResponse(views)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | go | func (h *ViewHandler) handleGetViews(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req := decodeGetViewsRequest(ctx, r)
views, _, err := h.ViewService.FindViews(ctx, req.filter)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newGetViewsResponse(views)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | [
"func",
"(",
"h",
"*",
"ViewHandler",
")",
"handleGetViews",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"r",
"*",
"http",
".",
"Request",
")",
"{",
"ctx",
":=",
"r",
".",
"Context",
"(",
")",
"\n\n",
"req",
":=",
"decodeGetViewsRequest",
"(",
"ctx",
",",
"r",
")",
"\n\n",
"views",
",",
"_",
",",
"err",
":=",
"h",
".",
"ViewService",
".",
"FindViews",
"(",
"ctx",
",",
"req",
".",
"filter",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"encodeResponse",
"(",
"ctx",
",",
"w",
",",
"http",
".",
"StatusOK",
",",
"newGetViewsResponse",
"(",
"views",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"logEncodingError",
"(",
"h",
".",
"Logger",
",",
"r",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}"
] | // handleGetViews returns all views within the store. | [
"handleGetViews",
"returns",
"all",
"views",
"within",
"the",
"store",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/view_service.go#L109-L124 |
12,828 | influxdata/platform | http/view_service.go | handlePostViews | func (h *ViewHandler) handlePostViews(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodePostViewRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := h.ViewService.CreateView(ctx, req.View); err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusCreated, newViewResponse(req.View)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | go | func (h *ViewHandler) handlePostViews(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodePostViewRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := h.ViewService.CreateView(ctx, req.View); err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusCreated, newViewResponse(req.View)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | [
"func",
"(",
"h",
"*",
"ViewHandler",
")",
"handlePostViews",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"r",
"*",
"http",
".",
"Request",
")",
"{",
"ctx",
":=",
"r",
".",
"Context",
"(",
")",
"\n\n",
"req",
",",
"err",
":=",
"decodePostViewRequest",
"(",
"ctx",
",",
"r",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"err",
":=",
"h",
".",
"ViewService",
".",
"CreateView",
"(",
"ctx",
",",
"req",
".",
"View",
")",
";",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"encodeResponse",
"(",
"ctx",
",",
"w",
",",
"http",
".",
"StatusCreated",
",",
"newViewResponse",
"(",
"req",
".",
"View",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"logEncodingError",
"(",
"h",
".",
"Logger",
",",
"r",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}"
] | // handlePostViews creates a new view. | [
"handlePostViews",
"creates",
"a",
"new",
"view",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/view_service.go#L165-L182 |
12,829 | influxdata/platform | http/view_service.go | handleGetView | func (h *ViewHandler) handleGetView(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodeGetViewRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
view, err := h.ViewService.FindViewByID(ctx, req.ViewID)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newViewResponse(view)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | go | func (h *ViewHandler) handleGetView(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodeGetViewRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
view, err := h.ViewService.FindViewByID(ctx, req.ViewID)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newViewResponse(view)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | [
"func",
"(",
"h",
"*",
"ViewHandler",
")",
"handleGetView",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"r",
"*",
"http",
".",
"Request",
")",
"{",
"ctx",
":=",
"r",
".",
"Context",
"(",
")",
"\n\n",
"req",
",",
"err",
":=",
"decodeGetViewRequest",
"(",
"ctx",
",",
"r",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"view",
",",
"err",
":=",
"h",
".",
"ViewService",
".",
"FindViewByID",
"(",
"ctx",
",",
"req",
".",
"ViewID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"encodeResponse",
"(",
"ctx",
",",
"w",
",",
"http",
".",
"StatusOK",
",",
"newViewResponse",
"(",
"view",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"logEncodingError",
"(",
"h",
".",
"Logger",
",",
"r",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}"
] | // handleGetView retrieves a view by ID. | [
"handleGetView",
"retrieves",
"a",
"view",
"by",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/view_service.go#L199-L218 |
12,830 | influxdata/platform | http/view_service.go | handleDeleteView | func (h *ViewHandler) handleDeleteView(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodeDeleteViewRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := h.ViewService.DeleteView(ctx, req.ViewID); err != nil {
EncodeError(ctx, err, w)
return
}
w.WriteHeader(http.StatusNoContent)
} | go | func (h *ViewHandler) handleDeleteView(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, err := decodeDeleteViewRequest(ctx, r)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := h.ViewService.DeleteView(ctx, req.ViewID); err != nil {
EncodeError(ctx, err, w)
return
}
w.WriteHeader(http.StatusNoContent)
} | [
"func",
"(",
"h",
"*",
"ViewHandler",
")",
"handleDeleteView",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"r",
"*",
"http",
".",
"Request",
")",
"{",
"ctx",
":=",
"r",
".",
"Context",
"(",
")",
"\n\n",
"req",
",",
"err",
":=",
"decodeDeleteViewRequest",
"(",
"ctx",
",",
"r",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"h",
".",
"ViewService",
".",
"DeleteView",
"(",
"ctx",
",",
"req",
".",
"ViewID",
")",
";",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"w",
".",
"WriteHeader",
"(",
"http",
".",
"StatusNoContent",
")",
"\n",
"}"
] | // handleDeleteView removes a view by ID. | [
"handleDeleteView",
"removes",
"a",
"view",
"by",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/view_service.go#L245-L260 |
12,831 | influxdata/platform | http/view_service.go | handlePatchView | func (h *ViewHandler) handlePatchView(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, pe := decodePatchViewRequest(ctx, r)
if pe != nil {
EncodeError(ctx, pe, w)
return
}
view, err := h.ViewService.UpdateView(ctx, req.ViewID, req.Upd)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newViewResponse(view)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | go | func (h *ViewHandler) handlePatchView(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
req, pe := decodePatchViewRequest(ctx, r)
if pe != nil {
EncodeError(ctx, pe, w)
return
}
view, err := h.ViewService.UpdateView(ctx, req.ViewID, req.Upd)
if err != nil {
EncodeError(ctx, err, w)
return
}
if err := encodeResponse(ctx, w, http.StatusOK, newViewResponse(view)); err != nil {
logEncodingError(h.Logger, r, err)
return
}
} | [
"func",
"(",
"h",
"*",
"ViewHandler",
")",
"handlePatchView",
"(",
"w",
"http",
".",
"ResponseWriter",
",",
"r",
"*",
"http",
".",
"Request",
")",
"{",
"ctx",
":=",
"r",
".",
"Context",
"(",
")",
"\n\n",
"req",
",",
"pe",
":=",
"decodePatchViewRequest",
"(",
"ctx",
",",
"r",
")",
"\n",
"if",
"pe",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"pe",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n",
"view",
",",
"err",
":=",
"h",
".",
"ViewService",
".",
"UpdateView",
"(",
"ctx",
",",
"req",
".",
"ViewID",
",",
"req",
".",
"Upd",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"EncodeError",
"(",
"ctx",
",",
"err",
",",
"w",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"encodeResponse",
"(",
"ctx",
",",
"w",
",",
"http",
".",
"StatusOK",
",",
"newViewResponse",
"(",
"view",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"logEncodingError",
"(",
"h",
".",
"Logger",
",",
"r",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}"
] | // handlePatchView updates a view. | [
"handlePatchView",
"updates",
"a",
"view",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/view_service.go#L287-L305 |
12,832 | influxdata/platform | http/view_service.go | Valid | func (r *patchViewRequest) Valid() *platform.Error {
if !r.ViewID.Valid() {
return &platform.Error{
Code: platform.EInvalid,
Msg: "missing view ID",
}
}
return r.Upd.Valid()
} | go | func (r *patchViewRequest) Valid() *platform.Error {
if !r.ViewID.Valid() {
return &platform.Error{
Code: platform.EInvalid,
Msg: "missing view ID",
}
}
return r.Upd.Valid()
} | [
"func",
"(",
"r",
"*",
"patchViewRequest",
")",
"Valid",
"(",
")",
"*",
"platform",
".",
"Error",
"{",
"if",
"!",
"r",
".",
"ViewID",
".",
"Valid",
"(",
")",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Code",
":",
"platform",
".",
"EInvalid",
",",
"Msg",
":",
"\"",
"\"",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"r",
".",
"Upd",
".",
"Valid",
"(",
")",
"\n",
"}"
] | // Valid validates that the view ID is non zero valued and update has expected values set. | [
"Valid",
"validates",
"that",
"the",
"view",
"ID",
"is",
"non",
"zero",
"valued",
"and",
"update",
"has",
"expected",
"values",
"set",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/view_service.go#L351-L360 |
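The Valid method above illustrates a recurring request-validation pattern in these handlers: reject zero-valued IDs with an EInvalid error, then let the embedded update validate itself. A sketch of the same pattern for a hypothetical request type (deleteWidgetRequest is invented for illustration and does not exist in the repository):

package example

import (
	platform "github.com/influxdata/platform" // assumed import path
)

// deleteWidgetRequest is a hypothetical request type used only to illustrate
// the validation pattern shown by patchViewRequest.Valid above.
type deleteWidgetRequest struct {
	WidgetID platform.ID
}

// Valid rejects zero-valued IDs with an EInvalid error before any work is done.
func (r *deleteWidgetRequest) Valid() *platform.Error {
	if !r.WidgetID.Valid() {
		return &platform.Error{
			Code: platform.EInvalid,
			Msg:  "missing widget ID",
		}
	}
	return nil
}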
12,833 | influxdata/platform | mock/scraper_service.go | ListTargets | func (s *ScraperTargetStoreService) ListTargets(ctx context.Context) ([]platform.ScraperTarget, error) {
return s.ListTargetsF(ctx)
} | go | func (s *ScraperTargetStoreService) ListTargets(ctx context.Context) ([]platform.ScraperTarget, error) {
return s.ListTargetsF(ctx)
} | [
"func",
"(",
"s",
"*",
"ScraperTargetStoreService",
")",
"ListTargets",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"[",
"]",
"platform",
".",
"ScraperTarget",
",",
"error",
")",
"{",
"return",
"s",
".",
"ListTargetsF",
"(",
"ctx",
")",
"\n",
"}"
] | // ListTargets lists all the scraper targets. | [
"ListTargets",
"lists",
"all",
"the",
"scraper",
"targets",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/mock/scraper_service.go#L21-L23 |
12,834 | influxdata/platform | mock/scraper_service.go | AddTarget | func (s *ScraperTargetStoreService) AddTarget(ctx context.Context, t *platform.ScraperTarget) error {
return s.AddTargetF(ctx, t)
} | go | func (s *ScraperTargetStoreService) AddTarget(ctx context.Context, t *platform.ScraperTarget) error {
return s.AddTargetF(ctx, t)
} | [
"func",
"(",
"s",
"*",
"ScraperTargetStoreService",
")",
"AddTarget",
"(",
"ctx",
"context",
".",
"Context",
",",
"t",
"*",
"platform",
".",
"ScraperTarget",
")",
"error",
"{",
"return",
"s",
".",
"AddTargetF",
"(",
"ctx",
",",
"t",
")",
"\n",
"}"
] | // AddTarget adds a scraper target. | [
"AddTarget",
"adds",
"a",
"scraper",
"target",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/mock/scraper_service.go#L26-L28 |
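Like the other mocks in this dump, the scraper target store delegates each method to a function field. A test sketch, assuming the import paths noted in the comments:

package example

import (
	"context"
	"testing"

	platform "github.com/influxdata/platform" // assumed import path
	"github.com/influxdata/platform/mock"     // assumed import path
)

func TestListTargetsEmpty(t *testing.T) {
	svc := &mock.ScraperTargetStoreService{
		// ListTargetsF is the function field ListTargets delegates to.
		ListTargetsF: func(ctx context.Context) ([]platform.ScraperTarget, error) {
			return []platform.ScraperTarget{}, nil
		},
	}

	targets, err := svc.ListTargets(context.Background())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(targets) != 0 {
		t.Fatalf("expected no targets, got %d", len(targets))
	}
}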
12,835 | influxdata/platform | storage/retention.go | Next | func (s *seriesIteratorAdapter) Next() (tsdb.SeriesElem, error) {
if s.itr == nil {
return nil, nil
}
row, err := s.itr.Next()
if err != nil {
return nil, err
}
if row == nil {
return nil, nil
}
s.ea.name = row.Name
s.ea.tags = row.Tags
return s.elem, nil
} | go | func (s *seriesIteratorAdapter) Next() (tsdb.SeriesElem, error) {
if s.itr == nil {
return nil, nil
}
row, err := s.itr.Next()
if err != nil {
return nil, err
}
if row == nil {
return nil, nil
}
s.ea.name = row.Name
s.ea.tags = row.Tags
return s.elem, nil
} | [
"func",
"(",
"s",
"*",
"seriesIteratorAdapter",
")",
"Next",
"(",
")",
"(",
"tsdb",
".",
"SeriesElem",
",",
"error",
")",
"{",
"if",
"s",
".",
"itr",
"==",
"nil",
"{",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"row",
",",
"err",
":=",
"s",
".",
"itr",
".",
"Next",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"row",
"==",
"nil",
"{",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n\n",
"s",
".",
"ea",
".",
"name",
"=",
"row",
".",
"Name",
"\n",
"s",
".",
"ea",
".",
"tags",
"=",
"row",
".",
"Tags",
"\n",
"return",
"s",
".",
"elem",
",",
"nil",
"\n",
"}"
] | // Next returns the next tsdb.SeriesElem.
//
// The returned tsdb.SeriesElem is valid for use until Next is called again. | [
"Next",
"returns",
"the",
"next",
"tsdb",
".",
"SeriesElem",
".",
"The",
"returned",
"tsdb",
".",
"SeriesElem",
"is",
"valid",
"for",
"use",
"until",
"Next",
"is",
"called",
"again",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/storage/retention.go#L210-L227 |
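The doc comment above notes that each element is only valid until the next call to Next, and the adapter signals exhaustion with a nil element rather than an error. A consumption sketch under the assumptions that tsdb.SeriesElem exposes Name() []byte and that the tsdb import path is github.com/influxdata/platform/tsdb:

package example

import (
	"github.com/influxdata/platform/tsdb" // assumed import path
)

// drainSeries visits every series name produced by an iterator with the same
// Next signature as the adapter above. A nil element means the iterator is
// exhausted; names are copied because the element may be reused between calls.
func drainSeries(
	itr interface {
		Next() (tsdb.SeriesElem, error)
	},
	visit func(name []byte) error,
) error {
	for {
		elem, err := itr.Next()
		if err != nil {
			return err
		}
		if elem == nil {
			return nil // iterator exhausted
		}
		if err := visit(append([]byte(nil), elem.Name()...)); err != nil {
			return err
		}
	}
}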
12,836 | influxdata/platform | tsdb/tsi1/partition.go | deleteNonManifestFiles | func (p *Partition) deleteNonManifestFiles(m *Manifest) error {
dir, err := os.Open(p.path)
if err != nil {
return err
}
defer dir.Close()
fis, err := dir.Readdir(-1)
if err != nil {
return err
}
// Loop over all files and remove any not in the manifest.
for _, fi := range fis {
filename := filepath.Base(fi.Name())
if filename == ManifestFileName || filename == StatsFileName || m.HasFile(filename) {
continue
}
if err := os.RemoveAll(filename); err != nil {
return err
}
}
return dir.Close()
} | go | func (p *Partition) deleteNonManifestFiles(m *Manifest) error {
dir, err := os.Open(p.path)
if err != nil {
return err
}
defer dir.Close()
fis, err := dir.Readdir(-1)
if err != nil {
return err
}
// Loop over all files and remove any not in the manifest.
for _, fi := range fis {
filename := filepath.Base(fi.Name())
if filename == ManifestFileName || filename == StatsFileName || m.HasFile(filename) {
continue
}
if err := os.RemoveAll(filename); err != nil {
return err
}
}
return dir.Close()
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"deleteNonManifestFiles",
"(",
"m",
"*",
"Manifest",
")",
"error",
"{",
"dir",
",",
"err",
":=",
"os",
".",
"Open",
"(",
"p",
".",
"path",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"defer",
"dir",
".",
"Close",
"(",
")",
"\n\n",
"fis",
",",
"err",
":=",
"dir",
".",
"Readdir",
"(",
"-",
"1",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Loop over all files and remove any not in the manifest.",
"for",
"_",
",",
"fi",
":=",
"range",
"fis",
"{",
"filename",
":=",
"filepath",
".",
"Base",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"if",
"filename",
"==",
"ManifestFileName",
"||",
"filename",
"==",
"StatsFileName",
"||",
"m",
".",
"HasFile",
"(",
"filename",
")",
"{",
"continue",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"os",
".",
"RemoveAll",
"(",
"filename",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"dir",
".",
"Close",
"(",
")",
"\n",
"}"
] | // deleteNonManifestFiles removes all files not in the manifest. | [
"deleteNonManifestFiles",
"removes",
"all",
"files",
"not",
"in",
"the",
"manifest",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L291-L316 |
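The method above keeps only the manifest, the stats file and the manifest-listed files. As a general pattern, the sketch below joins each base name back onto the directory before removing it, which behaves the same regardless of the process working directory; it is an illustration of the cleanup pattern, not a drop-in replacement for the method.

package example

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// removeUnlisted deletes every entry in dir whose base name is not in keep.
func removeUnlisted(dir string, keep map[string]bool) error {
	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, fi := range fis {
		name := filepath.Base(fi.Name())
		if keep[name] {
			continue
		}
		// Join with dir so removal is independent of the working directory.
		if err := os.RemoveAll(filepath.Join(dir, name)); err != nil {
			return err
		}
	}
	return nil
}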
12,837 | influxdata/platform | tsdb/tsi1/partition.go | Manifest | func (p *Partition) Manifest() *Manifest {
m := &Manifest{
Levels: p.levels,
Files: make([]string, len(p.fileSet.files)),
Version: p.version,
path: p.ManifestPath(),
}
for j, f := range p.fileSet.files {
m.Files[j] = filepath.Base(f.Path())
}
return m
} | go | func (p *Partition) Manifest() *Manifest {
m := &Manifest{
Levels: p.levels,
Files: make([]string, len(p.fileSet.files)),
Version: p.version,
path: p.ManifestPath(),
}
for j, f := range p.fileSet.files {
m.Files[j] = filepath.Base(f.Path())
}
return m
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"Manifest",
"(",
")",
"*",
"Manifest",
"{",
"m",
":=",
"&",
"Manifest",
"{",
"Levels",
":",
"p",
".",
"levels",
",",
"Files",
":",
"make",
"(",
"[",
"]",
"string",
",",
"len",
"(",
"p",
".",
"fileSet",
".",
"files",
")",
")",
",",
"Version",
":",
"p",
".",
"version",
",",
"path",
":",
"p",
".",
"ManifestPath",
"(",
")",
",",
"}",
"\n\n",
"for",
"j",
",",
"f",
":=",
"range",
"p",
".",
"fileSet",
".",
"files",
"{",
"m",
".",
"Files",
"[",
"j",
"]",
"=",
"filepath",
".",
"Base",
"(",
"f",
".",
"Path",
"(",
")",
")",
"\n",
"}",
"\n\n",
"return",
"m",
"\n",
"}"
] | // Manifest returns a manifest for the index. | [
"Manifest",
"returns",
"a",
"manifest",
"for",
"the",
"index",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L411-L424 |
12,838 | influxdata/platform | tsdb/tsi1/partition.go | RetainFileSet | func (p *Partition) RetainFileSet() (*FileSet, error) {
select {
case <-p.closing:
return nil, errors.New("index is closing")
default:
p.mu.RLock()
defer p.mu.RUnlock()
return p.retainFileSet(), nil
}
} | go | func (p *Partition) RetainFileSet() (*FileSet, error) {
select {
case <-p.closing:
return nil, errors.New("index is closing")
default:
p.mu.RLock()
defer p.mu.RUnlock()
return p.retainFileSet(), nil
}
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"RetainFileSet",
"(",
")",
"(",
"*",
"FileSet",
",",
"error",
")",
"{",
"select",
"{",
"case",
"<-",
"p",
".",
"closing",
":",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"default",
":",
"p",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"p",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"p",
".",
"retainFileSet",
"(",
")",
",",
"nil",
"\n",
"}",
"\n",
"}"
] | // RetainFileSet returns the current fileset and adds a reference count. | [
"RetainFileSet",
"returns",
"the",
"current",
"fileset",
"and",
"adds",
"a",
"reference",
"count",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L437-L446 |
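RetainFileSet pairs with Release, and the partition methods above always defer the release immediately after a successful retain. A small helper sketch that captures that discipline, assuming the tsi1 import path:

package example

import (
	"github.com/influxdata/platform/tsdb/tsi1" // assumed import path
)

// withFileSet retains the partition's current fileset, runs fn against it,
// and releases the reference on the way out so compactions are not blocked.
func withFileSet(p *tsi1.Partition, fn func(fs *tsi1.FileSet) error) error {
	fs, err := p.RetainFileSet()
	if err != nil {
		return err // typically because the index is closing
	}
	defer fs.Release()
	return fn(fs)
}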
12,839 | influxdata/platform | tsdb/tsi1/partition.go | ForEachMeasurementName | func (p *Partition) ForEachMeasurementName(fn func(name []byte) error) error {
fs, err := p.RetainFileSet()
if err != nil {
return err
}
defer fs.Release()
itr := fs.MeasurementIterator()
if itr == nil {
return nil
}
for e := itr.Next(); e != nil; e = itr.Next() {
if err := fn(e.Name()); err != nil {
return err
}
}
return nil
} | go | func (p *Partition) ForEachMeasurementName(fn func(name []byte) error) error {
fs, err := p.RetainFileSet()
if err != nil {
return err
}
defer fs.Release()
itr := fs.MeasurementIterator()
if itr == nil {
return nil
}
for e := itr.Next(); e != nil; e = itr.Next() {
if err := fn(e.Name()); err != nil {
return err
}
}
return nil
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"ForEachMeasurementName",
"(",
"fn",
"func",
"(",
"name",
"[",
"]",
"byte",
")",
"error",
")",
"error",
"{",
"fs",
",",
"err",
":=",
"p",
".",
"RetainFileSet",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"defer",
"fs",
".",
"Release",
"(",
")",
"\n\n",
"itr",
":=",
"fs",
".",
"MeasurementIterator",
"(",
")",
"\n",
"if",
"itr",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"for",
"e",
":=",
"itr",
".",
"Next",
"(",
")",
";",
"e",
"!=",
"nil",
";",
"e",
"=",
"itr",
".",
"Next",
"(",
")",
"{",
"if",
"err",
":=",
"fn",
"(",
"e",
".",
"Name",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // ForEachMeasurementName iterates over all measurement names in the index. | [
"ForEachMeasurementName",
"iterates",
"over",
"all",
"measurement",
"names",
"in",
"the",
"index",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L495-L514 |
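A usage sketch for the callback-style iteration above; copying the name with string() is a safety assumption about buffer reuse rather than something the source states.

package example

import (
	"fmt"

	"github.com/influxdata/platform/tsdb/tsi1" // assumed import path
)

// listMeasurements collects every measurement name in the partition.
func listMeasurements(p *tsi1.Partition) ([]string, error) {
	var names []string
	err := p.ForEachMeasurementName(func(name []byte) error {
		names = append(names, string(name)) // string() copies the bytes
		return nil
	})
	if err != nil {
		return nil, err
	}
	fmt.Printf("%d measurements\n", len(names))
	return names, nil
}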
12,840 | influxdata/platform | tsdb/tsi1/partition.go | MeasurementHasSeries | func (p *Partition) MeasurementHasSeries(name []byte) (bool, error) {
fs, err := p.RetainFileSet()
if err != nil {
return false, err
}
defer fs.Release()
for _, f := range fs.files {
if f.MeasurementHasSeries(p.seriesIDSet, name) {
return true, nil
}
}
return false, nil
} | go | func (p *Partition) MeasurementHasSeries(name []byte) (bool, error) {
fs, err := p.RetainFileSet()
if err != nil {
return false, err
}
defer fs.Release()
for _, f := range fs.files {
if f.MeasurementHasSeries(p.seriesIDSet, name) {
return true, nil
}
}
return false, nil
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"MeasurementHasSeries",
"(",
"name",
"[",
"]",
"byte",
")",
"(",
"bool",
",",
"error",
")",
"{",
"fs",
",",
"err",
":=",
"p",
".",
"RetainFileSet",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"false",
",",
"err",
"\n",
"}",
"\n",
"defer",
"fs",
".",
"Release",
"(",
")",
"\n\n",
"for",
"_",
",",
"f",
":=",
"range",
"fs",
".",
"files",
"{",
"if",
"f",
".",
"MeasurementHasSeries",
"(",
"p",
".",
"seriesIDSet",
",",
"name",
")",
"{",
"return",
"true",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"false",
",",
"nil",
"\n",
"}"
] | // MeasurementHasSeries returns true if a measurement has at least one non-tombstoned series. | [
"MeasurementHasSeries",
"returns",
"true",
"if",
"a",
"measurement",
"has",
"at",
"least",
"one",
"non",
"-",
"tombstoned",
"series",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L517-L531 |
12,841 | influxdata/platform | tsdb/tsi1/partition.go | createSeriesListIfNotExists | func (p *Partition) createSeriesListIfNotExists(collection *tsdb.SeriesCollection) ([]tsdb.SeriesID, error) {
// Is there anything to do? The partition may have been sent an empty batch.
if collection.Length() == 0 {
return nil, nil
} else if len(collection.Names) != len(collection.Tags) {
return nil, fmt.Errorf("uneven batch, partition %s sent %d names and %d tags", p.id, len(collection.Names), len(collection.Tags))
}
// Maintain reference count on files in file set.
fs, err := p.RetainFileSet()
if err != nil {
return nil, err
}
defer fs.Release()
// Ensure fileset cannot change during insert.
now := time.Now()
p.mu.RLock()
// Insert series into log file.
ids, err := p.activeLogFile.AddSeriesList(p.seriesIDSet, collection)
if err != nil {
p.mu.RUnlock()
return nil, err
}
p.mu.RUnlock()
if err := p.CheckLogFile(); err != nil {
return nil, err
}
// NOTE(edd): if this becomes expensive then we can move the count into the
// log file.
var totalNew uint64
for _, id := range ids {
if !id.IsZero() {
totalNew++
}
}
if totalNew > 0 {
p.tracker.AddSeriesCreated(totalNew, time.Since(now))
p.tracker.AddSeries(totalNew)
p.mu.RLock()
p.tracker.SetDiskSize(uint64(p.fileSet.Size()))
p.mu.RUnlock()
}
return ids, nil
} | go | func (p *Partition) createSeriesListIfNotExists(collection *tsdb.SeriesCollection) ([]tsdb.SeriesID, error) {
// Is there anything to do? The partition may have been sent an empty batch.
if collection.Length() == 0 {
return nil, nil
} else if len(collection.Names) != len(collection.Tags) {
return nil, fmt.Errorf("uneven batch, partition %s sent %d names and %d tags", p.id, len(collection.Names), len(collection.Tags))
}
// Maintain reference count on files in file set.
fs, err := p.RetainFileSet()
if err != nil {
return nil, err
}
defer fs.Release()
// Ensure fileset cannot change during insert.
now := time.Now()
p.mu.RLock()
// Insert series into log file.
ids, err := p.activeLogFile.AddSeriesList(p.seriesIDSet, collection)
if err != nil {
p.mu.RUnlock()
return nil, err
}
p.mu.RUnlock()
if err := p.CheckLogFile(); err != nil {
return nil, err
}
// NOTE(edd): if this becomes expensive then we can move the count into the
// log file.
var totalNew uint64
for _, id := range ids {
if !id.IsZero() {
totalNew++
}
}
if totalNew > 0 {
p.tracker.AddSeriesCreated(totalNew, time.Since(now))
p.tracker.AddSeries(totalNew)
p.mu.RLock()
p.tracker.SetDiskSize(uint64(p.fileSet.Size()))
p.mu.RUnlock()
}
return ids, nil
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"createSeriesListIfNotExists",
"(",
"collection",
"*",
"tsdb",
".",
"SeriesCollection",
")",
"(",
"[",
"]",
"tsdb",
".",
"SeriesID",
",",
"error",
")",
"{",
"// Is there anything to do? The partition may have been sent an empty batch.",
"if",
"collection",
".",
"Length",
"(",
")",
"==",
"0",
"{",
"return",
"nil",
",",
"nil",
"\n",
"}",
"else",
"if",
"len",
"(",
"collection",
".",
"Names",
")",
"!=",
"len",
"(",
"collection",
".",
"Tags",
")",
"{",
"return",
"nil",
",",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"p",
".",
"id",
",",
"len",
"(",
"collection",
".",
"Names",
")",
",",
"len",
"(",
"collection",
".",
"Tags",
")",
")",
"\n",
"}",
"\n\n",
"// Maintain reference count on files in file set.",
"fs",
",",
"err",
":=",
"p",
".",
"RetainFileSet",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"defer",
"fs",
".",
"Release",
"(",
")",
"\n\n",
"// Ensure fileset cannot change during insert.",
"now",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"p",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"// Insert series into log file.",
"ids",
",",
"err",
":=",
"p",
".",
"activeLogFile",
".",
"AddSeriesList",
"(",
"p",
".",
"seriesIDSet",
",",
"collection",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"p",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"p",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n\n",
"if",
"err",
":=",
"p",
".",
"CheckLogFile",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"// NOTE(edd): if this becomes expensive then we can move the count into the",
"// log file.",
"var",
"totalNew",
"uint64",
"\n",
"for",
"_",
",",
"id",
":=",
"range",
"ids",
"{",
"if",
"!",
"id",
".",
"IsZero",
"(",
")",
"{",
"totalNew",
"++",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"totalNew",
">",
"0",
"{",
"p",
".",
"tracker",
".",
"AddSeriesCreated",
"(",
"totalNew",
",",
"time",
".",
"Since",
"(",
"now",
")",
")",
"\n",
"p",
".",
"tracker",
".",
"AddSeries",
"(",
"totalNew",
")",
"\n",
"p",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"p",
".",
"tracker",
".",
"SetDiskSize",
"(",
"uint64",
"(",
"p",
".",
"fileSet",
".",
"Size",
"(",
")",
")",
")",
"\n",
"p",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"}",
"\n",
"return",
"ids",
",",
"nil",
"\n",
"}"
] | // createSeriesListIfNotExists creates a list of series if they don't exist in
// bulk. | [
"createSeriesListIfNotExists",
"creates",
"a",
"list",
"of",
"series",
"if",
"they",
"doesn",
"t",
"exist",
"in",
"bulk",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L666-L712 |
12,842 | influxdata/platform | tsdb/tsi1/partition.go | TagValueSeriesIDIterator | func (p *Partition) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) {
fs, err := p.RetainFileSet()
if err != nil {
return nil, err
}
itr, err := fs.TagValueSeriesIDIterator(name, key, value)
if err != nil {
return nil, err
} else if itr == nil {
fs.Release()
return nil, nil
}
return newFileSetSeriesIDIterator(fs, itr), nil
} | go | func (p *Partition) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) {
fs, err := p.RetainFileSet()
if err != nil {
return nil, err
}
itr, err := fs.TagValueSeriesIDIterator(name, key, value)
if err != nil {
return nil, err
} else if itr == nil {
fs.Release()
return nil, nil
}
return newFileSetSeriesIDIterator(fs, itr), nil
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"TagValueSeriesIDIterator",
"(",
"name",
",",
"key",
",",
"value",
"[",
"]",
"byte",
")",
"(",
"tsdb",
".",
"SeriesIDIterator",
",",
"error",
")",
"{",
"fs",
",",
"err",
":=",
"p",
".",
"RetainFileSet",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"itr",
",",
"err",
":=",
"fs",
".",
"TagValueSeriesIDIterator",
"(",
"name",
",",
"key",
",",
"value",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"else",
"if",
"itr",
"==",
"nil",
"{",
"fs",
".",
"Release",
"(",
")",
"\n",
"return",
"nil",
",",
"nil",
"\n",
"}",
"\n",
"return",
"newFileSetSeriesIDIterator",
"(",
"fs",
",",
"itr",
")",
",",
"nil",
"\n",
"}"
] | // TagValueSeriesIDIterator returns a series iterator for a single key value. | [
"TagValueSeriesIDIterator",
"returns",
"a",
"series",
"iterator",
"for",
"a",
"single",
"key",
"value",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L803-L817 |
12,843 | influxdata/platform | tsdb/tsi1/partition.go | ForEachMeasurementTagKey | func (p *Partition) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
fs, err := p.RetainFileSet()
if err != nil {
return err
}
defer fs.Release()
itr := fs.TagKeyIterator(name)
if itr == nil {
return nil
}
for e := itr.Next(); e != nil; e = itr.Next() {
if err := fn(e.Key()); err != nil {
return err
}
}
return nil
} | go | func (p *Partition) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
fs, err := p.RetainFileSet()
if err != nil {
return err
}
defer fs.Release()
itr := fs.TagKeyIterator(name)
if itr == nil {
return nil
}
for e := itr.Next(); e != nil; e = itr.Next() {
if err := fn(e.Key()); err != nil {
return err
}
}
return nil
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"ForEachMeasurementTagKey",
"(",
"name",
"[",
"]",
"byte",
",",
"fn",
"func",
"(",
"key",
"[",
"]",
"byte",
")",
"error",
")",
"error",
"{",
"fs",
",",
"err",
":=",
"p",
".",
"RetainFileSet",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"defer",
"fs",
".",
"Release",
"(",
")",
"\n\n",
"itr",
":=",
"fs",
".",
"TagKeyIterator",
"(",
"name",
")",
"\n",
"if",
"itr",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"for",
"e",
":=",
"itr",
".",
"Next",
"(",
")",
";",
"e",
"!=",
"nil",
";",
"e",
"=",
"itr",
".",
"Next",
"(",
")",
"{",
"if",
"err",
":=",
"fn",
"(",
"e",
".",
"Key",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // ForEachMeasurementTagKey iterates over all tag keys in a measurement. | [
"ForEachMeasurementTagKey",
"iterates",
"over",
"all",
"tag",
"keys",
"in",
"a",
"measurement",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L831-L850 |
12,844 | influxdata/platform | tsdb/tsi1/partition.go | Compact | func (p *Partition) Compact() {
p.mu.Lock()
defer p.mu.Unlock()
p.compact()
} | go | func (p *Partition) Compact() {
p.mu.Lock()
defer p.mu.Unlock()
p.compact()
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"Compact",
"(",
")",
"{",
"p",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"p",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"p",
".",
"compact",
"(",
")",
"\n",
"}"
] | // Compact requests a compaction of log files. | [
"Compact",
"requests",
"a",
"compaction",
"of",
"log",
"files",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L863-L867 |
12,845 | influxdata/platform | tsdb/tsi1/partition.go | readStatsFile | func (p *Partition) readStatsFile() error {
p.stats = NewMeasurementCardinalityStats()
f, err := os.Open(p.StatsPath())
if os.IsNotExist(err) {
p.statsSize = 0
return nil
} else if err != nil {
return err
}
defer f.Close()
n, err := p.stats.ReadFrom(bufio.NewReader(f))
if err != nil {
return err
}
p.statsSize = n
return nil
} | go | func (p *Partition) readStatsFile() error {
p.stats = NewMeasurementCardinalityStats()
f, err := os.Open(p.StatsPath())
if os.IsNotExist(err) {
p.statsSize = 0
return nil
} else if err != nil {
return err
}
defer f.Close()
n, err := p.stats.ReadFrom(bufio.NewReader(f))
if err != nil {
return err
}
p.statsSize = n
return nil
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"readStatsFile",
"(",
")",
"error",
"{",
"p",
".",
"stats",
"=",
"NewMeasurementCardinalityStats",
"(",
")",
"\n\n",
"f",
",",
"err",
":=",
"os",
".",
"Open",
"(",
"p",
".",
"StatsPath",
"(",
")",
")",
"\n",
"if",
"os",
".",
"IsNotExist",
"(",
"err",
")",
"{",
"p",
".",
"statsSize",
"=",
"0",
"\n",
"return",
"nil",
"\n",
"}",
"else",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"defer",
"f",
".",
"Close",
"(",
")",
"\n\n",
"n",
",",
"err",
":=",
"p",
".",
"stats",
".",
"ReadFrom",
"(",
"bufio",
".",
"NewReader",
"(",
"f",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"p",
".",
"statsSize",
"=",
"n",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // readStatsFile reads the stats file into memory and updates the stats size. | [
"readStatsFile",
"reads",
"the",
"stats",
"file",
"into",
"memory",
"and",
"updates",
"the",
"stats",
"size",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L1236-L1255 |
12,846 | influxdata/platform | tsdb/tsi1/partition.go | writeStatsFile | func (p *Partition) writeStatsFile() error {
tmpPath := p.StatsPath() + ".tmp"
f, err := os.Create(tmpPath)
if err != nil {
return err
}
defer f.Close()
n, err := p.stats.WriteTo(f)
if err != nil {
return err
}
if err := f.Close(); err != nil {
return err
} else if err := os.Rename(tmpPath, p.StatsPath()); err != nil {
return err
}
p.statsSize = n
return nil
} | go | func (p *Partition) writeStatsFile() error {
tmpPath := p.StatsPath() + ".tmp"
f, err := os.Create(tmpPath)
if err != nil {
return err
}
defer f.Close()
n, err := p.stats.WriteTo(f)
if err != nil {
return err
}
if err := f.Close(); err != nil {
return err
} else if err := os.Rename(tmpPath, p.StatsPath()); err != nil {
return err
}
p.statsSize = n
return nil
} | [
"func",
"(",
"p",
"*",
"Partition",
")",
"writeStatsFile",
"(",
")",
"error",
"{",
"tmpPath",
":=",
"p",
".",
"StatsPath",
"(",
")",
"+",
"\"",
"\"",
"\n",
"f",
",",
"err",
":=",
"os",
".",
"Create",
"(",
"tmpPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"defer",
"f",
".",
"Close",
"(",
")",
"\n\n",
"n",
",",
"err",
":=",
"p",
".",
"stats",
".",
"WriteTo",
"(",
"f",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"f",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"else",
"if",
"err",
":=",
"os",
".",
"Rename",
"(",
"tmpPath",
",",
"p",
".",
"StatsPath",
"(",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"p",
".",
"statsSize",
"=",
"n",
"\n",
"return",
"nil",
"\n",
"}"
] | // writeStatsFile writes the stats file and updates the stats size. | [
"writeStatsFile",
"writes",
"the",
"stats",
"file",
"and",
"updates",
"the",
"stats",
"size",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/partition.go#L1258-L1279 |
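writeStatsFile above uses the common write-then-rename idiom so readers never observe a half-written stats file. The same idiom extracted into a generic, standard-library-only helper sketch:

package example

import (
	"io"
	"os"
)

// writeFileAtomically writes to a sibling ".tmp" file, closes it, then renames
// it over the destination, mirroring the sequence in writeStatsFile above.
func writeFileAtomically(path string, write func(w io.Writer) error) error {
	tmp := path + ".tmp"
	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	if err := write(f); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}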
12,847 | influxdata/platform | storage/engine.go | WithLogger | func (e *Engine) WithLogger(log *zap.Logger) {
fields := []zap.Field{}
if e.nodeID != nil {
fields = append(fields, zap.Int("node_id", *e.nodeID))
}
if e.engineID != nil {
fields = append(fields, zap.Int("engine_id", *e.engineID))
}
fields = append(fields, zap.String("service", "storage-engine"))
e.logger = log.With(fields...)
e.sfile.WithLogger(e.logger)
e.index.WithLogger(e.logger)
e.engine.WithLogger(e.logger)
e.retentionEnforcer.WithLogger(e.logger)
} | go | func (e *Engine) WithLogger(log *zap.Logger) {
fields := []zap.Field{}
if e.nodeID != nil {
fields = append(fields, zap.Int("node_id", *e.nodeID))
}
if e.engineID != nil {
fields = append(fields, zap.Int("engine_id", *e.engineID))
}
fields = append(fields, zap.String("service", "storage-engine"))
e.logger = log.With(fields...)
e.sfile.WithLogger(e.logger)
e.index.WithLogger(e.logger)
e.engine.WithLogger(e.logger)
e.retentionEnforcer.WithLogger(e.logger)
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"WithLogger",
"(",
"log",
"*",
"zap",
".",
"Logger",
")",
"{",
"fields",
":=",
"[",
"]",
"zap",
".",
"Field",
"{",
"}",
"\n",
"if",
"e",
".",
"nodeID",
"!=",
"nil",
"{",
"fields",
"=",
"append",
"(",
"fields",
",",
"zap",
".",
"Int",
"(",
"\"",
"\"",
",",
"*",
"e",
".",
"nodeID",
")",
")",
"\n",
"}",
"\n\n",
"if",
"e",
".",
"engineID",
"!=",
"nil",
"{",
"fields",
"=",
"append",
"(",
"fields",
",",
"zap",
".",
"Int",
"(",
"\"",
"\"",
",",
"*",
"e",
".",
"engineID",
")",
")",
"\n",
"}",
"\n",
"fields",
"=",
"append",
"(",
"fields",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
")",
"\n\n",
"e",
".",
"logger",
"=",
"log",
".",
"With",
"(",
"fields",
"...",
")",
"\n",
"e",
".",
"sfile",
".",
"WithLogger",
"(",
"e",
".",
"logger",
")",
"\n",
"e",
".",
"index",
".",
"WithLogger",
"(",
"e",
".",
"logger",
")",
"\n",
"e",
".",
"engine",
".",
"WithLogger",
"(",
"e",
".",
"logger",
")",
"\n",
"e",
".",
"retentionEnforcer",
".",
"WithLogger",
"(",
"e",
".",
"logger",
")",
"\n",
"}"
] | // WithLogger sets the logger on the Store. It must be called before Open. | [
"WithLogger",
"sets",
"the",
"logger",
"on",
"the",
"Store",
".",
"It",
"must",
"be",
"called",
"before",
"Open",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/storage/engine.go#L148-L164 |
12,848 | influxdata/platform | storage/engine.go | WritePoints | func (e *Engine) WritePoints(points []models.Point) error {
collection := tsdb.NewSeriesCollection(points)
j := 0
for iter := collection.Iterator(); iter.Next(); {
tags := iter.Tags()
if tags.Len() > 0 && bytes.Equal(tags[0].Key, tsdb.FieldKeyTagKeyBytes) && bytes.Equal(tags[0].Value, timeBytes) {
// Field key "time" is invalid
if collection.Reason == "" {
collection.Reason = fmt.Sprintf("invalid field key: input field %q is invalid", timeBytes)
}
collection.Dropped++
collection.DroppedKeys = append(collection.DroppedKeys, iter.Key())
continue
}
// Filter out any tags with key equal to "time": they are invalid.
if tags.Get(timeBytes) != nil {
if collection.Reason == "" {
collection.Reason = fmt.Sprintf("invalid tag key: input tag %q on measurement %q is invalid", timeBytes, iter.Name())
}
collection.Dropped++
collection.DroppedKeys = append(collection.DroppedKeys, iter.Key())
continue
}
// Drop any series with invalid unicode characters in the key.
if e.config.ValidateKeys && !models.ValidKeyTokens(string(iter.Name()), tags) {
if collection.Reason == "" {
collection.Reason = fmt.Sprintf("key contains invalid unicode: %q", iter.Key())
}
collection.Dropped++
collection.DroppedKeys = append(collection.DroppedKeys, iter.Key())
continue
}
collection.Copy(j, iter.Index())
j++
}
collection.Truncate(j)
e.mu.RLock()
defer e.mu.RUnlock()
if e.closing == nil {
return ErrEngineClosed
}
// Add new series to the index and series file. Check for partial writes.
if err := e.index.CreateSeriesListIfNotExists(collection); err != nil {
// ignore PartialWriteErrors. The collection captures it.
// TODO(edd/jeff): should we just remove PartialWriteError from the index then?
if _, ok := err.(tsdb.PartialWriteError); !ok {
return err
}
}
// Write the points to the cache and WAL.
if err := e.engine.WritePoints(collection.Points); err != nil {
return err
}
return collection.PartialWriteError()
} | go | func (e *Engine) WritePoints(points []models.Point) error {
collection := tsdb.NewSeriesCollection(points)
j := 0
for iter := collection.Iterator(); iter.Next(); {
tags := iter.Tags()
if tags.Len() > 0 && bytes.Equal(tags[0].Key, tsdb.FieldKeyTagKeyBytes) && bytes.Equal(tags[0].Value, timeBytes) {
// Field key "time" is invalid
if collection.Reason == "" {
collection.Reason = fmt.Sprintf("invalid field key: input field %q is invalid", timeBytes)
}
collection.Dropped++
collection.DroppedKeys = append(collection.DroppedKeys, iter.Key())
continue
}
// Filter out any tags with key equal to "time": they are invalid.
if tags.Get(timeBytes) != nil {
if collection.Reason == "" {
collection.Reason = fmt.Sprintf("invalid tag key: input tag %q on measurement %q is invalid", timeBytes, iter.Name())
}
collection.Dropped++
collection.DroppedKeys = append(collection.DroppedKeys, iter.Key())
continue
}
// Drop any series with invalid unicode characters in the key.
if e.config.ValidateKeys && !models.ValidKeyTokens(string(iter.Name()), tags) {
if collection.Reason == "" {
collection.Reason = fmt.Sprintf("key contains invalid unicode: %q", iter.Key())
}
collection.Dropped++
collection.DroppedKeys = append(collection.DroppedKeys, iter.Key())
continue
}
collection.Copy(j, iter.Index())
j++
}
collection.Truncate(j)
e.mu.RLock()
defer e.mu.RUnlock()
if e.closing == nil {
return ErrEngineClosed
}
// Add new series to the index and series file. Check for partial writes.
if err := e.index.CreateSeriesListIfNotExists(collection); err != nil {
// ignore PartialWriteErrors. The collection captures it.
// TODO(edd/jeff): should we just remove PartialWriteError from the index then?
if _, ok := err.(tsdb.PartialWriteError); !ok {
return err
}
}
// Write the points to the cache and WAL.
if err := e.engine.WritePoints(collection.Points); err != nil {
return err
}
return collection.PartialWriteError()
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"WritePoints",
"(",
"points",
"[",
"]",
"models",
".",
"Point",
")",
"error",
"{",
"collection",
":=",
"tsdb",
".",
"NewSeriesCollection",
"(",
"points",
")",
"\n\n",
"j",
":=",
"0",
"\n",
"for",
"iter",
":=",
"collection",
".",
"Iterator",
"(",
")",
";",
"iter",
".",
"Next",
"(",
")",
";",
"{",
"tags",
":=",
"iter",
".",
"Tags",
"(",
")",
"\n\n",
"if",
"tags",
".",
"Len",
"(",
")",
">",
"0",
"&&",
"bytes",
".",
"Equal",
"(",
"tags",
"[",
"0",
"]",
".",
"Key",
",",
"tsdb",
".",
"FieldKeyTagKeyBytes",
")",
"&&",
"bytes",
".",
"Equal",
"(",
"tags",
"[",
"0",
"]",
".",
"Value",
",",
"timeBytes",
")",
"{",
"// Field key \"time\" is invalid",
"if",
"collection",
".",
"Reason",
"==",
"\"",
"\"",
"{",
"collection",
".",
"Reason",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"timeBytes",
")",
"\n",
"}",
"\n",
"collection",
".",
"Dropped",
"++",
"\n",
"collection",
".",
"DroppedKeys",
"=",
"append",
"(",
"collection",
".",
"DroppedKeys",
",",
"iter",
".",
"Key",
"(",
")",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// Filter out any tags with key equal to \"time\": they are invalid.",
"if",
"tags",
".",
"Get",
"(",
"timeBytes",
")",
"!=",
"nil",
"{",
"if",
"collection",
".",
"Reason",
"==",
"\"",
"\"",
"{",
"collection",
".",
"Reason",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"timeBytes",
",",
"iter",
".",
"Name",
"(",
")",
")",
"\n",
"}",
"\n",
"collection",
".",
"Dropped",
"++",
"\n",
"collection",
".",
"DroppedKeys",
"=",
"append",
"(",
"collection",
".",
"DroppedKeys",
",",
"iter",
".",
"Key",
"(",
")",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// Drop any series with invalid unicode characters in the key.",
"if",
"e",
".",
"config",
".",
"ValidateKeys",
"&&",
"!",
"models",
".",
"ValidKeyTokens",
"(",
"string",
"(",
"iter",
".",
"Name",
"(",
")",
")",
",",
"tags",
")",
"{",
"if",
"collection",
".",
"Reason",
"==",
"\"",
"\"",
"{",
"collection",
".",
"Reason",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"iter",
".",
"Key",
"(",
")",
")",
"\n",
"}",
"\n",
"collection",
".",
"Dropped",
"++",
"\n",
"collection",
".",
"DroppedKeys",
"=",
"append",
"(",
"collection",
".",
"DroppedKeys",
",",
"iter",
".",
"Key",
"(",
")",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"collection",
".",
"Copy",
"(",
"j",
",",
"iter",
".",
"Index",
"(",
")",
")",
"\n",
"j",
"++",
"\n",
"}",
"\n",
"collection",
".",
"Truncate",
"(",
"j",
")",
"\n\n",
"e",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"e",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n\n",
"if",
"e",
".",
"closing",
"==",
"nil",
"{",
"return",
"ErrEngineClosed",
"\n",
"}",
"\n\n",
"// Add new series to the index and series file. Check for partial writes.",
"if",
"err",
":=",
"e",
".",
"index",
".",
"CreateSeriesListIfNotExists",
"(",
"collection",
")",
";",
"err",
"!=",
"nil",
"{",
"// ignore PartialWriteErrors. The collection captures it.",
"// TODO(edd/jeff): should we just remove PartialWriteError from the index then?",
"if",
"_",
",",
"ok",
":=",
"err",
".",
"(",
"tsdb",
".",
"PartialWriteError",
")",
";",
"!",
"ok",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Write the points to the cache and WAL.",
"if",
"err",
":=",
"e",
".",
"engine",
".",
"WritePoints",
"(",
"collection",
".",
"Points",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"return",
"collection",
".",
"PartialWriteError",
"(",
")",
"\n",
"}"
] | // WritePoints writes the provided points to the engine.
//
// The Engine expects all points to have been correctly validated by the caller.
// WritePoints will however determine if there are any field type conflicts, and
// return an appropriate error in that case. | [
"WritePoints",
"writes",
"the",
"provided",
"points",
"to",
"the",
"engine",
".",
"The",
"Engine",
"expects",
"all",
"points",
"to",
"have",
"been",
"correctly",
"validated",
"by",
"the",
"caller",
".",
"WritePoints",
"will",
"however",
"determine",
"if",
"there",
"are",
"any",
"field",
"type",
"conflicts",
"and",
"return",
"an",
"appropriate",
"error",
"in",
"that",
"case",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/storage/engine.go#L304-L367 |
12,849 | influxdata/platform | storage/engine.go | DeleteSeriesRangeWithPredicate | func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error {
e.mu.RLock()
defer e.mu.RUnlock()
if e.closing == nil {
return ErrEngineClosed
}
return e.engine.DeleteSeriesRangeWithPredicate(itr, fn)
} | go | func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, fn func([]byte, models.Tags) (int64, int64, bool)) error {
e.mu.RLock()
defer e.mu.RUnlock()
if e.closing == nil {
return ErrEngineClosed
}
return e.engine.DeleteSeriesRangeWithPredicate(itr, fn)
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"DeleteSeriesRangeWithPredicate",
"(",
"itr",
"tsdb",
".",
"SeriesIterator",
",",
"fn",
"func",
"(",
"[",
"]",
"byte",
",",
"models",
".",
"Tags",
")",
"(",
"int64",
",",
"int64",
",",
"bool",
")",
")",
"error",
"{",
"e",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"e",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"e",
".",
"closing",
"==",
"nil",
"{",
"return",
"ErrEngineClosed",
"\n",
"}",
"\n",
"return",
"e",
".",
"engine",
".",
"DeleteSeriesRangeWithPredicate",
"(",
"itr",
",",
"fn",
")",
"\n",
"}"
] | // DeleteSeriesRangeWithPredicate deletes all series data iterated over if fn returns
// true for that series. | [
"DeleteSeriesRangeWithPredicate",
"deletes",
"all",
"series",
"data",
"iterated",
"over",
"if",
"fn",
"returns",
"true",
"for",
"that",
"series",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/storage/engine.go#L387-L394 |
12,850 | influxdata/platform | kv/example.go | NewExampleService | func NewExampleService(kv Store, idGen platform.IDGenerator) *ExampleService {
return &ExampleService{
kv: kv,
idGenerator: idGen,
}
} | go | func NewExampleService(kv Store, idGen platform.IDGenerator) *ExampleService {
return &ExampleService{
kv: kv,
idGenerator: idGen,
}
} | [
"func",
"NewExampleService",
"(",
"kv",
"Store",
",",
"idGen",
"platform",
".",
"IDGenerator",
")",
"*",
"ExampleService",
"{",
"return",
"&",
"ExampleService",
"{",
"kv",
":",
"kv",
",",
"idGenerator",
":",
"idGen",
",",
"}",
"\n",
"}"
] | // NewExampleService creates an instance of an example service. | [
"NewExampleService",
"creates",
"an",
"instance",
"of",
"an",
"example",
"service",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L25-L30 |
12,851 | influxdata/platform | kv/example.go | Initialize | func (c *ExampleService) Initialize() error {
return c.kv.Update(func(tx Tx) error {
if _, err := tx.Bucket([]byte(exampleBucket)); err != nil {
return err
}
if _, err := tx.Bucket([]byte(exampleIndex)); err != nil {
return err
}
return nil
})
} | go | func (c *ExampleService) Initialize() error {
return c.kv.Update(func(tx Tx) error {
if _, err := tx.Bucket([]byte(exampleBucket)); err != nil {
return err
}
if _, err := tx.Bucket([]byte(exampleIndex)); err != nil {
return err
}
return nil
})
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"Initialize",
"(",
")",
"error",
"{",
"return",
"c",
".",
"kv",
".",
"Update",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"if",
"_",
",",
"err",
":=",
"tx",
".",
"Bucket",
"(",
"[",
"]",
"byte",
"(",
"exampleBucket",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"_",
",",
"err",
":=",
"tx",
".",
"Bucket",
"(",
"[",
"]",
"byte",
"(",
"exampleIndex",
")",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"}"
] | // Initialize creates the buckets for the example service | [
"Initialize",
"creates",
"the",
"buckets",
"for",
"the",
"example",
"service"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L33-L43 |
12,852 | influxdata/platform | kv/example.go | FindUserByID | func (c *ExampleService) FindUserByID(ctx context.Context, id platform.ID) (*platform.User, error) {
var u *platform.User
err := c.kv.View(func(tx Tx) error {
usr, err := c.findUserByID(ctx, tx, id)
if err != nil {
return err
}
u = usr
return nil
})
if err != nil {
return nil, &platform.Error{
Op: "kv/" + platform.OpFindUserByID,
Err: err,
}
}
return u, nil
} | go | func (c *ExampleService) FindUserByID(ctx context.Context, id platform.ID) (*platform.User, error) {
var u *platform.User
err := c.kv.View(func(tx Tx) error {
usr, err := c.findUserByID(ctx, tx, id)
if err != nil {
return err
}
u = usr
return nil
})
if err != nil {
return nil, &platform.Error{
Op: "kv/" + platform.OpFindUserByID,
Err: err,
}
}
return u, nil
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"FindUserByID",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
")",
"(",
"*",
"platform",
".",
"User",
",",
"error",
")",
"{",
"var",
"u",
"*",
"platform",
".",
"User",
"\n\n",
"err",
":=",
"c",
".",
"kv",
".",
"View",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"usr",
",",
"err",
":=",
"c",
".",
"findUserByID",
"(",
"ctx",
",",
"tx",
",",
"id",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"u",
"=",
"usr",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Op",
":",
"\"",
"\"",
"+",
"platform",
".",
"OpFindUserByID",
",",
"Err",
":",
"err",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"u",
",",
"nil",
"\n",
"}"
] | // FindUserByID retrieves an example by id. | [
"FindUserByID",
"retrieves",
"a",
"example",
"by",
"id",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L46-L66 |
12,853 | influxdata/platform | kv/example.go | FindUserByName | func (c *ExampleService) FindUserByName(ctx context.Context, n string) (*platform.User, error) {
var u *platform.User
err := c.kv.View(func(tx Tx) error {
usr, err := c.findUserByName(ctx, tx, n)
if err != nil {
return err
}
u = usr
return nil
})
return u, err
} | go | func (c *ExampleService) FindUserByName(ctx context.Context, n string) (*platform.User, error) {
var u *platform.User
err := c.kv.View(func(tx Tx) error {
usr, err := c.findUserByName(ctx, tx, n)
if err != nil {
return err
}
u = usr
return nil
})
return u, err
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"FindUserByName",
"(",
"ctx",
"context",
".",
"Context",
",",
"n",
"string",
")",
"(",
"*",
"platform",
".",
"User",
",",
"error",
")",
"{",
"var",
"u",
"*",
"platform",
".",
"User",
"\n\n",
"err",
":=",
"c",
".",
"kv",
".",
"View",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"usr",
",",
"err",
":=",
"c",
".",
"findUserByName",
"(",
"ctx",
",",
"tx",
",",
"n",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"u",
"=",
"usr",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n\n",
"return",
"u",
",",
"err",
"\n",
"}"
] | // FindUserByName returns an example by name for a particular example. | [
"FindUserByName",
"returns",
"a",
"example",
"by",
"name",
"for",
"a",
"particular",
"example",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L99-L112 |
12,854 | influxdata/platform | kv/example.go | FindUser | func (c *ExampleService) FindUser(ctx context.Context, filter platform.UserFilter) (*platform.User, error) {
if filter.ID != nil {
u, err := c.FindUserByID(ctx, *filter.ID)
if err != nil {
return nil, &platform.Error{
Op: "kv/" + platform.OpFindUser,
Err: err,
}
}
return u, nil
}
if filter.Name != nil {
return c.FindUserByName(ctx, *filter.Name)
}
filterFn := filterExamplesFn(filter)
var u *platform.User
err := c.kv.View(func(tx Tx) error {
return forEachExample(ctx, tx, func(usr *platform.User) bool {
if filterFn(usr) {
u = usr
return false
}
return true
})
})
if err != nil {
return nil, err
}
if u == nil {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: "user not found",
}
}
return u, nil
} | go | func (c *ExampleService) FindUser(ctx context.Context, filter platform.UserFilter) (*platform.User, error) {
if filter.ID != nil {
u, err := c.FindUserByID(ctx, *filter.ID)
if err != nil {
return nil, &platform.Error{
Op: "kv/" + platform.OpFindUser,
Err: err,
}
}
return u, nil
}
if filter.Name != nil {
return c.FindUserByName(ctx, *filter.Name)
}
filterFn := filterExamplesFn(filter)
var u *platform.User
err := c.kv.View(func(tx Tx) error {
return forEachExample(ctx, tx, func(usr *platform.User) bool {
if filterFn(usr) {
u = usr
return false
}
return true
})
})
if err != nil {
return nil, err
}
if u == nil {
return nil, &platform.Error{
Code: platform.ENotFound,
Msg: "user not found",
}
}
return u, nil
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"FindUser",
"(",
"ctx",
"context",
".",
"Context",
",",
"filter",
"platform",
".",
"UserFilter",
")",
"(",
"*",
"platform",
".",
"User",
",",
"error",
")",
"{",
"if",
"filter",
".",
"ID",
"!=",
"nil",
"{",
"u",
",",
"err",
":=",
"c",
".",
"FindUserByID",
"(",
"ctx",
",",
"*",
"filter",
".",
"ID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Op",
":",
"\"",
"\"",
"+",
"platform",
".",
"OpFindUser",
",",
"Err",
":",
"err",
",",
"}",
"\n",
"}",
"\n",
"return",
"u",
",",
"nil",
"\n",
"}",
"\n\n",
"if",
"filter",
".",
"Name",
"!=",
"nil",
"{",
"return",
"c",
".",
"FindUserByName",
"(",
"ctx",
",",
"*",
"filter",
".",
"Name",
")",
"\n",
"}",
"\n\n",
"filterFn",
":=",
"filterExamplesFn",
"(",
"filter",
")",
"\n\n",
"var",
"u",
"*",
"platform",
".",
"User",
"\n",
"err",
":=",
"c",
".",
"kv",
".",
"View",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"return",
"forEachExample",
"(",
"ctx",
",",
"tx",
",",
"func",
"(",
"usr",
"*",
"platform",
".",
"User",
")",
"bool",
"{",
"if",
"filterFn",
"(",
"usr",
")",
"{",
"u",
"=",
"usr",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}",
")",
"\n",
"}",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"u",
"==",
"nil",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Code",
":",
"platform",
".",
"ENotFound",
",",
"Msg",
":",
"\"",
"\"",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"u",
",",
"nil",
"\n",
"}"
] | // FindUser retrieves an example using an arbitrary example filter.
// Filters using ID, or Name should be efficient.
// Other filters will do a linear scan across examples until it finds a match. | [
"FindUser",
"retrives",
"a",
"example",
"using",
"an",
"arbitrary",
"example",
"filter",
".",
"Filters",
"using",
"ID",
"or",
"Name",
"should",
"be",
"efficient",
".",
"Other",
"filters",
"will",
"do",
"a",
"linear",
"scan",
"across",
"examples",
"until",
"it",
"finds",
"a",
"match",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L141-L182 |
12,855 | influxdata/platform | kv/example.go | FindUsers | func (c *ExampleService) FindUsers(ctx context.Context, filter platform.UserFilter, opt ...platform.FindOptions) ([]*platform.User, int, error) {
op := platform.OpFindUsers
if filter.ID != nil {
u, err := c.FindUserByID(ctx, *filter.ID)
if err != nil {
return nil, 0, &platform.Error{
Err: err,
Op: "kv/" + op,
}
}
return []*platform.User{u}, 1, nil
}
if filter.Name != nil {
u, err := c.FindUserByName(ctx, *filter.Name)
if err != nil {
return nil, 0, &platform.Error{
Err: err,
Op: "kv/" + op,
}
}
return []*platform.User{u}, 1, nil
}
us := []*platform.User{}
filterFn := filterExamplesFn(filter)
err := c.kv.View(func(tx Tx) error {
return forEachExample(ctx, tx, func(u *platform.User) bool {
if filterFn(u) {
us = append(us, u)
}
return true
})
})
if err != nil {
return nil, 0, err
}
return us, len(us), nil
} | go | func (c *ExampleService) FindUsers(ctx context.Context, filter platform.UserFilter, opt ...platform.FindOptions) ([]*platform.User, int, error) {
op := platform.OpFindUsers
if filter.ID != nil {
u, err := c.FindUserByID(ctx, *filter.ID)
if err != nil {
return nil, 0, &platform.Error{
Err: err,
Op: "kv/" + op,
}
}
return []*platform.User{u}, 1, nil
}
if filter.Name != nil {
u, err := c.FindUserByName(ctx, *filter.Name)
if err != nil {
return nil, 0, &platform.Error{
Err: err,
Op: "kv/" + op,
}
}
return []*platform.User{u}, 1, nil
}
us := []*platform.User{}
filterFn := filterExamplesFn(filter)
err := c.kv.View(func(tx Tx) error {
return forEachExample(ctx, tx, func(u *platform.User) bool {
if filterFn(u) {
us = append(us, u)
}
return true
})
})
if err != nil {
return nil, 0, err
}
return us, len(us), nil
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"FindUsers",
"(",
"ctx",
"context",
".",
"Context",
",",
"filter",
"platform",
".",
"UserFilter",
",",
"opt",
"...",
"platform",
".",
"FindOptions",
")",
"(",
"[",
"]",
"*",
"platform",
".",
"User",
",",
"int",
",",
"error",
")",
"{",
"op",
":=",
"platform",
".",
"OpFindUsers",
"\n",
"if",
"filter",
".",
"ID",
"!=",
"nil",
"{",
"u",
",",
"err",
":=",
"c",
".",
"FindUserByID",
"(",
"ctx",
",",
"*",
"filter",
".",
"ID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"0",
",",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"\"",
"\"",
"+",
"op",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"[",
"]",
"*",
"platform",
".",
"User",
"{",
"u",
"}",
",",
"1",
",",
"nil",
"\n",
"}",
"\n\n",
"if",
"filter",
".",
"Name",
"!=",
"nil",
"{",
"u",
",",
"err",
":=",
"c",
".",
"FindUserByName",
"(",
"ctx",
",",
"*",
"filter",
".",
"Name",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"0",
",",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"\"",
"\"",
"+",
"op",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"[",
"]",
"*",
"platform",
".",
"User",
"{",
"u",
"}",
",",
"1",
",",
"nil",
"\n",
"}",
"\n\n",
"us",
":=",
"[",
"]",
"*",
"platform",
".",
"User",
"{",
"}",
"\n",
"filterFn",
":=",
"filterExamplesFn",
"(",
"filter",
")",
"\n",
"err",
":=",
"c",
".",
"kv",
".",
"View",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"return",
"forEachExample",
"(",
"ctx",
",",
"tx",
",",
"func",
"(",
"u",
"*",
"platform",
".",
"User",
")",
"bool",
"{",
"if",
"filterFn",
"(",
"u",
")",
"{",
"us",
"=",
"append",
"(",
"us",
",",
"u",
")",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}",
")",
"\n",
"}",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"us",
",",
"len",
"(",
"us",
")",
",",
"nil",
"\n",
"}"
] | // FindUsers retrieves all examples that match an arbitrary example filter.
// Filters using ID, or Name should be efficient.
// Other filters will do a linear scan across all examples searching for a match. | [
"FindUsers",
"retrives",
"all",
"examples",
"that",
"match",
"an",
"arbitrary",
"example",
"filter",
".",
"Filters",
"using",
"ID",
"or",
"Name",
"should",
"be",
"efficient",
".",
"Other",
"filters",
"will",
"do",
"a",
"linear",
"scan",
"across",
"all",
"examples",
"searching",
"for",
"a",
"match",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L203-L245 |
12,856 | influxdata/platform | kv/example.go | CreateUser | func (c *ExampleService) CreateUser(ctx context.Context, u *platform.User) error {
err := c.kv.Update(func(tx Tx) error {
unique := c.uniqueExampleName(ctx, tx, u)
if !unique {
// TODO: make standard error
return &platform.Error{
Code: platform.EConflict,
Msg: fmt.Sprintf("user with name %s already exists", u.Name),
}
}
u.ID = c.idGenerator.ID()
return c.putUser(ctx, tx, u)
})
if err != nil {
return &platform.Error{
Err: err,
Op: "kv/" + platform.OpCreateUser,
}
}
return nil
} | go | func (c *ExampleService) CreateUser(ctx context.Context, u *platform.User) error {
err := c.kv.Update(func(tx Tx) error {
unique := c.uniqueExampleName(ctx, tx, u)
if !unique {
// TODO: make standard error
return &platform.Error{
Code: platform.EConflict,
Msg: fmt.Sprintf("user with name %s already exists", u.Name),
}
}
u.ID = c.idGenerator.ID()
return c.putUser(ctx, tx, u)
})
if err != nil {
return &platform.Error{
Err: err,
Op: "kv/" + platform.OpCreateUser,
}
}
return nil
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"CreateUser",
"(",
"ctx",
"context",
".",
"Context",
",",
"u",
"*",
"platform",
".",
"User",
")",
"error",
"{",
"err",
":=",
"c",
".",
"kv",
".",
"Update",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"unique",
":=",
"c",
".",
"uniqueExampleName",
"(",
"ctx",
",",
"tx",
",",
"u",
")",
"\n\n",
"if",
"!",
"unique",
"{",
"// TODO: make standard error",
"return",
"&",
"platform",
".",
"Error",
"{",
"Code",
":",
"platform",
".",
"EConflict",
",",
"Msg",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"u",
".",
"Name",
")",
",",
"}",
"\n",
"}",
"\n\n",
"u",
".",
"ID",
"=",
"c",
".",
"idGenerator",
".",
"ID",
"(",
")",
"\n\n",
"return",
"c",
".",
"putUser",
"(",
"ctx",
",",
"tx",
",",
"u",
")",
"\n",
"}",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"\"",
"\"",
"+",
"platform",
".",
"OpCreateUser",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // CreateUser creates a platform example and sets b.ID. | [
"CreateUser",
"creates",
"a",
"platform",
"example",
"and",
"sets",
"b",
".",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L248-L273 |
12,857 | influxdata/platform | kv/example.go | PutUser | func (c *ExampleService) PutUser(ctx context.Context, u *platform.User) error {
return c.kv.Update(func(tx Tx) error {
return c.putUser(ctx, tx, u)
})
} | go | func (c *ExampleService) PutUser(ctx context.Context, u *platform.User) error {
return c.kv.Update(func(tx Tx) error {
return c.putUser(ctx, tx, u)
})
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"PutUser",
"(",
"ctx",
"context",
".",
"Context",
",",
"u",
"*",
"platform",
".",
"User",
")",
"error",
"{",
"return",
"c",
".",
"kv",
".",
"Update",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"return",
"c",
".",
"putUser",
"(",
"ctx",
",",
"tx",
",",
"u",
")",
"\n",
"}",
")",
"\n",
"}"
] | // PutUser will put an example without setting an ID. | [
"PutUser",
"will",
"put",
"a",
"example",
"without",
"setting",
"an",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L276-L280 |
12,858 | influxdata/platform | kv/example.go | forEachExample | func forEachExample(ctx context.Context, tx Tx, fn func(*platform.User) bool) error {
b, err := tx.Bucket(exampleBucket)
if err != nil {
return err
}
cur, err := b.Cursor()
if err != nil {
return err
}
for k, v := cur.First(); k != nil; k, v = cur.Next() {
u := &platform.User{}
if err := json.Unmarshal(v, u); err != nil {
return err
}
if !fn(u) {
break
}
}
return nil
} | go | func forEachExample(ctx context.Context, tx Tx, fn func(*platform.User) bool) error {
b, err := tx.Bucket(exampleBucket)
if err != nil {
return err
}
cur, err := b.Cursor()
if err != nil {
return err
}
for k, v := cur.First(); k != nil; k, v = cur.Next() {
u := &platform.User{}
if err := json.Unmarshal(v, u); err != nil {
return err
}
if !fn(u) {
break
}
}
return nil
} | [
"func",
"forEachExample",
"(",
"ctx",
"context",
".",
"Context",
",",
"tx",
"Tx",
",",
"fn",
"func",
"(",
"*",
"platform",
".",
"User",
")",
"bool",
")",
"error",
"{",
"b",
",",
"err",
":=",
"tx",
".",
"Bucket",
"(",
"exampleBucket",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"cur",
",",
"err",
":=",
"b",
".",
"Cursor",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"for",
"k",
",",
"v",
":=",
"cur",
".",
"First",
"(",
")",
";",
"k",
"!=",
"nil",
";",
"k",
",",
"v",
"=",
"cur",
".",
"Next",
"(",
")",
"{",
"u",
":=",
"&",
"platform",
".",
"User",
"{",
"}",
"\n",
"if",
"err",
":=",
"json",
".",
"Unmarshal",
"(",
"v",
",",
"u",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"!",
"fn",
"(",
"u",
")",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // forEachExample will iterate through all examples while fn returns true. | [
"forEachExample",
"will",
"iterate",
"through",
"all",
"examples",
"while",
"fn",
"returns",
"true",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L314-L336 |
12,859 | influxdata/platform | kv/example.go | UpdateUser | func (c *ExampleService) UpdateUser(ctx context.Context, id platform.ID, upd platform.UserUpdate) (*platform.User, error) {
var u *platform.User
err := c.kv.Update(func(tx Tx) error {
usr, err := c.updateUser(ctx, tx, id, upd)
if err != nil {
return err
}
u = usr
return nil
})
if err != nil {
return nil, &platform.Error{
Err: err,
Op: "kv/" + platform.OpUpdateUser,
}
}
return u, nil
} | go | func (c *ExampleService) UpdateUser(ctx context.Context, id platform.ID, upd platform.UserUpdate) (*platform.User, error) {
var u *platform.User
err := c.kv.Update(func(tx Tx) error {
usr, err := c.updateUser(ctx, tx, id, upd)
if err != nil {
return err
}
u = usr
return nil
})
if err != nil {
return nil, &platform.Error{
Err: err,
Op: "kv/" + platform.OpUpdateUser,
}
}
return u, nil
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"UpdateUser",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
",",
"upd",
"platform",
".",
"UserUpdate",
")",
"(",
"*",
"platform",
".",
"User",
",",
"error",
")",
"{",
"var",
"u",
"*",
"platform",
".",
"User",
"\n",
"err",
":=",
"c",
".",
"kv",
".",
"Update",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"usr",
",",
"err",
":=",
"c",
".",
"updateUser",
"(",
"ctx",
",",
"tx",
",",
"id",
",",
"upd",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"u",
"=",
"usr",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"\"",
"\"",
"+",
"platform",
".",
"OpUpdateUser",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"u",
",",
"nil",
"\n",
"}"
] | // UpdateUser updates an example according to the parameters set on upd. | [
"UpdateUser",
"updates",
"a",
"example",
"according",
"the",
"parameters",
"set",
"on",
"upd",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L351-L370 |
12,860 | influxdata/platform | kv/example.go | DeleteUser | func (c *ExampleService) DeleteUser(ctx context.Context, id platform.ID) error {
err := c.kv.Update(func(tx Tx) error {
return c.deleteUser(ctx, tx, id)
})
if err != nil {
return &platform.Error{
Op: "kv/" + platform.OpDeleteUser,
Err: err,
}
}
return nil
} | go | func (c *ExampleService) DeleteUser(ctx context.Context, id platform.ID) error {
err := c.kv.Update(func(tx Tx) error {
return c.deleteUser(ctx, tx, id)
})
if err != nil {
return &platform.Error{
Op: "kv/" + platform.OpDeleteUser,
Err: err,
}
}
return nil
} | [
"func",
"(",
"c",
"*",
"ExampleService",
")",
"DeleteUser",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
")",
"error",
"{",
"err",
":=",
"c",
".",
"kv",
".",
"Update",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"return",
"c",
".",
"deleteUser",
"(",
"ctx",
",",
"tx",
",",
"id",
")",
"\n",
"}",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Op",
":",
"\"",
"\"",
"+",
"platform",
".",
"OpDeleteUser",
",",
"Err",
":",
"err",
",",
"}",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // DeleteUser deletes an example and prunes it from the index. | [
"DeleteUser",
"deletes",
"a",
"example",
"and",
"prunes",
"it",
"from",
"the",
"index",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/kv/example.go#L400-L413 |
12,861 | influxdata/platform | http/dashboard_service.go | FindDashboards | func (s *DashboardService) FindDashboards(ctx context.Context, filter platform.DashboardFilter, opts platform.FindOptions) ([]*platform.Dashboard, int, error) {
dashboards := []*platform.Dashboard{}
url, err := newURL(s.Addr, dashboardsPath)
if err != nil {
return dashboards, 0, err
}
qp := url.Query()
qp.Add("sortBy", opts.SortBy)
for _, id := range filter.IDs {
qp.Add("id", id.String())
}
url.RawQuery = qp.Encode()
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return dashboards, 0, err
}
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return dashboards, 0, err
}
if err := CheckError(resp, true); err != nil {
return dashboards, 0, err
}
var dr getDashboardsResponse
if err := json.NewDecoder(resp.Body).Decode(&dr); err != nil {
return dashboards, 0, err
}
dashboards = dr.toPlatform()
return dashboards, len(dashboards), nil
} | go | func (s *DashboardService) FindDashboards(ctx context.Context, filter platform.DashboardFilter, opts platform.FindOptions) ([]*platform.Dashboard, int, error) {
dashboards := []*platform.Dashboard{}
url, err := newURL(s.Addr, dashboardsPath)
if err != nil {
return dashboards, 0, err
}
qp := url.Query()
qp.Add("sortBy", opts.SortBy)
for _, id := range filter.IDs {
qp.Add("id", id.String())
}
url.RawQuery = qp.Encode()
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return dashboards, 0, err
}
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return dashboards, 0, err
}
if err := CheckError(resp, true); err != nil {
return dashboards, 0, err
}
var dr getDashboardsResponse
if err := json.NewDecoder(resp.Body).Decode(&dr); err != nil {
return dashboards, 0, err
}
dashboards = dr.toPlatform()
return dashboards, len(dashboards), nil
} | [
"func",
"(",
"s",
"*",
"DashboardService",
")",
"FindDashboards",
"(",
"ctx",
"context",
".",
"Context",
",",
"filter",
"platform",
".",
"DashboardFilter",
",",
"opts",
"platform",
".",
"FindOptions",
")",
"(",
"[",
"]",
"*",
"platform",
".",
"Dashboard",
",",
"int",
",",
"error",
")",
"{",
"dashboards",
":=",
"[",
"]",
"*",
"platform",
".",
"Dashboard",
"{",
"}",
"\n",
"url",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"dashboardsPath",
")",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"dashboards",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"qp",
":=",
"url",
".",
"Query",
"(",
")",
"\n",
"qp",
".",
"Add",
"(",
"\"",
"\"",
",",
"opts",
".",
"SortBy",
")",
"\n",
"for",
"_",
",",
"id",
":=",
"range",
"filter",
".",
"IDs",
"{",
"qp",
".",
"Add",
"(",
"\"",
"\"",
",",
"id",
".",
"String",
"(",
")",
")",
"\n",
"}",
"\n",
"url",
".",
"RawQuery",
"=",
"qp",
".",
"Encode",
"(",
")",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"url",
".",
"String",
"(",
")",
",",
"nil",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"dashboards",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n",
"hc",
":=",
"newClient",
"(",
"url",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"dashboards",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"dashboards",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"var",
"dr",
"getDashboardsResponse",
"\n",
"if",
"err",
":=",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"&",
"dr",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"dashboards",
",",
"0",
",",
"err",
"\n",
"}",
"\n\n",
"dashboards",
"=",
"dr",
".",
"toPlatform",
"(",
")",
"\n",
"return",
"dashboards",
",",
"len",
"(",
"dashboards",
")",
",",
"nil",
"\n",
"}"
] | // FindDashboards returns a list of dashboards that match filter and the total count of matching dashboards.
// Additional options provide pagination & sorting. | [
"FindDashboards",
"returns",
"a",
"list",
"of",
"dashboards",
"that",
"match",
"filter",
"and",
"the",
"total",
"count",
"of",
"matching",
"dashboards",
".",
"Additional",
"options",
"provide",
"pagination",
"&",
"sorting",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/dashboard_service.go#L951-L990 |
12,862 | influxdata/platform | http/dashboard_service.go | CreateDashboard | func (s *DashboardService) CreateDashboard(ctx context.Context, d *platform.Dashboard) error {
url, err := newURL(s.Addr, dashboardsPath)
if err != nil {
return err
}
b, err := json.Marshal(d)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), bytes.NewReader(b))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
if err := json.NewDecoder(resp.Body).Decode(d); err != nil {
return err
}
return nil
} | go | func (s *DashboardService) CreateDashboard(ctx context.Context, d *platform.Dashboard) error {
url, err := newURL(s.Addr, dashboardsPath)
if err != nil {
return err
}
b, err := json.Marshal(d)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), bytes.NewReader(b))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
if err := json.NewDecoder(resp.Body).Decode(d); err != nil {
return err
}
return nil
} | [
"func",
"(",
"s",
"*",
"DashboardService",
")",
"CreateDashboard",
"(",
"ctx",
"context",
".",
"Context",
",",
"d",
"*",
"platform",
".",
"Dashboard",
")",
"error",
"{",
"url",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"dashboardsPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"b",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"d",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"url",
".",
"String",
"(",
")",
",",
"bytes",
".",
"NewReader",
"(",
"b",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"url",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"d",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // CreateDashboard creates a new dashboard and sets b.ID with the new identifier. | [
"CreateDashboard",
"creates",
"a",
"new",
"dashboard",
"and",
"sets",
"b",
".",
"ID",
"with",
"the",
"new",
"identifier",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/dashboard_service.go#L993-L1028 |
12,863 | influxdata/platform | http/dashboard_service.go | AddDashboardCell | func (s *DashboardService) AddDashboardCell(ctx context.Context, id platform.ID, c *platform.Cell, opts platform.AddDashboardCellOptions) error {
url, err := newURL(s.Addr, cellPath(id))
if err != nil {
return err
}
// fixme > in case c does not contain a valid ID this errors out
b, err := json.Marshal(c)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), bytes.NewReader(b))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
// TODO (goller): deal with the dashboard cell options
return json.NewDecoder(resp.Body).Decode(c)
} | go | func (s *DashboardService) AddDashboardCell(ctx context.Context, id platform.ID, c *platform.Cell, opts platform.AddDashboardCellOptions) error {
url, err := newURL(s.Addr, cellPath(id))
if err != nil {
return err
}
// fixme > in case c does not contain a valid ID this errors out
b, err := json.Marshal(c)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), bytes.NewReader(b))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
// TODO (goller): deal with the dashboard cell options
return json.NewDecoder(resp.Body).Decode(c)
} | [
"func",
"(",
"s",
"*",
"DashboardService",
")",
"AddDashboardCell",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
",",
"c",
"*",
"platform",
".",
"Cell",
",",
"opts",
"platform",
".",
"AddDashboardCellOptions",
")",
"error",
"{",
"url",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"cellPath",
"(",
"id",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// fixme > in case c does not contain a valid ID this errors out",
"b",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"c",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"url",
".",
"String",
"(",
")",
",",
"bytes",
".",
"NewReader",
"(",
"b",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"url",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// TODO (goller): deal with the dashboard cell options",
"return",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"c",
")",
"\n",
"}"
] | // AddDashboardCell adds a cell to a dashboard. | [
"AddDashboardCell",
"adds",
"a",
"cell",
"to",
"a",
"dashboard",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/dashboard_service.go#L1097-L1129 |
12,864 | influxdata/platform | http/dashboard_service.go | UpdateDashboardCell | func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID, cellID platform.ID, upd platform.CellUpdate) (*platform.Cell, error) {
op := s.OpPrefix + platform.OpUpdateDashboardCell
if err := upd.Valid(); err != nil {
return nil, &platform.Error{
Op: op,
Err: err,
}
}
u, err := newURL(s.Addr, dashboardCellIDPath(dashboardID, cellID))
if err != nil {
return nil, err
}
b, err := json.Marshal(upd)
if err != nil {
return nil, err
}
req, err := http.NewRequest("PATCH", u.String(), bytes.NewReader(b))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
var c platform.Cell
if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
return nil, err
}
defer resp.Body.Close()
return &c, nil
} | go | func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID, cellID platform.ID, upd platform.CellUpdate) (*platform.Cell, error) {
op := s.OpPrefix + platform.OpUpdateDashboardCell
if err := upd.Valid(); err != nil {
return nil, &platform.Error{
Op: op,
Err: err,
}
}
u, err := newURL(s.Addr, dashboardCellIDPath(dashboardID, cellID))
if err != nil {
return nil, err
}
b, err := json.Marshal(upd)
if err != nil {
return nil, err
}
req, err := http.NewRequest("PATCH", u.String(), bytes.NewReader(b))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
var c platform.Cell
if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
return nil, err
}
defer resp.Body.Close()
return &c, nil
} | [
"func",
"(",
"s",
"*",
"DashboardService",
")",
"UpdateDashboardCell",
"(",
"ctx",
"context",
".",
"Context",
",",
"dashboardID",
",",
"cellID",
"platform",
".",
"ID",
",",
"upd",
"platform",
".",
"CellUpdate",
")",
"(",
"*",
"platform",
".",
"Cell",
",",
"error",
")",
"{",
"op",
":=",
"s",
".",
"OpPrefix",
"+",
"platform",
".",
"OpUpdateDashboardCell",
"\n",
"if",
"err",
":=",
"upd",
".",
"Valid",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"&",
"platform",
".",
"Error",
"{",
"Op",
":",
"op",
",",
"Err",
":",
"err",
",",
"}",
"\n",
"}",
"\n\n",
"u",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"dashboardCellIDPath",
"(",
"dashboardID",
",",
"cellID",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"b",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"upd",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"u",
".",
"String",
"(",
")",
",",
"bytes",
".",
"NewReader",
"(",
"b",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"req",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"u",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"var",
"c",
"platform",
".",
"Cell",
"\n",
"if",
"err",
":=",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"&",
"c",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"defer",
"resp",
".",
"Body",
".",
"Close",
"(",
")",
"\n\n",
"return",
"&",
"c",
",",
"nil",
"\n",
"}"
] | // UpdateDashboardCell replaces the dashboard cell with the provided ID. | [
"UpdateDashboardCell",
"replaces",
"the",
"dashboard",
"cell",
"with",
"the",
"provided",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/dashboard_service.go#L1153-L1198 |
12,865 | influxdata/platform | http/dashboard_service.go | ReplaceDashboardCells | func (s *DashboardService) ReplaceDashboardCells(ctx context.Context, id platform.ID, cs []*platform.Cell) error {
u, err := newURL(s.Addr, cellPath(id))
if err != nil {
return err
}
// TODO(goller): I think this should be {"cells":[]}
b, err := json.Marshal(cs)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
cells := dashboardCellsResponse{}
if err := json.NewDecoder(resp.Body).Decode(&cells); err != nil {
return err
}
defer resp.Body.Close()
return nil
} | go | func (s *DashboardService) ReplaceDashboardCells(ctx context.Context, id platform.ID, cs []*platform.Cell) error {
u, err := newURL(s.Addr, cellPath(id))
if err != nil {
return err
}
// TODO(goller): I think this should be {"cells":[]}
b, err := json.Marshal(cs)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
cells := dashboardCellsResponse{}
if err := json.NewDecoder(resp.Body).Decode(&cells); err != nil {
return err
}
defer resp.Body.Close()
return nil
} | [
"func",
"(",
"s",
"*",
"DashboardService",
")",
"ReplaceDashboardCells",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
",",
"cs",
"[",
"]",
"*",
"platform",
".",
"Cell",
")",
"error",
"{",
"u",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"cellPath",
"(",
"id",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// TODO(goller): I think this should be {\"cells\":[]}",
"b",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"cs",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"u",
".",
"String",
"(",
")",
",",
"bytes",
".",
"NewReader",
"(",
"b",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"u",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"cells",
":=",
"dashboardCellsResponse",
"{",
"}",
"\n",
"if",
"err",
":=",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"&",
"cells",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"defer",
"resp",
".",
"Body",
".",
"Close",
"(",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // ReplaceDashboardCells replaces all cells in a dashboard | [
"ReplaceDashboardCells",
"replaces",
"all",
"cells",
"in",
"a",
"dashboard"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/dashboard_service.go#L1276-L1314 |
12,866 | influxdata/platform | chronograf/influx/influx.go | Query | func (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) {
resps := make(chan (result))
go func() {
resp, err := c.query(c.URL, q)
resps <- result{resp, err}
}()
select {
case resp := <-resps:
return resp.Response, resp.Err
case <-ctx.Done():
return nil, chronograf.ErrUpstreamTimeout
}
} | go | func (c *Client) Query(ctx context.Context, q chronograf.Query) (chronograf.Response, error) {
resps := make(chan (result))
go func() {
resp, err := c.query(c.URL, q)
resps <- result{resp, err}
}()
select {
case resp := <-resps:
return resp.Response, resp.Err
case <-ctx.Done():
return nil, chronograf.ErrUpstreamTimeout
}
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"Query",
"(",
"ctx",
"context",
".",
"Context",
",",
"q",
"chronograf",
".",
"Query",
")",
"(",
"chronograf",
".",
"Response",
",",
"error",
")",
"{",
"resps",
":=",
"make",
"(",
"chan",
"(",
"result",
")",
")",
"\n",
"go",
"func",
"(",
")",
"{",
"resp",
",",
"err",
":=",
"c",
".",
"query",
"(",
"c",
".",
"URL",
",",
"q",
")",
"\n",
"resps",
"<-",
"result",
"{",
"resp",
",",
"err",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"select",
"{",
"case",
"resp",
":=",
"<-",
"resps",
":",
"return",
"resp",
".",
"Response",
",",
"resp",
".",
"Err",
"\n",
"case",
"<-",
"ctx",
".",
"Done",
"(",
")",
":",
"return",
"nil",
",",
"chronograf",
".",
"ErrUpstreamTimeout",
"\n",
"}",
"\n",
"}"
] | // Query issues a request to a configured InfluxDB instance for time series
// information specified by query. Queries must be "fully-qualified," and
// include both the database and retention policy. In-flight requests can be
// cancelled using the provided context. | [
"Query",
"issues",
"a",
"request",
"to",
"a",
"configured",
"InfluxDB",
"instance",
"for",
"time",
"series",
"information",
"specified",
"by",
"query",
".",
"Queries",
"must",
"be",
"fully",
"-",
"qualified",
"and",
"include",
"both",
"the",
"database",
"and",
"retention",
"policy",
".",
"In",
"-",
"flight",
"requests",
"can",
"be",
"cancelled",
"using",
"the",
"provided",
"context",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/chronograf/influx/influx.go#L136-L149 |
12,867 | influxdata/platform | storage/reads/table.go | hasPoints | func hasPoints(cur cursors.Cursor) bool {
if cur == nil {
return false
}
res := false
switch cur := cur.(type) {
case cursors.IntegerArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.FloatArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.UnsignedArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.BooleanArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.StringArrayCursor:
a := cur.Next()
res = a.Len() > 0
default:
panic(fmt.Sprintf("unreachable: %T", cur))
}
cur.Close()
return res
} | go | func hasPoints(cur cursors.Cursor) bool {
if cur == nil {
return false
}
res := false
switch cur := cur.(type) {
case cursors.IntegerArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.FloatArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.UnsignedArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.BooleanArrayCursor:
a := cur.Next()
res = a.Len() > 0
case cursors.StringArrayCursor:
a := cur.Next()
res = a.Len() > 0
default:
panic(fmt.Sprintf("unreachable: %T", cur))
}
cur.Close()
return res
} | [
"func",
"hasPoints",
"(",
"cur",
"cursors",
".",
"Cursor",
")",
"bool",
"{",
"if",
"cur",
"==",
"nil",
"{",
"return",
"false",
"\n",
"}",
"\n\n",
"res",
":=",
"false",
"\n",
"switch",
"cur",
":=",
"cur",
".",
"(",
"type",
")",
"{",
"case",
"cursors",
".",
"IntegerArrayCursor",
":",
"a",
":=",
"cur",
".",
"Next",
"(",
")",
"\n",
"res",
"=",
"a",
".",
"Len",
"(",
")",
">",
"0",
"\n",
"case",
"cursors",
".",
"FloatArrayCursor",
":",
"a",
":=",
"cur",
".",
"Next",
"(",
")",
"\n",
"res",
"=",
"a",
".",
"Len",
"(",
")",
">",
"0",
"\n",
"case",
"cursors",
".",
"UnsignedArrayCursor",
":",
"a",
":=",
"cur",
".",
"Next",
"(",
")",
"\n",
"res",
"=",
"a",
".",
"Len",
"(",
")",
">",
"0",
"\n",
"case",
"cursors",
".",
"BooleanArrayCursor",
":",
"a",
":=",
"cur",
".",
"Next",
"(",
")",
"\n",
"res",
"=",
"a",
".",
"Len",
"(",
")",
">",
"0",
"\n",
"case",
"cursors",
".",
"StringArrayCursor",
":",
"a",
":=",
"cur",
".",
"Next",
"(",
")",
"\n",
"res",
"=",
"a",
".",
"Len",
"(",
")",
">",
"0",
"\n",
"default",
":",
"panic",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"cur",
")",
")",
"\n",
"}",
"\n",
"cur",
".",
"Close",
"(",
")",
"\n",
"return",
"res",
"\n",
"}"
] | // hasPoints returns true if the next block from cur has data. If cur is not
// nil, it will be closed. | [
"hasPoints",
"returns",
"true",
"if",
"the",
"next",
"block",
"from",
"cur",
"has",
"data",
".",
"If",
"cur",
"is",
"not",
"nil",
"it",
"will",
"be",
"closed",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/storage/reads/table.go#L163-L190 |
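hasPoints above probes the first block of whichever typed cursor it is handed and always closes the cursor, using a type switch because each cursor flavor returns a differently typed array. The sketch below shows the same probe-one-block idea with two toy cursor types; floatCursor and intCursor are invented for illustration and are not the real storage/reads interfaces.

package main

import "fmt"

// Toy stand-ins for the typed cursors in the record.
type floatCursor struct{ vals []float64 }
type intCursor struct{ vals []int64 }

func (c *floatCursor) Next() []float64 { return c.vals }
func (c *intCursor) Next() []int64     { return c.vals }

// probe mirrors hasPoints: fetch one block and report whether it is non-empty.
func probe(cur interface{}) bool {
	if cur == nil {
		return false
	}
	switch c := cur.(type) {
	case *floatCursor:
		return len(c.Next()) > 0
	case *intCursor:
		return len(c.Next()) > 0
	default:
		panic(fmt.Sprintf("unreachable: %T", cur))
	}
}

func main() {
	fmt.Println(probe(&floatCursor{vals: []float64{1.5}})) // true
	fmt.Println(probe(&intCursor{}))                       // false: first block is empty
}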
12,868 | influxdata/platform | query/influxql/compiler.go | Compile | func (c *Compiler) Compile(ctx context.Context) (*flux.Spec, error) {
transpiler := NewTranspilerWithConfig(
c.dbrpMappingSvc,
Config{
Cluster: c.Cluster,
DefaultDatabase: c.DB,
DefaultRetentionPolicy: c.RP,
},
)
return transpiler.Transpile(ctx, c.Query)
} | go | func (c *Compiler) Compile(ctx context.Context) (*flux.Spec, error) {
transpiler := NewTranspilerWithConfig(
c.dbrpMappingSvc,
Config{
Cluster: c.Cluster,
DefaultDatabase: c.DB,
DefaultRetentionPolicy: c.RP,
},
)
return transpiler.Transpile(ctx, c.Query)
} | [
"func",
"(",
"c",
"*",
"Compiler",
")",
"Compile",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"*",
"flux",
".",
"Spec",
",",
"error",
")",
"{",
"transpiler",
":=",
"NewTranspilerWithConfig",
"(",
"c",
".",
"dbrpMappingSvc",
",",
"Config",
"{",
"Cluster",
":",
"c",
".",
"Cluster",
",",
"DefaultDatabase",
":",
"c",
".",
"DB",
",",
"DefaultRetentionPolicy",
":",
"c",
".",
"RP",
",",
"}",
",",
")",
"\n",
"return",
"transpiler",
".",
"Transpile",
"(",
"ctx",
",",
"c",
".",
"Query",
")",
"\n",
"}"
] | // Compile transpiles the query into a specification. | [
"Compile",
"transpiles",
"the",
"query",
"into",
"a",
"specification",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/query/influxql/compiler.go#L36-L46 |
12,869 | influxdata/platform | task/backend/inmem_store.go | DeleteUser | func (s *inmem) DeleteUser(ctx context.Context, id platform.ID) error {
return s.delete(ctx, id, getUser)
} | go | func (s *inmem) DeleteUser(ctx context.Context, id platform.ID) error {
return s.delete(ctx, id, getUser)
} | [
"func",
"(",
"s",
"*",
"inmem",
")",
"DeleteUser",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
")",
"error",
"{",
"return",
"s",
".",
"delete",
"(",
"ctx",
",",
"id",
",",
"getUser",
")",
"\n",
"}"
] | // DeleteUser synchronously deletes a user and all their tasks from an in-mem store. | [
"DeleteUser",
"synchronously",
"deletes",
"a",
"user",
"and",
"all",
"their",
"tasks",
"from",
"an",
"in",
"-",
"mem",
"store",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/task/backend/inmem_store.go#L364-L366 |
12,870 | influxdata/platform | inmem/auth_service.go | CreateAuthorization | func (s *Service) CreateAuthorization(ctx context.Context, a *platform.Authorization) error {
op := OpPrefix + platform.OpCreateAuthorization
_, pErr := s.FindUserByID(ctx, a.UserID)
if pErr != nil {
return platform.ErrUnableToCreateToken
}
_, pErr = s.FindOrganizationByID(ctx, a.OrgID)
if pErr != nil {
return platform.ErrUnableToCreateToken
}
var err error
a.Token, err = s.TokenGenerator.Token()
if err != nil {
return &platform.Error{
Err: err,
Op: op,
}
}
a.ID = s.IDGenerator.ID()
a.Status = platform.Active
return s.PutAuthorization(ctx, a)
} | go | func (s *Service) CreateAuthorization(ctx context.Context, a *platform.Authorization) error {
op := OpPrefix + platform.OpCreateAuthorization
_, pErr := s.FindUserByID(ctx, a.UserID)
if pErr != nil {
return platform.ErrUnableToCreateToken
}
_, pErr = s.FindOrganizationByID(ctx, a.OrgID)
if pErr != nil {
return platform.ErrUnableToCreateToken
}
var err error
a.Token, err = s.TokenGenerator.Token()
if err != nil {
return &platform.Error{
Err: err,
Op: op,
}
}
a.ID = s.IDGenerator.ID()
a.Status = platform.Active
return s.PutAuthorization(ctx, a)
} | [
"func",
"(",
"s",
"*",
"Service",
")",
"CreateAuthorization",
"(",
"ctx",
"context",
".",
"Context",
",",
"a",
"*",
"platform",
".",
"Authorization",
")",
"error",
"{",
"op",
":=",
"OpPrefix",
"+",
"platform",
".",
"OpCreateAuthorization",
"\n\n",
"_",
",",
"pErr",
":=",
"s",
".",
"FindUserByID",
"(",
"ctx",
",",
"a",
".",
"UserID",
")",
"\n",
"if",
"pErr",
"!=",
"nil",
"{",
"return",
"platform",
".",
"ErrUnableToCreateToken",
"\n",
"}",
"\n\n",
"_",
",",
"pErr",
"=",
"s",
".",
"FindOrganizationByID",
"(",
"ctx",
",",
"a",
".",
"OrgID",
")",
"\n",
"if",
"pErr",
"!=",
"nil",
"{",
"return",
"platform",
".",
"ErrUnableToCreateToken",
"\n",
"}",
"\n\n",
"var",
"err",
"error",
"\n",
"a",
".",
"Token",
",",
"err",
"=",
"s",
".",
"TokenGenerator",
".",
"Token",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"op",
",",
"}",
"\n",
"}",
"\n\n",
"a",
".",
"ID",
"=",
"s",
".",
"IDGenerator",
".",
"ID",
"(",
")",
"\n",
"a",
".",
"Status",
"=",
"platform",
".",
"Active",
"\n\n",
"return",
"s",
".",
"PutAuthorization",
"(",
"ctx",
",",
"a",
")",
"\n",
"}"
] | // CreateAuthorization sets a.Token and a.ID and creates a platform.Authorization | [
"CreateAuthorization",
"sets",
"a",
".",
"Token",
"and",
"a",
".",
"ID",
"and",
"creates",
"a",
"platform",
".",
"Authorization"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/inmem/auth_service.go#L152-L178 |
12,871 | influxdata/platform | inmem/auth_service.go | SetAuthorizationStatus | func (s *Service) SetAuthorizationStatus(ctx context.Context, id platform.ID, status platform.Status) error {
op := OpPrefix + platform.OpSetAuthorizationStatus
a, err := s.FindAuthorizationByID(ctx, id)
if err != nil {
return &platform.Error{
Err: err,
Op: op,
}
}
switch status {
case platform.Active, platform.Inactive:
default:
return &platform.Error{
Code: platform.EInvalid,
Msg: "unknown authorization status",
Op: op,
}
}
if a.Status == status {
return nil
}
a.Status = status
return s.PutAuthorization(ctx, a)
} | go | func (s *Service) SetAuthorizationStatus(ctx context.Context, id platform.ID, status platform.Status) error {
op := OpPrefix + platform.OpSetAuthorizationStatus
a, err := s.FindAuthorizationByID(ctx, id)
if err != nil {
return &platform.Error{
Err: err,
Op: op,
}
}
switch status {
case platform.Active, platform.Inactive:
default:
return &platform.Error{
Code: platform.EInvalid,
Msg: "unknown authorization status",
Op: op,
}
}
if a.Status == status {
return nil
}
a.Status = status
return s.PutAuthorization(ctx, a)
} | [
"func",
"(",
"s",
"*",
"Service",
")",
"SetAuthorizationStatus",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
",",
"status",
"platform",
".",
"Status",
")",
"error",
"{",
"op",
":=",
"OpPrefix",
"+",
"platform",
".",
"OpSetAuthorizationStatus",
"\n",
"a",
",",
"err",
":=",
"s",
".",
"FindAuthorizationByID",
"(",
"ctx",
",",
"id",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"err",
",",
"Op",
":",
"op",
",",
"}",
"\n",
"}",
"\n\n",
"switch",
"status",
"{",
"case",
"platform",
".",
"Active",
",",
"platform",
".",
"Inactive",
":",
"default",
":",
"return",
"&",
"platform",
".",
"Error",
"{",
"Code",
":",
"platform",
".",
"EInvalid",
",",
"Msg",
":",
"\"",
"\"",
",",
"Op",
":",
"op",
",",
"}",
"\n",
"}",
"\n\n",
"if",
"a",
".",
"Status",
"==",
"status",
"{",
"return",
"nil",
"\n",
"}",
"\n\n",
"a",
".",
"Status",
"=",
"status",
"\n",
"return",
"s",
".",
"PutAuthorization",
"(",
"ctx",
",",
"a",
")",
"\n",
"}"
] | // SetAuthorizationStatus updates the status of an authorization associated with id. | [
"SetAuthorizationStatus",
"updates",
"the",
"status",
"of",
"an",
"authorization",
"associated",
"with",
"id",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/inmem/auth_service.go#L194-L220 |
12,872 | influxdata/platform | http/macro_service.go | NewMacroHandler | func NewMacroHandler() *MacroHandler {
h := &MacroHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
}
h.HandlerFunc("GET", "/api/v2/macros", h.handleGetMacros)
h.HandlerFunc("POST", "/api/v2/macros", h.handlePostMacro)
h.HandlerFunc("GET", "/api/v2/macros/:id", h.handleGetMacro)
h.HandlerFunc("PATCH", "/api/v2/macros/:id", h.handlePatchMacro)
h.HandlerFunc("PUT", "/api/v2/macros/:id", h.handlePutMacro)
h.HandlerFunc("DELETE", "/api/v2/macros/:id", h.handleDeleteMacro)
return h
} | go | func NewMacroHandler() *MacroHandler {
h := &MacroHandler{
Router: NewRouter(),
Logger: zap.NewNop(),
}
h.HandlerFunc("GET", "/api/v2/macros", h.handleGetMacros)
h.HandlerFunc("POST", "/api/v2/macros", h.handlePostMacro)
h.HandlerFunc("GET", "/api/v2/macros/:id", h.handleGetMacro)
h.HandlerFunc("PATCH", "/api/v2/macros/:id", h.handlePatchMacro)
h.HandlerFunc("PUT", "/api/v2/macros/:id", h.handlePutMacro)
h.HandlerFunc("DELETE", "/api/v2/macros/:id", h.handleDeleteMacro)
return h
} | [
"func",
"NewMacroHandler",
"(",
")",
"*",
"MacroHandler",
"{",
"h",
":=",
"&",
"MacroHandler",
"{",
"Router",
":",
"NewRouter",
"(",
")",
",",
"Logger",
":",
"zap",
".",
"NewNop",
"(",
")",
",",
"}",
"\n\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"h",
".",
"handleGetMacros",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"h",
".",
"handlePostMacro",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"h",
".",
"handleGetMacro",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"h",
".",
"handlePatchMacro",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"h",
".",
"handlePutMacro",
")",
"\n",
"h",
".",
"HandlerFunc",
"(",
"\"",
"\"",
",",
"\"",
"\"",
",",
"h",
".",
"handleDeleteMacro",
")",
"\n\n",
"return",
"h",
"\n",
"}"
] | // NewMacroHandler creates a new MacroHandler | [
"NewMacroHandler",
"creates",
"a",
"new",
"MacroHandler"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/macro_service.go#L31-L45 |
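NewMacroHandler above is essentially a routing table: one handler per HTTP method and path, with :id as a path parameter. As a rough sketch under stated assumptions, the same table can be laid out on the standard library's ServeMux (method-and-wildcard patterns require Go 1.22+); the repository's router and the real handler bodies are replaced by placeholders here.

package main

import (
	"fmt"
	"log"
	"net/http"
)

// newMacroMux registers the same method/path combinations as NewMacroHandler,
// but on net/http's ServeMux with {id} instead of the router's :id syntax.
func newMacroMux() *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("GET /api/v2/macros", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "list macros")
	})
	mux.HandleFunc("POST /api/v2/macros", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "create macro")
	})
	mux.HandleFunc("GET /api/v2/macros/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "get macro", r.PathValue("id"))
	})
	mux.HandleFunc("PATCH /api/v2/macros/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "patch macro", r.PathValue("id"))
	})
	mux.HandleFunc("PUT /api/v2/macros/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "replace macro", r.PathValue("id"))
	})
	mux.HandleFunc("DELETE /api/v2/macros/{id}", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "delete macro", r.PathValue("id"))
	})
	return mux
}

func main() {
	log.Fatal(http.ListenAndServe(":8086", newMacroMux()))
}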
12,873 | influxdata/platform | http/macro_service.go | FindMacroByID | func (s *MacroService) FindMacroByID(ctx context.Context, id platform.ID) (*platform.Macro, error) {
path := macroIDPath(id)
url, err := newURL(s.Addr, path)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
var mr macroResponse
if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
return nil, err
}
macro := mr.Macro
return macro, nil
} | go | func (s *MacroService) FindMacroByID(ctx context.Context, id platform.ID) (*platform.Macro, error) {
path := macroIDPath(id)
url, err := newURL(s.Addr, path)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
var mr macroResponse
if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
return nil, err
}
macro := mr.Macro
return macro, nil
} | [
"func",
"(",
"s",
"*",
"MacroService",
")",
"FindMacroByID",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
")",
"(",
"*",
"platform",
".",
"Macro",
",",
"error",
")",
"{",
"path",
":=",
"macroIDPath",
"(",
"id",
")",
"\n",
"url",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"path",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"url",
".",
"String",
"(",
")",
",",
"nil",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n",
"hc",
":=",
"newClient",
"(",
"url",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"var",
"mr",
"macroResponse",
"\n",
"if",
"err",
":=",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"&",
"mr",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"macro",
":=",
"mr",
".",
"Macro",
"\n",
"return",
"macro",
",",
"nil",
"\n",
"}"
] | // FindMacroByID finds a single macro from the store by its ID | [
"FindMacroByID",
"finds",
"a",
"single",
"macro",
"from",
"the",
"store",
"by",
"its",
"ID"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/macro_service.go#L333-L364 |
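FindMacroByID above follows the usual Go HTTP client sequence: build the URL, attach the token, issue the request, check the response, and decode the JSON body. The stand-alone sketch below keeps that sequence but simplifies the details: the Authorization: Token header form and the flattened macro payload are assumptions made for illustration, whereas the real client goes through its SetToken, newClient and CheckError helpers and a response envelope.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// macro is a trimmed, illustrative payload shape, not the full platform.Macro schema.
type macro struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// getMacro mirrors the client flow in FindMacroByID.
func getMacro(addr, token, id string) (*macro, error) {
	req, err := http.NewRequest("GET", addr+"/api/v2/macros/"+id, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Token "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %s", resp.Status)
	}

	var m macro
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}
	return &m, nil
}

func main() {
	m, err := getMacro("http://localhost:9999", "my-token", "0000000000000001")
	fmt.Println(m, err)
}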
12,874 | influxdata/platform | http/macro_service.go | CreateMacro | func (s *MacroService) CreateMacro(ctx context.Context, m *platform.Macro) error {
if err := m.Valid(); err != nil {
return kerrors.InvalidDataf(err.Error())
}
url, err := newURL(s.Addr, macroPath)
if err != nil {
return err
}
octets, err := json.Marshal(m)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), bytes.NewReader(octets))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
return json.NewDecoder(resp.Body).Decode(m)
} | go | func (s *MacroService) CreateMacro(ctx context.Context, m *platform.Macro) error {
if err := m.Valid(); err != nil {
return kerrors.InvalidDataf(err.Error())
}
url, err := newURL(s.Addr, macroPath)
if err != nil {
return err
}
octets, err := json.Marshal(m)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url.String(), bytes.NewReader(octets))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(url.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return err
}
if err := CheckError(resp, true); err != nil {
return err
}
return json.NewDecoder(resp.Body).Decode(m)
} | [
"func",
"(",
"s",
"*",
"MacroService",
")",
"CreateMacro",
"(",
"ctx",
"context",
".",
"Context",
",",
"m",
"*",
"platform",
".",
"Macro",
")",
"error",
"{",
"if",
"err",
":=",
"m",
".",
"Valid",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"kerrors",
".",
"InvalidDataf",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n\n",
"url",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"macroPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"octets",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"m",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"url",
".",
"String",
"(",
")",
",",
"bytes",
".",
"NewReader",
"(",
"octets",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"req",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"url",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"return",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"m",
")",
"\n",
"}"
] | // CreateMacro creates a new macro and assigns it a platform.ID | [
"CreateMacro",
"creates",
"a",
"new",
"macro",
"and",
"assigns",
"it",
"a",
"platform",
".",
"ID"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/macro_service.go#L400-L435 |
12,875 | influxdata/platform | http/macro_service.go | UpdateMacro | func (s *MacroService) UpdateMacro(ctx context.Context, id platform.ID, update *platform.MacroUpdate) (*platform.Macro, error) {
u, err := newURL(s.Addr, macroIDPath(id))
if err != nil {
return nil, err
}
octets, err := json.Marshal(update)
if err != nil {
return nil, err
}
req, err := http.NewRequest("PATCH", u.String(), bytes.NewReader(octets))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
var m platform.Macro
if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
return nil, err
}
defer resp.Body.Close()
return &m, nil
} | go | func (s *MacroService) UpdateMacro(ctx context.Context, id platform.ID, update *platform.MacroUpdate) (*platform.Macro, error) {
u, err := newURL(s.Addr, macroIDPath(id))
if err != nil {
return nil, err
}
octets, err := json.Marshal(update)
if err != nil {
return nil, err
}
req, err := http.NewRequest("PATCH", u.String(), bytes.NewReader(octets))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
SetToken(s.Token, req)
hc := newClient(u.Scheme, s.InsecureSkipVerify)
resp, err := hc.Do(req)
if err != nil {
return nil, err
}
if err := CheckError(resp, true); err != nil {
return nil, err
}
var m platform.Macro
if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
return nil, err
}
defer resp.Body.Close()
return &m, nil
} | [
"func",
"(",
"s",
"*",
"MacroService",
")",
"UpdateMacro",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
"platform",
".",
"ID",
",",
"update",
"*",
"platform",
".",
"MacroUpdate",
")",
"(",
"*",
"platform",
".",
"Macro",
",",
"error",
")",
"{",
"u",
",",
"err",
":=",
"newURL",
"(",
"s",
".",
"Addr",
",",
"macroIDPath",
"(",
"id",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"octets",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"update",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"req",
",",
"err",
":=",
"http",
".",
"NewRequest",
"(",
"\"",
"\"",
",",
"u",
".",
"String",
"(",
")",
",",
"bytes",
".",
"NewReader",
"(",
"octets",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"req",
".",
"Header",
".",
"Set",
"(",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"SetToken",
"(",
"s",
".",
"Token",
",",
"req",
")",
"\n\n",
"hc",
":=",
"newClient",
"(",
"u",
".",
"Scheme",
",",
"s",
".",
"InsecureSkipVerify",
")",
"\n\n",
"resp",
",",
"err",
":=",
"hc",
".",
"Do",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"CheckError",
"(",
"resp",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"var",
"m",
"platform",
".",
"Macro",
"\n",
"if",
"err",
":=",
"json",
".",
"NewDecoder",
"(",
"resp",
".",
"Body",
")",
".",
"Decode",
"(",
"&",
"m",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"defer",
"resp",
".",
"Body",
".",
"Close",
"(",
")",
"\n\n",
"return",
"&",
"m",
",",
"nil",
"\n",
"}"
] | // UpdateMacro updates a single macro with a changeset | [
"UpdateMacro",
"updates",
"a",
"single",
"macro",
"with",
"a",
"changeset"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/http/macro_service.go#L438-L475 |
12,876 | influxdata/platform | tsdb/series_set.go | AndNot | func (s *SeriesIDSet) AndNot(other *SeriesIDSet) *SeriesIDSet {
s.RLock()
defer s.RUnlock()
other.RLock()
defer other.RUnlock()
return &SeriesIDSet{bitmap: roaring.AndNot(s.bitmap, other.bitmap)}
} | go | func (s *SeriesIDSet) AndNot(other *SeriesIDSet) *SeriesIDSet {
s.RLock()
defer s.RUnlock()
other.RLock()
defer other.RUnlock()
return &SeriesIDSet{bitmap: roaring.AndNot(s.bitmap, other.bitmap)}
} | [
"func",
"(",
"s",
"*",
"SeriesIDSet",
")",
"AndNot",
"(",
"other",
"*",
"SeriesIDSet",
")",
"*",
"SeriesIDSet",
"{",
"s",
".",
"RLock",
"(",
")",
"\n",
"defer",
"s",
".",
"RUnlock",
"(",
")",
"\n",
"other",
".",
"RLock",
"(",
")",
"\n",
"defer",
"other",
".",
"RUnlock",
"(",
")",
"\n\n",
"return",
"&",
"SeriesIDSet",
"{",
"bitmap",
":",
"roaring",
".",
"AndNot",
"(",
"s",
".",
"bitmap",
",",
"other",
".",
"bitmap",
")",
"}",
"\n",
"}"
] | // AndNot returns a new SeriesIDSet containing elements that were present in s,
// but not present in other. | [
"AndNot",
"returns",
"a",
"new",
"SeriesIDSet",
"containing",
"elements",
"that",
"were",
"present",
"in",
"s",
"but",
"not",
"present",
"in",
"other",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/series_set.go#L175-L182 |
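AndNot above computes a set difference over compressed roaring bitmaps: keep the series IDs that are present in s but absent from other, without mutating either operand. The map-based sketch below shows only those semantics, not the bitmap representation or the locking.

package main

import "fmt"

// andNot keeps the IDs in a that do not appear in b.
func andNot(a, b map[uint64]struct{}) map[uint64]struct{} {
	out := make(map[uint64]struct{}, len(a))
	for id := range a {
		if _, drop := b[id]; !drop {
			out[id] = struct{}{}
		}
	}
	return out
}

func main() {
	a := map[uint64]struct{}{1: {}, 2: {}, 3: {}}
	b := map[uint64]struct{}{2: {}}
	fmt.Println(len(andNot(a, b))) // 2: only series 1 and 3 survive
}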
12,877 | influxdata/platform | tsdb/tsm1/engine.go | NewEngine | func NewEngine(path string, idx *tsi1.Index, config Config, options ...EngineOption) *Engine {
fs := NewFileStore(path)
fs.openLimiter = limiter.NewFixed(config.MaxConcurrentOpens)
fs.tsmMMAPWillNeed = config.MADVWillNeed
cache := NewCache(uint64(config.Cache.MaxMemorySize))
c := NewCompactor()
c.Dir = path
c.FileStore = fs
c.RateLimit = limiter.NewRate(
int(config.Compaction.Throughput),
int(config.Compaction.ThroughputBurst))
// determine max concurrent compactions informed by the system
maxCompactions := config.Compaction.MaxConcurrent
if maxCompactions == 0 {
maxCompactions = runtime.GOMAXPROCS(0) / 2 // Default to 50% of cores for compactions
// On systems with more cores, cap at 4 to reduce disk utilization.
if maxCompactions > 4 {
maxCompactions = 4
}
if maxCompactions < 1 {
maxCompactions = 1
}
}
// Don't allow more compactions to run than cores.
if maxCompactions > runtime.GOMAXPROCS(0) {
maxCompactions = runtime.GOMAXPROCS(0)
}
logger := zap.NewNop()
e := &Engine{
path: path,
index: idx,
sfile: idx.SeriesFile(),
logger: logger,
traceLogger: logger,
WAL: NopWAL{},
Cache: cache,
FileStore: fs,
Compactor: c,
CompactionPlan: NewDefaultPlanner(fs,
time.Duration(config.Compaction.FullWriteColdDuration)),
CacheFlushMemorySizeThreshold: uint64(config.Cache.SnapshotMemorySize),
CacheFlushWriteColdDuration: time.Duration(config.Cache.SnapshotWriteColdDuration),
enableCompactionsOnOpen: true,
formatFileName: DefaultFormatFileName,
compactionLimiter: limiter.NewFixed(maxCompactions),
scheduler: newScheduler(maxCompactions),
}
for _, option := range options {
option(e)
}
return e
} | go | func NewEngine(path string, idx *tsi1.Index, config Config, options ...EngineOption) *Engine {
fs := NewFileStore(path)
fs.openLimiter = limiter.NewFixed(config.MaxConcurrentOpens)
fs.tsmMMAPWillNeed = config.MADVWillNeed
cache := NewCache(uint64(config.Cache.MaxMemorySize))
c := NewCompactor()
c.Dir = path
c.FileStore = fs
c.RateLimit = limiter.NewRate(
int(config.Compaction.Throughput),
int(config.Compaction.ThroughputBurst))
// determine max concurrent compactions informed by the system
maxCompactions := config.Compaction.MaxConcurrent
if maxCompactions == 0 {
maxCompactions = runtime.GOMAXPROCS(0) / 2 // Default to 50% of cores for compactions
// On systems with more cores, cap at 4 to reduce disk utilization.
if maxCompactions > 4 {
maxCompactions = 4
}
if maxCompactions < 1 {
maxCompactions = 1
}
}
// Don't allow more compactions to run than cores.
if maxCompactions > runtime.GOMAXPROCS(0) {
maxCompactions = runtime.GOMAXPROCS(0)
}
logger := zap.NewNop()
e := &Engine{
path: path,
index: idx,
sfile: idx.SeriesFile(),
logger: logger,
traceLogger: logger,
WAL: NopWAL{},
Cache: cache,
FileStore: fs,
Compactor: c,
CompactionPlan: NewDefaultPlanner(fs,
time.Duration(config.Compaction.FullWriteColdDuration)),
CacheFlushMemorySizeThreshold: uint64(config.Cache.SnapshotMemorySize),
CacheFlushWriteColdDuration: time.Duration(config.Cache.SnapshotWriteColdDuration),
enableCompactionsOnOpen: true,
formatFileName: DefaultFormatFileName,
compactionLimiter: limiter.NewFixed(maxCompactions),
scheduler: newScheduler(maxCompactions),
}
for _, option := range options {
option(e)
}
return e
} | [
"func",
"NewEngine",
"(",
"path",
"string",
",",
"idx",
"*",
"tsi1",
".",
"Index",
",",
"config",
"Config",
",",
"options",
"...",
"EngineOption",
")",
"*",
"Engine",
"{",
"fs",
":=",
"NewFileStore",
"(",
"path",
")",
"\n",
"fs",
".",
"openLimiter",
"=",
"limiter",
".",
"NewFixed",
"(",
"config",
".",
"MaxConcurrentOpens",
")",
"\n",
"fs",
".",
"tsmMMAPWillNeed",
"=",
"config",
".",
"MADVWillNeed",
"\n\n",
"cache",
":=",
"NewCache",
"(",
"uint64",
"(",
"config",
".",
"Cache",
".",
"MaxMemorySize",
")",
")",
"\n\n",
"c",
":=",
"NewCompactor",
"(",
")",
"\n",
"c",
".",
"Dir",
"=",
"path",
"\n",
"c",
".",
"FileStore",
"=",
"fs",
"\n",
"c",
".",
"RateLimit",
"=",
"limiter",
".",
"NewRate",
"(",
"int",
"(",
"config",
".",
"Compaction",
".",
"Throughput",
")",
",",
"int",
"(",
"config",
".",
"Compaction",
".",
"ThroughputBurst",
")",
")",
"\n\n",
"// determine max concurrent compactions informed by the system",
"maxCompactions",
":=",
"config",
".",
"Compaction",
".",
"MaxConcurrent",
"\n",
"if",
"maxCompactions",
"==",
"0",
"{",
"maxCompactions",
"=",
"runtime",
".",
"GOMAXPROCS",
"(",
"0",
")",
"/",
"2",
"// Default to 50% of cores for compactions",
"\n\n",
"// On systems with more cores, cap at 4 to reduce disk utilization.",
"if",
"maxCompactions",
">",
"4",
"{",
"maxCompactions",
"=",
"4",
"\n",
"}",
"\n\n",
"if",
"maxCompactions",
"<",
"1",
"{",
"maxCompactions",
"=",
"1",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Don't allow more compactions to run than cores.",
"if",
"maxCompactions",
">",
"runtime",
".",
"GOMAXPROCS",
"(",
"0",
")",
"{",
"maxCompactions",
"=",
"runtime",
".",
"GOMAXPROCS",
"(",
"0",
")",
"\n",
"}",
"\n\n",
"logger",
":=",
"zap",
".",
"NewNop",
"(",
")",
"\n",
"e",
":=",
"&",
"Engine",
"{",
"path",
":",
"path",
",",
"index",
":",
"idx",
",",
"sfile",
":",
"idx",
".",
"SeriesFile",
"(",
")",
",",
"logger",
":",
"logger",
",",
"traceLogger",
":",
"logger",
",",
"WAL",
":",
"NopWAL",
"{",
"}",
",",
"Cache",
":",
"cache",
",",
"FileStore",
":",
"fs",
",",
"Compactor",
":",
"c",
",",
"CompactionPlan",
":",
"NewDefaultPlanner",
"(",
"fs",
",",
"time",
".",
"Duration",
"(",
"config",
".",
"Compaction",
".",
"FullWriteColdDuration",
")",
")",
",",
"CacheFlushMemorySizeThreshold",
":",
"uint64",
"(",
"config",
".",
"Cache",
".",
"SnapshotMemorySize",
")",
",",
"CacheFlushWriteColdDuration",
":",
"time",
".",
"Duration",
"(",
"config",
".",
"Cache",
".",
"SnapshotWriteColdDuration",
")",
",",
"enableCompactionsOnOpen",
":",
"true",
",",
"formatFileName",
":",
"DefaultFormatFileName",
",",
"compactionLimiter",
":",
"limiter",
".",
"NewFixed",
"(",
"maxCompactions",
")",
",",
"scheduler",
":",
"newScheduler",
"(",
"maxCompactions",
")",
",",
"}",
"\n\n",
"for",
"_",
",",
"option",
":=",
"range",
"options",
"{",
"option",
"(",
"e",
")",
"\n",
"}",
"\n\n",
"return",
"e",
"\n",
"}"
] | // NewEngine returns a new instance of Engine. | [
"NewEngine",
"returns",
"a",
"new",
"instance",
"of",
"Engine",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L168-L231 |
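The most intricate part of NewEngine above is sizing the compaction pool: use the configured value when set, otherwise default to half the cores, capped at 4 and floored at 1, and never allow more concurrent compactions than GOMAXPROCS. The helper below restates that arithmetic in isolation so it can be read on its own; the function name is ours, not the engine's.

package main

import (
	"fmt"
	"runtime"
)

// maxCompactions reproduces the sizing logic from NewEngine.
func maxCompactions(configured int) int {
	n := configured
	if n == 0 {
		n = runtime.GOMAXPROCS(0) / 2 // default to 50% of cores
		if n > 4 {
			n = 4 // cap on large machines to limit disk pressure
		}
		if n < 1 {
			n = 1
		}
	}
	if n > runtime.GOMAXPROCS(0) {
		n = runtime.GOMAXPROCS(0) // never more concurrent compactions than cores
	}
	return n
}

func main() {
	fmt.Println(maxCompactions(0)) // e.g. 4 on an 8-core machine, 1 on a single core
}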
12,878 | influxdata/platform | tsdb/tsm1/engine.go | LastModified | func (e *Engine) LastModified() time.Time {
fsTime := e.FileStore.LastModified()
if e.WAL.LastWriteTime().After(fsTime) {
return e.WAL.LastWriteTime()
}
return fsTime
} | go | func (e *Engine) LastModified() time.Time {
fsTime := e.FileStore.LastModified()
if e.WAL.LastWriteTime().After(fsTime) {
return e.WAL.LastWriteTime()
}
return fsTime
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"LastModified",
"(",
")",
"time",
".",
"Time",
"{",
"fsTime",
":=",
"e",
".",
"FileStore",
".",
"LastModified",
"(",
")",
"\n\n",
"if",
"e",
".",
"WAL",
".",
"LastWriteTime",
"(",
")",
".",
"After",
"(",
"fsTime",
")",
"{",
"return",
"e",
".",
"WAL",
".",
"LastWriteTime",
"(",
")",
"\n",
"}",
"\n",
"return",
"fsTime",
"\n",
"}"
] | // LastModified returns the time when this shard was last modified. | [
"LastModified",
"returns",
"the",
"time",
"when",
"this",
"shard",
"was",
"last",
"modified",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L481-L488 |
12,879 | influxdata/platform | tsdb/tsm1/engine.go | DiskSize | func (e *Engine) DiskSize() int64 {
walDiskSizeBytes := e.WAL.DiskSizeBytes()
return e.FileStore.DiskSizeBytes() + walDiskSizeBytes
} | go | func (e *Engine) DiskSize() int64 {
walDiskSizeBytes := e.WAL.DiskSizeBytes()
return e.FileStore.DiskSizeBytes() + walDiskSizeBytes
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"DiskSize",
"(",
")",
"int64",
"{",
"walDiskSizeBytes",
":=",
"e",
".",
"WAL",
".",
"DiskSizeBytes",
"(",
")",
"\n",
"return",
"e",
".",
"FileStore",
".",
"DiskSizeBytes",
"(",
")",
"+",
"walDiskSizeBytes",
"\n",
"}"
] | // DiskSize returns the total size in bytes of all TSM and WAL segments on disk. | [
"DiskSize",
"returns",
"the",
"total",
"size",
"in",
"bytes",
"of",
"all",
"TSM",
"and",
"WAL",
"segments",
"on",
"disk",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L496-L499 |
12,880 | influxdata/platform | tsdb/tsm1/engine.go | Free | func (e *Engine) Free() error {
e.Cache.Free()
return e.FileStore.Free()
} | go | func (e *Engine) Free() error {
e.Cache.Free()
return e.FileStore.Free()
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"Free",
"(",
")",
"error",
"{",
"e",
".",
"Cache",
".",
"Free",
"(",
")",
"\n",
"return",
"e",
".",
"FileStore",
".",
"Free",
"(",
")",
"\n",
"}"
] | // Free releases any resources held by the engine to free up memory or CPU. | [
"Free",
"releases",
"any",
"resources",
"held",
"by",
"the",
"engine",
"to",
"free",
"up",
"memory",
"or",
"CPU",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L593-L596 |
12,881 | influxdata/platform | tsdb/tsm1/engine.go | WritePoints | func (e *Engine) WritePoints(points []models.Point) error {
values := make(map[string][]Value, len(points))
var (
keyBuf []byte
baseLen int
)
for _, p := range points {
keyBuf = append(keyBuf[:0], p.Key()...)
keyBuf = append(keyBuf, keyFieldSeparator...)
baseLen = len(keyBuf)
iter := p.FieldIterator()
t := p.Time().UnixNano()
for iter.Next() {
keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)
var v Value
switch iter.Type() {
case models.Float:
fv, err := iter.FloatValue()
if err != nil {
return err
}
v = NewFloatValue(t, fv)
case models.Integer:
iv, err := iter.IntegerValue()
if err != nil {
return err
}
v = NewIntegerValue(t, iv)
case models.Unsigned:
iv, err := iter.UnsignedValue()
if err != nil {
return err
}
v = NewUnsignedValue(t, iv)
case models.String:
v = NewStringValue(t, iter.StringValue())
case models.Boolean:
bv, err := iter.BooleanValue()
if err != nil {
return err
}
v = NewBooleanValue(t, bv)
default:
return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String())
}
values[string(keyBuf)] = append(values[string(keyBuf)], v)
}
}
e.mu.RLock()
defer e.mu.RUnlock()
// first try to write to the cache
if err := e.Cache.WriteMulti(values); err != nil {
return err
}
// Then make the write durable in the cache.
if _, err := e.WAL.WriteMulti(values); err != nil {
return err
}
return nil
} | go | func (e *Engine) WritePoints(points []models.Point) error {
values := make(map[string][]Value, len(points))
var (
keyBuf []byte
baseLen int
)
for _, p := range points {
keyBuf = append(keyBuf[:0], p.Key()...)
keyBuf = append(keyBuf, keyFieldSeparator...)
baseLen = len(keyBuf)
iter := p.FieldIterator()
t := p.Time().UnixNano()
for iter.Next() {
keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)
var v Value
switch iter.Type() {
case models.Float:
fv, err := iter.FloatValue()
if err != nil {
return err
}
v = NewFloatValue(t, fv)
case models.Integer:
iv, err := iter.IntegerValue()
if err != nil {
return err
}
v = NewIntegerValue(t, iv)
case models.Unsigned:
iv, err := iter.UnsignedValue()
if err != nil {
return err
}
v = NewUnsignedValue(t, iv)
case models.String:
v = NewStringValue(t, iter.StringValue())
case models.Boolean:
bv, err := iter.BooleanValue()
if err != nil {
return err
}
v = NewBooleanValue(t, bv)
default:
return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String())
}
values[string(keyBuf)] = append(values[string(keyBuf)], v)
}
}
e.mu.RLock()
defer e.mu.RUnlock()
// first try to write to the cache
if err := e.Cache.WriteMulti(values); err != nil {
return err
}
// Then make the write durable in the cache.
if _, err := e.WAL.WriteMulti(values); err != nil {
return err
}
return nil
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"WritePoints",
"(",
"points",
"[",
"]",
"models",
".",
"Point",
")",
"error",
"{",
"values",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"[",
"]",
"Value",
",",
"len",
"(",
"points",
")",
")",
"\n",
"var",
"(",
"keyBuf",
"[",
"]",
"byte",
"\n",
"baseLen",
"int",
"\n",
")",
"\n\n",
"for",
"_",
",",
"p",
":=",
"range",
"points",
"{",
"keyBuf",
"=",
"append",
"(",
"keyBuf",
"[",
":",
"0",
"]",
",",
"p",
".",
"Key",
"(",
")",
"...",
")",
"\n",
"keyBuf",
"=",
"append",
"(",
"keyBuf",
",",
"keyFieldSeparator",
"...",
")",
"\n",
"baseLen",
"=",
"len",
"(",
"keyBuf",
")",
"\n",
"iter",
":=",
"p",
".",
"FieldIterator",
"(",
")",
"\n",
"t",
":=",
"p",
".",
"Time",
"(",
")",
".",
"UnixNano",
"(",
")",
"\n",
"for",
"iter",
".",
"Next",
"(",
")",
"{",
"keyBuf",
"=",
"append",
"(",
"keyBuf",
"[",
":",
"baseLen",
"]",
",",
"iter",
".",
"FieldKey",
"(",
")",
"...",
")",
"\n\n",
"var",
"v",
"Value",
"\n",
"switch",
"iter",
".",
"Type",
"(",
")",
"{",
"case",
"models",
".",
"Float",
":",
"fv",
",",
"err",
":=",
"iter",
".",
"FloatValue",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"v",
"=",
"NewFloatValue",
"(",
"t",
",",
"fv",
")",
"\n",
"case",
"models",
".",
"Integer",
":",
"iv",
",",
"err",
":=",
"iter",
".",
"IntegerValue",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"v",
"=",
"NewIntegerValue",
"(",
"t",
",",
"iv",
")",
"\n",
"case",
"models",
".",
"Unsigned",
":",
"iv",
",",
"err",
":=",
"iter",
".",
"UnsignedValue",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"v",
"=",
"NewUnsignedValue",
"(",
"t",
",",
"iv",
")",
"\n",
"case",
"models",
".",
"String",
":",
"v",
"=",
"NewStringValue",
"(",
"t",
",",
"iter",
".",
"StringValue",
"(",
")",
")",
"\n",
"case",
"models",
".",
"Boolean",
":",
"bv",
",",
"err",
":=",
"iter",
".",
"BooleanValue",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"v",
"=",
"NewBooleanValue",
"(",
"t",
",",
"bv",
")",
"\n",
"default",
":",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"string",
"(",
"iter",
".",
"FieldKey",
"(",
")",
")",
",",
"p",
".",
"String",
"(",
")",
")",
"\n",
"}",
"\n",
"values",
"[",
"string",
"(",
"keyBuf",
")",
"]",
"=",
"append",
"(",
"values",
"[",
"string",
"(",
"keyBuf",
")",
"]",
",",
"v",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"e",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"e",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n\n",
"// first try to write to the cache",
"if",
"err",
":=",
"e",
".",
"Cache",
".",
"WriteMulti",
"(",
"values",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Then make the write durable in the cache.",
"if",
"_",
",",
"err",
":=",
"e",
".",
"WAL",
".",
"WriteMulti",
"(",
"values",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // WritePoints writes metadata and point data into the engine.
// It returns an error if new points are added to an existing key. | [
"WritePoints",
"writes",
"metadata",
"and",
"point",
"data",
"into",
"the",
"engine",
".",
"It",
"returns",
"an",
"error",
"if",
"new",
"points",
"are",
"added",
"to",
"an",
"existing",
"key",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L600-L665 |
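WritePoints above builds one composite key per field by writing the series key and separator into a reusable buffer once, remembering that prefix length, and re-truncating the buffer to it for every field instead of rebuilding the prefix. The sketch below isolates that buffer trick; the sep constant here is illustrative and stands in for the engine's keyFieldSeparator.

package main

import "fmt"

const sep = "#!~#" // illustrative separator value

// compositeKeys builds seriesKey+sep+fieldKey for each field, reusing one buffer.
func compositeKeys(seriesKey string, fields []string) []string {
	var keyBuf []byte
	keyBuf = append(keyBuf[:0], seriesKey...)
	keyBuf = append(keyBuf, sep...)
	baseLen := len(keyBuf) // everything up to here is shared by all fields

	out := make([]string, 0, len(fields))
	for _, f := range fields {
		keyBuf = append(keyBuf[:baseLen], f...) // overwrite only the field suffix
		out = append(out, string(keyBuf))
	}
	return out
}

func main() {
	fmt.Println(compositeKeys("cpu,host=a", []string{"usage_user", "usage_system"}))
	// [cpu,host=a#!~#usage_user cpu,host=a#!~#usage_system]
}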
12,882 | influxdata/platform | tsdb/tsm1/engine.go | Attempted | func (t *compactionTracker) Attempted(level compactionLevel, success bool, duration time.Duration) {
if success {
atomic.AddUint64(&t.ok[level], 1)
labels := t.Labels(level)
t.metrics.CompactionDuration.With(labels).Observe(duration.Seconds())
labels["status"] = "ok"
t.metrics.Compactions.With(labels).Inc()
return
}
atomic.AddUint64(&t.errors[level], 1)
labels := t.Labels(level)
labels["status"] = "error"
t.metrics.Compactions.With(labels).Inc()
} | go | func (t *compactionTracker) Attempted(level compactionLevel, success bool, duration time.Duration) {
if success {
atomic.AddUint64(&t.ok[level], 1)
labels := t.Labels(level)
t.metrics.CompactionDuration.With(labels).Observe(duration.Seconds())
labels["status"] = "ok"
t.metrics.Compactions.With(labels).Inc()
return
}
atomic.AddUint64(&t.errors[level], 1)
labels := t.Labels(level)
labels["status"] = "error"
t.metrics.Compactions.With(labels).Inc()
} | [
"func",
"(",
"t",
"*",
"compactionTracker",
")",
"Attempted",
"(",
"level",
"compactionLevel",
",",
"success",
"bool",
",",
"duration",
"time",
".",
"Duration",
")",
"{",
"if",
"success",
"{",
"atomic",
".",
"AddUint64",
"(",
"&",
"t",
".",
"ok",
"[",
"level",
"]",
",",
"1",
")",
"\n\n",
"labels",
":=",
"t",
".",
"Labels",
"(",
"level",
")",
"\n\n",
"t",
".",
"metrics",
".",
"CompactionDuration",
".",
"With",
"(",
"labels",
")",
".",
"Observe",
"(",
"duration",
".",
"Seconds",
"(",
")",
")",
"\n\n",
"labels",
"[",
"\"",
"\"",
"]",
"=",
"\"",
"\"",
"\n",
"t",
".",
"metrics",
".",
"Compactions",
".",
"With",
"(",
"labels",
")",
".",
"Inc",
"(",
")",
"\n\n",
"return",
"\n",
"}",
"\n\n",
"atomic",
".",
"AddUint64",
"(",
"&",
"t",
".",
"errors",
"[",
"level",
"]",
",",
"1",
")",
"\n\n",
"labels",
":=",
"t",
".",
"Labels",
"(",
"level",
")",
"\n",
"labels",
"[",
"\"",
"\"",
"]",
"=",
"\"",
"\"",
"\n",
"t",
".",
"metrics",
".",
"Compactions",
".",
"With",
"(",
"labels",
")",
".",
"Inc",
"(",
")",
"\n",
"}"
] | // Attempted updates the number of compactions attempted for the provided level. | [
"Attempted",
"updates",
"the",
"number",
"of",
"compactions",
"attempted",
"for",
"the",
"provided",
"level",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1141-L1160 |
12,883 | influxdata/platform | tsdb/tsm1/engine.go | compactCache | func (e *Engine) compactCache() {
t := time.NewTicker(time.Second)
defer t.Stop()
for {
e.mu.RLock()
quit := e.snapDone
e.mu.RUnlock()
select {
case <-quit:
return
case <-t.C:
e.Cache.UpdateAge()
if e.ShouldCompactCache(time.Now()) {
start := time.Now()
e.traceLogger.Info("Compacting cache", zap.String("path", e.path))
err := e.WriteSnapshot()
if err != nil && err != errCompactionsDisabled {
e.logger.Info("Error writing snapshot", zap.Error(err))
}
e.compactionTracker.SnapshotAttempted(err == nil || err == errCompactionsDisabled, time.Since(start))
}
}
}
} | go | func (e *Engine) compactCache() {
t := time.NewTicker(time.Second)
defer t.Stop()
for {
e.mu.RLock()
quit := e.snapDone
e.mu.RUnlock()
select {
case <-quit:
return
case <-t.C:
e.Cache.UpdateAge()
if e.ShouldCompactCache(time.Now()) {
start := time.Now()
e.traceLogger.Info("Compacting cache", zap.String("path", e.path))
err := e.WriteSnapshot()
if err != nil && err != errCompactionsDisabled {
e.logger.Info("Error writing snapshot", zap.Error(err))
}
e.compactionTracker.SnapshotAttempted(err == nil || err == errCompactionsDisabled, time.Since(start))
}
}
}
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"compactCache",
"(",
")",
"{",
"t",
":=",
"time",
".",
"NewTicker",
"(",
"time",
".",
"Second",
")",
"\n",
"defer",
"t",
".",
"Stop",
"(",
")",
"\n",
"for",
"{",
"e",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"quit",
":=",
"e",
".",
"snapDone",
"\n",
"e",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n\n",
"select",
"{",
"case",
"<-",
"quit",
":",
"return",
"\n\n",
"case",
"<-",
"t",
".",
"C",
":",
"e",
".",
"Cache",
".",
"UpdateAge",
"(",
")",
"\n",
"if",
"e",
".",
"ShouldCompactCache",
"(",
"time",
".",
"Now",
"(",
")",
")",
"{",
"start",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"e",
".",
"traceLogger",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"e",
".",
"path",
")",
")",
"\n",
"err",
":=",
"e",
".",
"WriteSnapshot",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"&&",
"err",
"!=",
"errCompactionsDisabled",
"{",
"e",
".",
"logger",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Error",
"(",
"err",
")",
")",
"\n",
"}",
"\n",
"e",
".",
"compactionTracker",
".",
"SnapshotAttempted",
"(",
"err",
"==",
"nil",
"||",
"err",
"==",
"errCompactionsDisabled",
",",
"time",
".",
"Since",
"(",
"start",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // compactCache continually checks if the WAL cache should be written to disk. | [
"compactCache",
"continually",
"checks",
"if",
"the",
"WAL",
"cache",
"should",
"be",
"written",
"to",
"disk",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1270-L1295 |
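compactCache above is a ticker-driven background loop: wake once a second, run a cheap predicate, do the expensive snapshot only when needed, and exit when a quit channel is signalled. The sketch below keeps that shape but drops the read-locking the real loop uses when it re-reads the quit channel on each iteration.

package main

import (
	"fmt"
	"time"
)

// pollUntilClosed wakes on a ticker, works when shouldWork says so, and returns
// once quit is closed.
func pollUntilClosed(quit <-chan struct{}, shouldWork func() bool, work func()) {
	t := time.NewTicker(time.Second)
	defer t.Stop()
	for {
		select {
		case <-quit:
			return
		case <-t.C:
			if shouldWork() {
				work()
			}
		}
	}
}

func main() {
	quit := make(chan struct{})
	go pollUntilClosed(quit, func() bool { return true }, func() { fmt.Println("snapshot") })
	time.Sleep(2500 * time.Millisecond)
	close(quit)
}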
12,884 | influxdata/platform | tsdb/tsm1/engine.go | ShouldCompactCache | func (e *Engine) ShouldCompactCache(t time.Time) bool {
sz := e.Cache.Size()
if sz == 0 {
return false
}
if sz > e.CacheFlushMemorySizeThreshold {
return true
}
return t.Sub(e.Cache.LastWriteTime()) > e.CacheFlushWriteColdDuration
} | go | func (e *Engine) ShouldCompactCache(t time.Time) bool {
sz := e.Cache.Size()
if sz == 0 {
return false
}
if sz > e.CacheFlushMemorySizeThreshold {
return true
}
return t.Sub(e.Cache.LastWriteTime()) > e.CacheFlushWriteColdDuration
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"ShouldCompactCache",
"(",
"t",
"time",
".",
"Time",
")",
"bool",
"{",
"sz",
":=",
"e",
".",
"Cache",
".",
"Size",
"(",
")",
"\n\n",
"if",
"sz",
"==",
"0",
"{",
"return",
"false",
"\n",
"}",
"\n\n",
"if",
"sz",
">",
"e",
".",
"CacheFlushMemorySizeThreshold",
"{",
"return",
"true",
"\n",
"}",
"\n\n",
"return",
"t",
".",
"Sub",
"(",
"e",
".",
"Cache",
".",
"LastWriteTime",
"(",
")",
")",
">",
"e",
".",
"CacheFlushWriteColdDuration",
"\n",
"}"
] | // ShouldCompactCache returns true if the Cache is over its flush threshold
// or if the passed in lastWriteTime is older than the write cold threshold. | [
"ShouldCompactCache",
"returns",
"true",
"if",
"the",
"Cache",
"is",
"over",
"its",
"flush",
"threshold",
"or",
"if",
"the",
"passed",
"in",
"lastWriteTime",
"is",
"older",
"than",
"the",
"write",
"cold",
"threshold",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1299-L1311 |
12,885 | influxdata/platform | tsdb/tsm1/engine.go | compactHiPriorityLevel | func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level compactionLevel, fast bool, wg *sync.WaitGroup) bool {
s := e.levelCompactionStrategy(grp, fast, level)
if s == nil {
return false
}
// Try hi priority limiter, otherwise steal a little from the low priority if we can.
if e.compactionLimiter.TryTake() {
e.compactionTracker.IncActive(level)
wg.Add(1)
go func() {
defer wg.Done()
defer e.compactionTracker.DecActive(level)
defer e.compactionLimiter.Release()
s.Apply()
// Release the files in the compaction plan
e.CompactionPlan.Release([]CompactionGroup{s.group})
}()
return true
}
// Return the unused plans
return false
} | go | func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level compactionLevel, fast bool, wg *sync.WaitGroup) bool {
s := e.levelCompactionStrategy(grp, fast, level)
if s == nil {
return false
}
// Try hi priority limiter, otherwise steal a little from the low priority if we can.
if e.compactionLimiter.TryTake() {
e.compactionTracker.IncActive(level)
wg.Add(1)
go func() {
defer wg.Done()
defer e.compactionTracker.DecActive(level)
defer e.compactionLimiter.Release()
s.Apply()
// Release the files in the compaction plan
e.CompactionPlan.Release([]CompactionGroup{s.group})
}()
return true
}
// Return the unused plans
return false
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"compactHiPriorityLevel",
"(",
"grp",
"CompactionGroup",
",",
"level",
"compactionLevel",
",",
"fast",
"bool",
",",
"wg",
"*",
"sync",
".",
"WaitGroup",
")",
"bool",
"{",
"s",
":=",
"e",
".",
"levelCompactionStrategy",
"(",
"grp",
",",
"fast",
",",
"level",
")",
"\n",
"if",
"s",
"==",
"nil",
"{",
"return",
"false",
"\n",
"}",
"\n\n",
"// Try hi priority limiter, otherwise steal a little from the low priority if we can.",
"if",
"e",
".",
"compactionLimiter",
".",
"TryTake",
"(",
")",
"{",
"e",
".",
"compactionTracker",
".",
"IncActive",
"(",
"level",
")",
"\n\n",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"func",
"(",
")",
"{",
"defer",
"wg",
".",
"Done",
"(",
")",
"\n",
"defer",
"e",
".",
"compactionTracker",
".",
"DecActive",
"(",
"level",
")",
"\n",
"defer",
"e",
".",
"compactionLimiter",
".",
"Release",
"(",
")",
"\n",
"s",
".",
"Apply",
"(",
")",
"\n",
"// Release the files in the compaction plan",
"e",
".",
"CompactionPlan",
".",
"Release",
"(",
"[",
"]",
"CompactionGroup",
"{",
"s",
".",
"group",
"}",
")",
"\n",
"}",
"(",
")",
"\n",
"return",
"true",
"\n",
"}",
"\n\n",
"// Return the unused plans",
"return",
"false",
"\n",
"}"
] | // compactHiPriorityLevel kicks off compactions using the high priority policy. It returns
// true if the compaction was started | [
"compactHiPriorityLevel",
"kicks",
"off",
"compactions",
"using",
"the",
"high",
"priority",
"policy",
".",
"It",
"returns",
"true",
"if",
"the",
"compaction",
"was",
"started"
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1385-L1409 |
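compactHiPriorityLevel above, like the compactFull record that follows, gates work behind a non-blocking TryTake on a fixed limiter: the compaction goroutine starts only if a slot is free, and Release and the WaitGroup's Done run via defer when it finishes. Below is a self-contained sketch of that gate; fixedLimiter is a toy channel-based stand-in for the repository's limiter.NewFixed.

package main

import (
	"fmt"
	"sync"
)

// fixedLimiter allows at most cap(l) concurrent holders.
type fixedLimiter chan struct{}

func (l fixedLimiter) TryTake() bool {
	select {
	case l <- struct{}{}:
		return true
	default:
		return false // no free slot right now
	}
}

func (l fixedLimiter) Release() { <-l }

// tryRun starts job only if a slot is free and reports whether it was started.
func tryRun(l fixedLimiter, wg *sync.WaitGroup, job func()) bool {
	if !l.TryTake() {
		return false
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer l.Release()
		job()
	}()
	return true
}

func main() {
	var wg sync.WaitGroup
	l := make(fixedLimiter, 2)
	for i := 0; i < 4; i++ {
		i := i
		started := tryRun(l, &wg, func() { fmt.Println("compacting group", i) })
		fmt.Println("group", i, "started:", started)
	}
	wg.Wait()
}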
12,886 | influxdata/platform | tsdb/tsm1/engine.go | compactFull | func (e *Engine) compactFull(grp CompactionGroup, wg *sync.WaitGroup) bool {
s := e.fullCompactionStrategy(grp, false)
if s == nil {
return false
}
// Try the lo priority limiter, otherwise steal a little from the high priority if we can.
if e.compactionLimiter.TryTake() {
e.compactionTracker.IncFullActive()
wg.Add(1)
go func() {
defer wg.Done()
defer e.compactionTracker.DecFullActive()
defer e.compactionLimiter.Release()
s.Apply()
// Release the files in the compaction plan
e.CompactionPlan.Release([]CompactionGroup{s.group})
}()
return true
}
return false
} | go | func (e *Engine) compactFull(grp CompactionGroup, wg *sync.WaitGroup) bool {
s := e.fullCompactionStrategy(grp, false)
if s == nil {
return false
}
// Try the lo priority limiter, otherwise steal a little from the high priority if we can.
if e.compactionLimiter.TryTake() {
e.compactionTracker.IncFullActive()
wg.Add(1)
go func() {
defer wg.Done()
defer e.compactionTracker.DecFullActive()
defer e.compactionLimiter.Release()
s.Apply()
// Release the files in the compaction plan
e.CompactionPlan.Release([]CompactionGroup{s.group})
}()
return true
}
return false
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"compactFull",
"(",
"grp",
"CompactionGroup",
",",
"wg",
"*",
"sync",
".",
"WaitGroup",
")",
"bool",
"{",
"s",
":=",
"e",
".",
"fullCompactionStrategy",
"(",
"grp",
",",
"false",
")",
"\n",
"if",
"s",
"==",
"nil",
"{",
"return",
"false",
"\n",
"}",
"\n\n",
"// Try the lo priority limiter, otherwise steal a little from the high priority if we can.",
"if",
"e",
".",
"compactionLimiter",
".",
"TryTake",
"(",
")",
"{",
"e",
".",
"compactionTracker",
".",
"IncFullActive",
"(",
")",
"\n",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"func",
"(",
")",
"{",
"defer",
"wg",
".",
"Done",
"(",
")",
"\n",
"defer",
"e",
".",
"compactionTracker",
".",
"DecFullActive",
"(",
")",
"\n",
"defer",
"e",
".",
"compactionLimiter",
".",
"Release",
"(",
")",
"\n",
"s",
".",
"Apply",
"(",
")",
"\n",
"// Release the files in the compaction plan",
"e",
".",
"CompactionPlan",
".",
"Release",
"(",
"[",
"]",
"CompactionGroup",
"{",
"s",
".",
"group",
"}",
")",
"\n",
"}",
"(",
")",
"\n",
"return",
"true",
"\n",
"}",
"\n",
"return",
"false",
"\n",
"}"
] | // compactFull kicks off full and optimize compactions using the lo priority policy. It returns
// the plans that were not able to be started. | [
"compactFull",
"kicks",
"off",
"full",
"and",
"optimize",
"compactions",
"using",
"the",
"lo",
"priority",
"policy",
".",
"It",
"returns",
"the",
"plans",
"that",
"were",
"not",
"able",
"to",
"be",
"started",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1438-L1459 |
12,887 | influxdata/platform | tsdb/tsm1/engine.go | compactGroup | func (s *compactionStrategy) compactGroup() {
now := time.Now()
group := s.group
log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1_compact_group")
defer logEnd()
log.Info("Beginning compaction", zap.Int("tsm1_files_n", len(group)))
for i, f := range group {
log.Info("Compacting file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
var (
err error
files []string
)
if s.fast {
files, err = s.compactor.CompactFast(group)
} else {
files, err = s.compactor.CompactFull(group)
}
if err != nil {
_, inProgress := err.(errCompactionInProgress)
if err == errCompactionsDisabled || inProgress {
log.Info("Aborted compaction", zap.Error(err))
if _, ok := err.(errCompactionInProgress); ok {
time.Sleep(time.Second)
}
return
}
log.Info("Error compacting TSM files", zap.Error(err))
s.tracker.Attempted(s.level, false, 0)
time.Sleep(time.Second)
return
}
if err := s.fileStore.ReplaceWithCallback(group, files, nil); err != nil {
log.Info("Error replacing new TSM files", zap.Error(err))
s.tracker.Attempted(s.level, false, 0)
time.Sleep(time.Second)
return
}
for i, f := range files {
log.Info("Compacted file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
log.Info("Finished compacting files", zap.Int("tsm1_files_n", len(files)))
s.tracker.Attempted(s.level, true, time.Since(now))
} | go | func (s *compactionStrategy) compactGroup() {
now := time.Now()
group := s.group
log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1_compact_group")
defer logEnd()
log.Info("Beginning compaction", zap.Int("tsm1_files_n", len(group)))
for i, f := range group {
log.Info("Compacting file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
var (
err error
files []string
)
if s.fast {
files, err = s.compactor.CompactFast(group)
} else {
files, err = s.compactor.CompactFull(group)
}
if err != nil {
_, inProgress := err.(errCompactionInProgress)
if err == errCompactionsDisabled || inProgress {
log.Info("Aborted compaction", zap.Error(err))
if _, ok := err.(errCompactionInProgress); ok {
time.Sleep(time.Second)
}
return
}
log.Info("Error compacting TSM files", zap.Error(err))
s.tracker.Attempted(s.level, false, 0)
time.Sleep(time.Second)
return
}
if err := s.fileStore.ReplaceWithCallback(group, files, nil); err != nil {
log.Info("Error replacing new TSM files", zap.Error(err))
s.tracker.Attempted(s.level, false, 0)
time.Sleep(time.Second)
return
}
for i, f := range files {
log.Info("Compacted file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
log.Info("Finished compacting files", zap.Int("tsm1_files_n", len(files)))
s.tracker.Attempted(s.level, true, time.Since(now))
} | [
"func",
"(",
"s",
"*",
"compactionStrategy",
")",
"compactGroup",
"(",
")",
"{",
"now",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"group",
":=",
"s",
".",
"group",
"\n",
"log",
",",
"logEnd",
":=",
"logger",
".",
"NewOperation",
"(",
"s",
".",
"logger",
",",
"\"",
"\"",
",",
"\"",
"\"",
")",
"\n",
"defer",
"logEnd",
"(",
")",
"\n\n",
"log",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Int",
"(",
"\"",
"\"",
",",
"len",
"(",
"group",
")",
")",
")",
"\n",
"for",
"i",
",",
"f",
":=",
"range",
"group",
"{",
"log",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Int",
"(",
"\"",
"\"",
",",
"i",
")",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"f",
")",
")",
"\n",
"}",
"\n\n",
"var",
"(",
"err",
"error",
"\n",
"files",
"[",
"]",
"string",
"\n",
")",
"\n\n",
"if",
"s",
".",
"fast",
"{",
"files",
",",
"err",
"=",
"s",
".",
"compactor",
".",
"CompactFast",
"(",
"group",
")",
"\n",
"}",
"else",
"{",
"files",
",",
"err",
"=",
"s",
".",
"compactor",
".",
"CompactFull",
"(",
"group",
")",
"\n",
"}",
"\n\n",
"if",
"err",
"!=",
"nil",
"{",
"_",
",",
"inProgress",
":=",
"err",
".",
"(",
"errCompactionInProgress",
")",
"\n",
"if",
"err",
"==",
"errCompactionsDisabled",
"||",
"inProgress",
"{",
"log",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Error",
"(",
"err",
")",
")",
"\n\n",
"if",
"_",
",",
"ok",
":=",
"err",
".",
"(",
"errCompactionInProgress",
")",
";",
"ok",
"{",
"time",
".",
"Sleep",
"(",
"time",
".",
"Second",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n\n",
"log",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Error",
"(",
"err",
")",
")",
"\n",
"s",
".",
"tracker",
".",
"Attempted",
"(",
"s",
".",
"level",
",",
"false",
",",
"0",
")",
"\n",
"time",
".",
"Sleep",
"(",
"time",
".",
"Second",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"s",
".",
"fileStore",
".",
"ReplaceWithCallback",
"(",
"group",
",",
"files",
",",
"nil",
")",
";",
"err",
"!=",
"nil",
"{",
"log",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Error",
"(",
"err",
")",
")",
"\n",
"s",
".",
"tracker",
".",
"Attempted",
"(",
"s",
".",
"level",
",",
"false",
",",
"0",
")",
"\n",
"time",
".",
"Sleep",
"(",
"time",
".",
"Second",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"for",
"i",
",",
"f",
":=",
"range",
"files",
"{",
"log",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Int",
"(",
"\"",
"\"",
",",
"i",
")",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"f",
")",
")",
"\n",
"}",
"\n",
"log",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"Int",
"(",
"\"",
"\"",
",",
"len",
"(",
"files",
")",
")",
")",
"\n",
"s",
".",
"tracker",
".",
"Attempted",
"(",
"s",
".",
"level",
",",
"true",
",",
"time",
".",
"Since",
"(",
"now",
")",
")",
"\n",
"}"
] | // compactGroup executes the compaction strategy against a single CompactionGroup. | [
"compactGroup",
"executes",
"the",
"compaction",
"strategy",
"against",
"a",
"single",
"CompactionGroup",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1483-L1534 |
12,888 | influxdata/platform | tsdb/tsm1/engine.go | reloadCache | func (e *Engine) reloadCache() error {
now := time.Now()
files, err := segmentFileNames(e.WAL.Path())
if err != nil {
return err
}
limit := e.Cache.MaxSize()
defer func() {
e.Cache.SetMaxSize(limit)
}()
// Disable the max size during loading
e.Cache.SetMaxSize(0)
loader := NewCacheLoader(files)
loader.WithLogger(e.logger)
if err := loader.Load(e.Cache); err != nil {
return err
}
e.traceLogger.Info("Reloaded WAL cache", zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(now)))
return nil
} | go | func (e *Engine) reloadCache() error {
now := time.Now()
files, err := segmentFileNames(e.WAL.Path())
if err != nil {
return err
}
limit := e.Cache.MaxSize()
defer func() {
e.Cache.SetMaxSize(limit)
}()
// Disable the max size during loading
e.Cache.SetMaxSize(0)
loader := NewCacheLoader(files)
loader.WithLogger(e.logger)
if err := loader.Load(e.Cache); err != nil {
return err
}
e.traceLogger.Info("Reloaded WAL cache", zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(now)))
return nil
} | [
"func",
"(",
"e",
"*",
"Engine",
")",
"reloadCache",
"(",
")",
"error",
"{",
"now",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"files",
",",
"err",
":=",
"segmentFileNames",
"(",
"e",
".",
"WAL",
".",
"Path",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"limit",
":=",
"e",
".",
"Cache",
".",
"MaxSize",
"(",
")",
"\n",
"defer",
"func",
"(",
")",
"{",
"e",
".",
"Cache",
".",
"SetMaxSize",
"(",
"limit",
")",
"\n",
"}",
"(",
")",
"\n\n",
"// Disable the max size during loading",
"e",
".",
"Cache",
".",
"SetMaxSize",
"(",
"0",
")",
"\n\n",
"loader",
":=",
"NewCacheLoader",
"(",
"files",
")",
"\n",
"loader",
".",
"WithLogger",
"(",
"e",
".",
"logger",
")",
"\n",
"if",
"err",
":=",
"loader",
".",
"Load",
"(",
"e",
".",
"Cache",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"e",
".",
"traceLogger",
".",
"Info",
"(",
"\"",
"\"",
",",
"zap",
".",
"String",
"(",
"\"",
"\"",
",",
"e",
".",
"WAL",
".",
"Path",
"(",
")",
")",
",",
"zap",
".",
"Duration",
"(",
"\"",
"\"",
",",
"time",
".",
"Since",
"(",
"now",
")",
")",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // reloadCache reads the WAL segment files and loads them into the cache. | [
"reloadCache",
"reads",
"the",
"WAL",
"segment",
"files",
"and",
"loads",
"them",
"into",
"the",
"cache",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1572-L1595 |
12,889 | influxdata/platform | tsdb/tsm1/engine.go | SeriesAndFieldFromCompositeKey | func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) {
sep := bytes.Index(key, keyFieldSeparatorBytes)
if sep == -1 {
// No field???
return key, nil
}
return key[:sep], key[sep+len(keyFieldSeparator):]
} | go | func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) {
sep := bytes.Index(key, keyFieldSeparatorBytes)
if sep == -1 {
// No field???
return key, nil
}
return key[:sep], key[sep+len(keyFieldSeparator):]
} | [
"func",
"SeriesAndFieldFromCompositeKey",
"(",
"key",
"[",
"]",
"byte",
")",
"(",
"[",
"]",
"byte",
",",
"[",
"]",
"byte",
")",
"{",
"sep",
":=",
"bytes",
".",
"Index",
"(",
"key",
",",
"keyFieldSeparatorBytes",
")",
"\n",
"if",
"sep",
"==",
"-",
"1",
"{",
"// No field???",
"return",
"key",
",",
"nil",
"\n",
"}",
"\n",
"return",
"key",
"[",
":",
"sep",
"]",
",",
"key",
"[",
"sep",
"+",
"len",
"(",
"keyFieldSeparator",
")",
":",
"]",
"\n",
"}"
] | // SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key. | [
"SeriesAndFieldFromCompositeKey",
"returns",
"the",
"series",
"key",
"and",
"the",
"field",
"key",
"extracted",
"from",
"the",
"composite",
"key",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsm1/engine.go#L1740-L1747 |
12,890 | influxdata/platform | tsdb/tsi1/index.go | Bytes | func (i *Index) Bytes() int {
var b int
i.mu.RLock()
b += 24 // mu RWMutex is 24 bytes
b += int(unsafe.Sizeof(i.partitions))
for _, p := range i.partitions {
b += int(unsafe.Sizeof(p)) + p.bytes()
}
b += int(unsafe.Sizeof(i.opened))
b += int(unsafe.Sizeof(i.path)) + len(i.path)
b += int(unsafe.Sizeof(i.disableCompactions))
b += int(unsafe.Sizeof(i.maxLogFileSize))
b += int(unsafe.Sizeof(i.logger))
b += int(unsafe.Sizeof(i.sfile))
// Do not count SeriesFile because it belongs to the code that constructed this Index.
b += int(unsafe.Sizeof(i.version))
b += int(unsafe.Sizeof(i.PartitionN))
i.mu.RUnlock()
return b
} | go | func (i *Index) Bytes() int {
var b int
i.mu.RLock()
b += 24 // mu RWMutex is 24 bytes
b += int(unsafe.Sizeof(i.partitions))
for _, p := range i.partitions {
b += int(unsafe.Sizeof(p)) + p.bytes()
}
b += int(unsafe.Sizeof(i.opened))
b += int(unsafe.Sizeof(i.path)) + len(i.path)
b += int(unsafe.Sizeof(i.disableCompactions))
b += int(unsafe.Sizeof(i.maxLogFileSize))
b += int(unsafe.Sizeof(i.logger))
b += int(unsafe.Sizeof(i.sfile))
// Do not count SeriesFile because it belongs to the code that constructed this Index.
b += int(unsafe.Sizeof(i.version))
b += int(unsafe.Sizeof(i.PartitionN))
i.mu.RUnlock()
return b
} | [
"func",
"(",
"i",
"*",
"Index",
")",
"Bytes",
"(",
")",
"int",
"{",
"var",
"b",
"int",
"\n",
"i",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"b",
"+=",
"24",
"// mu RWMutex is 24 bytes",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"partitions",
")",
")",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"i",
".",
"partitions",
"{",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"p",
")",
")",
"+",
"p",
".",
"bytes",
"(",
")",
"\n",
"}",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"opened",
")",
")",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"path",
")",
")",
"+",
"len",
"(",
"i",
".",
"path",
")",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"disableCompactions",
")",
")",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"maxLogFileSize",
")",
")",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"logger",
")",
")",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"sfile",
")",
")",
"\n",
"// Do not count SeriesFile because it belongs to the code that constructed this Index.",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"version",
")",
")",
"\n",
"b",
"+=",
"int",
"(",
"unsafe",
".",
"Sizeof",
"(",
"i",
".",
"PartitionN",
")",
")",
"\n",
"i",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"b",
"\n",
"}"
] | // Bytes estimates the memory footprint of this Index, in bytes. | [
"Bytes",
"estimates",
"the",
"memory",
"footprint",
"of",
"this",
"Index",
"in",
"bytes",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/index.go#L164-L183 |
12,891 | influxdata/platform | tsdb/tsi1/index.go | measurementSeriesByExprIterator | func (i *Index) measurementSeriesByExprIterator(name []byte, expr influxql.Expr) (tsdb.SeriesIDIterator, error) {
// Return all series for the measurement if there are no tag expressions.
release := i.sfile.Retain()
defer release()
if expr == nil {
itr, err := i.measurementSeriesIDIterator(name)
if err != nil {
return nil, err
}
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
}
itr, err := i.seriesByExprIterator(name, expr)
if err != nil {
return nil, err
}
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
} | go | func (i *Index) measurementSeriesByExprIterator(name []byte, expr influxql.Expr) (tsdb.SeriesIDIterator, error) {
// Return all series for the measurement if there are no tag expressions.
release := i.sfile.Retain()
defer release()
if expr == nil {
itr, err := i.measurementSeriesIDIterator(name)
if err != nil {
return nil, err
}
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
}
itr, err := i.seriesByExprIterator(name, expr)
if err != nil {
return nil, err
}
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
} | [
"func",
"(",
"i",
"*",
"Index",
")",
"measurementSeriesByExprIterator",
"(",
"name",
"[",
"]",
"byte",
",",
"expr",
"influxql",
".",
"Expr",
")",
"(",
"tsdb",
".",
"SeriesIDIterator",
",",
"error",
")",
"{",
"// Return all series for the measurement if there are no tag expressions.",
"release",
":=",
"i",
".",
"sfile",
".",
"Retain",
"(",
")",
"\n",
"defer",
"release",
"(",
")",
"\n\n",
"if",
"expr",
"==",
"nil",
"{",
"itr",
",",
"err",
":=",
"i",
".",
"measurementSeriesIDIterator",
"(",
"name",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"tsdb",
".",
"FilterUndeletedSeriesIDIterator",
"(",
"i",
".",
"sfile",
",",
"itr",
")",
",",
"nil",
"\n",
"}",
"\n\n",
"itr",
",",
"err",
":=",
"i",
".",
"seriesByExprIterator",
"(",
"name",
",",
"expr",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"return",
"tsdb",
".",
"FilterUndeletedSeriesIDIterator",
"(",
"i",
".",
"sfile",
",",
"itr",
")",
",",
"nil",
"\n",
"}"
] | // measurementSeriesByExprIterator returns a series iterator for a measurement
// that is filtered by expr. See MeasurementSeriesByExprIterator for more details.
//
// measurementSeriesByExprIterator guarantees to never take any locks on the
// series file. | [
"measurementSeriesByExprIterator",
"returns",
"a",
"series",
"iterator",
"for",
"a",
"measurement",
"that",
"is",
"filtered",
"by",
"expr",
".",
"See",
"MeasurementSeriesByExprIterator",
"for",
"more",
"details",
".",
"measurementSeriesByExprIterator",
"guarantees",
"to",
"never",
"take",
"any",
"locks",
"on",
"the",
"series",
"file",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/index.go#L517-L537 |
12,892 | influxdata/platform | tsdb/tsi1/index.go | MeasurementSeriesIDIterator | func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) {
itr, err := i.measurementSeriesIDIterator(name)
if err != nil {
return nil, err
}
release := i.sfile.Retain()
defer release()
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
} | go | func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) {
itr, err := i.measurementSeriesIDIterator(name)
if err != nil {
return nil, err
}
release := i.sfile.Retain()
defer release()
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
} | [
"func",
"(",
"i",
"*",
"Index",
")",
"MeasurementSeriesIDIterator",
"(",
"name",
"[",
"]",
"byte",
")",
"(",
"tsdb",
".",
"SeriesIDIterator",
",",
"error",
")",
"{",
"itr",
",",
"err",
":=",
"i",
".",
"measurementSeriesIDIterator",
"(",
"name",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n\n",
"release",
":=",
"i",
".",
"sfile",
".",
"Retain",
"(",
")",
"\n",
"defer",
"release",
"(",
")",
"\n",
"return",
"tsdb",
".",
"FilterUndeletedSeriesIDIterator",
"(",
"i",
".",
"sfile",
",",
"itr",
")",
",",
"nil",
"\n",
"}"
] | // MeasurementSeriesIDIterator returns an iterator over all non-tombstoned series
// for the provided measurement. | [
"MeasurementSeriesIDIterator",
"returns",
"an",
"iterator",
"over",
"all",
"non",
"-",
"tombstoned",
"series",
"for",
"the",
"provided",
"measurement",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/index.go#L541-L550 |
12,893 | influxdata/platform | tsdb/tsi1/index.go | tagValueSeriesIDIterator | func (i *Index) tagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) {
// Check series ID set cache...
if i.config.SeriesIDSetCacheSize > 0 { // Cache enabled.
if ss := i.tagValueCache.Get(name, key, value); ss != nil {
// Return a clone because the set is mutable.
return tsdb.NewSeriesIDSetIterator(ss.Clone()), nil
}
}
a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr, err := p.TagValueSeriesIDIterator(name, key, value)
if err != nil {
return nil, err
} else if itr != nil {
a = append(a, itr)
}
}
itr := tsdb.MergeSeriesIDIterators(a...)
if i.config.SeriesIDSetCacheSize == 0 { // Cache disabled.
return itr, nil
}
// Check if the iterator contains only series id sets. Cache them...
if ssitr, ok := itr.(tsdb.SeriesIDSetIterator); ok {
ss := ssitr.SeriesIDSet()
ss.SetCOW(true) // This is important to speed the clone up.
i.tagValueCache.Put(name, key, value, ss)
}
return itr, nil
} | go | func (i *Index) tagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) {
// Check series ID set cache...
if i.config.SeriesIDSetCacheSize > 0 { // Cache enabled.
if ss := i.tagValueCache.Get(name, key, value); ss != nil {
// Return a clone because the set is mutable.
return tsdb.NewSeriesIDSetIterator(ss.Clone()), nil
}
}
a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
for _, p := range i.partitions {
itr, err := p.TagValueSeriesIDIterator(name, key, value)
if err != nil {
return nil, err
} else if itr != nil {
a = append(a, itr)
}
}
itr := tsdb.MergeSeriesIDIterators(a...)
if i.config.SeriesIDSetCacheSize == 0 { // Cache disabled.
return itr, nil
}
// Check if the iterator contains only series id sets. Cache them...
if ssitr, ok := itr.(tsdb.SeriesIDSetIterator); ok {
ss := ssitr.SeriesIDSet()
ss.SetCOW(true) // This is important to speed the clone up.
i.tagValueCache.Put(name, key, value, ss)
}
return itr, nil
} | [
"func",
"(",
"i",
"*",
"Index",
")",
"tagValueSeriesIDIterator",
"(",
"name",
",",
"key",
",",
"value",
"[",
"]",
"byte",
")",
"(",
"tsdb",
".",
"SeriesIDIterator",
",",
"error",
")",
"{",
"// Check series ID set cache...",
"if",
"i",
".",
"config",
".",
"SeriesIDSetCacheSize",
">",
"0",
"{",
"// Cache enabled.",
"if",
"ss",
":=",
"i",
".",
"tagValueCache",
".",
"Get",
"(",
"name",
",",
"key",
",",
"value",
")",
";",
"ss",
"!=",
"nil",
"{",
"// Return a clone because the set is mutable.",
"return",
"tsdb",
".",
"NewSeriesIDSetIterator",
"(",
"ss",
".",
"Clone",
"(",
")",
")",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n\n",
"a",
":=",
"make",
"(",
"[",
"]",
"tsdb",
".",
"SeriesIDIterator",
",",
"0",
",",
"len",
"(",
"i",
".",
"partitions",
")",
")",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"i",
".",
"partitions",
"{",
"itr",
",",
"err",
":=",
"p",
".",
"TagValueSeriesIDIterator",
"(",
"name",
",",
"key",
",",
"value",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"else",
"if",
"itr",
"!=",
"nil",
"{",
"a",
"=",
"append",
"(",
"a",
",",
"itr",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"itr",
":=",
"tsdb",
".",
"MergeSeriesIDIterators",
"(",
"a",
"...",
")",
"\n",
"if",
"i",
".",
"config",
".",
"SeriesIDSetCacheSize",
"==",
"0",
"{",
"// Cache disabled.",
"return",
"itr",
",",
"nil",
"\n",
"}",
"\n\n",
"// Check if the iterator contains only series id sets. Cache them...",
"if",
"ssitr",
",",
"ok",
":=",
"itr",
".",
"(",
"tsdb",
".",
"SeriesIDSetIterator",
")",
";",
"ok",
"{",
"ss",
":=",
"ssitr",
".",
"SeriesIDSet",
"(",
")",
"\n",
"ss",
".",
"SetCOW",
"(",
"true",
")",
"// This is important to speed the clone up.",
"\n",
"i",
".",
"tagValueCache",
".",
"Put",
"(",
"name",
",",
"key",
",",
"value",
",",
"ss",
")",
"\n",
"}",
"\n",
"return",
"itr",
",",
"nil",
"\n",
"}"
] | // tagValueSeriesIDIterator returns a series iterator for a single tag value. | [
"tagValueSeriesIDIterator",
"returns",
"a",
"series",
"iterator",
"for",
"a",
"single",
"tag",
"value",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/index.go#L924-L955 |
12,894 | influxdata/platform | tsdb/tsi1/index.go | RetainFileSet | func (i *Index) RetainFileSet() (*FileSet, error) {
i.mu.RLock()
defer i.mu.RUnlock()
fs, _ := NewFileSet(nil, i.sfile, nil)
for _, p := range i.partitions {
pfs, err := p.RetainFileSet()
if err != nil {
fs.Close()
return nil, err
}
fs.files = append(fs.files, pfs.files...)
}
return fs, nil
} | go | func (i *Index) RetainFileSet() (*FileSet, error) {
i.mu.RLock()
defer i.mu.RUnlock()
fs, _ := NewFileSet(nil, i.sfile, nil)
for _, p := range i.partitions {
pfs, err := p.RetainFileSet()
if err != nil {
fs.Close()
return nil, err
}
fs.files = append(fs.files, pfs.files...)
}
return fs, nil
} | [
"func",
"(",
"i",
"*",
"Index",
")",
"RetainFileSet",
"(",
")",
"(",
"*",
"FileSet",
",",
"error",
")",
"{",
"i",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"i",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n\n",
"fs",
",",
"_",
":=",
"NewFileSet",
"(",
"nil",
",",
"i",
".",
"sfile",
",",
"nil",
")",
"\n",
"for",
"_",
",",
"p",
":=",
"range",
"i",
".",
"partitions",
"{",
"pfs",
",",
"err",
":=",
"p",
".",
"RetainFileSet",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"fs",
".",
"Close",
"(",
")",
"\n",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"fs",
".",
"files",
"=",
"append",
"(",
"fs",
".",
"files",
",",
"pfs",
".",
"files",
"...",
")",
"\n",
"}",
"\n",
"return",
"fs",
",",
"nil",
"\n",
"}"
] | // RetainFileSet returns the set of all files across all partitions.
// This is only needed when all files need to be retained for an operation. | [
"RetainFileSet",
"returns",
"the",
"set",
"of",
"all",
"files",
"across",
"all",
"partitions",
".",
"This",
"is",
"only",
"needed",
"when",
"all",
"files",
"need",
"to",
"be",
"retained",
"for",
"an",
"operation",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/index.go#L1134-L1148 |
12,895 | influxdata/platform | tsdb/tsi1/index.go | MatchTagValueSeriesIDIterator | func (i *Index) MatchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (tsdb.SeriesIDIterator, error) {
release := i.sfile.Retain()
defer release()
itr, err := i.matchTagValueSeriesIDIterator(name, key, value, matches)
if err != nil {
return nil, err
}
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
} | go | func (i *Index) MatchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (tsdb.SeriesIDIterator, error) {
release := i.sfile.Retain()
defer release()
itr, err := i.matchTagValueSeriesIDIterator(name, key, value, matches)
if err != nil {
return nil, err
}
return tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr), nil
} | [
"func",
"(",
"i",
"*",
"Index",
")",
"MatchTagValueSeriesIDIterator",
"(",
"name",
",",
"key",
"[",
"]",
"byte",
",",
"value",
"*",
"regexp",
".",
"Regexp",
",",
"matches",
"bool",
")",
"(",
"tsdb",
".",
"SeriesIDIterator",
",",
"error",
")",
"{",
"release",
":=",
"i",
".",
"sfile",
".",
"Retain",
"(",
")",
"\n",
"defer",
"release",
"(",
")",
"\n\n",
"itr",
",",
"err",
":=",
"i",
".",
"matchTagValueSeriesIDIterator",
"(",
"name",
",",
"key",
",",
"value",
",",
"matches",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"tsdb",
".",
"FilterUndeletedSeriesIDIterator",
"(",
"i",
".",
"sfile",
",",
"itr",
")",
",",
"nil",
"\n",
"}"
] | // MatchTagValueSeriesIDIterator returns a series iterator for tags which match value.
// If matches is false, returns iterators which do not match value. | [
"MatchTagValueSeriesIDIterator",
"returns",
"a",
"series",
"iterator",
"for",
"tags",
"which",
"match",
"value",
".",
"If",
"matches",
"is",
"false",
"returns",
"iterators",
"which",
"do",
"not",
"match",
"value",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/tsdb/tsi1/index.go#L1378-L1387 |
12,896 | influxdata/platform | gather/storage.go | Process | func (h *StorageHandler) Process(s nats.Subscription, m nats.Message) {
defer m.Ack()
ms := make([]Metrics, 0)
err := json.Unmarshal(m.Data(), &ms)
if err != nil {
h.Logger.Error(fmt.Sprintf("storage handler process err: %v", err))
return
}
err = h.Storage.Record(ms)
if err != nil {
h.Logger.Error(fmt.Sprintf("storage handler store err: %v", err))
}
} | go | func (h *StorageHandler) Process(s nats.Subscription, m nats.Message) {
defer m.Ack()
ms := make([]Metrics, 0)
err := json.Unmarshal(m.Data(), &ms)
if err != nil {
h.Logger.Error(fmt.Sprintf("storage handler process err: %v", err))
return
}
err = h.Storage.Record(ms)
if err != nil {
h.Logger.Error(fmt.Sprintf("storage handler store err: %v", err))
}
} | [
"func",
"(",
"h",
"*",
"StorageHandler",
")",
"Process",
"(",
"s",
"nats",
".",
"Subscription",
",",
"m",
"nats",
".",
"Message",
")",
"{",
"defer",
"m",
".",
"Ack",
"(",
")",
"\n",
"ms",
":=",
"make",
"(",
"[",
"]",
"Metrics",
",",
"0",
")",
"\n",
"err",
":=",
"json",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
"(",
")",
",",
"&",
"ms",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"h",
".",
"Logger",
".",
"Error",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"err",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"err",
"=",
"h",
".",
"Storage",
".",
"Record",
"(",
"ms",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"h",
".",
"Logger",
".",
"Error",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"err",
")",
")",
"\n",
"}",
"\n",
"}"
] | // Process consumes job queue, and use storage to record. | [
"Process",
"consumes",
"job",
"queue",
"and",
"use",
"storage",
"to",
"record",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/gather/storage.go#L24-L36 |
12,897 | influxdata/platform | bolt/bucket.go | CreateBucket | func (c *Client) CreateBucket(ctx context.Context, b *platform.Bucket) error {
var err error
op := getOp(platform.OpCreateBucket)
return c.db.Update(func(tx *bolt.Tx) error {
if b.OrganizationID.Valid() {
_, pe := c.findOrganizationByID(ctx, tx, b.OrganizationID)
if pe != nil {
return &platform.Error{
Err: pe,
Op: op,
}
}
} else {
o, pe := c.findOrganizationByName(ctx, tx, b.Organization)
if pe != nil {
return &platform.Error{
Err: pe,
Op: op,
}
}
b.OrganizationID = o.ID
}
unique := c.uniqueBucketName(ctx, tx, b)
if !unique {
// TODO: make standard error
return &platform.Error{
Code: platform.EConflict,
Op: op,
Msg: fmt.Sprintf("bucket with name %s already exists", b.Name),
}
}
b.ID = c.IDGenerator.ID()
if err = c.appendBucketEventToLog(ctx, tx, b.ID, bucketCreatedEvent); err != nil {
return &platform.Error{
Op: op,
Err: err,
}
}
if pe := c.putBucket(ctx, tx, b); pe != nil {
pe.Op = op
err = pe
}
if pe := c.createBucketUserResourceMappings(ctx, tx, b); pe != nil {
pe.Op = op
err = pe
}
return nil
})
} | go | func (c *Client) CreateBucket(ctx context.Context, b *platform.Bucket) error {
var err error
op := getOp(platform.OpCreateBucket)
return c.db.Update(func(tx *bolt.Tx) error {
if b.OrganizationID.Valid() {
_, pe := c.findOrganizationByID(ctx, tx, b.OrganizationID)
if pe != nil {
return &platform.Error{
Err: pe,
Op: op,
}
}
} else {
o, pe := c.findOrganizationByName(ctx, tx, b.Organization)
if pe != nil {
return &platform.Error{
Err: pe,
Op: op,
}
}
b.OrganizationID = o.ID
}
unique := c.uniqueBucketName(ctx, tx, b)
if !unique {
// TODO: make standard error
return &platform.Error{
Code: platform.EConflict,
Op: op,
Msg: fmt.Sprintf("bucket with name %s already exists", b.Name),
}
}
b.ID = c.IDGenerator.ID()
if err = c.appendBucketEventToLog(ctx, tx, b.ID, bucketCreatedEvent); err != nil {
return &platform.Error{
Op: op,
Err: err,
}
}
if pe := c.putBucket(ctx, tx, b); pe != nil {
pe.Op = op
err = pe
}
if pe := c.createBucketUserResourceMappings(ctx, tx, b); pe != nil {
pe.Op = op
err = pe
}
return nil
})
} | [
"func",
"(",
"c",
"*",
"Client",
")",
"CreateBucket",
"(",
"ctx",
"context",
".",
"Context",
",",
"b",
"*",
"platform",
".",
"Bucket",
")",
"error",
"{",
"var",
"err",
"error",
"\n",
"op",
":=",
"getOp",
"(",
"platform",
".",
"OpCreateBucket",
")",
"\n",
"return",
"c",
".",
"db",
".",
"Update",
"(",
"func",
"(",
"tx",
"*",
"bolt",
".",
"Tx",
")",
"error",
"{",
"if",
"b",
".",
"OrganizationID",
".",
"Valid",
"(",
")",
"{",
"_",
",",
"pe",
":=",
"c",
".",
"findOrganizationByID",
"(",
"ctx",
",",
"tx",
",",
"b",
".",
"OrganizationID",
")",
"\n",
"if",
"pe",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"pe",
",",
"Op",
":",
"op",
",",
"}",
"\n",
"}",
"\n",
"}",
"else",
"{",
"o",
",",
"pe",
":=",
"c",
".",
"findOrganizationByName",
"(",
"ctx",
",",
"tx",
",",
"b",
".",
"Organization",
")",
"\n",
"if",
"pe",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Err",
":",
"pe",
",",
"Op",
":",
"op",
",",
"}",
"\n",
"}",
"\n",
"b",
".",
"OrganizationID",
"=",
"o",
".",
"ID",
"\n",
"}",
"\n\n",
"unique",
":=",
"c",
".",
"uniqueBucketName",
"(",
"ctx",
",",
"tx",
",",
"b",
")",
"\n\n",
"if",
"!",
"unique",
"{",
"// TODO: make standard error",
"return",
"&",
"platform",
".",
"Error",
"{",
"Code",
":",
"platform",
".",
"EConflict",
",",
"Op",
":",
"op",
",",
"Msg",
":",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"b",
".",
"Name",
")",
",",
"}",
"\n",
"}",
"\n\n",
"b",
".",
"ID",
"=",
"c",
".",
"IDGenerator",
".",
"ID",
"(",
")",
"\n\n",
"if",
"err",
"=",
"c",
".",
"appendBucketEventToLog",
"(",
"ctx",
",",
"tx",
",",
"b",
".",
"ID",
",",
"bucketCreatedEvent",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"&",
"platform",
".",
"Error",
"{",
"Op",
":",
"op",
",",
"Err",
":",
"err",
",",
"}",
"\n",
"}",
"\n\n",
"if",
"pe",
":=",
"c",
".",
"putBucket",
"(",
"ctx",
",",
"tx",
",",
"b",
")",
";",
"pe",
"!=",
"nil",
"{",
"pe",
".",
"Op",
"=",
"op",
"\n",
"err",
"=",
"pe",
"\n",
"}",
"\n\n",
"if",
"pe",
":=",
"c",
".",
"createBucketUserResourceMappings",
"(",
"ctx",
",",
"tx",
",",
"b",
")",
";",
"pe",
"!=",
"nil",
"{",
"pe",
".",
"Op",
"=",
"op",
"\n",
"err",
"=",
"pe",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}",
")",
"\n",
"}"
] | // CreateBucket creates a platform bucket and sets b.ID. | [
"CreateBucket",
"creates",
"a",
"platform",
"bucket",
"and",
"sets",
"b",
".",
"ID",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/bolt/bucket.go#L305-L359 |
12,898 | influxdata/platform | id.go | UnmarshalJSON | func (i *ID) UnmarshalJSON(b []byte) error {
if b[0] == '"' {
b = b[1:]
}
if b[len(b)-1] == '"' {
b = b[:len(b)-1]
}
return i.Decode(b)
} | go | func (i *ID) UnmarshalJSON(b []byte) error {
if b[0] == '"' {
b = b[1:]
}
if b[len(b)-1] == '"' {
b = b[:len(b)-1]
}
return i.Decode(b)
} | [
"func",
"(",
"i",
"*",
"ID",
")",
"UnmarshalJSON",
"(",
"b",
"[",
"]",
"byte",
")",
"error",
"{",
"if",
"b",
"[",
"0",
"]",
"==",
"'\"'",
"{",
"b",
"=",
"b",
"[",
"1",
":",
"]",
"\n",
"}",
"\n\n",
"if",
"b",
"[",
"len",
"(",
"b",
")",
"-",
"1",
"]",
"==",
"'\"'",
"{",
"b",
"=",
"b",
"[",
":",
"len",
"(",
"b",
")",
"-",
"1",
"]",
"\n",
"}",
"\n\n",
"return",
"i",
".",
"Decode",
"(",
"b",
")",
"\n",
"}"
] | // UnmarshalJSON implements JSON unmarshaller for IDs. | [
"UnmarshalJSON",
"implements",
"JSON",
"unmarshaller",
"for",
"IDs",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/id.go#L123-L133 |
12,899 | influxdata/platform | id.go | MarshalJSON | func (i ID) MarshalJSON() ([]byte, error) {
enc, err := i.Encode()
if err != nil {
return nil, err
}
return json.Marshal(string(enc))
} | go | func (i ID) MarshalJSON() ([]byte, error) {
enc, err := i.Encode()
if err != nil {
return nil, err
}
return json.Marshal(string(enc))
} | [
"func",
"(",
"i",
"ID",
")",
"MarshalJSON",
"(",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"enc",
",",
"err",
":=",
"i",
".",
"Encode",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"json",
".",
"Marshal",
"(",
"string",
"(",
"enc",
")",
")",
"\n",
"}"
] | // MarshalJSON implements JSON marshaller for IDs. | [
"MarshalJSON",
"implements",
"JSON",
"marshaller",
"for",
"IDs",
"."
] | d500d3cf55899337bc03259b46c58bae9c06f1eb | https://github.com/influxdata/platform/blob/d500d3cf55899337bc03259b46c58bae9c06f1eb/id.go#L136-L142 |