element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
function | openshift/openshift-tests-private | d9b7a6fc-96bb-4f8c-93f2-d32b1344cb07 | newLokiClient | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func newLokiClient(routeAddress string) *lokiClient {
client := &lokiClient{}
client.address = routeAddress
client.retries = 5
client.quiet = true
return client
} | logging | ||||
function | openshift/openshift-tests-private | 17ddf4e3-acd0-477d-a326-178c34aa0930 | retry | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) retry(retry int) *lokiClient {
nc := *c
nc.retries = retry
return &nc
} | logging | ||||
function | openshift/openshift-tests-private | ba8c89b8-b1e5-4068-8921-6a76b7b82afa | withToken | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) withToken(bearerToken string) *lokiClient {
nc := *c
nc.bearerToken = bearerToken
return &nc
} | logging | ||||
function | openshift/openshift-tests-private | 13238890-ff2a-4303-81ee-46e3ef911e01 | withBasicAuth | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) withBasicAuth(username string, password string) *lokiClient {
nc := *c
nc.username = username
nc.password = password
return &nc
} | logging | ||||
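The helpers above follow a copy-on-write builder pattern: retry, withToken and withBasicAuth each copy the receiver and return a new *lokiClient, so configurations can be chained without mutating the original client. A minimal sketch of how they chain together, mirroring the usage in the otlp tests later in this section (routeAddress and bearerToken are placeholders here):

// Sketch only: routeAddress and bearerToken are placeholder values; in the
// tests later in this section they come from the getRouteAddress and
// getSAToken helpers.
lc := newLokiClient(routeAddress).withToken(bearerToken).retry(5)
// Poll (up to 5 minutes) until logs labelled log_type=application are queryable.
lc.waitForLogsAppearByKey("application", "log_type", "application")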
function | openshift/openshift-tests-private | 588ac9d5-3751-49ca-9ec0-04f7f60ff357 | getHTTPRequestHeader | ['"encoding/base64"', '"fmt"', '"net/http"', '"os"', '"strings"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) getHTTPRequestHeader() (http.Header, error) {
h := make(http.Header)
if c.username != "" && c.password != "" {
h.Set(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(c.username+":"+c.password)),
)
}
h.Set("User-Agent", "loki-logcli")
if c.orgID != "" {
h.Set("X-Scope-OrgID", c.orgID)
}
if c.queryTags != "" {
h.Set("X-Query-Tags", c.queryTags)
}
if (c.username != "" || c.password != "") && (len(c.bearerToken) > 0 || len(c.bearerTokenFile) > 0) {
return nil, fmt.Errorf("at most one of HTTP basic auth (username/password), bearer-token & bearer-token-file is allowed to be configured")
}
if len(c.bearerToken) > 0 && len(c.bearerTokenFile) > 0 {
return nil, fmt.Errorf("at most one of the options bearer-token & bearer-token-file is allowed to be configured")
}
if c.bearerToken != "" {
h.Set("Authorization", "Bearer "+c.bearerToken)
}
if c.bearerTokenFile != "" {
b, err := os.ReadFile(c.bearerTokenFile)
if err != nil {
return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", c.bearerTokenFile, err)
}
bearerToken := strings.TrimSpace(string(b))
h.Set("Authorization", "Bearer "+bearerToken)
}
return h, nil
} | logging | |||
function | openshift/openshift-tests-private | 10ee892d-5a7f-4ab7-9a00-aef66382ba4c | doRequest | ['"encoding/json"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) doRequest(path, query string, out interface{}) error {
h, err := c.getHTTPRequestHeader()
if err != nil {
return err
}
resp, err := doHTTPRequest(h, c.address, path, query, "GET", c.quiet, c.retries, nil, 200)
if err != nil {
return err
}
return json.Unmarshal(resp, out)
} | logging | |||
function | openshift/openshift-tests-private | 2f8b1569-94f7-41f6-a02d-ac2337732765 | doQuery | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) doQuery(path string, query string) (*lokiQueryResponse, error) {
var err error
var r lokiQueryResponse
if err = c.doRequest(path, query, &r); err != nil {
return nil, err
}
return &r, nil
} | logging | ||||
function | openshift/openshift-tests-private | b401d2d7-d3da-4bae-a1c9-3e0eb69d54f3 | query | ['"time"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) query(tenant string, queryStr string, limit int, forward bool, time time.Time) (*lokiQueryResponse, error) {
direction := func() string {
if forward {
return "FORWARD"
}
return "BACKWARD"
}
qsb := newQueryStringBuilder()
qsb.setString("query", queryStr)
qsb.setInt("limit", int64(limit))
qsb.setInt("time", time.UnixNano())
qsb.setString("direction", direction())
var logPath string
if len(tenant) > 0 {
logPath = apiPath + tenant + queryRangePath
} else {
logPath = queryRangePath
}
return c.doQuery(logPath, qsb.encode())
} | logging | |||
function | openshift/openshift-tests-private | 31d2347a-dc2f-44e8-92d4-ae62d166385a | queryRange | ['"time"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) queryRange(tenant string, queryStr string, limit int, start, end time.Time, forward bool) (*lokiQueryResponse, error) {
direction := func() string {
if forward {
return "FORWARD"
}
return "BACKWARD"
}
params := newQueryStringBuilder()
params.setString("query", queryStr)
params.setInt32("limit", limit)
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
params.setString("direction", direction())
var logPath string
if len(tenant) > 0 {
logPath = apiPath + tenant + queryRangePath
} else {
logPath = queryRangePath
}
return c.doQuery(logPath, params.encode())
} | logging | |||
function | openshift/openshift-tests-private | 8fe25391-2b2a-46fd-8f66-46a739dadfd2 | searchLogsInLoki | ['"time"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) searchLogsInLoki(tenant, query string) (*lokiQueryResponse, error) {
res, err := c.queryRange(tenant, query, 5, time.Now().Add(time.Duration(-1)*time.Hour), time.Now(), false)
return res, err
} | logging | |||
function | openshift/openshift-tests-private | 39045001-5ace-44f1-9c70-50222916da73 | waitForLogsAppearByQuery | ['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) waitForLogsAppearByQuery(tenant, query string) error {
return wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := c.searchLogsInLoki(tenant, query)
if err != nil {
e2e.Logf("\ngot err when searching logs: %v, retrying...\n", err)
return false, nil
}
if len(logs.Data.Result) > 0 {
e2e.Logf(`find logs by %s`, query)
return true, nil
}
return false, nil
})
} | logging | |||
function | openshift/openshift-tests-private | 583150c1-2677-4455-b4ef-58da52bdcf76 | searchByKey | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) searchByKey(tenant, key, value string) (*lokiQueryResponse, error) {
res, err := c.searchLogsInLoki(tenant, "{"+key+"=\""+value+"\"}")
return res, err
} | logging | ||||
function | openshift/openshift-tests-private | b604d73e-c25d-458f-a733-7a3a772b789a | waitForLogsAppearByKey | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) waitForLogsAppearByKey(tenant, key, value string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := c.searchByKey(tenant, key, value)
if err != nil {
e2e.Logf("\ngot err when searching logs: %v, retrying...\n", err)
return false, nil
}
if len(logs.Data.Result) > 0 {
e2e.Logf(`find logs by {%s="%s"}`, key, value)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf(`can't find logs by {%s="%s"} in last 5 minutes`, key, value))
} | logging | |||
function | openshift/openshift-tests-private | c883a5b4-2249-4503-9118-cb8c21dc82a0 | searchByNamespace | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) searchByNamespace(tenant, projectName string) (*lokiQueryResponse, error) {
res, err := c.searchLogsInLoki(tenant, "{kubernetes_namespace_name=\""+projectName+"\"}")
return res, err
} | logging | ||||
function | openshift/openshift-tests-private | 601672dd-6543-4ee5-b295-2692c6aba6f0 | waitForLogsAppearByProject | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) waitForLogsAppearByProject(tenant, projectName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := c.searchByNamespace(tenant, projectName)
if err != nil {
e2e.Logf("\ngot err when searching logs: %v, retrying...\n", err)
return false, nil
}
if len(logs.Data.Result) > 0 {
e2e.Logf("find logs from %s project", projectName)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't find logs from %s project in last 5 minutes", projectName))
} | logging | |||
function | openshift/openshift-tests-private | 94e839a2-85bf-4971-bce1-0e2c1eaced87 | extractLogEntities | ['"encoding/json"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func extractLogEntities(lokiQueryResult *lokiQueryResponse) []LogEntity {
var lokiLogs []LogEntity
for _, res := range lokiQueryResult.Data.Result {
for _, value := range res.Values {
lokiLog := LogEntity{}
// only process log data, drop timestamp
json.Unmarshal([]byte(convertInterfaceToArray(value)[1]), &lokiLog)
lokiLogs = append(lokiLogs, lokiLog)
}
}
return lokiLogs
} | logging | ||||
function | openshift/openshift-tests-private | 81aaf324-2efd-422d-9bcb-073883658a87 | listLabelValues | ['"fmt"', '"net/url"', '"time"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) listLabelValues(tenant, name string, start, end time.Time) (*labelResponse, error) {
lpath := fmt.Sprintf(labelValuesPath, url.PathEscape(name))
var labelResponse labelResponse
params := newQueryStringBuilder()
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
path := ""
if len(tenant) > 0 {
path = apiPath + tenant + lpath
} else {
path = lpath
}
if err := c.doRequest(path, params.encode(), &labelResponse); err != nil {
return nil, err
}
return &labelResponse, nil
} | logging | |||
function | openshift/openshift-tests-private | 6f41a929-1c7b-440d-920f-5bba2d599f69 | listLabelNames | ['"time"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) listLabelNames(tenant string, start, end time.Time) (*labelResponse, error) {
var labelResponse labelResponse
params := newQueryStringBuilder()
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
path := ""
if len(tenant) > 0 {
path = apiPath + tenant + labelsPath
} else {
path = labelsPath
}
if err := c.doRequest(path, params.encode(), &labelResponse); err != nil {
return nil, err
}
return &labelResponse, nil
} | logging | |||
function | openshift/openshift-tests-private | 2a82d557-232e-4d9b-bf50-ca4c310f4732 | listLabels | ['"time"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) listLabels(tenant, labelName string) ([]string, error) {
var labelResponse *labelResponse
var err error
start := time.Now().Add(time.Duration(-2) * time.Hour)
end := time.Now()
if len(labelName) > 0 {
labelResponse, err = c.listLabelValues(tenant, labelName, start, end)
} else {
labelResponse, err = c.listLabelNames(tenant, start, end)
}
if err != nil {
return nil, err
}
return labelResponse.Data, nil
} | logging | |||
function | openshift/openshift-tests-private | e7206dc3-78a7-4d40-88d6-4f6b77ade7de | queryRules | ['"net/url"', '"strings"'] | ['lokiClient'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (c *lokiClient) queryRules(tenant, ns string) ([]byte, error) {
path := apiPath + tenant + rulesPath
params := url.Values{}
if ns != "" {
params.Add("kubernetes_namespace_name", ns)
}
h, err := c.getHTTPRequestHeader()
if err != nil {
return nil, err
}
resp, err := doHTTPRequest(h, c.address, path, params.Encode(), "GET", c.quiet, c.retries, nil, 200)
if err != nil {
/*
Ignore error "unexpected EOF", adding `h.Add("Accept-Encoding", "identity")` doesn't resolve the error.
This seems to be an issue in lokistack when tenant=application, recording rules are not in the response.
No error when tenant=infrastructure
*/
if strings.Contains(err.Error(), "unexpected EOF") && len(resp) > 0 {
e2e.Logf("got error %s when reading the response, but ignore it", err.Error())
return resp, nil
}
return nil, err
}
return resp, nil
} | logging | |||
function | openshift/openshift-tests-private | d0d5010a-d0f6-486e-a5f4-cdca7468fecf | newQueryStringBuilder | ['"net/url"'] | ['queryStringBuilder'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func newQueryStringBuilder() *queryStringBuilder {
return &queryStringBuilder{
values: url.Values{},
}
} | logging | |||
function | openshift/openshift-tests-private | be2a7ec8-3bcf-4d23-a201-4249694e22e1 | setString | ['queryStringBuilder'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (b *queryStringBuilder) setString(name, value string) {
b.values.Set(name, value)
} | logging | ||||
function | openshift/openshift-tests-private | 2bd0fd3e-b2e4-4207-9932-0d142a58be4b | setInt | ['"strconv"'] | ['queryStringBuilder'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (b *queryStringBuilder) setInt(name string, value int64) {
b.setString(name, strconv.FormatInt(value, 10))
} | logging | |||
function | openshift/openshift-tests-private | d291a5e6-5662-48e4-88cc-01b98e905093 | setInt32 | ['"strconv"'] | ['queryStringBuilder'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (b *queryStringBuilder) setInt32(name string, value int) {
b.setString(name, strconv.Itoa(value))
} | logging | |||
function | openshift/openshift-tests-private | f0ce2e9a-3791-45fd-896b-95d556f99bdd | encode | ['queryStringBuilder'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (b *queryStringBuilder) encode() string {
return b.values.Encode()
} | logging | ||||
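encode defers to url.Values.Encode, which sorts keys and percent-escapes values. A minimal, self-contained sketch (illustrative timestamps only) of the query string a queryRange call ends up sending:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	// Same parameters queryRange sets via the queryStringBuilder helpers.
	v := url.Values{}
	v.Set("query", `{log_type="application"}`)
	v.Set("limit", strconv.Itoa(5))
	v.Set("start", strconv.FormatInt(1700000000000000000, 10))
	v.Set("end", strconv.FormatInt(1700003600000000000, 10))
	v.Set("direction", "BACKWARD")
	// Keys come out sorted and escaped, e.g.
	// direction=BACKWARD&end=...&limit=5&query=%7Blog_type%3D%22application%22%7D&start=...
	fmt.Println(v.Encode())
}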
function | openshift/openshift-tests-private | 01916b52-50db-4ba1-aec4-20533df2872f | compareClusterResources | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func compareClusterResources(oc *exutil.CLI, cpu, memory string) bool {
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
var remainingCPU, remainingMemory int64
re := exutil.GetRemainingResourcesNodesMap(oc, nodes)
for _, node := range nodes {
remainingCPU += re[node.Name].CPU
remainingMemory += re[node.Name].Memory
}
requiredCPU, _ := k8sresource.ParseQuantity(cpu)
requiredMemory, _ := k8sresource.ParseQuantity(memory)
e2e.Logf("the required cpu is: %d, and the required memory is: %d", requiredCPU.MilliValue(), requiredMemory.MilliValue())
e2e.Logf("the remaining cpu is: %d, and the remaning memory is: %d", remainingCPU, remainingMemory)
return remainingCPU > requiredCPU.MilliValue() && remainingMemory > requiredMemory.MilliValue()
} | logging | |||||
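For context on the comparison above, a small self-contained sketch of what the standard k8s.io/apimachinery/pkg/api/resource package yields for typical CPU and memory strings (the helper above uses ParseQuantity; MustParse is used here only to keep the sketch short):

package main

import (
	"fmt"

	k8sresource "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := k8sresource.MustParse("500m") // half a core
	mem := k8sresource.MustParse("16Gi") // 16 GiB

	// MilliValue scales the parsed quantity by 1000.
	fmt.Println(cpu.MilliValue()) // 500
	fmt.Println(mem.MilliValue()) // 17179869184000
}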
function | openshift/openshift-tests-private | fb1f299e-00c9-48a9-a382-3c176a5cf734 | validateInfraForLoki | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func validateInfraForLoki(oc *exutil.CLI, supportedPlatforms ...string) bool {
currentPlatform := exutil.CheckPlatform(oc)
if len(supportedPlatforms) > 0 {
return contain(supportedPlatforms, currentPlatform)
}
return true
} | logging | |||||
function | openshift/openshift-tests-private | 200d0c24-3400-4d15-8c06-1e1e882ba56c | validateInfraAndResourcesForLoki | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func validateInfraAndResourcesForLoki(oc *exutil.CLI, reqMemory, reqCPU string, supportedPlatforms ...string) bool {
return validateInfraForLoki(oc, supportedPlatforms...) && compareClusterResources(oc, reqCPU, reqMemory)
} | logging | |||||
function | openshift/openshift-tests-private | 1ae35d25-0fab-45bd-b700-10f03b13b68d | deployLoki | ['k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | ['externalLoki'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l externalLoki) deployLoki(oc *exutil.CLI) {
//Create configmap for Loki
cmTemplate := exutil.FixturePath("testdata", "logging", "external-log-stores", "loki", "loki-configmap.yaml")
lokiCM := resource{"configmap", l.name, l.namespace}
err := lokiCM.applyFromTemplate(oc, "-n", l.namespace, "-f", cmTemplate, "-p", "LOKINAMESPACE="+l.namespace, "-p", "LOKICMNAME="+l.name)
o.Expect(err).NotTo(o.HaveOccurred())
//Create Deployment for Loki
deployTemplate := exutil.FixturePath("testdata", "logging", "external-log-stores", "loki", "loki-deployment.yaml")
lokiDeploy := resource{"deployment", l.name, l.namespace}
err = lokiDeploy.applyFromTemplate(oc, "-n", l.namespace, "-f", deployTemplate, "-p", "LOKISERVERNAME="+l.name, "-p", "LOKINAMESPACE="+l.namespace, "-p", "LOKICMNAME="+l.name)
o.Expect(err).NotTo(o.HaveOccurred())
//Expose Loki as a Service
WaitForDeploymentPodsToBeReady(oc, l.namespace, l.name)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", l.namespace, "deployment", l.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// expose loki route
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", l.namespace, "svc", l.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||
function | openshift/openshift-tests-private | 2cafcde7-8c2a-40e8-b7b7-378dedd28c39 | remove | ['k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | ['externalLoki'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l externalLoki) remove(oc *exutil.CLI) {
resource{"configmap", l.name, l.namespace}.clear(oc)
resource{"deployment", l.name, l.namespace}.clear(oc)
resource{"svc", l.name, l.namespace}.clear(oc)
resource{"route", l.name, l.namespace}.clear(oc)
} | logging | |||
function | openshift/openshift-tests-private | e07f66c4-b6a3-41f0-b1d7-8b90fec8fabd | deployMinIO | ['"context"', '"os"', 'k8sresource "k8s.io/apimachinery/pkg/api/resource"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func deployMinIO(oc *exutil.CLI) {
// create namespace
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), minioNS, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// create secret
_, err = oc.AdminKubeClient().CoreV1().Secrets(minioNS).Get(context.Background(), minioSecret, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", minioSecret, "-n", minioNS, "--from-literal=access_key_id="+getRandomString(), "--from-literal=secret_access_key=passwOOrd"+getRandomString()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// deploy minIO
deployTemplate := exutil.FixturePath("testdata", "logging", "minIO", "deploy.yaml")
deployFile, err := processTemplate(oc, "-n", minioNS, "-f", deployTemplate, "-p", "NAMESPACE="+minioNS, "NAME=minio", "SECRET_NAME="+minioSecret)
defer os.Remove(deployFile)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("apply").Args("-f", deployFile, "-n", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait for minio to be ready
for _, rs := range []string{"deployment", "svc", "route"} {
resource{rs, "minio", minioNS}.WaitForResourceToAppear(oc)
}
WaitForDeploymentPodsToBeReady(oc, minioNS, "minio")
} | logging | ||||
function | openshift/openshift-tests-private | 91e82357-ecc5-4418-85c7-c83a957d06f1 | queryAlertManagerForActiveAlerts | ['"context"', '"encoding/json"', '"fmt"', '"net/http"', '"net/url"', '"strings"', '"time"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"', 'g "github.com/onsi/ginkgo/v2"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func queryAlertManagerForActiveAlerts(oc *exutil.CLI, token string, isUserWorkloadAM bool, alertName string, timeInMinutes int) {
var err error
if !isUserWorkloadAM {
alertManagerRoute := getRouteAddress(oc, "openshift-monitoring", "alertmanager-main")
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add("Authorization", "Bearer "+token)
params := url.Values{}
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
resp, err := doHTTPRequest(h, "https://"+alertManagerRoute, "/api/v2/alerts", params.Encode(), "GET", true, 5, nil, 200)
if err != nil {
return false, err
}
if strings.Contains(string(resp), alertName) {
return true, nil
}
e2e.Logf("Waiting for alert %s to be in Firing state", alertName)
return false, nil
})
} else {
userWorkloadAlertManagerURL := "https://alertmanager-user-workload.openshift-user-workload-monitoring.svc:9095/api/v2/alerts"
authBearer := " \"Authorization: Bearer " + token + "\""
cmd := "curl -k -H" + authBearer + " " + userWorkloadAlertManagerURL
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
alerts, err := exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "/bin/sh", "-x", "-c", cmd)
if err != nil {
return false, err
}
if strings.Contains(string(alerts), alertName) {
return true, nil
}
e2e.Logf("Waiting for alert %s to be in Firing state", alertName)
return false, nil
})
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Alert %s is not firing after %d minutes", alertName, timeInMinutes))
} | logging | ||||
function | openshift/openshift-tests-private | 33629419-8e2f-41c9-9f79-84075419e374 | enableUserWorkloadMonitoringForLogging | ['"github.com/aws/aws-sdk-go-v2/config"', 'k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func enableUserWorkloadMonitoringForLogging(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
clusterMonitoringConfigPath := exutil.FixturePath("testdata", "logging", "loki-log-alerts", "cluster-monitoring-config.yaml")
clusterMonitoringConfig := resource{"configmap", "cluster-monitoring-config", "openshift-monitoring"}
err := clusterMonitoringConfig.applyFromTemplate(oc, "-n", clusterMonitoringConfig.namespace, "-f", clusterMonitoringConfigPath)
o.Expect(err).NotTo(o.HaveOccurred())
oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", "user-workload-monitoring-config", "-n", "openshift-user-workload-monitoring", "--ignore-not-found").Execute()
userWorkloadMConfigPath := exutil.FixturePath("testdata", "logging", "loki-log-alerts", "user-workload-monitoring-config.yaml")
userworkloadConfig := resource{"configmap", "user-workload-monitoring-config", "openshift-user-workload-monitoring"}
err = userworkloadConfig.applyFromTemplate(oc, "-n", userworkloadConfig.namespace, "-f", userWorkloadMConfigPath)
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | ||||
function | openshift/openshift-tests-private | ae776cd6-dbc5-4e76-82de-2f1cea703eca | deleteUserWorkloadManifests | ['"github.com/aws/aws-sdk-go-v2/config"', 'k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func deleteUserWorkloadManifests(oc *exutil.CLI) {
clusterMonitoringConfig := resource{"configmap", "cluster-monitoring-config", "openshift-monitoring"}
clusterMonitoringConfig.clear(oc)
userworkloadConfig := resource{"configmap", "user-workload-monitoring-config", "openshift-user-workload-monitoring"}
userworkloadConfig.clear(oc)
} | logging | ||||
function | openshift/openshift-tests-private | 63e3c552-ef41-49bb-a734-1fe6cc496be6 | validateCredentialsRequestGenerationOnSTS | ['"fmt"', '"cloud.google.com/go/storage"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func validateCredentialsRequestGenerationOnSTS(oc *exutil.CLI, lokiStackName, lokiNamespace string) {
exutil.By("Validate that Loki Operator creates a CredentialsRequest object")
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", lokiStackName, "-n", lokiNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cloudTokenPath, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", lokiStackName, "-n", lokiNamespace, `-o=jsonpath={.spec.cloudTokenPath}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cloudTokenPath).Should(o.Equal("/var/run/secrets/storage/serviceaccount/token"))
serviceAccountNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", lokiStackName, "-n", lokiNamespace, `-o=jsonpath={.spec.serviceAccountNames}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(serviceAccountNames).Should(o.Equal(fmt.Sprintf(`["%s","%s-ruler"]`, lokiStackName, lokiStackName)))
} | logging | ||||
function | openshift/openshift-tests-private | 10f4cbb4-eb82-445d-937d-4a188a9360d5 | validatesIfLogsArePushedToGCSBucket | ['"context"', '"strings"', '"time"', '"cloud.google.com/go/storage"', '"google.golang.org/api/iterator"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func validatesIfLogsArePushedToGCSBucket(bucketName string, tenants []string) {
// Create a new GCS client
client, err := storage.NewClient(context.Background())
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create GCS client")
// Get a reference to the bucket
bucket := client.Bucket(bucketName)
// Create a query to list objects in the bucket
query := &storage.Query{}
// List objects in the bucket and check for tenant object
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
itr := bucket.Objects(context.Background(), query)
for {
objAttrs, err := itr.Next()
if err == iterator.Done {
break
}
if err != nil {
return false, err
}
for _, tenantName := range tenants {
if strings.Contains(objAttrs.Name, tenantName) {
e2e.Logf("Logs %s found under the bucket: %s", objAttrs.Name, bucketName)
return true, nil
}
}
}
e2e.Logf("Waiting for data to be available under bucket: %s", bucketName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Timed out...No data is available under the bucket: "+bucketName)
} | logging | ||||
function | openshift/openshift-tests-private | 9d0f88ca-bf74-4ede-8ca6-e6636b358da7 | validateExternalObjectStorageForLogs | ['"context"', '"os"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) validateExternalObjectStorageForLogs(oc *exutil.CLI, tenants []string) {
switch l.storageType {
case "s3":
{
// For Amazon S3
var cfg aws.Config
if exutil.IsSTSCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg = readDefaultSDKExternalConfigurations(context.TODO(), region)
} else {
cred := getAWSCredentialFromCluster(oc)
cfg = generateS3Config(cred)
}
s3Client := newS3Client(cfg)
validatesIfLogsArePushedToS3Bucket(s3Client, l.bucketName, tenants)
}
case "azure":
{
// For Azure Container Storage
var accountName string
var err error
_, storageAccountURISuffix := getStorageAccountURISuffixAndEnvForAzure(oc)
if exutil.IsSTSCluster(oc) {
accountName = os.Getenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT")
} else {
_, err = exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
accountName, _, err = exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
}
validatesIfLogsArePushedToAzureContainer(storageAccountURISuffix, accountName, l.bucketName, tenants)
}
case "gcs":
{
// For Google Cloud Storage Bucket
validatesIfLogsArePushedToGCSBucket(l.bucketName, tenants)
}
case "swift":
{
e2e.Logf("Currently swift is not supported")
// TODO swift code here
}
default:
{
e2e.Logf("Currently minio is not supported")
// TODO minio code here
}
}
} | logging | |||
function | openshift/openshift-tests-private | d0af5f89-6ece-4365-990f-b4a2c0a4d199 | createLokiClusterRolesForReadAccess | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createLokiClusterRolesForReadAccess(oc *exutil.CLI) {
rbacFile := exutil.FixturePath("testdata", "logging", "lokistack", "fine-grained-access-roles.yaml")
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", rbacFile).Output()
o.Expect(err).NotTo(o.HaveOccurred(), msg)
} | logging | |||||
function | openshift/openshift-tests-private | 4bd320f4-181d-4cdc-9daa-e66e4d7a7dc4 | deleteLokiClusterRolesForReadAccess | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func deleteLokiClusterRolesForReadAccess(oc *exutil.CLI) {
roles := []string{"cluster-logging-application-view", "cluster-logging-infrastructure-view", "cluster-logging-audit-view"}
for _, role := range roles {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrole", role).Output()
if err != nil {
e2e.Logf("Failed to delete Loki RBAC role '%s': %s", role, msg)
}
}
} | logging | |||||
function | openshift/openshift-tests-private | 0484e36f-5f4b-4d4f-8392-30247f69caba | patchLokiOperatorOnGCPSTSforCCO | ['"fmt"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func patchLokiOperatorOnGCPSTSforCCO(oc *exutil.CLI, namespace string, projectNumber string, poolID string, serviceAccount string) {
patchConfig := `{
"spec": {
"config": {
"env": [
{
"name": "PROJECT_NUMBER",
"value": "%s"
},
{
"name": "POOL_ID",
"value": "%s"
},
{
"name": "PROVIDER_ID",
"value": "%s"
},
{
"name": "SERVICE_ACCOUNT_EMAIL",
"value": "%s"
}
]
}
}
}`
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", namespace, "-p", fmt.Sprintf(patchConfig, projectNumber, poolID, poolID, serviceAccount), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
} | logging | ||||
test | openshift/openshift-tests-private | 1e915dee-58d4-4c7e-9682-7afd2e0e7457 | otlp | import (
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/otlp.go | package logging
import (
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease otlp output testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-otlp", exutil.KubeConfigPath())
loggingBaseDir string
)
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
exutil.By("deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("Author:qitang-CPaasrunOnly-Critical-68961-Forward logs to OTEL collector", func() {
var (
expectedCSV string
operatorInstalled bool
)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "openshift-operators", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
csvs := strings.Split(output, " ")
for _, csv := range csvs {
if strings.Contains(csv, "opentelemetry-operator.v") {
expectedCSV = csv
break
}
}
if len(expectedCSV) > 0 {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "openshift-operators", expectedCSV, "-ojsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "Succeeded" {
operatorInstalled = true
}
}
if !operatorInstalled {
exutil.By("Deploy opentelemetry-operator")
otelOperator := SubscriptionObjects{
OperatorName: "opentelemetry-operator",
Namespace: "openshift-opentelemetry-operator",
PackageName: "opentelemetry-product",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: CatalogSourceObjects{
Channel: "stable",
},
OperatorPodLabel: "app.kubernetes.io/name=opentelemetry-operator",
}
defer otelOperator.uninstallOperator(oc)
otelOperator.SubscribeOperator(oc)
}
exutil.By("Deploy OTEL collector")
otelTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "otel", "otel-collector.yaml")
otel := resource{
kind: "opentelemetrycollectors",
name: "otel",
namespace: oc.Namespace(),
}
defer otel.clear(oc)
err = otel.applyFromTemplate(oc, "-f", otelTemplate, "-n", otel.namespace, "-p", "NAMESPACE="+otel.namespace, "-p", "NAME="+otel.name)
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, otel.namespace, "app.kubernetes.io/component=opentelemetry-collector")
svc := "http://" + otel.name + "-collector." + otel.namespace + ".svc:4318"
exutil.By("Deploy clusterlogforwarder")
clf := clusterlogforwarder{
name: "otlp-68961",
namespace: oc.Namespace(),
serviceAccountName: "logcollector-68961",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "otlp.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
defer clf.delete(oc)
clf.create(oc, "URL="+svc)
//exclude logs from project otel.namespace because the OTEL collector writes received logs to stdout
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"excludes": [{"namespace":"` + otel.namespace + `"}]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app", "infrastructure", "audit"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("check collector configurations")
expectedConfigs := []string{
"compression = \"gzip\"",
`[sinks.output_otlp.batch]
max_bytes = 10000000`,
`[sinks.output_otlp.buffer]
type = "disk"
when_full = "block"
max_size = 268435488`,
`[sinks.output_otlp.request]
retry_initial_backoff_secs = 5
retry_max_duration_secs = 20`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
exutil.By("check log data in OTEL collector")
time.Sleep(1 * time.Minute)
otelCollector, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", otel.namespace, "pod", "-l", "app.kubernetes.io/component=opentelemetry-collector", "-ojsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
logs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", otel.namespace, otelCollector, "--tail=60").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(logs, "LogRecord")).Should(o.BeTrue())
})
//author: [email protected]
g.It("Author:qitang-CPaasrunOnly-ConnectedOnly-High-76728-Add stream info to data model OTEL[Serial][Slow]", func() {
s := getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ := getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
exutil.By("deploy Loki Operator")
LO.SubscribeOperator(oc)
multilineLogs := []string{
javaExc, complexJavaExc, nestedJavaExc,
goExc, goOnGaeExc, goSignalExc, goHTTP,
rubyExc, railsExc,
clientJsExc, nodeJsExc, v8JsExc,
csharpAsyncExc, csharpNestedExc, csharpExc,
pythonExc,
phpOnGaeExc, phpExc,
dartAbstractClassErr,
dartArgumentErr,
dartAssertionErr,
dartAsyncErr,
dartConcurrentModificationErr,
dartDivideByZeroErr,
dartErr,
dartTypeErr,
dartExc,
dartUnsupportedErr,
dartUnimplementedErr,
dartOOMErr,
dartRangeErr,
dartReadStaticErr,
dartStackOverflowErr,
dartFallthroughErr,
dartFormatErr,
dartFormatWithCodeErr,
dartNoMethodErr,
dartNoMethodGlobalErr,
}
exutil.By("Deploying LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-76727",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76727",
storageClass: sc,
bucketName: "logging-loki-76727-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
lokiGatewaySVC := "https://" + ls.name + "-gateway-http." + ls.namespace + ".svc:8080"
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "otlp-76727",
namespace: loggingNS,
serviceAccountName: "logcollector-76727",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "otlp-lokistack.yaml"),
secretName: "lokistack-secret-76727",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "URL="+lokiGatewaySVC)
exutil.By("create some pods to generate multiline errors")
multilineLogFile := filepath.Join(loggingBaseDir, "generatelog", "multiline-error-log.yaml")
ioStreams := []string{"stdout", "stderr"}
for _, ioStream := range ioStreams {
ns := "multiline-log-" + ioStream + "-76727"
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "deploy/multiline-log", "cm/multiline-log").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", ns, "-f", multilineLogFile, "-p", "OUT_STREAM="+ioStream, "-p", "RATE=60.00").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("checking app, infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
for _, ioStream := range ioStreams {
lc.waitForLogsAppearByProject("application", "multiline-log-"+ioStream+"-76727")
dataInLoki, _ := lc.searchByNamespace("application", "multiline-log-"+ioStream+"-76727")
for _, log := range dataInLoki.Data.Result {
o.Expect(log.Stream.LogIOStream == ioStream).Should(o.BeTrue(), `iostream is wrong, expected: `+ioStream+`, got: `+log.Stream.LogIOStream)
for _, value := range log.Values {
message := convertInterfaceToArray(value)
o.Expect(containSubstring(multilineLogs, message[1])).Should(o.BeTrue(), fmt.Sprintf("Parse multiline error failed, iostream: %s, message: \n%s", ioStream, message[1]))
}
}
}
})
g.It("Author:qitang-CPaasrunOnly-Medium-75351-Tech preview annotation should be enabled when forwarding logs via otlp", func() {
exutil.By("Deploy collector pods")
clf := clusterlogforwarder{
name: "otlp-68961",
namespace: oc.Namespace(),
serviceAccountName: "logcollector-68961",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "otlp.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
defer clf.delete(oc)
clf.create(oc, "URL=http://fake-otel-collector."+clf.namespace+".svc:4318")
exutil.By("remove the tech-preview annotation from CLF")
patch := `[{"op": "remove", "path": "/metadata/annotations"}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, `output "otlp" requires a valid tech-preview annotation`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
exutil.By("Add back the annotations, then set the value to disabled")
clf.update(oc, "", `{"metadata": {"annotations": {"observability.openshift.io/tech-preview-otlp-output": "enabled"}}}`, "--type=merge")
checkResource(oc, false, false, `output "otlp" requires a valid tech-preview annotation`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
clf.update(oc, "", `{"metadata": {"annotations": {"observability.openshift.io/tech-preview-otlp-output": "disabled"}}}`, "--type=merge")
checkResource(oc, true, false, `output "otlp" requires a valid tech-preview annotation`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
})
})
| package logging | ||||
test case | openshift/openshift-tests-private | fedc1bce-7fca-4f60-8360-f63bd6fbf646 | Author:qitang-CPaasrunOnly-Critical-68961-Forward logs to OTEL collector | ['"path/filepath"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/logging/otlp.go | g.It("Author:qitang-CPaasrunOnly-Critical-68961-Forward logs to OTEL collector", func() {
var (
expectedCSV string
operatorInstalled bool
)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "openshift-operators", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
csvs := strings.Split(output, " ")
for _, csv := range csvs {
if strings.Contains(csv, "opentelemetry-operator.v") {
expectedCSV = csv
break
}
}
if len(expectedCSV) > 0 {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "openshift-operators", expectedCSV, "-ojsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "Succeeded" {
operatorInstalled = true
}
}
if !operatorInstalled {
exutil.By("Deploy opentelemetry-operator")
otelOperator := SubscriptionObjects{
OperatorName: "opentelemetry-operator",
Namespace: "openshift-opentelemetry-operator",
PackageName: "opentelemetry-product",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: CatalogSourceObjects{
Channel: "stable",
},
OperatorPodLabel: "app.kubernetes.io/name=opentelemetry-operator",
}
defer otelOperator.uninstallOperator(oc)
otelOperator.SubscribeOperator(oc)
}
exutil.By("Deploy OTEL collector")
otelTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "otel", "otel-collector.yaml")
otel := resource{
kind: "opentelemetrycollectors",
name: "otel",
namespace: oc.Namespace(),
}
defer otel.clear(oc)
err = otel.applyFromTemplate(oc, "-f", otelTemplate, "-n", otel.namespace, "-p", "NAMESPACE="+otel.namespace, "-p", "NAME="+otel.name)
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, otel.namespace, "app.kubernetes.io/component=opentelemetry-collector")
svc := "http://" + otel.name + "-collector." + otel.namespace + ".svc:4318"
exutil.By("Deploy clusterlogforwarder")
clf := clusterlogforwarder{
name: "otlp-68961",
namespace: oc.Namespace(),
serviceAccountName: "logcollector-68961",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "otlp.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
defer clf.delete(oc)
clf.create(oc, "URL="+svc)
//exclude logs from project otel.namespace because the OTEL collector writes received logs to stdout
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"excludes": [{"namespace":"` + otel.namespace + `"}]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app", "infrastructure", "audit"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("check collector configurations")
expectedConfigs := []string{
"compression = \"gzip\"",
`[sinks.output_otlp.batch]
max_bytes = 10000000`,
`[sinks.output_otlp.buffer]
type = "disk"
when_full = "block"
max_size = 268435488`,
`[sinks.output_otlp.request]
retry_initial_backoff_secs = 5
retry_max_duration_secs = 20`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
exutil.By("check log data in OTEL collector")
time.Sleep(1 * time.Minute)
otelCollector, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", otel.namespace, "pod", "-l", "app.kubernetes.io/component=opentelemetry-collector", "-ojsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
logs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", otel.namespace, otelCollector, "--tail=60").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(logs, "LogRecord")).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | c1014719-0c4a-441d-9565-55ffbd38f35b | Author:qitang-CPaasrunOnly-ConnectedOnly-High-76728-Add stream info to data model OTEL[Serial][Slow] | ['"fmt"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/otlp.go | g.It("Author:qitang-CPaasrunOnly-ConnectedOnly-High-76728-Add stream info to data model OTEL[Serial][Slow]", func() {
s := getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ := getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
exutil.By("deploy Loki Operator")
LO.SubscribeOperator(oc)
multilineLogs := []string{
javaExc, complexJavaExc, nestedJavaExc,
goExc, goOnGaeExc, goSignalExc, goHTTP,
rubyExc, railsExc,
clientJsExc, nodeJsExc, v8JsExc,
csharpAsyncExc, csharpNestedExc, csharpExc,
pythonExc,
phpOnGaeExc, phpExc,
dartAbstractClassErr,
dartArgumentErr,
dartAssertionErr,
dartAsyncErr,
dartConcurrentModificationErr,
dartDivideByZeroErr,
dartErr,
dartTypeErr,
dartExc,
dartUnsupportedErr,
dartUnimplementedErr,
dartOOMErr,
dartRangeErr,
dartReadStaticErr,
dartStackOverflowErr,
dartFallthroughErr,
dartFormatErr,
dartFormatWithCodeErr,
dartNoMethodErr,
dartNoMethodGlobalErr,
}
exutil.By("Deploying LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-76727",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76727",
storageClass: sc,
bucketName: "logging-loki-76727-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
lokiGatewaySVC := "https://" + ls.name + "-gateway-http." + ls.namespace + ".svc:8080"
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "otlp-76727",
namespace: loggingNS,
serviceAccountName: "logcollector-76727",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "otlp-lokistack.yaml"),
secretName: "lokistack-secret-76727",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "URL="+lokiGatewaySVC)
exutil.By("create some pods to generate multiline errors")
multilineLogFile := filepath.Join(loggingBaseDir, "generatelog", "multiline-error-log.yaml")
ioStreams := []string{"stdout", "stderr"}
for _, ioStream := range ioStreams {
ns := "multiline-log-" + ioStream + "-76727"
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "deploy/multiline-log", "cm/multiline-log").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", ns, "-f", multilineLogFile, "-p", "OUT_STREAM="+ioStream, "-p", "RATE=60.00").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("checking app, infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
for _, ioStream := range ioStreams {
lc.waitForLogsAppearByProject("application", "multiline-log-"+ioStream+"-76727")
dataInLoki, _ := lc.searchByNamespace("application", "multiline-log-"+ioStream+"-76727")
for _, log := range dataInLoki.Data.Result {
o.Expect(log.Stream.LogIOStream == ioStream).Should(o.BeTrue(), `iostream is wrong, expected: `+ioStream+`, got: `+log.Stream.LogIOStream)
for _, value := range log.Values {
message := convertInterfaceToArray(value)
o.Expect(containSubstring(multilineLogs, message[1])).Should(o.BeTrue(), fmt.Sprintf("Parse multiline error failed, iostream: %s, message: \n%s", ioStream, message[1]))
}
}
}
}) | |||||
test case | openshift/openshift-tests-private | 96d8d752-4956-40e0-b422-17d94b74fa0d | Author:qitang-CPaasrunOnly-Medium-75351-Tech preview annotation should be enabled when forwarding logs via otlp | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/otlp.go | g.It("Author:qitang-CPaasrunOnly-Medium-75351-Tech preview annotation should be enabled when forwarding logs via otlp", func() {
exutil.By("Deploy collector pods")
clf := clusterlogforwarder{
name: "otlp-68961",
namespace: oc.Namespace(),
serviceAccountName: "logcollector-68961",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "otlp.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
defer clf.delete(oc)
clf.create(oc, "URL=http://fake-otel-collector."+clf.namespace+".svc:4318")
exutil.By("remove the tech-preview annotation from CLF")
patch := `[{"op": "remove", "path": "/metadata/annotations"}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, `output "otlp" requires a valid tech-preview annotation`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
exutil.By("Add back the annotations, then set the value to disabled")
clf.update(oc, "", `{"metadata": {"annotations": {"observability.openshift.io/tech-preview-otlp-output": "enabled"}}}`, "--type=merge")
checkResource(oc, false, false, `output "otlp" requires a valid tech-preview annotation`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
clf.update(oc, "", `{"metadata": {"annotations": {"observability.openshift.io/tech-preview-otlp-output": "disabled"}}}`, "--type=merge")
checkResource(oc, true, false, `output "otlp" requires a valid tech-preview annotation`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
}) | |||||
file | openshift/openshift-tests-private | ed307ba7-61ac-49d3-ba5c-b8b70440b652 | types | import (
"encoding/xml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) | github.com/openshift/openshift-tests-private/test/extended/logging/types.go | package logging
import (
"encoding/xml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SearchResult example
/*
{
"took" : 75,
"timed_out" : false,
"_shards" : {
"total" : 14,
"successful" : 14,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : 63767,
"max_score" : 1.0,
"hits" : [
{
"_index" : "app-centos-logtest-000001",
"_type" : "_doc",
"_id" : "ODlhMmYzZDgtMTc4NC00M2Q0LWIyMGQtMThmMGY3NTNlNWYw",
"_score" : 1.0,
"_source" : {
"kubernetes" : {
"container_image_id" : "quay.io/openshifttest/ocp-logtest@sha256:f23bea6f669d125f2f317e3097a0a4da48e8792746db32838725b45efa6c64a4",
"container_name" : "centos-logtest",
"namespace_id" : "c74f42bb-3407-418a-b483-d5f33e08f6a5",
"flat_labels" : [
"run=centos-logtest",
"test=centos-logtest"
],
"host" : "ip-10-0-174-131.us-east-2.compute.internal",
"master_url" : "https://kubernetes.default.svc",
"pod_id" : "242e7eb4-47ca-4708-9993-db0390d18268",
"namespace_labels" : {
"kubernetes_io/metadata_name" : "e2e-test--lg56q"
},
"container_image" : "quay.io/openshifttest/ocp-logtest@sha256:f23bea6f669d125f2f317e3097a0a4da48e8792746db32838725b45efa6c64a4",
"namespace_name" : "e2e-test--lg56q",
"pod_name" : "centos-logtest-vnwjn"
},
"viaq_msg_id" : "ODlhMmYzZDgtMTc4NC00M2Q0LWIyMGQtMThmMGY3NTNlNWYw",
"level" : "unknown",
"message" : "{\"message\": \"MERGE_JSON_LOG=true\", \"level\": \"debug\",\"Layer1\": \"layer1 0\", \"layer2\": {\"name\":\"Layer2 1\", \"tips\":\"Decide by PRESERVE_JSON_LOG\"}, \"StringNumber\":\"10\", \"Number\": 10,\"foo.bar\":\"Dot Item\",\"{foobar}\":\"Brace Item\",\"[foobar]\":\"Bracket Item\", \"foo:bar\":\"Colon Item\",\"foo bar\":\"Space Item\" }",
"docker" : {
"container_id" : "b3b84d9f11cefa8abf335e8257e394414133b853dc65c8bc1d50120fc3f86da5"
},
"hostname" : "ip-10-0-174-131.us-east-2.compute.internal",
"@timestamp" : "2021-07-09T01:57:44.400169+00:00",
"pipeline_metadata" : {
"collector" : {
"received_at" : "2021-07-09T01:57:44.688935+00:00",
"name" : "fluentd",
"inputname" : "fluent-plugin-systemd",
"version" : "1.7.4 1.6.0",
"ipaddr4" : "10.0.174.131"
}
},
"structured" : {
"foo:bar" : "Colon Item",
"foo.bar" : "Dot Item",
"Number" : 10,
"level" : "debug",
"{foobar}" : "Brace Item",
"foo bar" : "Space Item",
"StringNumber" : "10",
"layer2" : {
"name" : "Layer2 1",
"tips" : "Decide by PRESERVE_JSON_LOG"
},
"message" : "MERGE_JSON_LOG=true",
"Layer1" : "layer1 0",
"[foobar]" : "Bracket Item"
}
}
}
]
}
}
*/
type SearchResult struct {
Took int64 `json:"took"`
TimedOut bool `json:"timed_out"`
Shards struct {
Total int64 `json:"total"`
Successful int64 `json:"successful"`
Skipped int64 `json:"skipped"`
Failed int64 `json:"failed"`
} `json:"_shards"`
Hits struct {
Total int64 `json:"total"`
MaxScore float32 `json:"max_score"`
DataHits []struct {
Index string `json:"_index"`
Type string `json:"_type"`
ID string `json:"_id"`
Score float32 `json:"_score"`
Source LogEntity `json:"_source"`
} `json:"hits"`
} `json:"hits"`
Aggregations struct {
LoggingAggregations struct {
DocCount int64 `json:"doc_count,omitempty"`
InnerAggregations struct {
DocCountErrorUpperBound int64 `json:"doc_count_error_upper_bound,omitempty"`
SumOtherDocCount int64 `json:"sum_other_doc_count,omitempty"`
Buckets []struct {
Key string `json:"key,omitempty"`
DocCount int64 `json:"doc_count,omitempty"`
} `json:"buckets,omitempty"`
} `json:"inner_aggregations,omitempty"`
} `json:"logging_aggregations,omitempty"`
} `json:"aggregations,omitempty"`
}
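// Illustrative sketch (kept as a comment so it does not touch this file's
// imports): decoding a raw Elasticsearch _search response body into
// SearchResult with encoding/json. rawBody is a hypothetical []byte holding
// JSON like the example above.
/*
	var result SearchResult
	if err := json.Unmarshal(rawBody, &result); err != nil {
		return err
	}
	for _, hit := range result.Hits.DataHits {
		fmt.Printf("index=%s message=%s\n", hit.Index, hit.Source.Message)
	}
*/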
/*
The aggregation query string must be set as:
{
"aggs" : {
"logging_aggregations": {
"filter": {
"exists": {
"field":"kubernetes"
}
},
"aggs" : {
"inner_aggregations": {
"terms" : {
"field" : "hostname"
}
}
}
}
}
}
AggregationResult example
{
"aggregations": {
"logging_aggregations": {
"doc_count": 13089,
"inner_aggregations": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "ip-10-0-202-141",
"doc_count": 3250
},
{
"key": "ip-10-0-147-235",
"doc_count": 3064
},
{
"key": "ip-10-0-210-50",
"doc_count": 2515
},
{
"key": "ip-10-0-167-109",
"doc_count": 1832
},
{
"key": "ip-10-0-186-71",
"doc_count": 1321
},
{
"key": "ip-10-0-143-89",
"doc_count": 1107
}
]
}
}
}
}
*/
// LogEntity the entity of log data
type LogEntity struct {
Kubernetes struct {
Annotations map[string]string `json:"annotations,omitempty"`
ContainerID string `json:"container_id,omitempty"`
ContainerImage string `json:"container_image"`
ContainerImageID string `json:"container_image_id,omitempty"`
ContainerIOStream string `json:"container_iostream,omitempty"`
ContainerName string `json:"container_name"`
FlatLabels []string `json:"flat_labels"`
Host string `json:"host"`
Lables map[string]string `json:"labels,omitempty"`
MasterURL string `json:"master_url,omitempty"`
NamespaceID string `json:"namespace_id"`
NamespaceLabels map[string]string `json:"namespace_labels,omitempty"`
NamespaceName string `json:"namespace_name"`
PodID string `json:"pod_id"`
PodIP string `json:"pod_ip,omitempty"`
PodName string `json:"pod_name"`
PodOwner string `json:"pod_owner"`
} `json:"kubernetes,omitempty"`
Systemd struct {
SystemdT struct {
SystemdInvocationID string `json:"SYSTEMD_INVOCATION_ID"`
BootID string `json:"BOOT_ID"`
GID string `json:"GID"`
CmdLine string `json:"CMDLINE"`
PID string `json:"PID"`
SystemSlice string `json:"SYSTEMD_SLICE"`
SelinuxContext string `json:"SELINUX_CONTEXT"`
UID string `json:"UID"`
StreamID string `json:"STREAM_ID"`
Transport string `json:"TRANSPORT"`
Comm string `json:"COMM"`
EXE string
SystemdUnit string `json:"SYSTEMD_UNIT"`
CapEffective string `json:"CAP_EFFECTIVE"`
MachineID string `json:"MACHINE_ID"`
SystemdCgroup string `json:"SYSTEMD_CGROUP"`
} `json:"t"`
SystemdU struct {
SyslogIdntifier string `json:"SYSLOG_IDENTIFIER"`
SyslogFacility string `json:"SYSLOG_FACILITY"`
} `json:"u"`
} `json:"systemd,omitempty"`
ViaqMsgID string `json:"viaq_msg_id,omitempty"`
Level string `json:"level"`
LogSource string `json:"log_source"`
LogType string `json:"log_type,omitempty"`
Message string `json:"message"`
Docker struct {
ContainerID string `json:"container_id"`
} `json:"docker,omitempty"`
HostName string `json:"hostname"`
TimeStamp string `json:"@timestamp"`
File string `json:"file,omitempty"`
OpenShift struct {
ClusterID string `json:"cluster_id,omitempty"`
Sequence int64 `json:"sequence"`
Labels map[string]string `json:"labels,omitempty"`
} `json:"openshift,omitempty"`
PipelineMetadata struct {
Collector struct {
ReceivedAt string `json:"received_at"`
Name string `json:"name"`
InputName string `json:"inputname"`
Version string `json:"version"`
IPaddr4 string `json:"ipaddr4"`
} `json:"collector"`
} `json:"pipeline_metadata,omitempty"`
Structured struct {
Level string `json:"level,omitempty"`
StringNumber string `json:"StringNumber,omitempty"`
Message string `json:"message,omitempty"`
Number int `json:"Number,omitempty"`
Layer1 string `json:"Layer1,omitempty"`
FooColonBar string `json:"foo:bar,omitempty"`
FooDotBar string `json:"foo.bar,omitempty"`
BraceItem string `json:"{foobar},omitempty"`
BracketItem string `json:"[foobar],omitempty"`
Layer2 struct {
Name string `json:"name,omitempty"`
Tips string `json:"tips,omitempty"`
} `json:"layer2,omitempty"`
} `json:"structured,omitempty"`
}
// CountResult example
/*
{
"count" : 453558,
"_shards" : {
"total" : 39,
"successful" : 39,
"skipped" : 0,
"failed" : 0
}
}
*/
type CountResult struct {
Count int64 `json:"count"`
Shards struct {
Total int64 `json:"total"`
Successful int64 `json:"successful"`
Skipped int64 `json:"skipped"`
Failed int64 `json:"failed"`
} `json:"_shards"`
}
// ESIndex example
/*
{
"health": "green",
"status": "open",
"index": "infra-000015",
"uuid": "uHqlf91RQAqit072gI9LaA",
"pri": "3",
"rep": "1",
"docs.count": "37323",
"docs.deleted": "0",
"store.size": "58.8mb",
"pri.store.size": "29.3mb"
}
*/
type ESIndex struct {
Health string `json:"health"`
Status string `json:"status"`
Index string `json:"index"`
UUID string `json:"uuid"`
PrimaryCount string `json:"pri"`
ReplicaCount string `json:"rep"`
DocsCount string `json:"docs.count"`
DocsDeleted string `json:"docs.deleted"`
StoreSize string `json:"store.size"`
PriStoreSize string `json:"pri.store.size"`
}
// PackageManifest gets the status field of a packagemanifest
type PackageManifest struct {
metav1.ObjectMeta `json:"metadata"`
Status struct {
CatalogSource string `json:"catalogSource"`
CatalogSourceNamespace string `json:"catalogSourceNamespace"`
Channels []struct {
CurrentCSV string `json:"currentCSV"`
Name string `json:"name"`
} `json:"channels"`
DefaultChannel string `json:"defaultChannel"`
} `json:"status"`
}
// OperatorHub gets the status field of an operatorhub object
type OperatorHub struct {
Status struct {
Sources []struct {
Disabled bool `json:"disabled"`
Name string `json:"name"`
Status string `json:"status"`
} `json:"sources"`
} `json:"status"`
}
// LokiLogQuery result example
/*
{
"status": "success",
"data": {
"resultType": "streams",
"result": [{
"stream": {
"kubernetes_container_name": "centos-logtest",
"kubernetes_host": "ip-10-0-161-168.us-east-2.compute.internal",
"kubernetes_namespace_name": "test",
"kubernetes_pod_name": "centos-logtest-qt6pz",
"log_type": "application",
"tag": "kubernetes.var.log.containers.centos-logtest-qt6pz_test_centos-logtest-da3cf8c0493625dc4f42c8592954aad95f3f4ce2a2098ab97ab6a4ad58276617.log",
"fluentd_thread": "flush_thread_0"
},
"values": [
["1637005525936482085", "{\"docker\":{\"container_id\":\"da3cf8c0493625dc4f42c8592954aad95f3f4ce2a2098ab97ab6a4ad58276617\"},\"kubernetes\":{\"container_name\":\"centos-logtest\",\"namespace_name\":\"test\",\"pod_name\":\"centos-logtest-qt6pz\",\"container_image\":\"quay.io/openshifttest/ocp-logtest@sha256:f23bea6f669d125f2f317e3097a0a4da48e8792746db32838725b45efa6c64a4\",\"container_image_id\":\"quay.io/openshifttest/ocp-logtest@sha256:f23bea6f669d125f2f317e3097a0a4da48e8792746db32838725b45efa6c64a4\",\"pod_id\":\"d77cae4f-2b8a-4c30-8142-417756aa3daf\",\"pod_ip\":\"10.129.2.66\",\"host\":\"ip-10-0-161-168.us-east-2.compute.internal\",\"labels\":{\"run\":\"centos-logtest\",\"test\":\"centos-logtest\"},\"master_url\":\"https://kubernetes.default.svc\",\"namespace_id\":\"18a06953-fdca-4760-b265-a4ef9b98128e\",\"namespace_labels\":{\"kubernetes_io/metadata_name\":\"test\"}},\"message\":\"{\\\"message\\\": \\\"MERGE_JSON_LOG=true\\\", \\\"level\\\": \\\"debug\\\",\\\"Layer1\\\": \\\"layer1 0\\\", \\\"layer2\\\": {\\\"name\\\":\\\"Layer2 1\\\", \\\"tips\\\":\\\"Decide by PRESERVE_JSON_LOG\\\"}, \\\"StringNumber\\\":\\\"10\\\", \\\"Number\\\": 10,\\\"foo.bar\\\":\\\"Dot Item\\\",\\\"{foobar}\\\":\\\"Brace Item\\\",\\\"[foobar]\\\":\\\"Bracket Item\\\", \\\"foo:bar\\\":\\\"Colon Item\\\",\\\"foo bar\\\":\\\"Space Item\\\" }\",\"level\":\"unknown\",\"hostname\":\"ip-10-0-161-168.us-east-2.compute.internal\",\"pipeline_metadata\":{\"collector\":{\"ipaddr4\":\"10.0.161.168\",\"inputname\":\"fluent-plugin-systemd\",\"name\":\"fluentd\",\"received_at\":\"2021-11-15T19:45:26.753126+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-11-15T19:45:25.936482+00:00\",\"viaq_index_name\":\"app-write\",\"viaq_msg_id\":\"NmM5YWIyOGMtM2M4MS00MTFkLWJjNjEtZGIxZDE4MWViNzk0\",\"log_type\":\"application\"}"]
]
}, {
"stream": {
"kubernetes_host": "ip-10-0-161-168.us-east-2.compute.internal",
"kubernetes_namespace_name": "test",
"kubernetes_pod_name": "centos-logtest-qt6pz",
"log_type": "application",
"tag": "kubernetes.var.log.containers.centos-logtest-qt6pz_test_centos-logtest-da3cf8c0493625dc4f42c8592954aad95f3f4ce2a2098ab97ab6a4ad58276617.log",
"fluentd_thread": "flush_thread_1",
"kubernetes_container_name": "centos-logtest"
},
"values": [
["1637005500907904677", "{\"docker\":{\"container_id\":\"da3cf8c0493625dc4f42c8592954aad95f3f4ce2a2098ab97ab6a4ad58276617\"},\"kubernetes\":{\"container_name\":\"centos-logtest\",\"namespace_name\":\"test\",\"pod_name\":\"centos-logtest-qt6pz\",\"container_image\":\"quay.io/openshifttest/ocp-logtest@sha256:f23bea6f669d125f2f317e3097a0a4da48e8792746db32838725b45efa6c64a4\",\"container_image_id\":\"quay.io/openshifttest/ocp-logtest@sha256:f23bea6f669d125f2f317e3097a0a4da48e8792746db32838725b45efa6c64a4\",\"pod_id\":\"d77cae4f-2b8a-4c30-8142-417756aa3daf\",\"pod_ip\":\"10.129.2.66\",\"host\":\"ip-10-0-161-168.us-east-2.compute.internal\",\"labels\":{\"run\":\"centos-logtest\",\"test\":\"centos-logtest\"},\"master_url\":\"https://kubernetes.default.svc\",\"namespace_id\":\"18a06953-fdca-4760-b265-a4ef9b98128e\",\"namespace_labels\":{\"kubernetes_io/metadata_name\":\"test\"}},\"message\":\"{\\\"message\\\": \\\"MERGE_JSON_LOG=true\\\", \\\"level\\\": \\\"debug\\\",\\\"Layer1\\\": \\\"layer1 0\\\", \\\"layer2\\\": {\\\"name\\\":\\\"Layer2 1\\\", \\\"tips\\\":\\\"Decide by PRESERVE_JSON_LOG\\\"}, \\\"StringNumber\\\":\\\"10\\\", \\\"Number\\\": 10,\\\"foo.bar\\\":\\\"Dot Item\\\",\\\"{foobar}\\\":\\\"Brace Item\\\",\\\"[foobar]\\\":\\\"Bracket Item\\\", \\\"foo:bar\\\":\\\"Colon Item\\\",\\\"foo bar\\\":\\\"Space Item\\\" }\",\"level\":\"unknown\",\"hostname\":\"ip-10-0-161-168.us-east-2.compute.internal\",\"pipeline_metadata\":{\"collector\":{\"ipaddr4\":\"10.0.161.168\",\"inputname\":\"fluent-plugin-systemd\",\"name\":\"fluentd\",\"received_at\":\"2021-11-15T19:45:01.753261+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-11-15T19:45:00.907904+00:00\",\"viaq_index_name\":\"app-write\",\"viaq_msg_id\":\"Yzc1MmJkZDQtNzk4NS00NzA5LWFkN2ItNTlmZmE3NTgxZmUy\",\"log_type\":\"application\"}"]
]
}],
"stats": {
"summary": {
"bytesProcessedPerSecond": 48439028,
"linesProcessedPerSecond": 39619,
"totalBytesProcessed": 306872,
"totalLinesProcessed": 251,
"execTime": 0.006335222
},
"store": {
"totalChunksRef": 0,
"totalChunksDownloaded": 0,
"chunksDownloadTime": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"compressedBytes": 0,
"totalDuplicates": 0
},
"ingester": {
"totalReached": 1,
"totalChunksMatched": 2,
"totalBatches": 1,
"totalLinesSent": 28,
"headChunkBytes": 41106,
"headChunkLines": 85,
"decompressedBytes": 265766,
"decompressedLines": 166,
"compressedBytes": 13457,
"totalDuplicates": 0
}
}
}
}
*/
// OTEL data model
/*
{
"status": "success",
"data": {
"resultType": "streams",
"result": [
{
"stream": {
"detected_level": "debug",
"k8s_container_name": "logging-centos-logtest",
"k8s_namespace_name": "e2e-test-vector-otlp-pzpdm",
"k8s_node_name": "ip-10-0-70-97.us-east-2.compute.internal",
"k8s_pod_label_run": "centos-logtest",
"k8s_pod_label_test": "centos-logtest",
"k8s_pod_name": "logging-centos-logtest-9bbn2",
"k8s_pod_uid": "de4948fe-07bb-42c2-986b-227a24f38a8b",
"kubernetes_container_name": "logging-centos-logtest",
"kubernetes_host": "ip-10-0-70-97.us-east-2.compute.internal",
"kubernetes_namespace_name": "e2e-test-vector-otlp-pzpdm",
"kubernetes_pod_name": "logging-centos-logtest-9bbn2",
"log_iostream": "stdout",
"log_source": "container",
"log_type": "application",
"observed_timestamp": "1730165206237598776",
"openshift_cluster_id": "de026959-72d3-4924-ada8-d6f935c0cdf7",
"openshift_cluster_uid": "de026959-72d3-4924-ada8-d6f935c0cdf7",
"openshift_log_source": "container",
"openshift_log_type": "application",
"severity_text": "default"
},
"values": [
[
"1730165205734904307",
"{\"message\": \"MERGE_JSON_LOG=true\", \"level\": \"debug\",\"Layer1\": \"layer1 0\", \"layer2\": {\"name\":\"Layer2 1\", \"tips\":\"Decide by PRESERVE_JSON_LOG\"}, \"StringNumber\":\"10\", \"Number\": 10,\"foo.bar\":\"Dot Item\",\"{foobar}\":\"Brace Item\",\"[foobar]\":\"Bracket Item\", \"foo:bar\":\"Colon Item\",\"foo bar\":\"Space Item\" }"
]
]
}
]
}
}
*/
type lokiQueryResponse struct {
Status string `json:"status"`
Data struct {
ResultType string `json:"resultType"`
Result []struct {
Stream *struct {
DetectedLevel string `json:"detected_level,omitempty"`
K8sContainerName string `json:"k8s_container_name,omitempty"`
K8sNamespaceName string `json:"k8s_namespace_name,omitempty"`
K8sNodeName string `json:"k8s_node_name,omitempty"`
K8sPodName string `json:"k8s_pod_name,omitempty"`
K8sPodUID string `json:"k8s_pod_uid,omitempty"`
LogType string `json:"log_type,omitempty"`
Tag string `json:"tag,omitempty"`
FluentdThread string `json:"fluentd_thread,omitempty"`
KubernetesContainerName string `json:"kubernetes_container_name,omitempty"`
KubernetesHost string `json:"kubernetes_host,omitempty"`
KubernetesNamespaceName string `json:"kubernetes_namespace_name,omitempty"`
KubernetesPodName string `json:"kubernetes_pod_name,omitempty"`
LogIOStream string `json:"log_iostream,omitempty"`
LogSource string `json:"log_source,omitempty"`
ObservedTimestamp string `json:"observed_timestamp,omitempty"`
OpenshiftClusterID string `json:"openshift_cluster_id,omitempty"`
OpenshiftClusterUID string `json:"openshift_cluster_uid,omitempty"`
OpenshiftLogSource string `json:"openshift_log_source,omitempty"`
OpenshiftLogType string `json:"openshift_log_type,omitempty"`
SeverityText string `json:"severity_text,omitempty"`
} `json:"stream,omitempty"`
Metric *struct {
LogType string `json:"log_type,omitempty"`
KubernetesContainerName string `json:"kubernetes_container_name,omitempty"`
KubernetesHost string `json:"kubernetes_host,omitempty"`
KubernetesNamespaceName string `json:"kubernetes_namespace_name,omitempty"`
KubernetesPodName string `json:"kubernetes_pod_name,omitempty"`
} `json:"metric,omitempty"`
Values []interface{} `json:"values,omitempty"`
Value interface{} `json:"value,omitempty"`
} `json:"result"`
Stats struct {
Summary struct {
BytesProcessedPerSecond int `json:"bytesProcessedPerSecond"`
LinesProcessedPerSecond int `json:"linesProcessedPerSecond"`
TotalBytesProcessed int `json:"totalBytesProcessed"`
TotalLinesProcessed int `json:"totalLinesProcessed"`
ExecTime float32 `json:"execTime"`
} `json:"summary"`
Store struct {
TotalChunksRef int `json:"totalChunksRef"`
TotalChunksDownloaded int `json:"totalChunksDownloaded"`
ChunksDownloadTime int `json:"chunksDownloadTime"`
HeadChunkBytes int `json:"headChunkBytes"`
HeadChunkLines int `json:"headChunkLines"`
DecompressedBytes int `json:"decompressedBytes"`
DecompressedLines int `json:"decompressedLines"`
CompressedBytes int `json:"compressedBytes"`
TotalDuplicates int `json:"totalDuplicates"`
} `json:"store"`
Ingester struct {
TotalReached int `json:"totalReached"`
TotalChunksMatched int `json:"totalChunksMatched"`
TotalBatches int `json:"totalBatches"`
TotalLinesSent int `json:"totalLinesSent"`
HeadChunkBytes int `json:"headChunkBytes"`
HeadChunkLines int `json:"headChunkLines"`
DecompressedBytes int `json:"decompressedBytes"`
DecompressedLines int `json:"decompressedLines"`
CompressedBytes int `json:"compressedBytes"`
TotalDuplicates int `json:"totalDuplicates"`
} `json:"ingester"`
} `json:"stats"`
} `json:"data"`
}
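// Illustrative sketch (comment only, variable names are hypothetical): reading
// the streams returned by a Loki query_range call after unmarshalling the
// response body into lokiQueryResponse.
/*
	var res lokiQueryResponse
	if err := json.Unmarshal(rawBody, &res); err != nil {
		return err
	}
	for _, r := range res.Data.Result {
		if r.Stream != nil {
			fmt.Printf("namespace=%s pod=%s entries=%d\n",
				r.Stream.KubernetesNamespaceName, r.Stream.KubernetesPodName, len(r.Values))
		}
	}
*/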
// labelResponse result example
/*
{
"status": "success",
"data": ["__name__", "fluentd_thread", "kubernetes_container_name", "kubernetes_host", "kubernetes_namespace_name", "kubernetes_pod_name", "log_type", "tag"]
}
*/
type labelResponse struct {
SearchStatus string `json:"status"`
Data []string `json:"data"`
}
// prometheusQueryResult the response of querying prometheus APIs
type prometheusQueryResult struct {
Data struct {
Result []metric `json:"result"`
ResultType string `json:"resultType"`
Alerts []alert `json:"alerts,omitempty"`
} `json:"data"`
Status string `json:"status"`
}
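// Illustrative sketch (comment only, rawBody is hypothetical): extracting the
// sample of an instant query from a decoded prometheusQueryResult. For vector
// results each Value holds [timestamp, "value"], so the sample itself is the
// second element.
/*
	var res prometheusQueryResult
	if err := json.Unmarshal(rawBody, &res); err != nil {
		return err
	}
	for _, m := range res.Data.Result {
		if len(m.Value) == 2 {
			fmt.Printf("%s => %v\n", m.Metric.Name, m.Value[1])
		}
	}
*/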
// metric the prometheus metric
type metric struct {
Metric struct {
Name string `json:"__name__"`
Cluster string `json:"cluster,omitempty"`
Container string `json:"container,omitempty"`
ContainerName string `json:"containername,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Instance string `json:"instance,omitempty"`
Job string `json:"job,omitempty"`
Namespace string `json:"namespace,omitempty"`
Path string `json:"path,omitempty"`
Pod string `json:"pod,omitempty"`
PodName string `json:"podname,omitempty"`
Service string `json:"service,omitempty"`
ExportedNamespace string `json:"exported_namespace,omitempty"`
State string `json:"state,omitempty"`
} `json:"metric"`
Value []interface{} `json:"value"`
}
// alert the pending/firing alert
type alert struct {
Labels struct {
AlertName string `json:"alertname,omitempty"`
Condition string `json:"condition,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Namespace string `json:"namespace,omitempty"`
Pod string `json:"pod,omitempty"`
Instance string `json:"instance,omitempty"`
Severity string `json:"severity,omitempty"`
} `json:"labels,omitempty"`
Annotations struct {
Message string `json:"message,omitempty"`
RunBookURL string `json:"runbook_url,omitempty"`
Summary string `json:"summary,omitempty"`
} `json:"annotations,omitempty"`
State string `json:"state,omitempty"`
ActiveAt string `json:"activeAt,omitempty"`
Value string `json:"value,omitempty"`
}
// The splunk server deployed on the same OpenShift cluster
type splunkPodServer struct {
name string // The splunk name, default: splunk-s1-standalone
namespace string // The namespace where splunk is deployed in, default: splunk-aosqe
authType string // http(insecure http),tls_mutual,tls_serveronly. Note: when authType==http, you can still access splunk via https://${splunk_route}
version string // The splunk version: 8.2 or 9.0, default: 9.0
hecToken string // hec_token
adminUser string // admin user
adminPassword string // admin password
serviceName string // http service name
serviceURL string // http service URL
hecRoute string // hec route
webRoute string // web route
splunkdRoute string // splunkd route
caFile string // The ca File
keyFile string // The Key File
certFile string // The cert File
	passphrase string // The passphrase
}
// The secret used in CLF to splunk server
type toSplunkSecret struct {
name string // The secret name
namespace string // The namespace where secret will be created
hecToken string // The Splunk hec_token
caFile string // The collector ca_file
keyFile string // The collector Key File
certFile string // The collector cert File
	passphrase string // The passphrase for the collector key
}
// The splunk response for a search request. It includes the search id (sid) which can be used to fetch the log records
type splunkSearchResp struct {
XMLName xml.Name `xml:"response"`
Sid string `xml:"sid"`
}
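// Illustrative sketch (comment only, respBody is hypothetical): extracting the
// search id from the XML payload returned when a search job is created; the
// sid is then used when fetching the job's results.
/*
	var searchResp splunkSearchResp
	if err := xml.Unmarshal(respBody, &searchResp); err != nil {
		return err
	}
	jobID := searchResp.Sid
*/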
// A log record stored in the splunk server, as sent out by the collector
type splunkLogRecord struct {
Bkt string `json:"_bkt"`
Cd string `json:"_cd"`
IndexTime string `json:"_indextime"`
Raw string `json:"_raw"`
Serial string `json:"_serial"`
Si []string `json:"_si"`
TagSourceType string `json:"_sourcetype"`
SubSecond string `json:"_subsecond"`
Time string `json:"_time"`
Host string `json:"host"`
Index string `json:"index"`
	LineCount string `json:"linecount"`
LogType string `json:"log_type"`
Source string `json:"source"`
	SourceType string `json:"sourcetype"`
	SplunkServer string `json:"splunk_server"`
}
// The splunk search result
type splunkSearchResult struct {
Preview bool `json:"preview"`
InitOffset float64 `json:"init_offset"`
Fields []interface{} `json:"fields"`
Messages []interface{} `json:"messages"`
Results []splunkLogRecord `json:"results"`
}
/* runtime-config.yaml for Loki when overriding specs in the LokiStack CR
---
overrides:
application:
ingestion_rate_mb: 10
ingestion_burst_size_mb: 6
max_label_name_length: 1024
max_label_value_length: 2048
max_label_names_per_series: 30
max_line_size: 256000
per_stream_rate_limit: 3MB
per_stream_rate_limit_burst: 15MB
max_entries_limit_per_query: 5000
max_chunks_per_query: 2000000
max_query_series: 500
query_timeout: 3m
cardinality_limit: 100000
retention_period: 1d
retention_stream:
- selector: '{kubernetes_namespace_name=~"test.+"}'
priority: 1
period: 1d
ruler_alertmanager_config:
alertmanager_url: https://_web._tcp.alertmanager-operated.openshift-user-workload-monitoring.svc
enable_alertmanager_v2: true
enable_alertmanager_discovery: true
alertmanager_refresh_interval: 1m
alertmanager_client:
tls_ca_path: /var/run/ca/alertmanager/service-ca.crt
tls_server_name: alertmanager-user-workload.openshift-user-workload-monitoring.svc.cluster.local
type: Bearer
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
audit:
ingestion_rate_mb: 20
ingestion_burst_size_mb: 6
max_label_name_length: 1024
max_label_value_length: 2048
max_label_names_per_series: 30
max_line_size: 256000
per_stream_rate_limit: 3MB
per_stream_rate_limit_burst: 15MB
max_entries_limit_per_query: 5000
max_chunks_per_query: 2000000
max_query_series: 500
query_timeout: 3m
cardinality_limit: 100000
retention_period: 1d
retention_stream:
- selector: '{kubernetes_namespace_name=~"openshift-logging.+"}'
priority: 1
period: 10d
infrastructure:
ingestion_rate_mb: 15
ingestion_burst_size_mb: 6
max_label_name_length: 1024
max_label_value_length: 2048
max_label_names_per_series: 30
max_line_size: 256000
per_stream_rate_limit: 3MB
per_stream_rate_limit_burst: 15MB
max_entries_limit_per_query: 5000
max_chunks_per_query: 2000000
max_query_series: 500
query_timeout: 3m
cardinality_limit: 100000
retention_period: 5d
retention_stream:
- selector: '{kubernetes_namespace_name=~"openshift-cluster.+"}'
priority: 1
period: 1d
*/
type RuntimeConfig struct {
Overrides *Overrides `yaml:"overrides,omitempty"`
}
type Overrides struct {
Application *OverridesConfig `yaml:"application,omitempty"`
Audit *OverridesConfig `yaml:"audit,omitempty"`
Infrastructure *OverridesConfig `yaml:"infrastructure,omitempty"`
}
type RetentionStream struct {
Selector string `yaml:"selector"`
Priority *int `yaml:"priority,omitempty"`
Period string `yaml:"period"`
}
type RulerAlertmanagerConfig struct {
AlertmanagerURL string `yaml:"alertmanager_url"`
EnableAlertmanagerV2 bool `yaml:"enable_alertmanager_v2"`
EnableAlertmanagerDiscovery bool `yaml:"enable_alertmanager_discovery"`
AlertmanagerRefreshInterval string `yaml:"alertmanager_refresh_interval"`
AlertmanagerClient AlertmanagerClient `yaml:"alertmanager_client"`
}
type AlertmanagerClient struct {
TLSCaPath string `yaml:"tls_ca_path"`
TLSServerName string `yaml:"tls_server_name"`
Type string `yaml:"type"`
CredentialsFile string `yaml:"credentials_file"`
}
type OverridesConfig struct {
IngestionRateMb *int `yaml:"ingestion_rate_mb,omitempty"`
IngestionBurstSizeMb *int `yaml:"ingestion_burst_size_mb,omitempty"`
MaxLabelNameLength *int `yaml:"max_label_name_length,omitempty"`
MaxLabelValueLength *int `yaml:"max_label_value_length,omitempty"`
MaxLabelNamesPerSeries *int `yaml:"max_label_names_per_series,omitempty"`
MaxLineSize *int `yaml:"max_line_size,omitempty"`
MaxGlobalStreamsPerUser *int `yaml:"max_global_streams_per_user,omitempty"`
PerStreamRateLimit *string `yaml:"per_stream_rate_limit,omitempty"`
PerStreamRateLimitBurst *string `yaml:"per_stream_rate_limit_burst,omitempty"`
MaxEntriesLimitPerQuery *int `yaml:"max_entries_limit_per_query,omitempty"`
MaxChunksPerQuery *int `yaml:"max_chunks_per_query,omitempty"`
MaxQuerySeries *int `yaml:"max_query_series,omitempty"`
QueryTimeout *string `yaml:"query_timeout,omitempty"`
CardinalityLimit *int `yaml:"cardinality_limit,omitempty"`
RetentionPeriod *string `yaml:"retention_period,omitempty"`
RetentionStream *[]RetentionStream `yaml:"retention_stream,omitempty"`
RulerAlertmanagerConfig *RulerAlertmanagerConfig `yaml:"ruler_alertmanager_config,omitempty"`
ShardStreams struct {
Enabled bool `yaml:"enabled"`
DesiredRate string `yaml:"desired_rate"`
} `yaml:"shard_streams"`
OtlpConfig OtlpConfig `yaml:"otlp_config,omitempty"`
}
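// Illustrative sketch (comment only): parsing the runtime-config.yaml shown
// above into RuntimeConfig. Which YAML package the caller uses is an
// assumption here (for example gopkg.in/yaml.v3); configData is a hypothetical
// []byte holding the file content.
/*
	var cfg RuntimeConfig
	if err := yaml.Unmarshal(configData, &cfg); err != nil {
		return err
	}
	if cfg.Overrides != nil && cfg.Overrides.Application != nil && cfg.Overrides.Application.RetentionPeriod != nil {
		fmt.Println("application retention:", *cfg.Overrides.Application.RetentionPeriod)
	}
*/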
/*
Loki Schema Config
schema_config:
configs:
- from: "2023-10-15"
index:
period: 24h
prefix: index_
object_store: s3
schema: v13
store: tsdb
*/
type StorageSchemaConfig struct {
SchemaConfig SchemaConfig `yaml:"schema_config"`
}
type SchemaConfig struct {
Configs []ConfigEntry `yaml:"configs"`
}
type ConfigEntry struct {
From string `yaml:"from"`
Index Index `yaml:"index"`
ObjectStore string `yaml:"object_store"`
Schema string `yaml:"schema"`
Store string `yaml:"store"`
}
type Index struct {
Period string `yaml:"period"`
Prefix string `yaml:"prefix"`
}
/*
Loki limits config
*/
type LokiLimitsConfig struct {
LimitsConfig LimitsConfig `yaml:"limits_config"`
}
type LimitsConfig struct {
IngestionRateStrategy string `yaml:"ingestion_rate_strategy"`
IngestionRateMB int `yaml:"ingestion_rate_mb"`
IngestionBurstSizeMB int `yaml:"ingestion_burst_size_mb"`
MaxLabelNameLength int `yaml:"max_label_name_length"`
MaxLabelValueLength int `yaml:"max_label_value_length"`
MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series"`
RejectOldSamples bool `yaml:"reject_old_samples"`
RejectOldSamplesMaxAge string `yaml:"reject_old_samples_max_age"`
CreationGracePeriod string `yaml:"creation_grace_period"`
MaxStreamsPerUser int `yaml:"max_streams_per_user"`
MaxLineSize int `yaml:"max_line_size"`
MaxEntriesLimitPerQuery int `yaml:"max_entries_limit_per_query"`
DiscoverServiceName []string `yaml:"discover_service_name"`
DiscoverLogLevels bool `yaml:"discover_log_levels"`
MaxGlobalStreamsPerUser int `yaml:"max_global_streams_per_user"`
MaxChunksPerQuery int `yaml:"max_chunks_per_query"`
MaxQueryLength string `yaml:"max_query_length"`
MaxQueryParallelism int `yaml:"max_query_parallelism"`
TsdbMaxQueryParallelism int `yaml:"tsdb_max_query_parallelism"`
MaxQuerySeries int `yaml:"max_query_series"`
CardinalityLimit int `yaml:"cardinality_limit"`
MaxStreamsMatchersPerQuery int `yaml:"max_streams_matchers_per_query"`
QueryTimeout string `yaml:"query_timeout"`
VolumeEnabled bool `yaml:"volume_enabled"`
VolumeMaxSeries int `yaml:"volume_max_series"`
RetentionPeriod string `yaml:"retention_period"`
RetentionStream []RetentionStream `yaml:"retention_stream"`
MaxCacheFreshnessPerQuery string `yaml:"max_cache_freshness_per_query"`
PerStreamRateLimit string `yaml:"per_stream_rate_limit"`
PerStreamRateLimitBurst string `yaml:"per_stream_rate_limit_burst"`
SplitQueriesByInterval string `yaml:"split_queries_by_interval"`
ShardStreams struct {
Enabled bool `yaml:"enabled"`
DesiredRate string `yaml:"desired_rate"`
} `yaml:"shard_streams"`
OtlpConfig OtlpConfig `yaml:"otlp_config,omitempty"`
AllowStructuredMetadata bool `yaml:"allow_structured_metadata"`
}
type OtlpConfig struct {
ResourceAttributes struct {
AttributesConfig []struct {
Action string `yaml:"action"`
Attributes []string `yaml:"attributes"`
Regex string `yaml:"regex,omitempty"`
} `yaml:"attributes_config"`
} `yaml:"resource_attributes"`
LogAttributes []struct {
Action string `yaml:"action"`
Attributes []string `yaml:"attributes,omitempty"`
Regex string `yaml:"regex,omitempty"`
} `yaml:"log_attributes,omitempty"`
}
/*
An amqstream instance struct used to share data between the Kafka instance, CLF and test cases
*/
type amqInstance struct {
	name         string // amqstream kafka cluster name
	namespace    string // amqstream kafka namespace
	user         string // amqstream user for topicPrefix
	password     string // amqstream user password
	service      string // amqstream kafka service name for internal service using sasl Plain auth
	route        string // amqstream kafka broker external route using sasl ssl auth
	routeCA      string // amqstream kafka route ca
	topicPrefix  string // amqstream topicPrefix, only topics with this prefix are allowed
	instanceType string // the kafka instance type: kafka-no-auth-cluster, kafka-sasl-cluster
}
| package logging | ||||
test | openshift/openshift-tests-private | 52a94a7c-702e-4b1c-bc2b-567ce5ed5220 | utils | import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"cloud.google.com/go/logging"
"cloud.google.com/go/logging/logadmin"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"google.golang.org/api/iterator"
"google.golang.org/protobuf/types/known/structpb"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
) | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | package logging
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"cloud.google.com/go/logging"
"cloud.google.com/go/logging/logadmin"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"google.golang.org/api/iterator"
"google.golang.org/protobuf/types/known/structpb"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
// SubscriptionObjects is used to create operators via OLM
type SubscriptionObjects struct {
OperatorName string
Namespace string
OperatorGroup string // the file used to create operator group
Subscription string // the file used to create subscription
PackageName string
	OperatorPodLabel string // the operator pod label used to select the operator pod
CatalogSource CatalogSourceObjects `json:",omitempty"`
SkipCaseWhenFailed bool // if true, the case will be skipped when operator is not ready, otherwise, the case will be marked as failed
}
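// Illustrative usage sketch (comment only; the fixture paths and names below
// are hypothetical, not guaranteed fixtures): a test fills in the struct and
// calls SubscribeOperator, which creates the namespace, operatorgroup and
// subscription, then waits for the operator pod to become ready.
/*
	clo := SubscriptionObjects{
		OperatorName:       "cluster-logging-operator",
		Namespace:          "openshift-logging",
		PackageName:        "cluster-logging",
		OperatorGroup:      filepath.Join(loggingBaseDir, "subscription", "og.yaml"),
		Subscription:       filepath.Join(loggingBaseDir, "subscription", "sub.yaml"),
		SkipCaseWhenFailed: true,
	}
	clo.SubscribeOperator(oc)
*/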
// CatalogSourceObjects defines the source used to subscribe an operator
type CatalogSourceObjects struct {
Channel string `json:",omitempty"`
SourceName string `json:",omitempty"`
SourceNamespace string `json:",omitempty"`
}
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
// contain checks if b is an element of a
func contain(a []string, b string) bool {
for _, c := range a {
if c == b {
return true
}
}
return false
}
// containSubstring checks if b is a substring of any element of a
func containSubstring(a interface{}, b string) bool {
switch reflect.TypeOf(a).Kind() {
case reflect.Slice, reflect.Array:
s := reflect.ValueOf(a)
for i := 0; i < s.Len(); i++ {
if strings.Contains(fmt.Sprintln(s.Index(i)), b) {
return true
}
}
}
return false
}
func processTemplate(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
filename := getRandomString() + ".json"
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(filename)
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
if err != nil {
return configFile, fmt.Errorf("failed to process template with the provided parameters")
}
return configFile, nil
}
func getProxyFromEnv() string {
var proxy string
if os.Getenv("http_proxy") != "" {
proxy = os.Getenv("http_proxy")
	} else if os.Getenv("https_proxy") != "" {
proxy = os.Getenv("https_proxy")
}
return proxy
}
func getClusterID(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-ojsonpath={.spec.clusterID}").Output()
}
func isFipsEnabled(oc *exutil.CLI) bool {
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
fips, err := exutil.DebugNodeWithChroot(oc, nodes[0].Name, "bash", "-c", "fips-mode-setup --check")
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Contains(fips, "FIPS mode is enabled.")
}
// waitForPackagemanifestAppear waits for the packagemanifest to appear in the cluster
// chSource: bool value, true means the packagemanifests' source name must match the so.CatalogSource.SourceName, e.g.: oc get packagemanifests xxxx -l catalog=$source-name
func (so *SubscriptionObjects) waitForPackagemanifestAppear(oc *exutil.CLI, chSource bool) {
args := []string{"-n", so.CatalogSource.SourceNamespace, "packagemanifests"}
if chSource {
args = append(args, "-l", "catalog="+so.CatalogSource.SourceName)
} else {
args = append(args, so.PackageName)
}
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
packages, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
if err != nil {
msg := fmt.Sprintf("%v", err)
if strings.Contains(msg, "No resources found") || strings.Contains(msg, "NotFound") {
return false, nil
}
return false, err
}
if strings.Contains(packages, so.PackageName) {
return true, nil
}
e2e.Logf("Waiting for packagemanifest/%s to appear", so.PackageName)
return false, nil
})
if err != nil {
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for can't find packagemanifest/%s", so.PackageName))
} else {
e2e.Failf("Packagemanifest %s is not available", so.PackageName)
}
}
//check channel
args = append(args, `-ojsonpath={.items[?(@.metadata.name=="`+so.PackageName+`")].status.channels[*].name}`)
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
channels := strings.Split(output, " ")
if !contain(channels, so.CatalogSource.Channel) {
e2e.Logf("Find channels %v from packagemanifest/%s", channels, so.PackageName)
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for packagemanifest/%s doesn't have target channel %s", so.PackageName, so.CatalogSource.Channel))
} else {
e2e.Failf("Packagemanifest %s doesn't have target channel %s", so.PackageName, so.CatalogSource.Channel)
}
}
}
// setCatalogSourceObjects set the default values of channel, source namespace and source name if they're not specified
func (so *SubscriptionObjects) setCatalogSourceObjects(oc *exutil.CLI) {
// set channel
if so.CatalogSource.Channel == "" {
so.CatalogSource.Channel = "stable-6.2"
}
// set source namespace
if so.CatalogSource.SourceNamespace == "" {
so.CatalogSource.SourceNamespace = "openshift-marketplace"
}
// set source and check if the packagemanifest exists or not
if so.CatalogSource.SourceName != "" {
so.waitForPackagemanifestAppear(oc, true)
} else {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("catsrc", "-n", so.CatalogSource.SourceNamespace, "-ojsonpath={.items[*].metadata.name}").Output()
if err != nil {
e2e.Logf("can't list catalog source in project %s: %v", so.CatalogSource.SourceNamespace, err)
}
catsrcs := strings.Split(output, " ")
if contain(catsrcs, "auto-release-app-registry") {
if contain(catsrcs, "redhat-operators") {
// do not subscribe source auto-release-app-registry as we want to test GAed logging in auto release jobs
so.CatalogSource.SourceName = "redhat-operators"
so.waitForPackagemanifestAppear(oc, true)
} else {
if so.SkipCaseWhenFailed {
g.Skip("skip the case because the cluster doesn't have proper catalog source for logging")
}
}
} else if contain(catsrcs, "qe-app-registry") {
so.CatalogSource.SourceName = "qe-app-registry"
so.waitForPackagemanifestAppear(oc, true)
} else {
so.waitForPackagemanifestAppear(oc, false)
source, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests", so.PackageName, "-o", "jsonpath={.status.catalogSource}").Output()
if err != nil {
e2e.Logf("error getting catalog source name: %v", err)
}
so.CatalogSource.SourceName = source
}
}
}
// SubscribeOperator is used to deploy operators
func (so *SubscriptionObjects) SubscribeOperator(oc *exutil.CLI) {
// check if the namespace exists, if it doesn't exist, create the namespace
if so.OperatorPodLabel == "" {
so.OperatorPodLabel = "name=" + so.OperatorName
}
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), so.Namespace, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("The project %s is not found, create it now...", so.Namespace)
namespaceTemplate := exutil.FixturePath("testdata", "logging", "subscription", "namespace.yaml")
namespaceFile, err := processTemplate(oc, "-f", namespaceTemplate, "-p", "NAMESPACE_NAME="+so.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.Remove(namespaceFile)
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("apply").Args("-f", namespaceFile).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
return true, nil
}
return false, err
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create project %s", so.Namespace))
}
}
// check the operator group, if no object found, then create an operator group in the project
og, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "og").Output()
o.Expect(err).NotTo(o.HaveOccurred())
msg := fmt.Sprintf("%v", og)
if strings.Contains(msg, "No resources found") {
// create operator group
ogFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.OperatorGroup, "-p", "OG_NAME="+so.Namespace, "NAMESPACE="+so.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.Remove(ogFile)
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("apply").Args("-f", ogFile, "-n", so.Namespace).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
return true, nil
}
return false, err
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create operatorgroup %s in %s project", so.Namespace, so.Namespace))
}
	// check subscription, if there is no subscription object, then create one
sub, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", so.Namespace, so.PackageName).Output()
if err != nil {
		msg := fmt.Sprintf("%v", sub)
if strings.Contains(msg, "NotFound") {
so.setCatalogSourceObjects(oc)
//create subscription object
subscriptionFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.Subscription, "-p", "PACKAGE_NAME="+so.PackageName, "NAMESPACE="+so.Namespace, "CHANNEL="+so.CatalogSource.Channel, "SOURCE="+so.CatalogSource.SourceName, "SOURCE_NAMESPACE="+so.CatalogSource.SourceNamespace)
if err != nil {
if so.SkipCaseWhenFailed {
g.Skip("hit error when processing subscription template: " + err.Error() + ", skip the case")
} else {
e2e.Failf("hit error when processing subscription template: %v", err)
}
}
defer os.Remove(subscriptionFile)
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("apply").Args("-f", subscriptionFile, "-n", so.Namespace).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
return true, nil
}
return false, err
}
return true, nil
})
if err != nil {
if so.SkipCaseWhenFailed {
g.Skip("hit error when creating subscription, skip the case")
} else {
e2e.Failf("hit error when creating subscription")
}
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create subscription %s in %s project", so.PackageName, so.Namespace))
// check status in subscription
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 120*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub", so.PackageName, `-ojsonpath={.status.state}`).Output()
if err != nil {
e2e.Logf("error getting subscription/%s: %v", so.PackageName, err)
return false, nil
}
return strings.Contains(output, "AtLatestKnown"), nil
})
if err != nil {
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub", so.PackageName, `-ojsonpath={.status.conditions}`).Output()
e2e.Logf("subscription/%s is not ready, conditions: %v", so.PackageName, out)
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for the operator %s is not ready", so.OperatorName))
} else {
e2e.Failf("can't deploy operator %s", so.OperatorName)
}
}
}
}
// check pod status
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 240*time.Second, true, func(context.Context) (done bool, err error) {
pods, err := oc.AdminKubeClient().CoreV1().Pods(so.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: so.OperatorPodLabel})
if err != nil {
e2e.Logf("Hit error %v when getting pods", err)
return false, nil
}
if len(pods.Items) == 0 {
e2e.Logf("Waiting for pod with label %s to appear\n", so.OperatorPodLabel)
return false, nil
}
ready := true
for _, pod := range pods.Items {
if pod.Status.Phase != "Running" {
ready = false
e2e.Logf("Pod %s is not running: %v", pod.Name, pod.Status.Phase)
break
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
ready = false
e2e.Logf("Container %s in pod %s is not ready", containerStatus.Name, pod.Name)
break
}
}
}
return ready, nil
})
if err != nil {
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", so.Namespace, "-l", so.OperatorPodLabel).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", so.Namespace, "-l", so.OperatorPodLabel, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", so.Namespace, "-l", so.OperatorPodLabel, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Logf("pod with label %s is not ready:\nconditions: %s\ncontainer status: %s", so.OperatorPodLabel, podStatus, containerStatus)
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for the operator %s is not ready", so.OperatorName))
} else {
e2e.Failf("can't deploy operator %s", so.OperatorName)
}
}
}
func (so *SubscriptionObjects) uninstallOperator(oc *exutil.CLI) {
//csv, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub/"+so.PackageName, "-ojsonpath={.status.installedCSV}").Output()
resource{"subscription", so.PackageName, so.Namespace}.clear(oc)
//_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", so.Namespace, "csv", csv).Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", so.Namespace, "csv", "-l", "operators.coreos.com/"+so.PackageName+"."+so.Namespace+"=").Execute()
	// do not remove namespace openshift-logging and openshift-operators-redhat, and preserve the operatorgroup as there may be several operators deployed in one namespace
// for example: loki-operator
if so.Namespace != "openshift-logging" && so.Namespace != "openshift-operators-redhat" && !strings.HasPrefix(so.Namespace, "e2e-test-") {
deleteNamespace(oc, so.Namespace)
}
}
func (so *SubscriptionObjects) getInstalledCSV(oc *exutil.CLI) string {
installedCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub", so.PackageName, "-ojsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return installedCSV
}
// WaitForDeploymentPodsToBeReady waits for the specific deployment to be ready
func WaitForDeploymentPodsToBeReady(oc *exutil.CLI, namespace string, name string) {
var selectors map[string]string
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
deployment, err := oc.AdminKubeClient().AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for deployment/%s to appear\n", name)
return false, nil
}
return false, err
}
selectors = deployment.Spec.Selector.MatchLabels
if deployment.Status.AvailableReplicas == *deployment.Spec.Replicas && deployment.Status.UpdatedReplicas == *deployment.Spec.Replicas {
e2e.Logf("Deployment %s available (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s deployment (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("deployment %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("deployment %s is not available", name))
}
func waitForStatefulsetReady(oc *exutil.CLI, namespace string, name string) {
var selectors map[string]string
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
ss, err := oc.AdminKubeClient().AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for statefulset/%s to appear\n", name)
return false, nil
}
return false, err
}
selectors = ss.Spec.Selector.MatchLabels
if ss.Status.ReadyReplicas == *ss.Spec.Replicas && ss.Status.UpdatedReplicas == *ss.Spec.Replicas {
e2e.Logf("statefulset %s available (%d/%d)\n", name, ss.Status.ReadyReplicas, *ss.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s statefulset (%d/%d)\n", name, ss.Status.ReadyReplicas, *ss.Spec.Replicas)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("statefulset %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("statefulset %s is not available", name))
}
// WaitForDaemonsetPodsToBeReady waits for all the pods controlled by the ds to be ready
func WaitForDaemonsetPodsToBeReady(oc *exutil.CLI, ns string, name string) {
var selectors map[string]string
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
daemonset, err := oc.AdminKubeClient().AppsV1().DaemonSets(ns).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for daemonset/%s to appear\n", name)
return false, nil
}
return false, err
}
selectors = daemonset.Spec.Selector.MatchLabels
if daemonset.Status.DesiredNumberScheduled > 0 && daemonset.Status.NumberReady == daemonset.Status.DesiredNumberScheduled && daemonset.Status.UpdatedNumberScheduled == daemonset.Status.DesiredNumberScheduled {
e2e.Logf("Daemonset/%s is available (%d/%d)\n", name, daemonset.Status.NumberReady, daemonset.Status.DesiredNumberScheduled)
return true, nil
}
e2e.Logf("Waiting for full availability of %s daemonset (%d/%d)\n", name, daemonset.Status.NumberReady, daemonset.Status.DesiredNumberScheduled)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("daemonset %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Daemonset %s is not available", name))
}
func waitForPodReadyWithLabel(oc *exutil.CLI, ns string, label string) {
var count int
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
pods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: label})
if err != nil {
return false, err
}
count = len(pods.Items)
if count == 0 {
e2e.Logf("Waiting for pod with label %s to appear\n", label)
return false, nil
}
ready := true
for _, pod := range pods.Items {
if pod.Status.Phase != "Running" {
ready = false
break
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
ready = false
break
}
}
}
if !ready {
e2e.Logf("Waiting for pod with label %s to be ready...\n", label)
}
return ready, nil
})
if err != nil && count != 0 {
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("pod with label %s is not ready:\nconditions: %s\ncontainer status: %s", label, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("pod with label %s is not ready", label))
}
func getPodNames(oc *exutil.CLI, ns, label string) ([]string, error) {
var names []string
pods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: label})
if err != nil {
return names, err
}
if len(pods.Items) == 0 {
return names, fmt.Errorf("no pod(s) match label %s in namespace %s", label, ns)
}
for _, pod := range pods.Items {
names = append(names, pod.Name)
}
return names, nil
}
type resource struct {
kind string
name string
namespace string
}
// WaitUntilResourceIsGone waits for the resource to be removed from the cluster
func (r resource) WaitUntilResourceIsGone(oc *exutil.CLI) error {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", output)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
return true, nil
}
return true, err
}
return false, nil
})
if err != nil {
return fmt.Errorf("can't remove %s/%s in %s project", r.kind, r.name, r.namespace)
}
return nil
}
// clear deletes the object from the cluster and waits until it is gone
func (r resource) clear(oc *exutil.CLI) error {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", msg)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
return nil
}
return err
}
err = r.WaitUntilResourceIsGone(oc)
return err
}
func (r resource) WaitForResourceToAppear(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
e2e.Logf("wait %s %s ready ... ", r.kind, r.name)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
msg := fmt.Sprintf("%v", output)
if strings.Contains(msg, "NotFound") {
return false, nil
}
return false, err
}
e2e.Logf("found %s %s", r.kind, r.name)
return true, nil
})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource %s/%s did not appear", r.kind, r.name))
}
func (r resource) applyFromTemplate(oc *exutil.CLI, parameters ...string) error {
parameters = append(parameters, "-n", r.namespace)
file, err := processTemplate(oc, parameters...)
defer os.Remove(file)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
output, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", r.namespace).Output()
if err != nil {
return fmt.Errorf("can't apply resource: %s", output)
}
r.WaitForResourceToAppear(oc)
return nil
}
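// Illustrative usage sketch (comment only, object names are hypothetical): the
// resource helper wraps `oc get/delete` for a single named object.
/*
	cm := resource{kind: "configmap", name: "collector-config", namespace: "openshift-logging"}
	cm.WaitForResourceToAppear(oc) // wait until the object exists
	defer cm.clear(oc)             // delete it and wait until it is gone
*/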
type clusterlogforwarder struct {
collectApplicationLogs bool // optional, if true, will add cluster-role/collect-application-logs to the serviceAccount
collectAuditLogs bool // optional, if true, will add cluster-role/collect-audit-logs to the serviceAccount
collectInfrastructureLogs bool // optional, if true, will add cluster-role/collect-infrastructure-logs to the serviceAccount
	enableMonitoring bool // optional, if true, will add label `openshift.io/cluster-monitoring: "true"` to the project, and create role/prometheus-k8s rolebinding/prometheus-k8s in the namespace; only takes effect when !(clf.namespace == "openshift-operators-redhat" || clf.namespace == "openshift-logging")
name string
namespace string
serviceAccountName string
templateFile string // the template used to create clusterlogforwarder, no default value
secretName string // optional, if it's specified, when creating CLF, the parameter `"SECRET_NAME="+clf.secretName` will be added automatically
	waitForPodReady bool // optional, if true, will wait for the collector pods to be ready
}
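// Illustrative usage sketch (comment only), mirroring how the test cases in
// this suite drive the struct; the name, service account and output URL are
// hypothetical.
/*
	clf := clusterlogforwarder{
		name:                   "collector",
		namespace:              oc.Namespace(),
		serviceAccountName:     "logcollector",
		templateFile:           filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "otlp.yaml"),
		collectApplicationLogs: true,
		waitForPodReady:        true,
	}
	defer clf.delete(oc)
	clf.create(oc, "URL=http://fake-otel-collector."+clf.namespace+".svc:4318")
*/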
// create clusterlogforwarder CR from a template
func (clf *clusterlogforwarder) create(oc *exutil.CLI, optionalParameters ...string) {
//parameters := []string{"-f", clf.templateFile, "--ignore-unknown-parameters=true", "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
parameters := []string{"-f", clf.templateFile, "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
if clf.secretName != "" {
parameters = append(parameters, "SECRET_NAME="+clf.secretName)
}
if clf.serviceAccountName != "" {
clf.createServiceAccount(oc)
parameters = append(parameters, "SERVICE_ACCOUNT_NAME="+clf.serviceAccountName)
}
if len(optionalParameters) > 0 {
parameters = append(parameters, optionalParameters...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", clf.namespace).Execute()
if err != nil {
e2e.Failf("error creating clusterlogforwarder: %v", err)
}
resource{"clusterlogforwarders.observability.openshift.io", clf.name, clf.namespace}.WaitForResourceToAppear(oc)
if clf.waitForPodReady {
clf.waitForCollectorPodsReady(oc)
}
if clf.namespace != cloNS && clf.namespace != loNS && clf.enableMonitoring {
enableClusterMonitoring(oc, clf.namespace)
}
}
// createServiceAccount creates the serviceaccount and add the required clusterroles to the serviceaccount
func (clf *clusterlogforwarder) createServiceAccount(oc *exutil.CLI) {
_, err := oc.AdminKubeClient().CoreV1().ServiceAccounts(clf.namespace).Get(context.Background(), clf.serviceAccountName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = createServiceAccount(oc, clf.namespace, clf.serviceAccountName)
if err != nil {
e2e.Failf("can't create the serviceaccount: %v", err)
}
}
if clf.collectApplicationLogs {
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-application-logs")
o.Expect(err).NotTo(o.HaveOccurred())
}
if clf.collectInfrastructureLogs {
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
o.Expect(err).NotTo(o.HaveOccurred())
}
if clf.collectAuditLogs {
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-audit-logs")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
func createServiceAccount(oc *exutil.CLI, namespace, name string) error {
	return oc.AsAdmin().WithoutNamespace().Run("create").Args("serviceaccount", name, "-n", namespace).Execute()
}
}
func addClusterRoleToServiceAccount(oc *exutil.CLI, namespace, serviceAccountName, clusterRole string) error {
return oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", clusterRole, fmt.Sprintf("system:serviceaccount:%s:%s", namespace, serviceAccountName)).Execute()
}
func removeClusterRoleFromServiceAccount(oc *exutil.CLI, namespace, serviceAccountName, clusterRole string) error {
return oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", clusterRole, fmt.Sprintf("system:serviceaccount:%s:%s", namespace, serviceAccountName)).Execute()
}
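// exampleClusterLogForwarderLifecycle is an illustrative sketch (not wired into any test) of how
// the clusterlogforwarder helper above is normally driven. The template path, secret name and the
// extra URL parameter are hypothetical values used for demonstration only.
func exampleClusterLogForwarderLifecycle(oc *exutil.CLI) {
	clf := clusterlogforwarder{
		name:                      "instance",
		namespace:                 loggingNS,
		serviceAccountName:        "clf-" + getRandomString(),
		templateFile:              "/path/to/clf-template.yaml", // hypothetical template
		secretName:                "clf-secret",                 // hypothetical secret
		collectApplicationLogs:    true,
		collectInfrastructureLogs: true,
		waitForPodReady:           true,
	}
	defer clf.delete(oc)
	// additional template parameters can be appended as optionalParameters
	clf.create(oc, "URL=http://example.com") // hypothetical parameter
}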
// update existing clusterlogforwarder CR
// if template is specified, run `oc process -f template -p patches | oc apply -f -`
// if template is not specified, run `oc patch clusterlogforwarder/${clf.name} -p patches`
// when patching, the `--type=` flag must be appended as the last element of patches
func (clf *clusterlogforwarder) update(oc *exutil.CLI, template string, patches ...string) {
var err error
if template != "" {
//parameters := []string{"-f", template, "--ignore-unknown-parameters=true", "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
parameters := []string{"-f", template, "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
if clf.secretName != "" {
parameters = append(parameters, "SECRET_NAME="+clf.secretName)
}
parameters = append(parameters, "SERVICE_ACCOUNT_NAME="+clf.serviceAccountName)
if len(patches) > 0 {
parameters = append(parameters, patches...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", clf.namespace).Execute()
} else {
parameters := []string{"clusterlogforwarders.observability.openshift.io/" + clf.name, "-n", clf.namespace, "-p"}
parameters = append(parameters, patches...)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(parameters...).Execute()
}
if err != nil {
e2e.Failf("error updating clusterlogforwarder: %v", err)
}
}
// patch patches the existing clusterlogforwarder CR and returns the command output and error
func (clf *clusterlogforwarder) patch(oc *exutil.CLI, patchString string) (string, error) {
	parameters := []string{"clusterlogforwarders.observability.openshift.io/" + clf.name, "-n", clf.namespace, "-p"}
	parameters = append(parameters, patchString, "--type=json")
return oc.AsAdmin().WithoutNamespace().Run("patch").Args(parameters...).Output()
}
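// exampleClusterLogForwarderUpdate is an illustrative sketch (not wired into any test) of the two
// update mechanisms described above. Both JSON patches are hypothetical examples.
func exampleClusterLogForwarderUpdate(oc *exutil.CLI, clf clusterlogforwarder) {
	// patch mode (no template): the patches must end with the --type flag
	clf.update(oc, "", `[{"op": "remove", "path": "/spec/filters"}]`, "--type=json")
	// patch() returns the command output so callers can inspect it
	out, err := clf.patch(oc, `[{"op": "replace", "path": "/spec/managementState", "value": "Managed"}]`)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("patch output: %s", out)
}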
// delete the clusterlogforwarder CR
func (clf *clusterlogforwarder) delete(oc *exutil.CLI) {
err := resource{"clusterlogforwarders.observability.openshift.io", clf.name, clf.namespace}.clear(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("clusterlogforwarder/%s in project/%s is not deleted", clf.name, clf.namespace))
if len(clf.serviceAccountName) > 0 {
if clf.collectApplicationLogs {
removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-application-logs")
}
if clf.collectInfrastructureLogs {
removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
}
if clf.collectAuditLogs {
removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-audit-logs")
}
resource{"serviceaccount", clf.serviceAccountName, clf.namespace}.clear(oc)
}
err = resource{"daemonset", clf.name, clf.namespace}.WaitUntilResourceIsGone(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("daemonset/%s in project/%s is not deleted", clf.name, clf.namespace))
}
func (clf *clusterlogforwarder) waitForCollectorPodsReady(oc *exutil.CLI) {
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
}
func (clf *clusterlogforwarder) getCollectorNodeNames(oc *exutil.CLI) ([]string, error) {
var nodes []string
pods, err := oc.AdminKubeClient().CoreV1().Pods(clf.namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=collector,app.kubernetes.io/instance=" + clf.name})
for _, pod := range pods.Items {
nodes = append(nodes, pod.Spec.NodeName)
}
return nodes, err
}
type logFileMetricExporter struct {
name string
namespace string
template string
waitPodsReady bool
}
func (lfme *logFileMetricExporter) create(oc *exutil.CLI, optionalParameters ...string) {
if lfme.name == "" {
lfme.name = "instance"
}
if lfme.namespace == "" {
lfme.namespace = loggingNS
}
if lfme.template == "" {
lfme.template = exutil.FixturePath("testdata", "logging", "logfilemetricexporter", "lfme.yaml")
}
parameters := []string{"-f", lfme.template, "-p", "NAME=" + lfme.name, "NAMESPACE=" + lfme.namespace}
if len(optionalParameters) > 0 {
parameters = append(parameters, optionalParameters...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", lfme.namespace).Execute()
if err != nil {
e2e.Failf("error creating logfilemetricexporter: %v", err)
}
resource{"logfilemetricexporter", lfme.name, lfme.namespace}.WaitForResourceToAppear(oc)
if lfme.waitPodsReady {
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
}
}
func (lfme *logFileMetricExporter) delete(oc *exutil.CLI) {
err := resource{"logfilemetricexporter", lfme.name, lfme.namespace}.clear(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("logfilemetricexporter/%s in project/%s is not deleted", lfme.name, lfme.namespace))
err = resource{"daemonset", "logfilesmetricexporter", lfme.namespace}.WaitUntilResourceIsGone(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("ds/logfilesmetricexporter in project/%s is not deleted", lfme.namespace))
}
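// exampleLogFileMetricExporter is an illustrative sketch (not wired into any test): creating a
// LogFileMetricExporter with the default name/namespace/template and cleaning it up afterwards.
func exampleLogFileMetricExporter(oc *exutil.CLI) {
	lfme := logFileMetricExporter{waitPodsReady: true}
	defer lfme.delete(oc)
	lfme.create(oc)
}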
func deleteNamespace(oc *exutil.CLI, ns string) {
err := oc.AdminKubeClient().CoreV1().Namespaces().Delete(context.Background(), ns, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
_, err = oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Namespace %s is not deleted in 3 minutes", ns))
}
func getStorageClassName(oc *exutil.CLI) (string, error) {
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
if err != nil {
return "", err
}
if len(scs.Items) == 0 {
return "", fmt.Errorf("there is no storageclass in the cluster")
}
for _, sc := range scs.Items {
if sc.ObjectMeta.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
return sc.Name, nil
}
}
return scs.Items[0].Name, nil
}
// Assert the status of a resource
func assertResourceStatus(oc *exutil.CLI, kind, name, namespace, jsonpath, exptdStatus string) {
parameters := []string{kind, name, "-o", "jsonpath=" + jsonpath}
if namespace != "" {
parameters = append(parameters, "-n", namespace)
}
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(parameters...).Output()
if err != nil {
return false, err
}
if strings.Compare(status, exptdStatus) != 0 {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s/%s value for %s is not %s", kind, name, jsonpath, exptdStatus))
}
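// exampleAssertResourceStatus is an illustrative sketch (not wired into any test): waiting until a
// deployment reports one ready replica. The deployment name, namespace and expected value are
// hypothetical.
func exampleAssertResourceStatus(oc *exutil.CLI) {
	assertResourceStatus(oc, "deployment", "my-server", "my-namespace", "{.status.readyReplicas}", "1")
}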
func getRouteAddress(oc *exutil.CLI, ns, routeName string) string {
route, err := oc.AdminRouteClient().RouteV1().Routes(ns).Get(context.Background(), routeName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
return route.Spec.Host
}
func getSAToken(oc *exutil.CLI, name, ns string) string {
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", name, "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return token
}
// enableClusterMonitoring add label `openshift.io/cluster-monitoring: "true"` to the project, and create role/prometheus-k8s rolebinding/prometheus-k8s in the namespace
func enableClusterMonitoring(oc *exutil.CLI, namespace string) {
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", namespace, "openshift.io/cluster-monitoring=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
file := exutil.FixturePath("testdata", "logging", "prometheus-k8s-rbac.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-n", namespace, "-f", file).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// queryPrometheus returns the prometheus metrics which match the query string
// token: the user token used to run the http request, if it's not specified, it will use the token of sa/prometheus-k8s in openshift-monitoring project
// path: the api path, for example: /api/v1/query?
// query: the metric/alert you want to search, e.g.: es_index_namespaces_total
// action: it can be "GET", "get", "Get", "POST", "post", "Post"
func queryPrometheus(oc *exutil.CLI, token string, path string, query string, action string) (*prometheusQueryResult, error) {
var bearerToken string
var err error
if token == "" {
bearerToken = getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
} else {
bearerToken = token
}
address := "https://" + getRouteAddress(oc, "openshift-monitoring", "prometheus-k8s")
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add("Authorization", "Bearer "+bearerToken)
params := url.Values{}
if len(query) > 0 {
params.Add("query", query)
}
var p prometheusQueryResult
resp, err := doHTTPRequest(h, address, path, params.Encode(), action, true, 5, nil, 200)
if err != nil {
return nil, err
}
err = json.Unmarshal(resp, &p)
if err != nil {
return nil, err
}
return &p, nil
}
func getMetric(oc *exutil.CLI, token, query string) ([]metric, error) {
res, err := queryPrometheus(oc, token, "/api/v1/query", query, "GET")
if err != nil {
return []metric{}, err
}
return res.Data.Result, nil
}
func checkMetric(oc *exutil.CLI, token, query string, timeInMinutes int) {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
metrics, err := getMetric(oc, token, query)
if err != nil {
return false, err
}
if len(metrics) == 0 {
e2e.Logf("no metrics found by query: %s, try next time", query)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't find metrics by %s in %d minutes", query, timeInMinutes))
}
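// examplePrometheusQuery is an illustrative sketch (not wired into any test) of the Prometheus
// helpers above. The PromQL query is a hypothetical example; an empty token falls back to the
// token of sa/prometheus-k8s in openshift-monitoring.
func examplePrometheusQuery(oc *exutil.CLI) {
	// wait up to 5 minutes for the metric to appear
	checkMetric(oc, "", `sum(up{job="collector"})`, 5)
	// or fetch the raw samples for further inspection
	metrics, err := getMetric(oc, "", `sum(up{job="collector"})`)
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("got %d samples", len(metrics))
}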
func getAlert(oc *exutil.CLI, token, alertSelector string) ([]alert, error) {
var al []alert
alerts, err := queryPrometheus(oc, token, "/api/v1/alerts", "", "GET")
if err != nil {
return al, err
}
for i := 0; i < len(alerts.Data.Alerts); i++ {
if alerts.Data.Alerts[i].Labels.AlertName == alertSelector {
al = append(al, alerts.Data.Alerts[i])
}
}
return al, nil
}
func checkAlert(oc *exutil.CLI, token, alertName, status string, timeInMinutes int) {
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
alerts, err := getAlert(oc, token, alertName)
if err != nil {
return false, err
}
for _, alert := range alerts {
if strings.Contains(status, alert.State) {
return true, nil
}
}
e2e.Logf("Waiting for alert %s to be in state %s...", alertName, status)
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s alert is not %s in %d minutes", alertName, status, timeInMinutes))
}
// WaitUntilPodsAreGone waits for pods selected with labelselector to be removed
func WaitUntilPodsAreGone(oc *exutil.CLI, namespace string, labelSelector string) {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector="+labelSelector, "-n", namespace).Output()
if err != nil {
return false, err
}
errstring := fmt.Sprintf("%v", output)
if strings.Contains(errstring, "No resources found") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Error waiting for pods to be removed using label selector %s", labelSelector))
}
// Check logs from resource
func checkLogsFromRs(oc *exutil.CLI, kind, name, namespace, containerName, expected string) {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(kind+`/`+name, "-n", namespace, "-c", containerName).Output()
if err != nil {
e2e.Logf("Can't get logs from resource, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.Match(expected, []byte(output)); !matched {
e2e.Logf("Can't find the expected string\n")
return false, nil
}
e2e.Logf("Check the logs succeed!!\n")
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s is not expected for %s", expected, name))
}
func getCurrentCSVFromPackage(oc *exutil.CLI, source, channel, packagemanifest string) string {
var currentCSV string
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", "-n", "openshift-marketplace", "-l", "catalog="+source, "-ojsonpath={.items}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
packMS := []PackageManifest{}
json.Unmarshal([]byte(output), &packMS)
for _, pm := range packMS {
if pm.Name == packagemanifest {
for _, channels := range pm.Status.Channels {
if channels.Name == channel {
currentCSV = channels.CurrentCSV
break
}
}
}
}
return currentCSV
}
func checkNetworkType(oc *exutil.CLI) string {
output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.defaultNetwork.type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.ToLower(output)
}
func getAppDomain(oc *exutil.CLI) (string, error) {
subDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ingresses.config/cluster", "-ojsonpath={.spec.domain}").Output()
if err != nil {
return "", err
}
return subDomain, nil
}
type certsConf struct {
serverName string
namespace string
passPhrase string //client private key passphrase
}
func (certs certsConf) generateCerts(oc *exutil.CLI, keysPath string) {
generateCertsSH := exutil.FixturePath("testdata", "logging", "external-log-stores", "cert_generation.sh")
domain, err := getAppDomain(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cmd := []string{generateCertsSH, keysPath, certs.namespace, certs.serverName, domain}
if certs.passPhrase != "" {
cmd = append(cmd, certs.passPhrase)
}
err = exec.Command("sh", cmd...).Run()
o.Expect(err).NotTo(o.HaveOccurred())
}
// expect: true means the resource output is expected to match/contain the expectedContent, false means it must not match/contain it;
// compare: true means compare the output with the expectedContent exactly, false means check whether the output contains the expectedContent;
// args: the arguments passed to `oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()`
func checkResource(oc *exutil.CLI, expect bool, compare bool, expectedContent string, args []string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
if err != nil {
if strings.Contains(output, "NotFound") {
return false, nil
}
return false, err
}
if compare {
res := strings.Compare(output, expectedContent)
if (res == 0 && expect) || (res != 0 && !expect) {
return true, nil
}
return false, nil
}
res := strings.Contains(output, expectedContent)
if (res && expect) || (!res && !expect) {
return true, nil
}
return false, nil
})
if expect {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The content doesn't match/contain %s", expectedContent))
} else {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s still exists in the resource", expectedContent))
}
}
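// exampleCheckResource is an illustrative sketch (not wired into any test) of the expect/compare
// combinations of checkResource. The resource names, jsonpath and expected strings are hypothetical.
func exampleCheckResource(oc *exutil.CLI) {
	// exact comparison: wait until the output equals "True"
	checkResource(oc, true, true, "True", []string{"clusterlogforwarders.observability.openshift.io/instance", "-n", loggingNS, "-o", `jsonpath={.status.conditions[?(@.type=="Ready")].status}`})
	// containment check: wait until the output no longer contains the given string
	checkResource(oc, false, false, "my-removed-output", []string{"clusterlogforwarders.observability.openshift.io/instance", "-n", loggingNS, "-o", "yaml"})
}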
type rsyslog struct {
	serverName          string //the name of the rsyslog server, also used to name the svc/cm/sa/secret
	namespace           string //the namespace where the rsyslog server is deployed
	tls                 bool
	secretName          string //the name of the secret for the collector to use
	loggingNS           string //the namespace where the collector pods are deployed
	clientKeyPassphrase string //client private key passphrase
}
func (r rsyslog) createPipelineSecret(oc *exutil.CLI, keysPath string) {
secret := resource{"secret", r.secretName, r.loggingNS}
cmd := []string{"secret", "generic", secret.name, "-n", secret.namespace, "--from-file=ca-bundle.crt=" + keysPath + "/ca.crt"}
if r.clientKeyPassphrase != "" {
cmd = append(cmd, "--from-file=tls.key="+keysPath+"/client.key", "--from-file=tls.crt="+keysPath+"/client.crt", "--from-literal=passphrase="+r.clientKeyPassphrase)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmd...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
secret.WaitForResourceToAppear(oc)
}
func (r rsyslog) deploy(oc *exutil.CLI) {
// create SA
sa := resource{"serviceaccount", r.serverName, r.namespace}
err := oc.WithoutNamespace().Run("create").Args("serviceaccount", sa.name, "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sa.WaitForResourceToAppear(oc)
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:%s", r.namespace, r.serverName), "-n", r.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
filePath := []string{"testdata", "logging", "external-log-stores", "rsyslog"}
// create secrets if needed
if r.tls {
o.Expect(r.secretName).NotTo(o.BeEmpty())
// create a temporary directory
baseDir := exutil.FixturePath("testdata", "logging")
keysPath := filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err = os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{r.serverName, r.namespace, r.clientKeyPassphrase}
cert.generateCerts(oc, keysPath)
// create pipelinesecret
r.createPipelineSecret(oc, keysPath)
// create secret for rsyslog server
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", r.serverName, "-n", r.namespace, "--from-file=server.key="+keysPath+"/server.key", "--from-file=server.crt="+keysPath+"/server.crt", "--from-file=ca_bundle.crt="+keysPath+"/ca.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
filePath = append(filePath, "secure")
} else {
filePath = append(filePath, "insecure")
}
// create configmap/deployment/svc
cm := resource{"configmap", r.serverName, r.namespace}
cmFilePath := append(filePath, "configmap.yaml")
cmFile := exutil.FixturePath(cmFilePath...)
err = cm.applyFromTemplate(oc, "-f", cmFile, "-n", r.namespace, "-p", "NAMESPACE="+r.namespace, "-p", "NAME="+r.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
deploy := resource{"deployment", r.serverName, r.namespace}
deployFilePath := append(filePath, "deployment.yaml")
deployFile := exutil.FixturePath(deployFilePath...)
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", r.namespace, "-p", "NAMESPACE="+r.namespace, "-p", "NAME="+r.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, r.namespace, r.serverName)
svc := resource{"svc", r.serverName, r.namespace}
svcFilePath := append(filePath, "svc.yaml")
svcFile := exutil.FixturePath(svcFilePath...)
err = svc.applyFromTemplate(oc, "-f", svcFile, "-n", r.namespace, "-p", "NAMESPACE="+r.namespace, "-p", "NAME="+r.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (r rsyslog) remove(oc *exutil.CLI) {
resource{"serviceaccount", r.serverName, r.namespace}.clear(oc)
if r.tls {
resource{"secret", r.serverName, r.namespace}.clear(oc)
resource{"secret", r.secretName, r.loggingNS}.clear(oc)
}
resource{"configmap", r.serverName, r.namespace}.clear(oc)
resource{"deployment", r.serverName, r.namespace}.clear(oc)
resource{"svc", r.serverName, r.namespace}.clear(oc)
}
func (r rsyslog) getPodName(oc *exutil.CLI) string {
pods, err := oc.AdminKubeClient().CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=" + r.serverName})
o.Expect(err).NotTo(o.HaveOccurred())
var names []string
for i := 0; i < len(pods.Items); i++ {
names = append(names, pods.Items[i].Name)
}
return names[0]
}
func (r rsyslog) checkData(oc *exutil.CLI, expect bool, filename string) {
cmd := "ls -l /var/log/clf/" + filename
if expect {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(r.namespace, r.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return false, nil
}
return false, err
}
return strings.Contains(stdout, filename), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s doesn't exist", filename))
} else {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(r.namespace, r.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return true, nil
}
return false, err
}
return strings.Contains(stdout, "No such file or directory"), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s exists", filename))
}
}
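// exampleRsyslogServer is an illustrative sketch (not wired into any test): deploying a TLS-enabled
// rsyslog receiver with the helpers above and verifying a forwarded log file shows up. The
// namespace, secret name and file name are hypothetical values.
func exampleRsyslogServer(oc *exutil.CLI) {
	rs := rsyslog{
		serverName: "rsyslogserver",
		namespace:  "my-rsyslog-ns",
		tls:        true,
		secretName: "rsyslog-secret",
		loggingNS:  loggingNS,
	}
	defer rs.remove(oc)
	rs.deploy(oc)
	// once a ClusterLogForwarder forwards to this server, the expected file should appear under /var/log/clf/
	rs.checkData(oc, true, "app-container.log")
}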
type fluentdServer struct {
	serverName                 string //the name of the fluentd server, also used to name the svc/cm/sa/secret
	namespace                  string //the namespace where the fluentd server is deployed
	serverAuth                 bool
	clientAuth                 bool   //can only be set to true when serverAuth is true
	clientPrivateKeyPassphrase string //can only be set when clientAuth is true
	sharedKey                  string //if not empty, the shared_key is set; only works when serverAuth is true
	secretName                 string //the name of the secret for the collector to use
	loggingNS                  string //the namespace where the collector pods are deployed
	inPluginType               string //forward or http
}
func (f fluentdServer) createPipelineSecret(oc *exutil.CLI, keysPath string) {
secret := resource{"secret", f.secretName, f.loggingNS}
cmd := []string{"secret", "generic", secret.name, "-n", secret.namespace, "--from-file=ca-bundle.crt=" + keysPath + "/ca.crt"}
if f.clientAuth {
cmd = append(cmd, "--from-file=tls.key="+keysPath+"/client.key", "--from-file=tls.crt="+keysPath+"/client.crt")
}
if f.clientPrivateKeyPassphrase != "" {
cmd = append(cmd, "--from-literal=passphrase="+f.clientPrivateKeyPassphrase)
}
if f.sharedKey != "" {
cmd = append(cmd, "--from-literal=shared_key="+f.sharedKey)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmd...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
secret.WaitForResourceToAppear(oc)
}
func (f fluentdServer) deploy(oc *exutil.CLI) {
// create SA
sa := resource{"serviceaccount", f.serverName, f.namespace}
err := oc.WithoutNamespace().Run("create").Args("serviceaccount", sa.name, "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sa.WaitForResourceToAppear(oc)
//err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:%s", f.namespace, f.serverName), "-n", f.namespace).Execute()
//o.Expect(err).NotTo(o.HaveOccurred())
filePath := []string{"testdata", "logging", "external-log-stores", "fluentd"}
// create secrets if needed
if f.serverAuth {
o.Expect(f.secretName).NotTo(o.BeEmpty())
filePath = append(filePath, "secure")
// create a temporary directory
baseDir := exutil.FixturePath("testdata", "logging")
keysPath := filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err = os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
//generate certs
cert := certsConf{f.serverName, f.namespace, f.clientPrivateKeyPassphrase}
cert.generateCerts(oc, keysPath)
//create pipelinesecret
f.createPipelineSecret(oc, keysPath)
//create secret for fluentd server
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", f.serverName, "-n", f.namespace, "--from-file=ca-bundle.crt="+keysPath+"/ca.crt", "--from-file=tls.key="+keysPath+"/server.key", "--from-file=tls.crt="+keysPath+"/server.crt", "--from-file=ca.key="+keysPath+"/ca.key").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
filePath = append(filePath, "insecure")
}
// create configmap/deployment/svc
cm := resource{"configmap", f.serverName, f.namespace}
	//when the prefix is http-, the fluentd server uses the http input plugin
cmFilePrefix := ""
if f.inPluginType == "http" {
cmFilePrefix = "http-"
}
var cmFileName string
if !f.serverAuth {
cmFileName = cmFilePrefix + "configmap.yaml"
} else {
if f.clientAuth {
if f.sharedKey != "" {
cmFileName = "cm-mtls-share.yaml"
} else {
cmFileName = cmFilePrefix + "cm-mtls.yaml"
}
} else {
if f.sharedKey != "" {
cmFileName = "cm-serverauth-share.yaml"
} else {
cmFileName = cmFilePrefix + "cm-serverauth.yaml"
}
}
}
cmFilePath := append(filePath, cmFileName)
cmFile := exutil.FixturePath(cmFilePath...)
cCmCmd := []string{"-f", cmFile, "-n", f.namespace, "-p", "NAMESPACE=" + f.namespace, "-p", "NAME=" + f.serverName}
if f.sharedKey != "" {
cCmCmd = append(cCmCmd, "-p", "SHARED_KEY="+f.sharedKey)
}
err = cm.applyFromTemplate(oc, cCmCmd...)
o.Expect(err).NotTo(o.HaveOccurred())
deploy := resource{"deployment", f.serverName, f.namespace}
deployFilePath := append(filePath, "deployment.yaml")
deployFile := exutil.FixturePath(deployFilePath...)
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", f.namespace, "-p", "NAMESPACE="+f.namespace, "-p", "NAME="+f.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, f.namespace, f.serverName)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", f.namespace, "deployment", f.serverName, "--name="+f.serverName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (f fluentdServer) remove(oc *exutil.CLI) {
resource{"serviceaccount", f.serverName, f.namespace}.clear(oc)
if f.serverAuth {
resource{"secret", f.serverName, f.namespace}.clear(oc)
resource{"secret", f.secretName, f.loggingNS}.clear(oc)
}
resource{"configmap", f.serverName, f.namespace}.clear(oc)
resource{"deployment", f.serverName, f.namespace}.clear(oc)
resource{"svc", f.serverName, f.namespace}.clear(oc)
}
func (f fluentdServer) getPodName(oc *exutil.CLI) string {
pods, err := oc.AdminKubeClient().CoreV1().Pods(f.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=" + f.serverName})
o.Expect(err).NotTo(o.HaveOccurred())
var names []string
for i := 0; i < len(pods.Items); i++ {
names = append(names, pods.Items[i].Name)
}
return names[0]
}
// check the data in fluentd server
// filename is the name of a file you want to check
// expect true means you expect the file to exist, false means the file is not expected to exist
func (f fluentdServer) checkData(oc *exutil.CLI, expect bool, filename string) {
cmd := "ls -l /fluentd/log/" + filename
if expect {
err := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(f.namespace, f.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return false, nil
}
return false, err
}
return strings.Contains(stdout, filename), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s doesn't exist", filename))
} else {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(f.namespace, f.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return true, nil
}
return false, err
}
return strings.Contains(stdout, "No such file or directory"), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s exists", filename))
}
}
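// exampleFluentdServer is an illustrative sketch (not wired into any test): an mTLS fluentd
// receiver deployed with the helpers above. The namespace, shared key, secret name and checked
// file name are hypothetical values.
func exampleFluentdServer(oc *exutil.CLI) {
	f := fluentdServer{
		serverName:   "fluentdserver",
		namespace:    "my-fluentd-ns",
		serverAuth:   true,
		clientAuth:   true,
		sharedKey:    "my-shared-key",
		secretName:   "fluentd-secret",
		loggingNS:    loggingNS,
		inPluginType: "forward",
	}
	defer f.remove(oc)
	f.deploy(oc)
	// once logs are forwarded, the expected file should appear under /fluentd/log/
	f.checkData(oc, true, "app.log")
}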
// return the infrastructureName. For example: anli922-jglp4
func getInfrastructureName(oc *exutil.CLI) string {
infrastructureName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.infrastructureName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return infrastructureName
}
func getDataFromKafkaConsumerPod(oc *exutil.CLI, kafkaNS, consumerPod string) ([]LogEntity, error) {
e2e.Logf("get logs from kakfa consumerPod %s", consumerPod)
var logs []LogEntity
//wait up to 5 minutes for logs appear in consumer pod
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", kafkaNS, consumerPod, "--since=30s", "--tail=30").Output()
if err != nil {
e2e.Logf("error when oc logs consumer pod, continue")
return false, nil
}
for _, line := range strings.Split(strings.TrimSuffix(output, "\n"), "\n") {
			//exclude the kafka-consumer's own logs, for example:
//[2024-11-09 07:25:47,953] WARN [Consumer clientId=consumer-console-consumer-99163-1, groupId=console-consumer-99163] Error while fetching metadata with correlation id 165
//: {topic-logging-app=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
r, _ := regexp.Compile(`^{"@timestamp":.*}`)
if r.MatchString(line) {
var log LogEntity
err = json.Unmarshal([]byte(line), &log)
if err != nil {
continue
}
logs = append(logs, log)
} else {
continue
}
}
if len(logs) > 0 {
return true, nil
} else {
e2e.Logf("can not find logs in consumerPod %s, continue", consumerPod)
return false, nil
}
})
if err != nil {
return logs, fmt.Errorf("can not find consumer logs in 3 minutes")
}
return logs, nil
}
func getDataFromKafkaByNamespace(oc *exutil.CLI, kafkaNS, consumerPod, namespace string) ([]LogEntity, error) {
data, err := getDataFromKafkaConsumerPod(oc, kafkaNS, consumerPod)
if err != nil {
return nil, err
}
var logs []LogEntity
for _, log := range data {
if log.Kubernetes.NamespaceName == namespace {
logs = append(logs, log)
}
}
return logs, nil
}
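// exampleKafkaConsumerLogs is an illustrative sketch (not wired into any test): reading logs back
// from a kafka consumer pod and filtering them by namespace. The kafka namespace, consumer pod
// name and application namespace are hypothetical values.
func exampleKafkaConsumerLogs(oc *exutil.CLI) {
	logs, err := getDataFromKafkaByNamespace(oc, "my-kafka-ns", "topic-logging-app-consumer-xxxxx", "my-app-ns")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(logs).NotTo(o.BeEmpty())
}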
type kafka struct {
namespace string
kafkasvcName string
zoosvcName string
	authtype string //the auth type; must match one of the kafka folder names under testdata (options: plaintext-ssl, sasl-ssl, sasl-plaintext)
pipelineSecret string //the name of the secret for collectors to use
collectorType string //must be specified when auth type is sasl-ssl/sasl-plaintext
loggingNS string //the namespace where the collector pods are deployed in
}
func (k kafka) deployZookeeper(oc *exutil.CLI) {
zookeeperFilePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "kafka", "zookeeper")
//create zookeeper configmap/svc/StatefulSet
configTemplate := filepath.Join(zookeeperFilePath, "configmap.yaml")
if k.authtype == "plaintext-ssl" {
configTemplate = filepath.Join(zookeeperFilePath, "configmap-ssl.yaml")
}
err := resource{"configmap", k.zoosvcName, k.namespace}.applyFromTemplate(oc, "-n", k.namespace, "-f", configTemplate, "-p", "NAME="+k.zoosvcName, "NAMESPACE="+k.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
zoosvcFile := filepath.Join(zookeeperFilePath, "zookeeper-svc.yaml")
zoosvc := resource{"Service", k.zoosvcName, k.namespace}
err = zoosvc.applyFromTemplate(oc, "-n", k.namespace, "-f", zoosvcFile, "-p", "NAME="+k.zoosvcName, "-p", "NAMESPACE="+k.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
zoosfsFile := filepath.Join(zookeeperFilePath, "zookeeper-statefulset.yaml")
zoosfs := resource{"StatefulSet", k.zoosvcName, k.namespace}
err = zoosfs.applyFromTemplate(oc, "-n", k.namespace, "-f", zoosfsFile, "-p", "NAME="+k.zoosvcName, "-p", "NAMESPACE="+k.namespace, "-p", "SERVICENAME="+zoosvc.name, "-p", "CM_NAME="+k.zoosvcName)
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, k.namespace, "app="+k.zoosvcName)
}
func (k kafka) deployKafka(oc *exutil.CLI) {
kafkaFilePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "kafka")
kafkaConfigmapTemplate := filepath.Join(kafkaFilePath, k.authtype, "kafka-configmap.yaml")
consumerConfigmapTemplate := filepath.Join(kafkaFilePath, k.authtype, "consumer-configmap.yaml")
var keysPath string
if k.authtype == "sasl-ssl" || k.authtype == "plaintext-ssl" {
baseDir := exutil.FixturePath("testdata", "logging")
keysPath = filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
generateCertsSH := filepath.Join(kafkaFilePath, "cert_generation.sh")
stdout, err := exec.Command("sh", generateCertsSH, keysPath, k.namespace).Output()
if err != nil {
e2e.Logf("error generating certs: %s", string(stdout))
e2e.Failf("error generating certs: %v", err)
}
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "kafka-cluster-cert", "-n", k.namespace, "--from-file=ca_bundle.jks="+keysPath+"/ca/ca_bundle.jks", "--from-file=cluster.jks="+keysPath+"/cluster/cluster.jks").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
pipelineSecret := resource{"secret", k.pipelineSecret, k.loggingNS}
kafkaClientCert := resource{"secret", "kafka-client-cert", k.namespace}
	//create kafka secrets and configmap
cmdPipeline := []string{"secret", "generic", pipelineSecret.name, "-n", pipelineSecret.namespace}
cmdClient := []string{"secret", "generic", kafkaClientCert.name, "-n", kafkaClientCert.namespace}
switch k.authtype {
case "sasl-plaintext":
{
cmdClient = append(cmdClient, "--from-literal=username=admin", "--from-literal=password=admin-secret")
cmdPipeline = append(cmdPipeline, "--from-literal=username=admin", "--from-literal=password=admin-secret")
if k.collectorType == "vector" {
cmdPipeline = append(cmdPipeline, "--from-literal=sasl.enable=True", "--from-literal=sasl.mechanisms=PLAIN")
}
}
case "sasl-ssl":
{
cmdClient = append(cmdClient, "--from-file=ca-bundle.jks="+keysPath+"/ca/ca_bundle.jks", "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key", "--from-literal=username=admin", "--from-literal=password=admin-secret")
cmdPipeline = append(cmdPipeline, "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-literal=username=admin", "--from-literal=password=admin-secret")
switch k.collectorType {
case "fluentd":
{
cmdPipeline = append(cmdPipeline, "--from-literal=sasl_over_ssl=true")
}
case "vector":
{
cmdPipeline = append(cmdPipeline, "--from-literal=sasl.enable=True", "--from-literal=sasl.mechanisms=PLAIN", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key")
}
}
}
case "plaintext-ssl":
{
cmdClient = append(cmdClient, "--from-file=ca-bundle.jks="+keysPath+"/ca/ca_bundle.jks", "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key")
cmdPipeline = append(cmdPipeline, "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key")
}
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmdClient...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafkaClientCert.WaitForResourceToAppear(oc)
err = oc.AsAdmin().WithoutNamespace().Run("create").Args(cmdPipeline...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pipelineSecret.WaitForResourceToAppear(oc)
consumerConfigmap := resource{"configmap", "kafka-client", k.namespace}
err = consumerConfigmap.applyFromTemplate(oc, "-n", k.namespace, "-f", consumerConfigmapTemplate, "-p", "NAME="+consumerConfigmap.name, "NAMESPACE="+consumerConfigmap.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
kafkaConfigmap := resource{"configmap", k.kafkasvcName, k.namespace}
err = kafkaConfigmap.applyFromTemplate(oc, "-n", k.namespace, "-f", kafkaConfigmapTemplate, "-p", "NAME="+kafkaConfigmap.name, "NAMESPACE="+kafkaConfigmap.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
//create ClusterRole and ClusterRoleBinding
rbacFile := filepath.Join(kafkaFilePath, "kafka-rbac.yaml")
output, err := oc.AsAdmin().WithoutNamespace().Run("process").Args("-n", k.namespace, "-f", rbacFile, "-p", "NAMESPACE="+k.namespace).OutputToFile(getRandomString() + ".json")
o.Expect(err).NotTo(o.HaveOccurred())
oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", output, "-n", k.namespace).Execute()
//create kafka svc
svcFile := filepath.Join(kafkaFilePath, "kafka-svc.yaml")
svc := resource{"Service", k.kafkasvcName, k.namespace}
err = svc.applyFromTemplate(oc, "-f", svcFile, "-n", svc.namespace, "-p", "NAME="+svc.name, "NAMESPACE="+svc.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
//create kafka StatefulSet
sfsFile := filepath.Join(kafkaFilePath, k.authtype, "kafka-statefulset.yaml")
sfs := resource{"StatefulSet", k.kafkasvcName, k.namespace}
err = sfs.applyFromTemplate(oc, "-f", sfsFile, "-n", k.namespace, "-p", "NAME="+sfs.name, "-p", "NAMESPACE="+sfs.namespace, "-p", "CM_NAME="+k.kafkasvcName)
o.Expect(err).NotTo(o.HaveOccurred())
waitForStatefulsetReady(oc, sfs.namespace, sfs.name)
//create kafka-consumer deployment
deployFile := filepath.Join(kafkaFilePath, k.authtype, "kafka-consumer-deployment.yaml")
deploy := resource{"deployment", "kafka-consumer-" + k.authtype, k.namespace}
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", deploy.namespace, "-p", "NAMESPACE="+deploy.namespace, "NAME="+deploy.name, "CM_NAME=kafka-client")
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, deploy.namespace, deploy.name)
}
func (k kafka) removeZookeeper(oc *exutil.CLI) {
resource{"configmap", k.zoosvcName, k.namespace}.clear(oc)
resource{"svc", k.zoosvcName, k.namespace}.clear(oc)
resource{"statefulset", k.zoosvcName, k.namespace}.clear(oc)
}
func (k kafka) removeKafka(oc *exutil.CLI) {
resource{"secret", "kafka-client-cert", k.namespace}.clear(oc)
resource{"secret", "kafka-cluster-cert", k.namespace}.clear(oc)
resource{"secret", k.pipelineSecret, k.loggingNS}.clear(oc)
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrole/kafka-node-reader").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrolebinding/kafka-node-reader").Execute()
resource{"configmap", k.kafkasvcName, k.namespace}.clear(oc)
resource{"svc", k.kafkasvcName, k.namespace}.clear(oc)
resource{"statefulset", k.kafkasvcName, k.namespace}.clear(oc)
resource{"configmap", "kafka-client", k.namespace}.clear(oc)
resource{"deployment", "kafka-consumer-" + k.authtype, k.namespace}.clear(oc)
}
// deploy an amqstream instance and a kafka user for the predefined topics
// if the amqstreams operator is absent, deploy it first
func (amqi *amqInstance) deploy(oc *exutil.CLI) {
e2e.Logf("deploy amq instance")
	//initialize kafka vars
if amqi.name == "" {
amqi.name = "my-cluster"
}
if amqi.namespace == "" {
e2e.Failf("error, please define a namespace for amqstream instance")
}
if amqi.user == "" {
amqi.user = "my-user"
}
if amqi.topicPrefix == "" {
amqi.topicPrefix = "topic-logging"
}
if amqi.instanceType == "" {
amqi.instanceType = "kafka-sasl-cluster"
}
loggingBaseDir := exutil.FixturePath("testdata", "logging")
operatorDeployed := false
	// wait up to 3 minutes for the csv to appear
wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "openshift-operators").Output()
if err != nil {
return false, err
}
if strings.Contains(output, "amqstreams") {
operatorDeployed = true
return true, nil
}
return false, nil
})
if !operatorDeployed {
e2e.Logf("deploy amqstream operator")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("operatorhub/cluster", `-ojsonpath='{.status.sources[?(@.name=="redhat-operators")].disabled}'`).Output()
if err != nil {
g.Skip("Can not detect the catalog source/redhat-operators status")
}
if output == "true" {
g.Skip("catalog source/redhat-operators is disabled")
}
catsrc := CatalogSourceObjects{"stable", "redhat-operators", "openshift-marketplace"}
amqs := SubscriptionObjects{
OperatorName: "amq-streams-cluster-operator",
Namespace: amqi.namespace,
PackageName: "amq-streams",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "singlenamespace-og.yaml"),
CatalogSource: catsrc,
}
amqs.SubscribeOperator(oc)
if isFipsEnabled(oc) {
//disable FIPS_MODE due to "java.io.IOException: getPBEAlgorithmParameters failed: PBEWithHmacSHA256AndAES_256 AlgorithmParameters not available"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub/"+amqs.PackageName, "-n", amqs.Namespace, "-p", "{\"spec\": {\"config\": {\"env\": [{\"name\": \"FIPS_MODE\", \"value\": \"disabled\"}]}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// before creating kafka, check the existence of crd kafkas.kafka.strimzi.io
checkResource(oc, true, true, "kafka.strimzi.io", []string{"crd", "kafkas.kafka.strimzi.io", "-ojsonpath={.spec.group}"})
kafka := resource{"kafka", amqi.name, amqi.namespace}
kafkaTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", amqi.instanceType+".yaml")
kafka.applyFromTemplate(oc, "-n", kafka.namespace, "-f", kafkaTemplate, "-p", "NAME="+kafka.name)
// wait for kafka cluster to be ready
waitForPodReadyWithLabel(oc, kafka.namespace, "app.kubernetes.io/instance="+kafka.name)
if amqi.instanceType == "kafka-sasl-cluster" {
e2e.Logf("deploy kafka user")
kafkaUser := resource{"kafkauser", amqi.user, amqi.namespace}
kafkaUserTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-sasl-user.yaml")
kafkaUser.applyFromTemplate(oc, "-n", kafkaUser.namespace, "-f", kafkaUserTemplate, "-p", "NAME="+amqi.user, "-p", "KAFKA_NAME="+amqi.name, "-p", "TOPIC_PREFIX="+amqi.topicPrefix)
// get user password from secret my-user
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
secrets, err := oc.AdminKubeClient().CoreV1().Secrets(kafkaUser.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/instance=" + kafkaUser.name})
if err != nil {
e2e.Logf("failed to list secret, continue")
return false, nil
}
count := len(secrets.Items)
if count == 0 {
e2e.Logf("canot not find the secret %s, continues", kafkaUser.name)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not find the kafka user Secret %s", amqi.user))
e2e.Logf("set kafka user password")
amqi.password, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("get").Args("secret", amqi.user, "-n", amqi.namespace, "-o", "jsonpath={.data.password}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
temp, err := base64.StdEncoding.DecodeString(amqi.password)
o.Expect(err).NotTo(o.HaveOccurred())
amqi.password = string(temp)
		// get external route of amqstream kafka
e2e.Logf("get kafka route")
amqi.route = getRouteAddress(oc, amqi.namespace, amqi.name+"-kafka-external-bootstrap")
amqi.route = amqi.route + ":443"
// get ca for route
e2e.Logf("get kafka routeCA")
amqi.routeCA, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", amqi.name+"-cluster-ca-cert", "-n", amqi.namespace, "-o", `jsonpath={.data.ca\.crt}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
temp, err = base64.StdEncoding.DecodeString(amqi.routeCA)
o.Expect(err).NotTo(o.HaveOccurred())
amqi.routeCA = string(temp)
}
// get internal service URL of amqstream kafka
amqi.service = amqi.name + "-kafka-bootstrap." + amqi.namespace + ".svc:9092"
e2e.Logf("amqstream deployed")
}
// best-effort deletion of the resources that would block normal deletion
func (amqi *amqInstance) destroy(oc *exutil.CLI) {
e2e.Logf("delete kakfa resources")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("job", "--all", "-n", amqi.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("kafkauser", "--all", "-n", amqi.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("kafkatopic", "--all", "-n", amqi.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("kafka", amqi.name, "-n", amqi.namespace).Execute()
}
// Create kafka topic, create consumer pod and return consumer pod name
// Note: the topic name must start with amqi.topicPrefix
func (amqi amqInstance) createTopicAndConsumber(oc *exutil.CLI, topicName string) string {
e2e.Logf("create kakfa topic %s and consume pod", topicName)
if !strings.HasPrefix(topicName, amqi.topicPrefix) {
e2e.Failf("error, the topic %s must has prefix %s", topicName, amqi.topicPrefix)
}
var (
consumerPodName string
loggingBaseDir = exutil.FixturePath("testdata", "logging")
topicTemplate = filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-topic.yaml")
topic = resource{"Kafkatopic", topicName, amqi.namespace}
)
err := topic.applyFromTemplate(oc, "-n", topic.namespace, "-f", topicTemplate, "-p", "NAMESPACE="+topic.namespace, "-p", "NAME="+topic.name, "CLUSTER_NAME="+amqi.name)
o.Expect(err).NotTo(o.HaveOccurred())
if amqi.instanceType == "kafka-sasl-cluster" {
		//create the consumer's sasl client properties
truststorePassword, err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("get").Args("secret", amqi.name+"-cluster-ca-cert", "-n", amqi.namespace, "-o", `jsonpath={.data.ca\.password}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
temp, err := base64.StdEncoding.DecodeString(truststorePassword)
o.Expect(err).NotTo(o.HaveOccurred())
truststorePassword = string(temp)
consumerConfigTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-sasl-consumers-config.yaml")
consumerConfig := resource{"configmap", "client-property-" + amqi.user, amqi.namespace}
err = consumerConfig.applyFromTemplate(oc.NotShowInfo(), "-n", consumerConfig.namespace, "-f", consumerConfigTemplate, "-p", "NAME="+consumerConfig.name, "-p", "USER="+amqi.user, "-p", "PASSWORD="+amqi.password, "-p", "TRUSTSTORE_PASSWORD="+truststorePassword, "-p", "KAFKA_NAME="+amqi.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create consumer pod
consumerTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-sasl-consumer-job.yaml")
consumer := resource{"job", topicName + "-consumer", amqi.namespace}
err = consumer.applyFromTemplate(oc, "-n", consumer.namespace, "-f", consumerTemplate, "-p", "NAME="+consumer.name, "-p", "CLUSTER_NAME="+amqi.name, "-p", "TOPIC_NAME="+topicName, "-p", "CLIENT_CONFIGMAP_NAME="+consumerConfig.name, "-p", "CA_SECRET_NAME="+amqi.name+"-cluster-ca-cert")
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, amqi.namespace, "job-name="+consumer.name)
consumerPods, err := oc.AdminKubeClient().CoreV1().Pods(amqi.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=" + topicName + "-consumer"})
o.Expect(err).NotTo(o.HaveOccurred())
consumerPodName = consumerPods.Items[0].Name
}
if amqi.instanceType == "kafka-no-auth-cluster" {
//create consumer pod
consumerTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-no-auth-consumer-job.yaml")
consumer := resource{"job", topicName + "-consumer", amqi.namespace}
err = consumer.applyFromTemplate(oc, "-n", consumer.namespace, "-f", consumerTemplate, "-p", "NAME="+consumer.name, "-p", "CLUSTER_NAME="+amqi.name, "-p", "TOPIC_NAME="+topicName)
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, amqi.namespace, "job-name="+consumer.name)
consumerPods, err := oc.AdminKubeClient().CoreV1().Pods(amqi.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=" + topicName + "-consumer"})
o.Expect(err).NotTo(o.HaveOccurred())
consumerPodName = consumerPods.Items[0].Name
}
if consumerPodName == "" {
e2e.Logf("can not get comsumer pod for the topic %s", topicName)
} else {
e2e.Logf("found the comsumer pod %s ", consumerPodName)
}
return consumerPodName
}
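// exampleAmqStreams is an illustrative sketch (not wired into any test): deploying an amqstreams
// instance and creating a topic plus consumer with the methods above. The namespace is a
// hypothetical value; the topic name must keep the configured topicPrefix.
func exampleAmqStreams(oc *exutil.CLI) {
	amqi := amqInstance{
		name:      "my-cluster",
		namespace: "my-amq-ns",
	}
	defer amqi.destroy(oc)
	amqi.deploy(oc)
	consumerPod := amqi.createTopicAndConsumber(oc, amqi.topicPrefix+"-app")
	e2e.Logf("consumer pod: %s", consumerPod)
}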
type eventRouter struct {
name string
namespace string
template string
}
func (e eventRouter) deploy(oc *exutil.CLI, optionalParameters ...string) {
parameters := []string{"-f", e.template, "-l", "app=eventrouter", "-p", "NAME=" + e.name, "NAMESPACE=" + e.namespace}
if len(optionalParameters) > 0 {
parameters = append(parameters, optionalParameters...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", e.namespace).Execute()
if err != nil {
e2e.Failf("error deploying eventrouter: %v", err)
}
resource{"deployment", e.name, e.namespace}.WaitForResourceToAppear(oc)
WaitForDeploymentPodsToBeReady(oc, e.namespace, e.name)
}
func (e eventRouter) delete(oc *exutil.CLI) {
resources := []resource{{"deployment", e.name, e.namespace}, {"configmaps", e.name, e.namespace}, {"serviceaccounts", e.name, e.namespace}}
for _, r := range resources {
r.clear(oc)
}
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrole", e.name+"-reader").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrolebindings", e.name+"-reader-binding").Execute()
}
// createSecretForGCL creates a secret for collector pods to forward logs to Google Cloud Logging
func createSecretForGCL(oc *exutil.CLI, name, namespace string) error {
// get gcp-credentials from env var GOOGLE_APPLICATION_CREDENTIALS
gcsCred := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-file=google-application-credentials.json="+gcsCred).Execute()
}
type googleApplicationCredentials struct {
CredentialType string `json:"type"`
ProjectID string `json:"project_id"`
ClientID string `json:"client_id"`
}
func getGCPProjectID(oc *exutil.CLI) (string, error) {
platform := exutil.CheckPlatform(oc)
if platform == "gcp" {
return exutil.GetGcpProjectID(oc)
}
credentialFile, present := os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !present {
g.Skip("Skip for the platform is not GCP and there is no GCP credentials")
}
file, err := os.ReadFile(credentialFile)
if err != nil {
return "", fmt.Errorf("can't read google application credentials: %v", err)
}
var gac googleApplicationCredentials
err = json.Unmarshal(file, &gac)
return gac.ProjectID, err
}
type googleCloudLogging struct {
projectID string
logName string
}
// listLogEntries gets the most recent 5 entries
// example: https://cloud.google.com/logging/docs/reference/libraries#list_log_entries
// https://github.com/GoogleCloudPlatform/golang-samples/blob/HEAD/logging/simplelog/simplelog.go
func (gcl googleCloudLogging) listLogEntries(queryString string) ([]*logging.Entry, error) {
ctx := context.Background()
	adminClient, err := logadmin.NewClient(ctx, gcl.projectID)
	if err != nil {
		return nil, fmt.Errorf("failed to create logadmin client: %v", err)
	}
	defer adminClient.Close()
var entries []*logging.Entry
lastHour := time.Now().Add(-1 * time.Hour).Format(time.RFC3339)
filter := fmt.Sprintf(`logName = "projects/%s/logs/%s" AND timestamp > "%s"`, gcl.projectID, gcl.logName, lastHour)
if len(queryString) > 0 {
filter += queryString
}
iter := adminClient.Entries(ctx,
logadmin.Filter(filter),
// Get most recent entries first.
logadmin.NewestFirst(),
)
// Fetch the most recent 5 entries.
for len(entries) < 5 {
entry, err := iter.Next()
if err == iterator.Done {
return entries, nil
}
if err != nil {
return nil, err
}
entries = append(entries, entry)
}
return entries, nil
}
func (gcl googleCloudLogging) getLogByType(logType string) ([]*logging.Entry, error) {
searchString := " AND jsonPayload.log_type = \"" + logType + "\""
return gcl.listLogEntries(searchString)
}
func (gcl googleCloudLogging) getLogByNamespace(namespace string) ([]*logging.Entry, error) {
searchString := " AND jsonPayload.kubernetes.namespace_name = \"" + namespace + "\""
return gcl.listLogEntries(searchString)
}
func extractGoogleCloudLoggingLogs(gclLogs []*logging.Entry) ([]LogEntity, error) {
var (
logs []LogEntity
log LogEntity
)
for _, item := range gclLogs {
if value, ok := item.Payload.(*structpb.Struct); ok {
v, err := value.MarshalJSON()
if err != nil {
return nil, err
}
//e2e.Logf("\noriginal log:\n%s\n\n", string(v))
err = json.Unmarshal(v, &log)
if err != nil {
return nil, err
}
logs = append(logs, log)
}
}
return logs, nil
}
func (gcl googleCloudLogging) removeLogs() error {
ctx := context.Background()
	adminClient, err := logadmin.NewClient(ctx, gcl.projectID)
	if err != nil {
		return fmt.Errorf("failed to create logadmin client: %v", err)
	}
	defer adminClient.Close()
return adminClient.DeleteLog(ctx, gcl.logName)
}
func (gcl googleCloudLogging) waitForLogsAppearByType(logType string) error {
return wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType(logType)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
}
func (gcl googleCloudLogging) waitForLogsAppearByNamespace(namespace string) error {
return wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByNamespace(namespace)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
}
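// exampleGoogleCloudLogging is an illustrative sketch (not wired into any test): checking logs
// forwarded to Google Cloud Logging with the helpers above. The log name is a hypothetical
// convention; getGCPProjectID skips the test when no GCP credentials are available.
func exampleGoogleCloudLogging(oc *exutil.CLI) {
	projectID, err := getGCPProjectID(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	gcl := googleCloudLogging{projectID: projectID, logName: getInfrastructureName(oc) + "-app"}
	defer gcl.removeLogs()
	err = gcl.waitForLogsAppearByType("application")
	o.Expect(err).NotTo(o.HaveOccurred())
}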
// getIndexImageTag returns a tag of the index image
// this is designed for logging upgrade tests, as the logging packagemanifests in the cluster may only have the testing version
// to provide a previous version for upgrade test, use clusterversion - 1 as the tag,
// for example: in OCP 4.12, use 4.11 as the tag
// index image: quay.io/openshift-qe-optional-operators/aosqe-index
func getIndexImageTag(oc *exutil.CLI) (string, error) {
version, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-ojsonpath={.status.desired.version}").Output()
if err != nil {
return "", err
}
major := strings.Split(version, ".")[0]
minor := strings.Split(version, ".")[1]
newMinor, err := strconv.Atoi(minor)
if err != nil {
return "", err
}
return major + "." + strconv.Itoa(newMinor-1), nil
}
func getExtLokiSecret() (string, string, error) {
glokiUser := os.Getenv("GLOKIUSER")
glokiPwd := os.Getenv("GLOKIPWD")
if glokiUser == "" || glokiPwd == "" {
return "", "", fmt.Errorf("GLOKIUSER or GLOKIPWD environment variable is not set")
}
return glokiUser, glokiPwd, nil
}
func checkCiphers(oc *exutil.CLI, tlsVer string, ciphers []string, server string, caFile string, cloNS string, timeInSec int) error {
delay := time.Duration(timeInSec) * time.Second
for _, cipher := range ciphers {
e2e.Logf("Testing %s...", cipher)
clPod, err := oc.AdminKubeClient().CoreV1().Pods(cloNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cluster-logging-operator"})
if err != nil {
return fmt.Errorf("failed to get pods: %w", err)
}
cmd := fmt.Sprintf("openssl s_client -%s -cipher %s -CAfile %s -connect %s", tlsVer, cipher, caFile, server)
result, err := e2eoutput.RunHostCmdWithRetries(cloNS, clPod.Items[0].Name, cmd, 3*time.Second, 30*time.Second)
if err != nil {
return fmt.Errorf("failed to run command: %w", err)
}
if strings.Contains(string(result), ":error:") {
errorStr := strings.Split(string(result), ":")[5]
return fmt.Errorf("error: NOT SUPPORTED (%s)", errorStr)
} else if strings.Contains(string(result), fmt.Sprintf("Cipher is %s", cipher)) || strings.Contains(string(result), "Cipher :") {
e2e.Logf("SUPPORTED")
} else {
errorStr := string(result)
return fmt.Errorf("error: UNKNOWN RESPONSE %s", errorStr)
}
time.Sleep(delay)
}
return nil
}
func checkTLSVer(oc *exutil.CLI, tlsVer string, server string, caFile string, cloNS string) error {
e2e.Logf("Testing TLS %s ", tlsVer)
clPod, err := oc.AdminKubeClient().CoreV1().Pods(cloNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cluster-logging-operator"})
if err != nil {
return fmt.Errorf("failed to get pods: %w", err)
}
cmd := fmt.Sprintf("openssl s_client -%s -CAfile %s -connect %s", tlsVer, caFile, server)
result, err := e2eoutput.RunHostCmdWithRetries(cloNS, clPod.Items[0].Name, cmd, 3*time.Second, 30*time.Second)
if err != nil {
return fmt.Errorf("failed to run command: %w", err)
}
if strings.Contains(string(result), ":error:") {
errorStr := strings.Split(string(result), ":")[5]
return fmt.Errorf("error: NOT SUPPORTED (%s)", errorStr)
} else if strings.Contains(string(result), "Cipher is ") || strings.Contains(string(result), "Cipher :") {
e2e.Logf("SUPPORTED")
} else {
errorStr := string(result)
return fmt.Errorf("error: UNKNOWN RESPONSE %s", errorStr)
}
return nil
}
func checkTLSProfile(oc *exutil.CLI, profile string, algo string, server string, caFile string, cloNS string, timeInSec int) bool {
var ciphers []string
var tlsVer string
if profile == "modern" {
e2e.Logf("Modern profile is currently not supported, please select from old, intermediate, custom")
return false
}
if isFipsEnabled(oc) {
switch profile {
case "old":
e2e.Logf("Checking old profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking old profile with TLS v1.2")
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA", "ECDHE-ECDSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256"}
}
tlsVer = "tls1_2"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
case "intermediate":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking intermediate profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate ciphers with TLS v1.3")
// as openssl-3.0.7-24.el9 in the CLO pod failed as below (no such issue in openssl-3.0.9-2.fc38), use TLS 1.3 to test TLS 1.2 here.
// openssl s_client -tls1_2 -cipher ECDHE-RSA-AES128-GCM-SHA256 -CAfile /run/secrets/kubernetes.io/serviceaccount/service-ca.crt -connect lokistack-sample-gateway-http:8081
// 20B4A391FFFF0000:error:1C8000E9:Provider routines:kdf_tls1_prf_derive:ems not enabled:providers/implementations/kdfs/tls1_prf.c:200:
// 20B4A391FFFF0000:error:0A08010C:SSL routines:tls1_PRF:unsupported:ssl/t1_enc.c:83:
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate profile with TLS v1.1")
tlsVer = "tls1_1"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).To(o.HaveOccurred())
case "custom":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking custom profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking custom profile ciphers with TLS v1.3")
// as openssl-3.0.7-24.el9 in the CLO pod failed as below (no such issue in openssl-3.0.9-2.fc38), use TLS 1.3 to test TLS 1.2 here.
// openssl s_client -tls1_2 -cipher ECDHE-RSA-AES128-GCM-SHA256 -CAfile /run/secrets/kubernetes.io/serviceaccount/service-ca.crt -connect lokistack-sample-gateway-http:8081
// 20B4A391FFFF0000:error:1C8000E9:Provider routines:kdf_tls1_prf_derive:ems not enabled:providers/implementations/kdfs/tls1_prf.c:200:
// 20B4A391FFFF0000:error:0A08010C:SSL routines:tls1_PRF:unsupported:ssl/t1_enc.c:83:
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-GCM-SHA256"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking ciphers on in custom profile with TLS v1.3")
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"TLS_AES_128_GCM_SHA256"}
} else if algo == "RSA" {
ciphers = []string{"TLS_AES_128_GCM_SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).To(o.HaveOccurred())
}
} else {
switch profile {
case "old":
e2e.Logf("Checking old profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking old profile with TLS v1.2")
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA", "ECDHE-ECDSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-SHA256", "ECDHE-RSA-AES128-SHA", "ECDHE-RSA-AES256-SHA", "AES128-GCM-SHA256", "AES256-GCM-SHA384", "AES128-SHA256", "AES128-SHA", "AES256-SHA"}
}
tlsVer = "tls1_2"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking old profile with TLS v1.1")
// remove these ciphers as openssl-3.0.7-24.el9 s_client -tls1_1 -cipher <ciphers> failed.
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-SHA", "ECDHE-ECDSA-AES256-SHA"}
} else if algo == "RSA" {
ciphers = []string{"AES128-SHA", "AES256-SHA"}
}
tlsVer = "tls1_1"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
case "intermediate":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking intermediate profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate profile ciphers with TLS v1.2")
tlsVer = "tls1_2"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate profile with TLS v1.1")
// replace checkCiphers with checkTLSVer as we needn't check all v1.1 Ciphers
tlsVer = "tls1_1"
err = checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).To(o.HaveOccurred())
case "custom":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking custom profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking custom profile with TLS v1.2")
tlsVer = "tls1_2"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking ciphers not in custom profile with TLS v1.3")
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256"}
} else if algo == "RSA" {
ciphers = []string{"TLS_AES_128_GCM_SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).To(o.HaveOccurred())
}
}
return true
}
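// Example invocation of checkTLSProfile (the server address and wait interval are
// illustrative; any reachable TLS endpoint whose certificate matches the given
// algorithm works, and cloNS stands for the cluster-logging-operator namespace):
//
//	caFile := "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
//	ok := checkTLSProfile(oc, "intermediate", "RSA", "lokistack-sample-gateway-http:8081", caFile, cloNS, 5)
//	o.Expect(ok).To(o.BeTrue())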
func checkCollectorConfiguration(oc *exutil.CLI, ns, cmName string, searchStrings ...string) (bool, error) {
// Extract vector.toml from the collector configmap and check that it contains the given strings
dirname := "/tmp/" + oc.Namespace() + "-vectortoml"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
if err != nil {
return false, err
}
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("configmap/"+cmName, "-n", ns, "--confirm", "--to="+dirname).Output()
if err != nil {
return false, err
}
filename := filepath.Join(dirname, "vector.toml")
content, err := os.ReadFile(filename)
if err != nil {
return false, err
}
for _, s := range searchStrings {
if !strings.Contains(string(content), s) {
e2e.Logf("can't find %s in vector.toml", s)
return false, nil
}
}
return true, nil
}
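// Typical use of checkCollectorConfiguration (the configmap name and search strings are
// examples, not fixed values):
//
//	found, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", `compression = "gzip"`, "[sinks.output_loki]")
//	o.Expect(err).NotTo(o.HaveOccurred())
//	o.Expect(found).To(o.BeTrue())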
func checkOperatorsRunning(oc *exutil.CLI) (bool, error) {
jpath := `{range .items[*]}{.metadata.name}:{.status.conditions[?(@.type=='Available')].status}{':'}{.status.conditions[?(@.type=='Degraded')].status}{'\n'}{end}`
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperators.config.openshift.io", "-o", "jsonpath="+jpath).Output()
if err != nil {
return false, fmt.Errorf("failed to execute 'oc get clusteroperators.config.openshift.io' command: %v", err)
}
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
for _, line := range lines {
e2e.Logf("%s", line)
parts := strings.Split(line, ":")
available := parts[1] == "True"
notDegraded := parts[2] == "False"
if !available || !notDegraded {
return false, nil
}
}
return true, nil
}
func waitForOperatorsRunning(oc *exutil.CLI) {
e2e.Logf("Wait a minute to allow the cluster to reconcile the config changes.")
time.Sleep(1 * time.Minute)
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Minute, 21*time.Minute, true, func(context.Context) (done bool, err error) {
return checkOperatorsRunning(oc)
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to wait for operators to be running: %v", err))
}
func doHTTPRequest(header http.Header, address, path, query, method string, quiet bool, attempts int, requestBody io.Reader, expectedStatusCode int) ([]byte, error) {
us, err := buildURL(address, path, query)
if err != nil {
return nil, err
}
if !quiet {
e2e.Logf("the URL is: %s", us)
}
req, err := http.NewRequest(strings.ToUpper(method), us, requestBody)
if err != nil {
return nil, err
}
req.Header = header
var tr *http.Transport
proxy := getProxyFromEnv()
if len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
Proxy: http.ProxyURL(proxyURL),
}
} else {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
client := &http.Client{Transport: tr}
var resp *http.Response
success := false
for attempts > 0 {
attempts--
resp, err = client.Do(req)
if err != nil {
e2e.Logf("error sending request %v", err)
continue
}
if resp.StatusCode != expectedStatusCode {
buf, _ := io.ReadAll(resp.Body) // nolint
e2e.Logf("Error response from server: %s %s (%v), attempts remaining: %d", resp.Status, string(buf), err, attempts)
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body: %v", err)
}
// sleep 5 seconds before sending the next request
time.Sleep(5 * time.Second)
continue
}
success = true
break
}
if !success {
return nil, fmt.Errorf("run out of attempts while querying the server")
}
defer func() {
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body: %v", err)
}
}()
return io.ReadAll(resp.Body)
}
// buildURL joins a URL such as `http://foo/bar` with a path such as `/buzz` and attaches the raw query string.
func buildURL(u, p, q string) (string, error) {
url, err := url.Parse(u)
if err != nil {
return "", err
}
url.Path = path.Join(url.Path, p)
url.RawQuery = q
return url.String(), nil
}
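// A quick worked example (values assumed):
//
//	u, _ := buildURL("https://loki.example.com/api", "/loki/api/v1/labels", "since=1h")
//	// u == "https://loki.example.com/api/loki/api/v1/labels?since=1h"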
// GetIPVersionStackType gets IP-version Stack type of the cluster
func GetIPVersionStackType(oc *exutil.CLI) (ipvStackType string) {
svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 {
ipvStackType = "dualstack"
} else if strings.Count(svcNetwork, ":") >= 2 {
ipvStackType = "ipv6single"
} else if strings.Count(svcNetwork, ".") >= 2 {
ipvStackType = "ipv4single"
}
e2e.Logf("The test cluster IP-version Stack type is :\"%s\".", ipvStackType)
return ipvStackType
}
// convertInterfaceToArray converts interface{} to []string
/*
example of interface{}:
[
timestamp,
log data
],
[
timestamp,
count
]
*/
func convertInterfaceToArray(t interface{}) []string {
var data []string
switch reflect.TypeOf(t).Kind() {
case reflect.Slice, reflect.Array:
s := reflect.ValueOf(t)
for i := 0; i < s.Len(); i++ {
data = append(data, fmt.Sprint(s.Index(i)))
}
}
return data
}
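// Worked example (input shaped like a Loki stream value pair, values assumed):
//
//	var v interface{} = []interface{}{"1700000000000000000", `{"message":"hello"}`}
//	pair := convertInterfaceToArray(v)
//	// pair == []string{"1700000000000000000", `{"message":"hello"}`}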
// send logs over http
func postDataToHttpserver(oc *exutil.CLI, clfNS string, httpURL string, postJsonString string) bool {
collectorPods, err := oc.AdminKubeClient().CoreV1().Pods(clfNS).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=collector"})
if err != nil || len(collectorPods.Items) < 1 {
e2e.Logf("failed to get pods by label app.kubernetes.io/component=collector")
return false
}
//ToDo, send logs to httpserver using service ca, oc get cm/openshift-service-ca.crt -o json |jq '.data."service-ca.crt"'
cmd := `curl -s -k -w "%{http_code}" ` + httpURL + " -d '" + postJsonString + "'"
result, err := e2eoutput.RunHostCmdWithRetries(clfNS, collectorPods.Items[0].Name, cmd, 3*time.Second, 30*time.Second)
if err != nil {
e2e.Logf("Show more status as data can not be sent to httpserver")
oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", clfNS, "endpoints").Output()
oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", clfNS, "pods").Output()
return false
}
if result == "200" {
return true
} else {
e2e.Logf("Show result as return code is not 200")
e2e.Logf("result=%v", result)
return false
}
}
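// Example call (URL and payload are illustrative only; clfNS is the collector namespace):
//
//	ok := postDataToHttpserver(oc, clfNS, "http://http-server.http-server-ns.svc:8080/logs", `{"message":"test log"}`)
//	o.Expect(ok).To(o.BeTrue())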
// create a job for the rapidast test
// Run a job to do rapidast scanning; the scan result is written into the pod logs and stored in artifactdirPath
func rapidastScan(oc *exutil.CLI, ns, configFile string, scanPolicyFile string, apiGroupName string) (bool, error) {
//update the token and create a new config file
content, err := os.ReadFile(configFile)
jobName := "rapidast-" + getRandomString()
if err != nil {
e2e.Logf("rapidastScan abort! Open file %s failed", configFile)
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
token := getSAToken(oc, "default", ns)
originConfig := string(content)
targetConfig := strings.Replace(originConfig, "Bearer sha256~xxxxxxxx", "Bearer "+token, -1)
newConfigFile := "/tmp/logdast" + getRandomString()
f, err := os.Create(newConfigFile)
if err != nil {
e2e.Logf("rapidastScan abort! prepare configfile %s failed", newConfigFile)
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
defer f.Close()
defer exec.Command("rm", newConfigFile).Output()
f.WriteString(targetConfig)
//Create configmap
err = oc.WithoutNamespace().Run("create").Args("-n", ns, "configmap", jobName, "--from-file=rapidastconfig.yaml="+newConfigFile, "--from-file=customscan.policy="+scanPolicyFile).Execute()
if err != nil {
e2e.Logf("rapidastScan abort! create configmap rapidast-configmap failed")
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
//Create job
loggingBaseDir := exutil.FixturePath("testdata", "logging")
jobTemplate := filepath.Join(loggingBaseDir, "rapidast/job_rapidast.yaml")
rapidastJob := resource{"job", jobName, ns}
err = rapidastJob.applyFromTemplate(oc, "-f", jobTemplate, "-n", ns, "-p", "NAME="+jobName)
if err != nil {
e2e.Logf("rapidastScan abort! create rapidast job failed")
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
//Wait up to 3 minutes until the pod fails or succeeds
wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 3*time.Minute, true, func(context.Context) (done bool, err error) {
jobStatus, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ns, "pod", "-l", "job-name="+jobName, "-ojsonpath={.items[0].status.phase}").Output()
e2e.Logf(" rapidast Job status %s ", jobStatus)
if err1 != nil {
return false, nil
}
if jobStatus == "Pending" || jobStatus == "Running" {
return false, nil
}
if jobStatus == "Failed" {
e2e.Logf("rapidast-job %s failed", jobName)
return true, nil
}
if jobStatus == "Succeeded" {
return true, nil
}
return false, nil
})
// Get the rapidast pod name
jobPods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=" + jobName})
if err != nil {
e2e.Logf("rapidastScan abort! can not find rapidast scan job ")
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
podLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ns, jobPods.Items[0].Name).Output()
if err != nil {
e2e.Logf("rapidastScan abort! can not fetch logs from rapidast-scan pod %s", jobPods.Items[0].Name)
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
// Copy DAST Report into $ARTIFACT_DIR
artifactAvailable := true
artifactdirPath := os.Getenv("ARTIFACT_DIR")
if artifactdirPath == "" {
artifactAvailable = false
}
info, err := os.Stat(artifactdirPath)
if err != nil {
e2e.Logf("%s doesn't exist", artifactdirPath)
artifactAvailable = false
} else if !info.IsDir() {
e2e.Logf("%s isn't a directory", artifactdirPath)
artifactAvailable = false
}
if artifactAvailable {
rapidastResultsSubDir := artifactdirPath + "/rapiddastresultslogging"
err = os.MkdirAll(rapidastResultsSubDir, 0755)
if err != nil {
e2e.Logf("failed to create %s", rapidastResultsSubDir)
}
artifactFile := rapidastResultsSubDir + "/" + apiGroupName + "_rapidast.result.txt"
e2e.Logf("Write report into %s", artifactFile)
f1, err := os.Create(artifactFile)
if err != nil {
e2e.Logf("failed to create artifactFile %s", artifactFile)
}
defer f1.Close()
_, err = f1.WriteString(podLogs)
if err != nil {
e2e.Logf("failed to write logs into artifactFile %s", artifactFile)
}
} else {
// print pod logs if artifactdirPath is not writable
e2e.Logf("#oc logs -n %s %s \n %s", jobPods.Items[0].Name, ns, podLogs)
}
//return false if a high risk is reported
podLogA := strings.Split(podLogs, "\n")
riskHigh := 0
riskMedium := 0
re1 := regexp.MustCompile(`"riskdesc": .*High`)
re2 := regexp.MustCompile(`"riskdesc": .*Medium`)
for _, item := range podLogA {
if re1.MatchString(item) {
riskHigh++
}
if re2.MatchString(item) {
riskMedium++
}
}
e2e.Logf("rapidast result: riskHigh=%v riskMedium=%v", riskHigh, riskMedium)
if riskHigh > 0 {
return false, fmt.Errorf("high risk alert, please check the scan result report")
}
return true, nil
}
// Get OIDC provider for the cluster
func getOIDC(oc *exutil.CLI) (string, error) {
oidc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication.config", "cluster", "-o=jsonpath={.spec.serviceAccountIssuer}").Output()
if err != nil {
return "", err
}
return strings.TrimPrefix(oidc, "https://"), nil
}
func getPoolID(oc *exutil.CLI) (string, error) {
// pool_id="$(oc get authentication cluster -o json | jq -r .spec.serviceAccountIssuer | sed 's/.*\/\([^\/]*\)-oidc/\1/')"
issuer, err := getOIDC(oc)
if err != nil {
return "", err
}
return strings.Split(strings.Split(issuer, "/")[1], "-oidc")[0], nil
}
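// Worked example of the parsing above (issuer value assumed): for a serviceAccountIssuer of
// "https://mybucket.s3.us-east-2.amazonaws.com/mycluster-abc123-oidc", getOIDC returns
// "mybucket.s3.us-east-2.amazonaws.com/mycluster-abc123-oidc" and getPoolID keeps the path
// segment before "-oidc", i.e. "mycluster-abc123".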
// Create a linux audit policy to generate audit logs in one schedulable worker
func genLinuxAuditLogsOnWorker(oc *exutil.CLI) (string, error) {
workerNodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
if err != nil || len(workerNodes) == 0 {
return "", fmt.Errorf("can not find schedulable worker to enable audit policy")
}
result, err := exutil.DebugNodeWithChroot(oc, workerNodes[0].Name, "bash", "-c", "auditctl -w /var/log/pods/ -p rwa -k logging-qe-test-read-write-pod-logs")
if err != nil && strings.Contains(result, "Rule exists") {
//Note: we still provide the nodeName here; the policy will be deleted when `defer deleteLinuxAuditPolicyFromNode(oc, nodeName)` is called.
return workerNodes[0].Name, nil
}
return workerNodes[0].Name, err
}
// delete the linux audit policy
func deleteLinuxAuditPolicyFromNode(oc *exutil.CLI, nodeName string) error {
if nodeName == "" {
return fmt.Errorf("nodeName can not be empty")
}
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", "auditctl -W /var/log/pods/ -p rwa -k logging-qe-test-read-write-pod-logs")
return err
}
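// Typical pairing of the two audit-policy helpers in a test case:
//
//	nodeName, err := genLinuxAuditLogsOnWorker(oc)
//	o.Expect(err).NotTo(o.HaveOccurred())
//	defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
//	// ... then forward audit logs and assert they reach the configured output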
func hasMaster(oc *exutil.CLI) bool {
masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "node-role.kubernetes.io/master="})
if err != nil {
e2e.Logf("hit error when listing master nodes: %v", err)
}
return len(masterNodes.Items) > 0
}
| package logging | ||||
function | openshift/openshift-tests-private | bb4b6530-c840-42d4-8e9a-db7f5295c623 | getRandomString | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
} | logging | ||||
function | openshift/openshift-tests-private | 4ab04ae3-d1b6-4cd3-9123-b34902558d05 | contain | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func contain(a []string, b string) bool {
for _, c := range a {
if c == b {
return true
}
}
return false
} | logging | |||||
function | openshift/openshift-tests-private | 66fdd0a3-75ac-40e3-b956-2c11eaac1474 | containSubstring | ['"fmt"', '"reflect"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func containSubstring(a interface{}, b string) bool {
switch reflect.TypeOf(a).Kind() {
case reflect.Slice, reflect.Array:
s := reflect.ValueOf(a)
for i := 0; i < s.Len(); i++ {
if strings.Contains(fmt.Sprintln(s.Index(i)), b) {
return true
}
}
}
return false
} | logging | ||||
function | openshift/openshift-tests-private | 95edc769-db63-4cc4-a41c-1eab5ddc8a32 | processTemplate | ['"context"', '"encoding/json"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func processTemplate(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
filename := getRandomString() + ".json"
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 15*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(filename)
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
if err != nil {
return configFile, fmt.Errorf("failed to process template with the provided parameters")
}
return configFile, nil
} | logging | ||||
function | openshift/openshift-tests-private | ccb15a93-1881-4392-a30f-08f4cc93ea2e | getProxyFromEnv | ['"os"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getProxyFromEnv() string {
var proxy string
if os.Getenv("http_proxy") != "" {
proxy = os.Getenv("http_proxy")
} else if os.Getenv("https_proxy") != "" {
proxy = os.Getenv("https_proxy")
}
return proxy
} | logging | ||||
function | openshift/openshift-tests-private | 97f0a839-1943-4e08-a7b4-0c26daf3e821 | getClusterID | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getClusterID(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-ojsonpath={.spec.clusterID}").Output()
} | logging | |||||
function | openshift/openshift-tests-private | ddc91ece-fcd3-46e7-bbd0-2f7939b6c29e | isFipsEnabled | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func isFipsEnabled(oc *exutil.CLI) bool {
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
fips, err := exutil.DebugNodeWithChroot(oc, nodes[0].Name, "bash", "-c", "fips-mode-setup --check")
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Contains(fips, "FIPS mode is enabled.")
} | logging | ||||
function | openshift/openshift-tests-private | 73478b2c-97a1-47aa-88f6-777fbeb97f0d | waitForPackagemanifestAppear | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['SubscriptionObjects'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (so *SubscriptionObjects) waitForPackagemanifestAppear(oc *exutil.CLI, chSource bool) {
args := []string{"-n", so.CatalogSource.SourceNamespace, "packagemanifests"}
if chSource {
args = append(args, "-l", "catalog="+so.CatalogSource.SourceName)
} else {
args = append(args, so.PackageName)
}
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
packages, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
if err != nil {
msg := fmt.Sprintf("%v", err)
if strings.Contains(msg, "No resources found") || strings.Contains(msg, "NotFound") {
return false, nil
}
return false, err
}
if strings.Contains(packages, so.PackageName) {
return true, nil
}
e2e.Logf("Waiting for packagemanifest/%s to appear", so.PackageName)
return false, nil
})
if err != nil {
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for can't find packagemanifest/%s", so.PackageName))
} else {
e2e.Failf("Packagemanifest %s is not available", so.PackageName)
}
}
//check channel
args = append(args, `-ojsonpath={.items[?(@.metadata.name=="`+so.PackageName+`")].status.channels[*].name}`)
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
channels := strings.Split(output, " ")
if !contain(channels, so.CatalogSource.Channel) {
e2e.Logf("Find channels %v from packagemanifest/%s", channels, so.PackageName)
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for packagemanifest/%s doesn't have target channel %s", so.PackageName, so.CatalogSource.Channel))
} else {
e2e.Failf("Packagemanifest %s doesn't have target channel %s", so.PackageName, so.CatalogSource.Channel)
}
}
} | logging | |||
function | openshift/openshift-tests-private | f9c58555-18ac-490c-92cf-6f5385ada51f | setCatalogSourceObjects | ['"strings"', '"cloud.google.com/go/logging"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['SubscriptionObjects'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (so *SubscriptionObjects) setCatalogSourceObjects(oc *exutil.CLI) {
// set channel
if so.CatalogSource.Channel == "" {
so.CatalogSource.Channel = "stable-6.2"
}
// set source namespace
if so.CatalogSource.SourceNamespace == "" {
so.CatalogSource.SourceNamespace = "openshift-marketplace"
}
// set source and check if the packagemanifest exists or not
if so.CatalogSource.SourceName != "" {
so.waitForPackagemanifestAppear(oc, true)
} else {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("catsrc", "-n", so.CatalogSource.SourceNamespace, "-ojsonpath={.items[*].metadata.name}").Output()
if err != nil {
e2e.Logf("can't list catalog source in project %s: %v", so.CatalogSource.SourceNamespace, err)
}
catsrcs := strings.Split(output, " ")
if contain(catsrcs, "auto-release-app-registry") {
if contain(catsrcs, "redhat-operators") {
// do not subscribe source auto-release-app-registry as we want to test GAed logging in auto release jobs
so.CatalogSource.SourceName = "redhat-operators"
so.waitForPackagemanifestAppear(oc, true)
} else {
if so.SkipCaseWhenFailed {
g.Skip("skip the case because the cluster doesn't have proper catalog source for logging")
}
}
} else if contain(catsrcs, "qe-app-registry") {
so.CatalogSource.SourceName = "qe-app-registry"
so.waitForPackagemanifestAppear(oc, true)
} else {
so.waitForPackagemanifestAppear(oc, false)
source, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests", so.PackageName, "-o", "jsonpath={.status.catalogSource}").Output()
if err != nil {
e2e.Logf("error getting catalog source name: %v", err)
}
so.CatalogSource.SourceName = source
}
}
} | logging | |||
function | openshift/openshift-tests-private | 8e11c204-a385-4b79-a05b-03062014b7c7 | SubscribeOperator | ['"context"', '"fmt"', '"os"', '"strings"', '"time"', '"cloud.google.com/go/logging"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['SubscriptionObjects'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (so *SubscriptionObjects) SubscribeOperator(oc *exutil.CLI) {
// check if the namespace exists, if it doesn't exist, create the namespace
if so.OperatorPodLabel == "" {
so.OperatorPodLabel = "name=" + so.OperatorName
}
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), so.Namespace, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("The project %s is not found, create it now...", so.Namespace)
namespaceTemplate := exutil.FixturePath("testdata", "logging", "subscription", "namespace.yaml")
namespaceFile, err := processTemplate(oc, "-f", namespaceTemplate, "-p", "NAMESPACE_NAME="+so.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.Remove(namespaceFile)
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("apply").Args("-f", namespaceFile).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
return true, nil
}
return false, err
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create project %s", so.Namespace))
}
}
// check the operator group, if no object found, then create an operator group in the project
og, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "og").Output()
o.Expect(err).NotTo(o.HaveOccurred())
msg := fmt.Sprintf("%v", og)
if strings.Contains(msg, "No resources found") {
// create operator group
ogFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.OperatorGroup, "-p", "OG_NAME="+so.Namespace, "NAMESPACE="+so.Namespace)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.Remove(ogFile)
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("apply").Args("-f", ogFile, "-n", so.Namespace).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
return true, nil
}
return false, err
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create operatorgroup %s in %s project", so.Namespace, so.Namespace))
}
// check the subscription; if there is no subscription object, create one
sub, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", so.Namespace, so.PackageName).Output()
if err != nil {
msg := fmt.Sprintf("%v", sub)
if strings.Contains(msg, "NotFound") {
so.setCatalogSourceObjects(oc)
//create subscription object
subscriptionFile, err := processTemplate(oc, "-n", so.Namespace, "-f", so.Subscription, "-p", "PACKAGE_NAME="+so.PackageName, "NAMESPACE="+so.Namespace, "CHANNEL="+so.CatalogSource.Channel, "SOURCE="+so.CatalogSource.SourceName, "SOURCE_NAMESPACE="+so.CatalogSource.SourceNamespace)
if err != nil {
if so.SkipCaseWhenFailed {
g.Skip("hit error when processing subscription template: " + err.Error() + ", skip the case")
} else {
e2e.Failf("hit error when processing subscription template: %v", err)
}
}
defer os.Remove(subscriptionFile)
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().Run("apply").Args("-f", subscriptionFile, "-n", so.Namespace).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
return true, nil
}
return false, err
}
return true, nil
})
if err != nil {
if so.SkipCaseWhenFailed {
g.Skip("hit error when creating subscription, skip the case")
} else {
e2e.Failf("hit error when creating subscription")
}
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't create subscription %s in %s project", so.PackageName, so.Namespace))
// check status in subscription
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 120*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub", so.PackageName, `-ojsonpath={.status.state}`).Output()
if err != nil {
e2e.Logf("error getting subscription/%s: %v", so.PackageName, err)
return false, nil
}
return strings.Contains(output, "AtLatestKnown"), nil
})
if err != nil {
out, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub", so.PackageName, `-ojsonpath={.status.conditions}`).Output()
e2e.Logf("subscription/%s is not ready, conditions: %v", so.PackageName, out)
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for the operator %s is not ready", so.OperatorName))
} else {
e2e.Failf("can't deploy operator %s", so.OperatorName)
}
}
}
}
// check pod status
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 240*time.Second, true, func(context.Context) (done bool, err error) {
pods, err := oc.AdminKubeClient().CoreV1().Pods(so.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: so.OperatorPodLabel})
if err != nil {
e2e.Logf("Hit error %v when getting pods", err)
return false, nil
}
if len(pods.Items) == 0 {
e2e.Logf("Waiting for pod with label %s to appear\n", so.OperatorPodLabel)
return false, nil
}
ready := true
for _, pod := range pods.Items {
if pod.Status.Phase != "Running" {
ready = false
e2e.Logf("Pod %s is not running: %v", pod.Name, pod.Status.Phase)
break
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
ready = false
e2e.Logf("Container %s in pod %s is not ready", containerStatus.Name, pod.Name)
break
}
}
}
return ready, nil
})
if err != nil {
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", so.Namespace, "-l", so.OperatorPodLabel).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", so.Namespace, "-l", so.OperatorPodLabel, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", so.Namespace, "-l", so.OperatorPodLabel, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Logf("pod with label %s is not ready:\nconditions: %s\ncontainer status: %s", so.OperatorPodLabel, podStatus, containerStatus)
if so.SkipCaseWhenFailed {
g.Skip(fmt.Sprintf("Skip the case for the operator %s is not ready", so.OperatorName))
} else {
e2e.Failf("can't deploy operator %s", so.OperatorName)
}
}
} | logging | |||
function | openshift/openshift-tests-private | 1aedf725-8151-4c98-9dd1-7dcfc1ff529b | uninstallOperator | ['"strings"', '"cloud.google.com/go/logging"'] | ['SubscriptionObjects', 'resource'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (so *SubscriptionObjects) uninstallOperator(oc *exutil.CLI) {
//csv, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub/"+so.PackageName, "-ojsonpath={.status.installedCSV}").Output()
resource{"subscription", so.PackageName, so.Namespace}.clear(oc)
//_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", so.Namespace, "csv", csv).Execute()
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", so.Namespace, "csv", "-l", "operators.coreos.com/"+so.PackageName+"."+so.Namespace+"=").Execute()
// do not remove the openshift-logging and openshift-operators-redhat namespaces, and preserve the operatorgroup as there may be several operators deployed in one namespace
// for example: loki-operator
if so.Namespace != "openshift-logging" && so.Namespace != "openshift-operators-redhat" && !strings.HasPrefix(so.Namespace, "e2e-test-") {
deleteNamespace(oc, so.Namespace)
}
} | logging | |||
function | openshift/openshift-tests-private | b1e15f41-95d2-40af-90b4-2cc5fb6c5315 | getInstalledCSV | ['SubscriptionObjects'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (so *SubscriptionObjects) getInstalledCSV(oc *exutil.CLI) string {
installedCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", so.Namespace, "sub", so.PackageName, "-ojsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return installedCSV
} | logging | ||||
function | openshift/openshift-tests-private | 0088012f-8a11-47c0-a9af-1ed1cf65fbfe | WaitForDeploymentPodsToBeReady | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func WaitForDeploymentPodsToBeReady(oc *exutil.CLI, namespace string, name string) {
var selectors map[string]string
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
deployment, err := oc.AdminKubeClient().AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for deployment/%s to appear\n", name)
return false, nil
}
return false, err
}
selectors = deployment.Spec.Selector.MatchLabels
if deployment.Status.AvailableReplicas == *deployment.Spec.Replicas && deployment.Status.UpdatedReplicas == *deployment.Spec.Replicas {
e2e.Logf("Deployment %s available (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s deployment (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("deployment %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("deployment %s is not available", name))
} | logging | ||||
function | openshift/openshift-tests-private | f54b80d0-6ed2-4aae-90a4-a6a404803585 | waitForStatefulsetReady | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func waitForStatefulsetReady(oc *exutil.CLI, namespace string, name string) {
var selectors map[string]string
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
ss, err := oc.AdminKubeClient().AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for statefulset/%s to appear\n", name)
return false, nil
}
return false, err
}
selectors = ss.Spec.Selector.MatchLabels
if ss.Status.ReadyReplicas == *ss.Spec.Replicas && ss.Status.UpdatedReplicas == *ss.Spec.Replicas {
e2e.Logf("statefulset %s available (%d/%d)\n", name, ss.Status.ReadyReplicas, *ss.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s statefulset (%d/%d)\n", name, ss.Status.ReadyReplicas, *ss.Spec.Replicas)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("statefulset %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("statefulset %s is not available", name))
} | logging | ||||
function | openshift/openshift-tests-private | 8baac608-8b46-4299-9c38-b56cb1d25a02 | WaitForDaemonsetPodsToBeReady | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func WaitForDaemonsetPodsToBeReady(oc *exutil.CLI, ns string, name string) {
var selectors map[string]string
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
daemonset, err := oc.AdminKubeClient().AppsV1().DaemonSets(ns).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for daemonset/%s to appear\n", name)
return false, nil
}
return false, err
}
selectors = daemonset.Spec.Selector.MatchLabels
if daemonset.Status.DesiredNumberScheduled > 0 && daemonset.Status.NumberReady == daemonset.Status.DesiredNumberScheduled && daemonset.Status.UpdatedNumberScheduled == daemonset.Status.DesiredNumberScheduled {
e2e.Logf("Daemonset/%s is available (%d/%d)\n", name, daemonset.Status.NumberReady, daemonset.Status.DesiredNumberScheduled)
return true, nil
}
e2e.Logf("Waiting for full availability of %s daemonset (%d/%d)\n", name, daemonset.Status.NumberReady, daemonset.Status.DesiredNumberScheduled)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("daemonset %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Daemonset %s is not available", name))
} | logging | ||||
function | openshift/openshift-tests-private | 3c906ea5-ddc5-4866-8e87-e7d2f4a4b45a | waitForPodReadyWithLabel | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func waitForPodReadyWithLabel(oc *exutil.CLI, ns string, label string) {
var count int
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
pods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: label})
if err != nil {
return false, err
}
count = len(pods.Items)
if count == 0 {
e2e.Logf("Waiting for pod with label %s to appear\n", label)
return false, nil
}
ready := true
for _, pod := range pods.Items {
if pod.Status.Phase != "Running" {
ready = false
break
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
ready = false
break
}
}
}
if !ready {
e2e.Logf("Waiting for pod with label %s to be ready...\n", label)
}
return ready, nil
})
if err != nil && count != 0 {
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Execute()
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.containerStatuses}").Output()
e2e.Failf("pod with label %s is not ready:\nconditions: %s\ncontainer status: %s", label, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("pod with label %s is not ready", label))
} | logging | ||||
function | openshift/openshift-tests-private | df8f4cff-f577-4198-bb12-e3e91aa5a952 | getPodNames | ['"context"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getPodNames(oc *exutil.CLI, ns, label string) ([]string, error) {
var names []string
pods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: label})
if err != nil {
return names, err
}
if len(pods.Items) == 0 {
return names, fmt.Errorf("no pod(s) match label %s in namespace %s", label, ns)
}
for _, pod := range pods.Items {
names = append(names, pod.Name)
}
return names, nil
} | logging | ||||
function | openshift/openshift-tests-private | 2b343401-4af8-4815-b2b7-59f50877c2e4 | WaitUntilResourceIsGone | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['resource'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (r resource) WaitUntilResourceIsGone(oc *exutil.CLI) error {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", output)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
return true, nil
}
return true, err
}
return false, nil
})
if err != nil {
return fmt.Errorf("can't remove %s/%s in %s project", r.kind, r.name, r.namespace)
}
return nil
} | logging | |||
function | openshift/openshift-tests-private | fa24ee55-7394-476e-8713-6468e9d344ff | clear | ['"fmt"', '"strings"'] | ['resource'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (r resource) clear(oc *exutil.CLI) error {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
errstring := fmt.Sprintf("%v", msg)
if strings.Contains(errstring, "NotFound") || strings.Contains(errstring, "the server doesn't have a resource type") {
return nil
}
return err
}
err = r.WaitUntilResourceIsGone(oc)
return err
} | logging | |||
function | openshift/openshift-tests-private | f7674f1f-a6b3-4736-aef6-533218c2addb | WaitForResourceToAppear | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['resource'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (r resource) WaitForResourceToAppear(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
e2e.Logf("wait %s %s ready ... ", r.kind, r.name)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", r.namespace, r.kind, r.name).Output()
if err != nil {
msg := fmt.Sprintf("%v", output)
if strings.Contains(msg, "NotFound") {
return false, nil
}
return false, err
}
e2e.Logf("found %s %s", r.kind, r.name)
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource %s/%s is not appear", r.kind, r.name))
} | logging | |||
function | openshift/openshift-tests-private | f0ca0ca5-19c6-4f61-a892-9d7f420a17b4 | applyFromTemplate | ['"fmt"', '"os"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['resource'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (r resource) applyFromTemplate(oc *exutil.CLI, parameters ...string) error {
parameters = append(parameters, "-n", r.namespace)
file, err := processTemplate(oc, parameters...)
defer os.Remove(file)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
output, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", r.namespace).Output()
if err != nil {
return fmt.Errorf("can't apply resource: %s", output)
}
r.WaitForResourceToAppear(oc)
return nil
} | logging | |||
function | openshift/openshift-tests-private | 13e12170-74b5-4c3e-b322-d3d01e53d4a6 | create | ['"io"', '"os"'] | ['resource', 'clusterlogforwarder'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (clf *clusterlogforwarder) create(oc *exutil.CLI, optionalParameters ...string) {
//parameters := []string{"-f", clf.templateFile, "--ignore-unknown-parameters=true", "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
parameters := []string{"-f", clf.templateFile, "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
if clf.secretName != "" {
parameters = append(parameters, "SECRET_NAME="+clf.secretName)
}
if clf.serviceAccountName != "" {
clf.createServiceAccount(oc)
parameters = append(parameters, "SERVICE_ACCOUNT_NAME="+clf.serviceAccountName)
}
if len(optionalParameters) > 0 {
parameters = append(parameters, optionalParameters...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", clf.namespace).Execute()
if err != nil {
e2e.Failf("error creating clusterlogforwarder: %v", err)
}
resource{"clusterlogforwarders.observability.openshift.io", clf.name, clf.namespace}.WaitForResourceToAppear(oc)
if clf.waitForPodReady {
clf.waitForCollectorPodsReady(oc)
}
if clf.namespace != cloNS && clf.namespace != loNS && clf.enableMonitoring {
enableClusterMonitoring(oc, clf.namespace)
}
} | logging | |||
function | openshift/openshift-tests-private | 434b210f-24d7-43f9-9cd1-f83756dcdb71 | createServiceAccount | ['"context"'] | ['clusterlogforwarder'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (clf *clusterlogforwarder) createServiceAccount(oc *exutil.CLI) {
_, err := oc.AdminKubeClient().CoreV1().ServiceAccounts(clf.namespace).Get(context.Background(), clf.serviceAccountName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = createServiceAccount(oc, clf.namespace, clf.serviceAccountName)
if err != nil {
e2e.Failf("can't create the serviceaccount: %v", err)
}
}
if clf.collectApplicationLogs {
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-application-logs")
o.Expect(err).NotTo(o.HaveOccurred())
}
if clf.collectInfrastructureLogs {
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
o.Expect(err).NotTo(o.HaveOccurred())
}
if clf.collectAuditLogs {
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-audit-logs")
o.Expect(err).NotTo(o.HaveOccurred())
}
} | logging | |||
function | openshift/openshift-tests-private | 2df135ad-d6a7-43b0-9230-e09834a92936 | createServiceAccount | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func createServiceAccount(oc *exutil.CLI, namespace, name string) error {
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("serviceaccount", name, "-n", namespace).Execute()
return err
} | logging | |||||
function | openshift/openshift-tests-private | 1f5f739b-3a4e-4493-b91e-f5ae1e2f435a | addClusterRoleToServiceAccount | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func addClusterRoleToServiceAccount(oc *exutil.CLI, namespace, serviceAccountName, clusterRole string) error {
return oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", clusterRole, fmt.Sprintf("system:serviceaccount:%s:%s", namespace, serviceAccountName)).Execute()
} | logging | ||||
function | openshift/openshift-tests-private | 6f8d0013-4b2f-4201-9ede-3cc1937d4da7 | removeClusterRoleFromServiceAccount | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func removeClusterRoleFromServiceAccount(oc *exutil.CLI, namespace, serviceAccountName, clusterRole string) error {
return oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", clusterRole, fmt.Sprintf("system:serviceaccount:%s:%s", namespace, serviceAccountName)).Execute()
} | logging | ||||
function | openshift/openshift-tests-private | c3ff34e6-48b6-4728-bc55-b503973817cb | update | ['"io"', '"os"'] | ['clusterlogforwarder'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (clf *clusterlogforwarder) update(oc *exutil.CLI, template string, patches ...string) {
var err error
if template != "" {
//parameters := []string{"-f", template, "--ignore-unknown-parameters=true", "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
parameters := []string{"-f", template, "-p", "NAME=" + clf.name, "NAMESPACE=" + clf.namespace}
if clf.secretName != "" {
parameters = append(parameters, "SECRET_NAME="+clf.secretName)
}
parameters = append(parameters, "SERVICE_ACCOUNT_NAME="+clf.serviceAccountName)
if len(patches) > 0 {
parameters = append(parameters, patches...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", clf.namespace).Execute()
} else {
parameters := []string{"clusterlogforwarders.observability.openshift.io/" + clf.name, "-n", clf.namespace, "-p"}
parameters = append(parameters, patches...)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(parameters...).Execute()
}
if err != nil {
e2e.Failf("error updating clusterlogforwarder: %v", err)
}
} | logging | |||
function | openshift/openshift-tests-private | da556fa5-eb5e-452e-b603-66561e63f88c | patch | ['"encoding/json"', '"io"'] | ['clusterlogforwarder'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (clf *clusterlogforwarder) patch(oc *exutil.CLI, patch_string string) (string, error) {
parameters := []string{"clusterlogforwarders.observability.openshift.io/" + clf.name, "-n", clf.namespace, "-p"}
parameters = append(parameters, patch_string, "--type=json")
return oc.AsAdmin().WithoutNamespace().Run("patch").Args(parameters...).Output()
} | logging | |||
function | openshift/openshift-tests-private | 2eea13ca-9f16-4e7e-84d2-2d32ef877b38 | delete | ['"fmt"', '"io"'] | ['resource', 'clusterlogforwarder'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (clf *clusterlogforwarder) delete(oc *exutil.CLI) {
err := resource{"clusterlogforwarders.observability.openshift.io", clf.name, clf.namespace}.clear(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("clusterlogforwarder/%s in project/%s is not deleted", clf.name, clf.namespace))
if len(clf.serviceAccountName) > 0 {
if clf.collectApplicationLogs {
removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-application-logs")
}
if clf.collectInfrastructureLogs {
removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
}
if clf.collectAuditLogs {
removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-audit-logs")
}
resource{"serviceaccount", clf.serviceAccountName, clf.namespace}.clear(oc)
}
err = resource{"daemonset", clf.name, clf.namespace}.WaitUntilResourceIsGone(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("daemonset/%s in project/%s is not deleted", clf.name, clf.namespace))
} | logging | |||
function | openshift/openshift-tests-private | 25a02186-e3ca-44d2-b209-2829467800a8 | waitForCollectorPodsReady | ['clusterlogforwarder'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (clf *clusterlogforwarder) waitForCollectorPodsReady(oc *exutil.CLI) {
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
} | logging | ||||
function | openshift/openshift-tests-private | 1b4094e2-bb7c-434b-b3f9-b63cab92651b | getCollectorNodeNames | ['"context"', '"io"'] | ['clusterlogforwarder'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (clf *clusterlogforwarder) getCollectorNodeNames(oc *exutil.CLI) ([]string, error) {
var nodes []string
pods, err := oc.AdminKubeClient().CoreV1().Pods(clf.namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=collector,app.kubernetes.io/instance=" + clf.name})
if err != nil {
// return early so a possibly nil pod list is never dereferenced
return nodes, err
}
for _, pod := range pods.Items {
nodes = append(nodes, pod.Spec.NodeName)
}
return nodes, nil
} | logging | |||
function | openshift/openshift-tests-private | adb320f5-1946-43d6-b659-9c050a259edc | create | ['"os"', '"cloud.google.com/go/logging"'] | ['resource', 'logFileMetricExporter'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (lfme *logFileMetricExporter) create(oc *exutil.CLI, optionalParameters ...string) {
if lfme.name == "" {
lfme.name = "instance"
}
if lfme.namespace == "" {
lfme.namespace = loggingNS
}
if lfme.template == "" {
lfme.template = exutil.FixturePath("testdata", "logging", "logfilemetricexporter", "lfme.yaml")
}
parameters := []string{"-f", lfme.template, "-p", "NAME=" + lfme.name, "NAMESPACE=" + lfme.namespace}
if len(optionalParameters) > 0 {
parameters = append(parameters, optionalParameters...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", lfme.namespace).Execute()
if err != nil {
e2e.Failf("error creating logfilemetricexporter: %v", err)
}
resource{"logfilemetricexporter", lfme.name, lfme.namespace}.WaitForResourceToAppear(oc)
if lfme.waitPodsReady {
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
}
} | logging | |||
function | openshift/openshift-tests-private | 565508d1-0adc-454a-806e-c3aa61ae0faa | delete | ['"fmt"'] | ['resource', 'logFileMetricExporter'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (lfme *logFileMetricExporter) delete(oc *exutil.CLI) {
err := resource{"logfilemetricexporter", lfme.name, lfme.namespace}.clear(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("logfilemetricexporter/%s in project/%s is not deleted", lfme.name, lfme.namespace))
err = resource{"daemonset", "logfilesmetricexporter", lfme.namespace}.WaitUntilResourceIsGone(oc)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("ds/logfilesmetricexporter in project/%s is not deleted", lfme.namespace))
} | logging | |||
function | openshift/openshift-tests-private | 0e17d7bf-37d4-4108-9e77-7ba063d10175 | deleteNamespace | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func deleteNamespace(oc *exutil.CLI, ns string) {
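// Deletes the namespace (a NotFound error is tolerated), then polls every 5s for up to
// 3 minutes until the namespace object is gone.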
err := oc.AdminKubeClient().CoreV1().Namespaces().Delete(context.Background(), ns, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
_, err = oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Namespace %s is not deleted in 3 minutes", ns))
} | logging | ||||
function | openshift/openshift-tests-private | cb436f4b-de2a-4624-b3a2-abe036729a45 | getStorageClassName | ['"context"', '"fmt"', '"io"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getStorageClassName(oc *exutil.CLI) (string, error) {
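// Returns the default storage class (annotated with storageclass.kubernetes.io/is-default-class=true)
// when one exists, otherwise falls back to the first storage class in the cluster.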
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
if err != nil {
return "", err
}
if len(scs.Items) == 0 {
return "", fmt.Errorf("there is no storageclass in the cluster")
}
for _, sc := range scs.Items {
if sc.ObjectMeta.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
return sc.Name, nil
}
}
return scs.Items[0].Name, nil
} | logging | ||||
function | openshift/openshift-tests-private | dd489ad4-929a-454b-b3fa-e9e98b6195c5 | assertResourceStatus | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func assertResourceStatus(oc *exutil.CLI, kind, name, namespace, jsonpath, exptdStatus string) {
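// Polls `oc get <kind> <name> -o jsonpath=<jsonpath>` every 10s for up to 3 minutes and
// fails the test unless the output eventually matches exptdStatus exactly.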
parameters := []string{kind, name, "-o", "jsonpath=" + jsonpath}
if namespace != "" {
parameters = append(parameters, "-n", namespace)
}
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(parameters...).Output()
if err != nil {
return false, err
}
if strings.Compare(status, exptdStatus) != 0 {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s/%s value for %s is not %s", kind, name, jsonpath, exptdStatus))
} | logging | ||||
function | openshift/openshift-tests-private | 2eb75d7f-4184-4b25-a3c3-b62f912416f6 | getRouteAddress | ['"context"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getRouteAddress(oc *exutil.CLI, ns, routeName string) string {
route, err := oc.AdminRouteClient().RouteV1().Routes(ns).Get(context.Background(), routeName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
return route.Spec.Host
} | logging | ||||
function | openshift/openshift-tests-private | a7245a45-4f79-4aa0-b756-2687a8a8fbbe | getSAToken | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getSAToken(oc *exutil.CLI, name, ns string) string {
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", name, "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return token
} | logging | |||||
function | openshift/openshift-tests-private | 5ba86d82-0b93-4243-aac5-b00115fa3be0 | enableClusterMonitoring | ['"io"', '"cloud.google.com/go/logging"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func enableClusterMonitoring(oc *exutil.CLI, namespace string) {
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", namespace, "openshift.io/cluster-monitoring=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
file := exutil.FixturePath("testdata", "logging", "prometheus-k8s-rbac.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-n", namespace, "-f", file).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | ||||
function | openshift/openshift-tests-private | 0b4bce4b-41a3-425a-84fa-f37ade985c8f | queryPrometheus | ['"encoding/json"', '"net/http"', '"net/url"', '"path"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func queryPrometheus(oc *exutil.CLI, token string, path string, query string, action string) (*prometheusQueryResult, error) {
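// Queries the in-cluster Prometheus through the openshift-monitoring route, authenticating
// with the supplied bearer token (or a freshly minted prometheus-k8s service-account token
// when token is empty), and unmarshals the JSON response into a prometheusQueryResult.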
var bearerToken string
var err error
if token == "" {
bearerToken = getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
} else {
bearerToken = token
}
address := "https://" + getRouteAddress(oc, "openshift-monitoring", "prometheus-k8s")
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add("Authorization", "Bearer "+bearerToken)
params := url.Values{}
if len(query) > 0 {
params.Add("query", query)
}
var p prometheusQueryResult
resp, err := doHTTPRequest(h, address, path, params.Encode(), action, true, 5, nil, 200)
if err != nil {
return nil, err
}
err = json.Unmarshal(resp, &p)
if err != nil {
return nil, err
}
return &p, nil
} | logging | ||||
function | openshift/openshift-tests-private | a2bb5e7b-eb7d-4815-9296-ea144f8d470c | getMetric | ['metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getMetric(oc *exutil.CLI, token, query string) ([]metric, error) {
res, err := queryPrometheus(oc, token, "/api/v1/query", query, "GET")
if err != nil {
return []metric{}, err
}
return res.Data.Result, nil
} | logging | ||||
function | openshift/openshift-tests-private | a062e09f-7c4f-4cf6-afef-c61474ee2c3d | checkMetric | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func checkMetric(oc *exutil.CLI, token, query string, timeInMinutes int) {
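// Polls the query every 5s for up to timeInMinutes and fails the test if no samples are
// ever returned.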
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
metrics, err := getMetric(oc, token, query)
if err != nil {
return false, err
}
if len(metrics) == 0 {
e2e.Logf("no metrics found by query: %s, try next time", query)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't find metrics by %s in %d minutes", query, timeInMinutes))
} | logging | ||||
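For illustration, a minimal sketch of how the metric helpers above could be chained inside a test, assuming `oc` is the suite's admin CLI handle; the PromQL expression and the 5-minute timeout are assumed values for the example, not taken from the source:

token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
// hypothetical query; any expression exposed by the logging stack would work here
checkMetric(oc, token, `log_logged_bytes_total{namespace="openshift-logging"}`, 5)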
function | openshift/openshift-tests-private | 053ccd32-4d16-45a7-8e85-6a0beca44261 | getAlert | ['metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getAlert(oc *exutil.CLI, token, alertSelector string) ([]alert, error) {
var al []alert
alerts, err := queryPrometheus(oc, token, "/api/v1/alerts", "", "GET")
if err != nil {
return al, err
}
for i := 0; i < len(alerts.Data.Alerts); i++ {
if alerts.Data.Alerts[i].Labels.AlertName == alertSelector {
al = append(al, alerts.Data.Alerts[i])
}
}
return al, nil
} | logging | ||||
function | openshift/openshift-tests-private | fe304f5a-8450-4539-97a1-8ba39538dd33 | checkAlert | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func checkAlert(oc *exutil.CLI, token, alertName, status string, timeInMinutes int) {
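// Note the containment direction: the poll succeeds when the desired status string contains
// the alert's current state, so status may list several acceptable states, e.g. "pending firing".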
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
alerts, err := getAlert(oc, token, alertName)
if err != nil {
return false, err
}
for _, alert := range alerts {
if strings.Contains(status, alert.State) {
return true, nil
}
}
e2e.Logf("Waiting for alert %s to be in state %s...", alertName, status)
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s alert is not %s in %d minutes", alertName, status, timeInMinutes))
} | logging | ||||
function | openshift/openshift-tests-private | 7823c410-1b68-4876-ac4f-df94114ae240 | WaitUntilPodsAreGone | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func WaitUntilPodsAreGone(oc *exutil.CLI, namespace string, labelSelector string) {
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "--selector="+labelSelector, "-n", namespace).Output()
if err != nil {
return false, err
}
errstring := fmt.Sprintf("%v", output)
if strings.Contains(errstring, "No resources found") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Error waiting for pods to be removed using label selector %s", labelSelector))
} | logging | ||||
function | openshift/openshift-tests-private | cc94e7d5-843c-4441-8cdc-4c1b0328999a | checkLogsFromRs | ['"context"', '"fmt"', '"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['resource'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func checkLogsFromRs(oc *exutil.CLI, kind, name, namespace, containerName, expected string) {
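// The expected string is interpreted as a regular expression and matched against the
// container logs, polling every 5s for up to 3 minutes.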
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(kind+`/`+name, "-n", namespace, "-c", containerName).Output()
if err != nil {
e2e.Logf("Can't get logs from resource, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.Match(expected, []byte(output)); !matched {
e2e.Logf("Can't find the expected string\n")
return false, nil
}
e2e.Logf("Check the logs succeed!!\n")
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s is not expected for %s", expected, name))
} | logging | |||
function | openshift/openshift-tests-private | 0d7e8ac9-4def-4818-b70f-00ef3023742d | getCurrentCSVFromPackage | ['"encoding/json"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getCurrentCSVFromPackage(oc *exutil.CLI, source, channel, packagemanifest string) string {
var currentCSV string
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", "-n", "openshift-marketplace", "-l", "catalog="+source, "-ojsonpath={.items}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
packMS := []PackageManifest{}
json.Unmarshal([]byte(output), &packMS)
for _, pm := range packMS {
if pm.Name == packagemanifest {
for _, channels := range pm.Status.Channels {
if channels.Name == channel {
currentCSV = channels.CurrentCSV
break
}
}
}
}
return currentCSV
} | logging | ||||
function | openshift/openshift-tests-private | 90fa5668-129b-4d33-8493-831b41aa0834 | checkNetworkType | ['"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func checkNetworkType(oc *exutil.CLI) string {
output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.defaultNetwork.type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.ToLower(output)
} | logging | ||||
function | openshift/openshift-tests-private | 63ae616d-6ddf-4230-ad02-77696c8a3b29 | getAppDomain | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func getAppDomain(oc *exutil.CLI) (string, error) {
subDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ingresses.config/cluster", "-ojsonpath={.spec.domain}").Output()
if err != nil {
return "", err
}
return subDomain, nil
} | logging | |||||
function | openshift/openshift-tests-private | 6f64f93c-e9ce-46ad-8d1d-3fd73c1ba881 | generateCerts | ['"os/exec"', '"cloud.google.com/go/logging"'] | ['certsConf'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (certs certsConf) generateCerts(oc *exutil.CLI, keysPath string) {
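// Shells out to the cert_generation.sh fixture with the target directory, namespace, server
// name and cluster apps domain, plus an optional passphrase for the client key.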
generateCertsSH := exutil.FixturePath("testdata", "logging", "external-log-stores", "cert_generation.sh")
domain, err := getAppDomain(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cmd := []string{generateCertsSH, keysPath, certs.namespace, certs.serverName, domain}
if certs.passPhrase != "" {
cmd = append(cmd, certs.passPhrase)
}
err = exec.Command("sh", cmd...).Run()
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||
function | openshift/openshift-tests-private | 9c4db0dd-2eaf-44ee-b508-b8079ed84468 | checkResource | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['resource'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func checkResource(oc *exutil.CLI, expect bool, compare bool, expectedContent string, args []string) {
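// expect=true asserts the expected content is present (or equal), expect=false asserts it is
// absent (or different); compare=true requires an exact match of the output, compare=false a
// substring match. Polls every 10s for up to 3 minutes.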
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
if err != nil {
if strings.Contains(output, "NotFound") {
return false, nil
}
return false, err
}
if compare {
res := strings.Compare(output, expectedContent)
if (res == 0 && expect) || (res != 0 && !expect) {
return true, nil
}
return false, nil
}
res := strings.Contains(output, expectedContent)
if (res && expect) || (!res && !expect) {
return true, nil
}
return false, nil
})
if expect {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The content doesn't match/contain %s", expectedContent))
} else {
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s still exists in the resource", expectedContent))
}
} | logging | |||
function | openshift/openshift-tests-private | 7bd5ccf0-f179-4674-a5b5-544d73a18169 | createPipelineSecret | ['"crypto/tls"'] | ['resource', 'rsyslog'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (r rsyslog) createPipelineSecret(oc *exutil.CLI, keysPath string) {
secret := resource{"secret", r.secretName, r.loggingNS}
cmd := []string{"secret", "generic", secret.name, "-n", secret.namespace, "--from-file=ca-bundle.crt=" + keysPath + "/ca.crt"}
if r.clientKeyPassphrase != "" {
cmd = append(cmd, "--from-file=tls.key="+keysPath+"/client.key", "--from-file=tls.crt="+keysPath+"/client.crt", "--from-literal=passphrase="+r.clientKeyPassphrase)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmd...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
secret.WaitForResourceToAppear(oc)
} | logging | |||
function | openshift/openshift-tests-private | 2043e540-b65f-4eb6-bc8c-d3bbdf36cc89 | deploy | ['"crypto/tls"', '"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"cloud.google.com/go/logging"'] | ['resource', 'certsConf', 'rsyslog'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (r rsyslog) deploy(oc *exutil.CLI) {
// create SA
sa := resource{"serviceaccount", r.serverName, r.namespace}
err := oc.WithoutNamespace().Run("create").Args("serviceaccount", sa.name, "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sa.WaitForResourceToAppear(oc)
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:%s", r.namespace, r.serverName), "-n", r.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
filePath := []string{"testdata", "logging", "external-log-stores", "rsyslog"}
// create secrets if needed
if r.tls {
o.Expect(r.secretName).NotTo(o.BeEmpty())
// create a temporary directory
baseDir := exutil.FixturePath("testdata", "logging")
keysPath := filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err = os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{r.serverName, r.namespace, r.clientKeyPassphrase}
cert.generateCerts(oc, keysPath)
// create pipelinesecret
r.createPipelineSecret(oc, keysPath)
// create secret for rsyslog server
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", r.serverName, "-n", r.namespace, "--from-file=server.key="+keysPath+"/server.key", "--from-file=server.crt="+keysPath+"/server.crt", "--from-file=ca_bundle.crt="+keysPath+"/ca.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
filePath = append(filePath, "secure")
} else {
filePath = append(filePath, "insecure")
}
// create configmap/deployment/svc
cm := resource{"configmap", r.serverName, r.namespace}
cmFilePath := append(filePath, "configmap.yaml")
cmFile := exutil.FixturePath(cmFilePath...)
err = cm.applyFromTemplate(oc, "-f", cmFile, "-n", r.namespace, "-p", "NAMESPACE="+r.namespace, "-p", "NAME="+r.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
deploy := resource{"deployment", r.serverName, r.namespace}
deployFilePath := append(filePath, "deployment.yaml")
deployFile := exutil.FixturePath(deployFilePath...)
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", r.namespace, "-p", "NAMESPACE="+r.namespace, "-p", "NAME="+r.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, r.namespace, r.serverName)
svc := resource{"svc", r.serverName, r.namespace}
svcFilePath := append(filePath, "svc.yaml")
svcFile := exutil.FixturePath(svcFilePath...)
err = svc.applyFromTemplate(oc, "-f", svcFile, "-n", r.namespace, "-p", "NAMESPACE="+r.namespace, "-p", "NAME="+r.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||
function | openshift/openshift-tests-private | f192f726-232e-46a2-8e3a-d10cadf046bb | remove | ['"crypto/tls"'] | ['resource', 'rsyslog'] | github.com/openshift/openshift-tests-private/test/extended/logging/utils.go | func (r rsyslog) remove(oc *exutil.CLI) {
resource{"serviceaccount", r.serverName, r.namespace}.clear(oc)
if r.tls {
resource{"secret", r.serverName, r.namespace}.clear(oc)
resource{"secret", r.secretName, r.loggingNS}.clear(oc)
}
resource{"configmap", r.serverName, r.namespace}.clear(oc)
resource{"deployment", r.serverName, r.namespace}.clear(oc)
resource{"svc", r.serverName, r.namespace}.clear(oc)
} | logging |